LLVM: lib/Support/rpmalloc/rpmalloc.c Source File

#include "rpmalloc.h"

#if defined(__clang__)
#pragma clang diagnostic ignored "-Wunused-macros"
#pragma clang diagnostic ignored "-Wunused-function"
#if __has_warning("-Wreserved-identifier")
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif
#if __has_warning("-Wstatic-in-inline")
#pragma clang diagnostic ignored "-Wstatic-in-inline"
#endif
#elif defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-macros"
#pragma GCC diagnostic ignored "-Wunused-function"
#endif

#if !defined(__has_builtin)
#define __has_builtin(b) 0
#endif

#if defined(__GNUC__) || defined(__clang__)

#if __has_builtin(__builtin_memcpy_inline)
#define _rpmalloc_memcpy_const(x, y, s) __builtin_memcpy_inline(x, y, s)
#else
#define _rpmalloc_memcpy_const(x, y, s)                                       \
  do {                                                                        \
    _Static_assert(__builtin_choose_expr(__builtin_constant_p(s), 1, 0),      \
                   "len must be a constant integer");                         \
    memcpy(x, y, s);                                                          \
  } while (0)
#endif

#if __has_builtin(__builtin_memset_inline)
#define _rpmalloc_memset_const(x, y, s) __builtin_memset_inline(x, y, s)
#else
#define _rpmalloc_memset_const(x, y, s)                                       \
  do {                                                                        \
    _Static_assert(__builtin_choose_expr(__builtin_constant_p(s), 1, 0),      \
                   "len must be a constant integer");                         \
    memset(x, y, s);                                                          \
  } while (0)
#endif
#else
#define _rpmalloc_memcpy_const(x, y, s) memcpy(x, y, s)
#define _rpmalloc_memset_const(x, y, s) memset(x, y, s)
#endif
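
/* Illustrative sketch (not part of the original file): the _rpmalloc_*_const
 * macros require a compile-time-constant length so the copy can be fully
 * inlined. The demo type and function below are hypothetical and are excluded
 * from the build. */
#if 0
typedef struct {
  unsigned char magic[4];
  unsigned int version;
} demo_header_t;

static void demo_write_header(void *dst) {
  static const demo_header_t tmpl = {{'R', 'P', 'M', 'A'}, 1u};
  /* OK: sizeof(tmpl) is a constant expression, so the _Static_assert in the
   * fallback macro passes and the copy can be expanded inline. A runtime-sized
   * copy would have to use plain memcpy instead. */
  _rpmalloc_memcpy_const(dst, &tmpl, sizeof(tmpl));
}
#endif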


#if __has_builtin(__builtin_assume)
#define rpmalloc_assume(cond) __builtin_assume(cond)
#elif defined(__GNUC__)
#define rpmalloc_assume(cond)                                                 \
  do {                                                                        \
    if (!__builtin_expect(cond, 0))                                           \
      __builtin_unreachable();                                                \
  } while (0)
#elif defined(_MSC_VER)
#define rpmalloc_assume(cond) __assume(cond)
#else
#define rpmalloc_assume(cond) 0
#endif
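
/* Illustrative sketch (not part of the original file): rpmalloc_assume tells
 * the optimizer a condition always holds so it can drop redundant checks.
 * The demo function is hypothetical and excluded from the build. */
#if 0
static int demo_first_byte(void *block) {
  rpmalloc_assume(block != 0);
  return *(const unsigned char *)block;
}
#endif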


#ifndef HEAP_ARRAY_SIZE
#define HEAP_ARRAY_SIZE 47
#endif
#ifndef ENABLE_THREAD_CACHE
#define ENABLE_THREAD_CACHE 1
#endif
#ifndef ENABLE_GLOBAL_CACHE
#define ENABLE_GLOBAL_CACHE 1
#endif
#ifndef ENABLE_VALIDATE_ARGS
#define ENABLE_VALIDATE_ARGS 0
#endif
#ifndef ENABLE_STATISTICS
#define ENABLE_STATISTICS 0
#endif
#ifndef ENABLE_ASSERTS
#define ENABLE_ASSERTS 0
#endif
#ifndef ENABLE_OVERRIDE
#define ENABLE_OVERRIDE 0
#endif
#ifndef ENABLE_PRELOAD
#define ENABLE_PRELOAD 0
#endif
#ifndef DISABLE_UNMAP
#define DISABLE_UNMAP 0
#endif
#ifndef ENABLE_UNLIMITED_CACHE
#define ENABLE_UNLIMITED_CACHE 0
#endif
#ifndef ENABLE_ADAPTIVE_THREAD_CACHE
#define ENABLE_ADAPTIVE_THREAD_CACHE 0
#endif
#ifndef DEFAULT_SPAN_MAP_COUNT
#define DEFAULT_SPAN_MAP_COUNT 64
#endif
#ifndef GLOBAL_CACHE_MULTIPLIER
#define GLOBAL_CACHE_MULTIPLIER 8
#endif


#if DISABLE_UNMAP && !ENABLE_GLOBAL_CACHE
#error Must use global cache if unmap is disabled
#endif

#if DISABLE_UNMAP
#undef ENABLE_UNLIMITED_CACHE
#define ENABLE_UNLIMITED_CACHE 1
#endif

#if !ENABLE_GLOBAL_CACHE
#undef ENABLE_UNLIMITED_CACHE
#define ENABLE_UNLIMITED_CACHE 0
#endif

#if !ENABLE_THREAD_CACHE
#undef ENABLE_ADAPTIVE_THREAD_CACHE
#define ENABLE_ADAPTIVE_THREAD_CACHE 0
#endif

#if defined(_WIN32) || defined(__WIN32__) || defined(_WIN64)
#define PLATFORM_WINDOWS 1
#define PLATFORM_POSIX 0
#else
#define PLATFORM_WINDOWS 0
#define PLATFORM_POSIX 1
#endif


#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(disable : 5105)
#ifndef FORCEINLINE
#define FORCEINLINE inline __forceinline
#endif
#define _Static_assert static_assert
#else
#ifndef FORCEINLINE
#define FORCEINLINE inline __attribute__((__always_inline__))
#endif
#endif
#if PLATFORM_WINDOWS
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#if ENABLE_VALIDATE_ARGS
#include <intsafe.h>
#endif
#else
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#if defined(__linux__) || defined(__ANDROID__)
#include <sys/prctl.h>
#if !defined(PR_SET_VMA)
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
#endif
#endif
#if defined(__APPLE__)
#include <TargetConditionals.h>
#if !TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>
#endif
#include <pthread.h>
#endif
#if defined(__HAIKU__) || defined(__TINYC__)
#include <pthread.h>
#endif
#endif

#include <errno.h>
#include <stdint.h>
#include <string.h>

#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)
#include <fibersapi.h>
static DWORD fls_key;
#endif

#if PLATFORM_POSIX
#include <sched.h>
#include <sys/mman.h>
#ifdef __FreeBSD__
#include <sys/sysctl.h>
#define MAP_HUGETLB MAP_ALIGNED_SUPER
#ifndef PROT_MAX
#define PROT_MAX(f) 0
#endif
#else
#define PROT_MAX(f) 0
#endif
#ifdef __sun
extern int madvise(caddr_t, size_t, int);
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#endif
#include <errno.h>

#if ENABLE_ASSERTS
#undef NDEBUG
#if defined(_MSC_VER) && !defined(_DEBUG)
#define _DEBUG
#endif
#include <assert.h>
#define RPMALLOC_TOSTRING_M(x) #x
#define RPMALLOC_TOSTRING(x) RPMALLOC_TOSTRING_M(x)
#define rpmalloc_assert(truth, message)                                       \
  do {                                                                        \
    if (!(truth)) {                                                           \
      if (_memory_config.error_callback) {                                    \
        _memory_config.error_callback(message " (" RPMALLOC_TOSTRING(         \
            truth) ") at " __FILE__ ":" RPMALLOC_TOSTRING(__LINE__));         \
      } else {                                                                \
        assert((truth) && message);                                           \
      }                                                                       \
    }                                                                         \
  } while (0)
#else
#define rpmalloc_assert(truth, message)                                       \
  do {                                                                        \
  } while (0)
#endif
#if ENABLE_STATISTICS
#include <stdio.h>
#endif


#if defined(_MSC_VER) && !defined(__clang__)

typedef volatile long atomic32_t;
typedef volatile long long atomic64_t;
typedef volatile void *atomicptr_t;

static FORCEINLINE int32_t atomic_load32(atomic32_t *src) { return *src; }
static FORCEINLINE void atomic_store32(atomic32_t *dst, int32_t val) {
  *dst = val;
}
static FORCEINLINE int32_t atomic_incr32(atomic32_t *val) {
  return (int32_t)InterlockedIncrement(val);
}
static FORCEINLINE int32_t atomic_decr32(atomic32_t *val) {
  return (int32_t)InterlockedDecrement(val);
}
static FORCEINLINE int32_t atomic_add32(atomic32_t *val, int32_t add) {
  return (int32_t)InterlockedExchangeAdd(val, add) + add;
}
static FORCEINLINE int atomic_cas32_acquire(atomic32_t *dst, int32_t val,
                                            int32_t ref) {
  return (InterlockedCompareExchange(dst, val, ref) == ref) ? 1 : 0;
}
static FORCEINLINE void atomic_store32_release(atomic32_t *dst, int32_t val) {
  *dst = val;
}
static FORCEINLINE int64_t atomic_load64(atomic64_t *src) { return *src; }
static FORCEINLINE int64_t atomic_add64(atomic64_t *val, int64_t add) {
  return (int64_t)InterlockedExchangeAdd64(val, add) + add;
}
static FORCEINLINE void *atomic_load_ptr(atomicptr_t *src) {
  return (void *)*src;
}
static FORCEINLINE void atomic_store_ptr(atomicptr_t *dst, void *val) {
  *dst = val;
}
static FORCEINLINE void atomic_store_ptr_release(atomicptr_t *dst, void *val) {
  *dst = val;
}
static FORCEINLINE void *atomic_exchange_ptr_acquire(atomicptr_t *dst,
                                                     void *val) {
  return (void *)InterlockedExchangePointer((void *volatile *)dst, val);
}
static FORCEINLINE int atomic_cas_ptr(atomicptr_t *dst, void *val, void *ref) {
  return (InterlockedCompareExchangePointer((void *volatile *)dst, val, ref) ==
          ref)
             ? 1
             : 0;
}

#define EXPECTED(x) (x)
#define UNEXPECTED(x) (x)

#else

#include <stdatomic.h>

typedef volatile _Atomic(int32_t) atomic32_t;
typedef volatile _Atomic(int64_t) atomic64_t;
typedef volatile _Atomic(void *) atomicptr_t;

static FORCEINLINE int32_t atomic_load32(atomic32_t *src) {
  return atomic_load_explicit(src, memory_order_relaxed);
}
static FORCEINLINE void atomic_store32(atomic32_t *dst, int32_t val) {
  atomic_store_explicit(dst, val, memory_order_relaxed);
}
static FORCEINLINE int32_t atomic_incr32(atomic32_t *val) {
  return atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
}
static FORCEINLINE int32_t atomic_decr32(atomic32_t *val) {
  return atomic_fetch_add_explicit(val, -1, memory_order_relaxed) - 1;
}
static FORCEINLINE int32_t atomic_add32(atomic32_t *val, int32_t add) {
  return atomic_fetch_add_explicit(val, add, memory_order_relaxed) + add;
}
static FORCEINLINE int atomic_cas32_acquire(atomic32_t *dst, int32_t val,
                                            int32_t ref) {
  return atomic_compare_exchange_weak_explicit(
      dst, &ref, val, memory_order_acquire, memory_order_relaxed);
}
static FORCEINLINE void atomic_store32_release(atomic32_t *dst, int32_t val) {
  atomic_store_explicit(dst, val, memory_order_release);
}
static FORCEINLINE int64_t atomic_load64(atomic64_t *val) {
  return atomic_load_explicit(val, memory_order_relaxed);
}
static FORCEINLINE int64_t atomic_add64(atomic64_t *val, int64_t add) {
  return atomic_fetch_add_explicit(val, add, memory_order_relaxed) + add;
}
static FORCEINLINE void *atomic_load_ptr(atomicptr_t *src) {
  return atomic_load_explicit(src, memory_order_relaxed);
}
static FORCEINLINE void atomic_store_ptr(atomicptr_t *dst, void *val) {
  atomic_store_explicit(dst, val, memory_order_relaxed);
}
static FORCEINLINE void atomic_store_ptr_release(atomicptr_t *dst, void *val) {
  atomic_store_explicit(dst, val, memory_order_release);
}
static FORCEINLINE void *atomic_exchange_ptr_acquire(atomicptr_t *dst,
                                                     void *val) {
  return atomic_exchange_explicit(dst, val, memory_order_acquire);
}
static FORCEINLINE int atomic_cas_ptr(atomicptr_t *dst, void *val, void *ref) {
  return atomic_compare_exchange_weak_explicit(
      dst, &ref, val, memory_order_relaxed, memory_order_relaxed);
}

#define EXPECTED(x) __builtin_expect((x), 1)
#define UNEXPECTED(x) __builtin_expect((x), 0)

#endif
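
/* Illustrative sketch (not part of the original file): the compare-and-swap
 * wrappers above are typically used in a retry loop, for example to push a
 * node onto an intrusive lock-free stack. The demo type and function are
 * hypothetical and excluded from the build. */
#if 0
typedef struct demo_node_t {
  struct demo_node_t *next;
} demo_node_t;

static void demo_push(atomicptr_t *head, demo_node_t *node) {
  void *old_head;
  do {
    old_head = atomic_load_ptr(head);
    node->next = (demo_node_t *)old_head;
    /* atomic_cas_ptr fails and retries if another thread raced the update. */
  } while (!atomic_cas_ptr(head, node, old_head));
}
#endif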


#if ENABLE_STATISTICS
#define _rpmalloc_stat_inc(counter) atomic_incr32(counter)
#define _rpmalloc_stat_dec(counter) atomic_decr32(counter)
#define _rpmalloc_stat_add(counter, value)                                    \
  atomic_add32(counter, (int32_t)(value))
#define _rpmalloc_stat_add64(counter, value)                                  \
  atomic_add64(counter, (int64_t)(value))
#define _rpmalloc_stat_add_peak(counter, value, peak)                         \
  do {                                                                        \
    int32_t _cur_count = atomic_add32(counter, (int32_t)(value));             \
    if (_cur_count > (peak))                                                  \
      peak = _cur_count;                                                      \
  } while (0)
#define _rpmalloc_stat_sub(counter, value)                                    \
  atomic_add32(counter, -(int32_t)(value))
#define _rpmalloc_stat_inc_alloc(heap, class_idx)                             \
  do {                                                                        \
    int32_t alloc_current =                                                   \
        atomic_incr32(&heap->size_class_use[class_idx].alloc_current);        \
    if (alloc_current > heap->size_class_use[class_idx].alloc_peak)           \
      heap->size_class_use[class_idx].alloc_peak = alloc_current;             \
    atomic_incr32(&heap->size_class_use[class_idx].alloc_total);              \
  } while (0)
#define _rpmalloc_stat_inc_free(heap, class_idx)                              \
  do {                                                                        \
    atomic_decr32(&heap->size_class_use[class_idx].alloc_current);            \
    atomic_incr32(&heap->size_class_use[class_idx].free_total);               \
  } while (0)
#else
#define _rpmalloc_stat_inc(counter)                                           \
  do {                                                                        \
  } while (0)
#define _rpmalloc_stat_dec(counter)                                           \
  do {                                                                        \
  } while (0)
#define _rpmalloc_stat_add(counter, value)                                    \
  do {                                                                        \
  } while (0)
#define _rpmalloc_stat_add64(counter, value)                                  \
  do {                                                                        \
  } while (0)
#define _rpmalloc_stat_add_peak(counter, value, peak)                         \
  do {                                                                        \
  } while (0)
#define _rpmalloc_stat_sub(counter, value)                                    \
  do {                                                                        \
  } while (0)
#define _rpmalloc_stat_inc_alloc(heap, class_idx)                             \
  do {                                                                        \
  } while (0)
#define _rpmalloc_stat_inc_free(heap, class_idx)                              \
  do {                                                                        \
  } while (0)
#endif


#define SMALL_GRANULARITY 16
#define SMALL_GRANULARITY_SHIFT 4
#define SMALL_CLASS_COUNT 65
#define SMALL_SIZE_LIMIT (SMALL_GRANULARITY * (SMALL_CLASS_COUNT - 1))
#define MEDIUM_GRANULARITY 512
#define MEDIUM_GRANULARITY_SHIFT 9
#define MEDIUM_CLASS_COUNT 61
#define SIZE_CLASS_COUNT (SMALL_CLASS_COUNT + MEDIUM_CLASS_COUNT)
#define LARGE_CLASS_COUNT 63
#define MEDIUM_SIZE_LIMIT                                                     \
  (SMALL_SIZE_LIMIT + (MEDIUM_GRANULARITY * MEDIUM_CLASS_COUNT))
#define LARGE_SIZE_LIMIT                                                      \
  ((LARGE_CLASS_COUNT * _memory_span_size) - SPAN_HEADER_SIZE)
#define SPAN_HEADER_SIZE 128
#define MAX_THREAD_SPAN_CACHE 400
#define THREAD_SPAN_CACHE_TRANSFER 64
#define MAX_THREAD_SPAN_LARGE_CACHE 100
#define THREAD_SPAN_LARGE_CACHE_TRANSFER 6

489 "Small granularity must be power of two");

491 "Span header size must be power of two");


#if ENABLE_VALIDATE_ARGS
#undef MAX_ALLOC_SIZE
#define MAX_ALLOC_SIZE (((size_t)-1) - _memory_span_size)
#endif

#define pointer_offset(ptr, ofs) (void *)((char *)(ptr) + (ptrdiff_t)(ofs))
#define pointer_diff(first, second)                                           \
  (ptrdiff_t)((const char *)(first) - (const char *)(second))

#define INVALID_POINTER ((void *)((uintptr_t)-1))

#define SIZE_CLASS_LARGE SIZE_CLASS_COUNT
#define SIZE_CLASS_HUGE ((uint32_t)-1)
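
/* Illustrative sketch (not part of the original file, and not the allocator's
 * actual lookup routine): it only demonstrates the rounding implied by the
 * constants above. Small requests are rounded up to 16-byte steps, medium
 * requests to 512-byte steps. The demo functions are hypothetical and
 * excluded from the build. */
#if 0
static size_t demo_small_class_block_size(size_t size) {
  /* Round a small request up to a multiple of SMALL_GRANULARITY (16). */
  return (size + (SMALL_GRANULARITY - 1)) & ~((size_t)SMALL_GRANULARITY - 1);
}

static size_t demo_medium_class_block_size(size_t size) {
  /* Requests above SMALL_SIZE_LIMIT and up to MEDIUM_SIZE_LIMIT are rounded
   * up to a multiple of MEDIUM_GRANULARITY (512). */
  return (size + (MEDIUM_GRANULARITY - 1)) & ~((size_t)MEDIUM_GRANULARITY - 1);
}
#endif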


#define SPAN_FLAG_MASTER 1U
#define SPAN_FLAG_SUBSPAN 2U
#define SPAN_FLAG_ALIGNED_BLOCKS 4U
#define SPAN_FLAG_UNMAPPED_MASTER 8U

#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS
struct span_use_t {
  atomic32_t current;
  atomic32_t high;
#if ENABLE_STATISTICS
  atomic32_t spans_deferred;
  atomic32_t spans_to_global;
  atomic32_t spans_from_global;
  atomic32_t spans_to_cache;
  atomic32_t spans_from_cache;
  atomic32_t spans_to_reserved;
  atomic32_t spans_from_reserved;
  atomic32_t spans_map_calls;
#endif
};
typedef struct span_use_t span_use_t;
#endif


#if ENABLE_STATISTICS
struct size_class_use_t {
  atomic32_t alloc_current;
  int32_t alloc_peak;
  atomic32_t alloc_total;
  atomic32_t free_total;
  atomic32_t spans_current;
  int32_t spans_peak;
  atomic32_t spans_to_cache;
  atomic32_t spans_from_cache;
  atomic32_t spans_from_reserved;
  atomic32_t spans_map_calls;
  int32_t unused;
};
typedef struct size_class_use_t size_class_use_t;
#endif

590

591

592

593

594

595

596

597

598

599

600

601

602

604

606

608

610

612

614

616

618

620

622

624

626

628

630

632

634

636

638};

640

644};

646

650};

652

654

656

657

659

661};

663

664

665

667

669

671#if ENABLE_THREAD_CACHE

672

674#endif

675

677

679

681

683

685

687

689

691

693

695

697#if ENABLE_THREAD_CACHE

698

700#endif

701#if RPMALLOC_FIRST_CLASS_HEAPS

702

703

704

706

707 span_t *large_huge_span;

708#endif

709#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS

710

712#endif

713#if ENABLE_STATISTICS

714

716

717 atomic64_t thread_to_global;

718

719 atomic64_t global_to_thread;

720#endif

721};

722

723

725

727

729

731};

733

735

737

739#if ENABLE_STATISTICS

740

741 size_t insert_count;

742

743 size_t extract_count;

744#endif

745

747

749};

750

751

752

753

754

755

756

757

#define _memory_default_span_size (64 * 1024)
#define _memory_default_span_size_shift 16
#define _memory_default_span_mask (~((uintptr_t)(_memory_span_size - 1)))

761

762

764

766

768

770

772

#if RPMALLOC_CONFIGURABLE
static size_t _memory_span_size;
static size_t _memory_span_size_shift;
static uintptr_t _memory_span_mask;
#else
#define _memory_span_size _memory_default_span_size
#define _memory_span_size_shift _memory_default_span_size_shift
#define _memory_span_mask _memory_default_span_mask
#endif
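
/* Illustrative sketch (not part of the original file): the span mask defined
 * above maps any block pointer back to the start of its span-size aligned
 * span, so per-block metadata can be located without a side table. The demo
 * function is hypothetical and excluded from the build. */
#if 0
static void *demo_span_start_for_pointer(void *p) {
  return (void *)((uintptr_t)p & _memory_span_mask);
}
#endif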

787

789

791

793

795

797

799#if ENABLE_GLOBAL_CACHE

800

802#endif

803

805

807

809

811

813

815#if RPMALLOC_FIRST_CLASS_HEAPS

816

817static heap_t *_memory_first_class_orphan_heaps;

818#endif

819#if ENABLE_STATISTICS

820

821static atomic64_t _allocation_counter;

822

823static atomic64_t _deallocation_counter;

824

825static atomic32_t _memory_active_heaps;

826

827static atomic32_t _mapped_pages;

828

829static int32_t _mapped_pages_peak;

830

831static atomic32_t _master_spans;

832

833static atomic32_t _unmapped_master_spans;

834

835static atomic32_t _mapped_total;

836

837static atomic32_t _unmapped_total;

838

839static atomic32_t _mapped_pages_os;

840

841static atomic32_t _huge_pages_current;

842

843static int32_t _huge_pages_peak;

844#endif

845

846

847

848

849

850

851

852

853#if ((defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD) || \

854 defined(__TINYC__)

855static pthread_key_t _memory_thread_heap;

856#else

857#ifdef _MSC_VER

858#define _Thread_local __declspec(thread)

859#define TLS_MODEL

860#else

861#ifndef __HAIKU__

862#define TLS_MODEL __attribute__((tls_model("initial-exec")))

863#else

864#define TLS_MODEL

865#endif

866#if !defined(__clang__) && defined(__GNUC__)

867#define _Thread_local __thread

868#endif

869#endif

871#endif

872

874#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD

875 return pthread_getspecific(_memory_thread_heap);

876#else

877 return _memory_thread_heap;

878#endif

879}

880

881

884#if ENABLE_PRELOAD

886 return heap;

889#else

890 return heap;

891#endif

892}

893

894

896#if defined(_WIN32)

897 return (uintptr_t)((void *)NtCurrentTeb());

898#elif (defined(__GNUC__) || defined(__clang__)) && !defined(__CYGWIN__)

899 uintptr_t tid;

900#if defined(__i386__)

901 __asm__("movl %%gs:0, %0" : "=r"(tid) : :);

902#elif defined(__x86_64__)

903#if defined(__MACH__)

904 __asm__("movq %%gs:0, %0" : "=r"(tid) : :);

905#else

906 __asm__("movq %%fs:0, %0" : "=r"(tid) : :);

907#endif

908#elif defined(__arm__)

909 __asm__ volatile("mrc p15, 0, %0, c13, c0, 3" : "=r"(tid));

910#elif defined(__aarch64__)

911#if defined(__MACH__)

912

913 __asm__ volatile("mrs %0, tpidrro_el0" : "=r"(tid));

914#else

915 __asm__ volatile("mrs %0, tpidr_el0" : "=r"(tid));

916#endif

917#else

918#error This platform needs implementation of get_thread_id()

919#endif

920 return tid;

921#else

922#error This platform needs implementation of get_thread_id()

923#endif

924}

925

926

928#if ((defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD) || \

929 defined(__TINYC__)

930 pthread_setspecific(_memory_thread_heap, heap);

931#else

932 _memory_thread_heap = heap;

933#endif

934 if (heap)

936}

937

938

940

943}

944

946#if defined(_MSC_VER)

947#if defined(_M_ARM64)

948 __yield();

949#else

950 _mm_pause();

951#endif

952#elif defined(__x86_64__) || defined(__i386__)

953 __asm__ volatile("pause" ::: "memory");

954#elif defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH >= 7)

955 __asm__ volatile("yield" ::: "memory");

956#elif defined(__powerpc__) || defined(__powerpc64__)

957

958 __asm__ volatile("or 27,27,27");

959#elif defined(__sparc__)

960 __asm__ volatile("rd %ccr, %g0 \n\trd %ccr, %g0 \n\trd %ccr, %g0");

961#else

962 struct timespec ts = {0};

963 nanosleep(&ts, 0);

964#endif

965}

966

967#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)

968static void NTAPI _rpmalloc_thread_destructor(void *value) {

969#if ENABLE_OVERRIDE

970

971

973 return;

974#endif

977}

978#endif

979

980

981

982

983

984

985

987#if defined(__linux__) || defined(__ANDROID__)

990 if (address == MAP_FAILED || name)

991 return;

992

993

994 (void)prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (uintptr_t)address, size,

995 (uintptr_t)name);

996#else

997 (void)sizeof(size);

998 (void)sizeof(address);

999#endif

1000}

1001

1002

1003

1004

1005

1010 if (EXPECTED(address != 0)) {

1012 _mapped_pages_peak);

1014 }

1015 return address;

1016}

1017

1018

1019

1020

1021

1022

1023

1025 size_t release) {

1026 rpmalloc_assert(!release || (release >= size), "Invalid unmap size");

1028 "Invalid unmap size");

1029 if (release) {

1033 }

1035}

1036

1037

1039

1040

1044 : 0;

1046#if PLATFORM_WINDOWS

1047

1048

1049 void *ptr = VirtualAlloc(0, size + padding,

1051 MEM_RESERVE | MEM_COMMIT,

1052 PAGE_READWRITE);

1053 if (!ptr) {

1057 } else {

1058 rpmalloc_assert(ptr, "Failed to map virtual memory block");

1059 }

1060 return 0;

1061 }

1062#else

1063 int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED;

1064#if defined(__APPLE__) && !TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR

1065 int fd = (int)VM_MAKE_TAG(240U);

1067 fd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;

1068 void *ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, fd, 0);

1069#elif defined(MAP_HUGETLB)

1070 void *ptr = mmap(0, size + padding,

1071 PROT_READ | PROT_WRITE | PROT_MAX(PROT_READ | PROT_WRITE),

1073#if defined(MADV_HUGEPAGE)

1074

1075

1076

1078 ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);

1079 if (ptr && ptr != MAP_FAILED) {

1080 int prm = madvise(ptr, size + padding, MADV_HUGEPAGE);

1081 (void)prm;

1082 rpmalloc_assert((prm == 0), "Failed to promote the page to THP");

1083 }

1084 }

1085#endif

1087#elif defined(MAP_ALIGNED)

1088 const size_t align =

1089 (sizeof(size_t) * 8) - (size_t)(__builtin_clzl(size - 1));

1090 void *ptr =

1091 mmap(0, size + padding, PROT_READ | PROT_WRITE,

1093#elif defined(MAP_ALIGN)

1095 void *ptr = mmap(base, size + padding, PROT_READ | PROT_WRITE,

1097#else

1098 void *ptr = mmap(0, size + padding, PROT_READ | PROT_WRITE, flags, -1, 0);

1099#endif

1100 if ((ptr == MAP_FAILED) || !ptr) {

1104 } else if (errno != ENOMEM) {

1106 "Failed to map virtual memory block");

1107 }

1108 return 0;

1109 }

1110#endif

1113 if (padding) {

1114 size_t final_padding = padding - ((uintptr_t)ptr & ~_memory_span_mask);

1116 "Internal failure in padding");

1117 rpmalloc_assert(final_padding <= padding, "Internal failure in padding");

1118 rpmalloc_assert(!(final_padding % 8), "Internal failure in padding");

1120 *offset = final_padding >> 3;

1121 }

1124 "Internal failure in padding");

1125 return ptr;

1126}

1127

1128

1130 size_t release) {

1131 rpmalloc_assert(release || (offset == 0), "Invalid unmap size");

1133 "Invalid unmap size");

1135 if (release && offset) {

1136 offset <<= 3;

1137 address = pointer_offset(address, -(int32_t)offset);

1140

1142 }

1143 }

1144#if !DISABLE_UNMAP

1145#if PLATFORM_WINDOWS

1146 if (!VirtualFree(address, release ? 0 : size,

1147 release ? MEM_RELEASE : MEM_DECOMMIT)) {

1148 rpmalloc_assert(0, "Failed to unmap virtual memory block");

1149 }

1150#else

1151 if (release) {

1152 if (munmap(address, release)) {

1153 rpmalloc_assert(0, "Failed to unmap virtual memory block");

1154 }

1155 } else {

1156#if defined(MADV_FREE_REUSABLE)

1157 int ret;

1158 while ((ret = madvise(address, size, MADV_FREE_REUSABLE)) == -1 &&

1159 (errno == EAGAIN))

1160 errno = 0;

1161 if ((ret == -1) && (errno != 0)) {

1162#elif defined(MADV_DONTNEED)

1163 if (madvise(address, size, MADV_DONTNEED)) {

1164#elif defined(MADV_PAGEOUT)

1165 if (madvise(address, size, MADV_PAGEOUT)) {

1166#elif defined(MADV_FREE)

1167 if (madvise(address, size, MADV_FREE)) {

1168#else

1169 if (posix_madvise(address, size, POSIX_MADV_DONTNEED)) {

1170#endif

1171 rpmalloc_assert(0, "Failed to madvise virtual memory block as free");

1172 }

1173 }

1174#endif

1175#endif

1176 if (release)

1178}

1179

1182 size_t span_count);

1183

1184

1185

1189 span, span_count);

1194 else

1196 return span;

1197}

1198

1199

1200

1202 size_t reserve_span_count) {

1206}

1207

1208

1209

1210

1211

1212

1213

1214

//! Add a span to a double linked list at the head
static void _rpmalloc_span_double_link_list_add(span_t **head, span_t *span) {
  if (*head)
    (*head)->prev = span;
  span->next = *head;
  *head = span;
}

//! Pop the head span from a double linked list
static void _rpmalloc_span_double_link_list_pop_head(span_t **head,
                                                     span_t *span) {
  rpmalloc_assert(*head == span, "Linked list corrupted");
  span = *head;
  *head = span->next;
}

//! Remove a span from a double linked list
static void _rpmalloc_span_double_link_list_remove(span_t **head,
                                                   span_t *span) {
  rpmalloc_assert(*head, "Linked list corrupted");
  if (*head == span) {
    *head = span->next;
  } else {
    span_t *next_span = span->next;
    span_t *prev_span = span->prev;
    prev_span->next = next_span;
    if (EXPECTED(next_span != 0))
      next_span->prev = prev_span;
  }
}
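
/* Illustrative usage sketch (not part of the original file): spans are linked
 * into heap-local lists with the helpers above and unlinked when they are no
 * longer usable. The demo function is hypothetical and excluded from the
 * build. */
#if 0
static void demo_track_span(span_t **partial_list, span_t *span) {
  _rpmalloc_span_double_link_list_add(partial_list, span);
  /* ... span hands out blocks; once exhausted it is unlinked ... */
  _rpmalloc_span_double_link_list_remove(partial_list, span);
}
#endif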

1244

1245

1246

1247

1248

1249

1250

1252

1254

1257 size_t reserve_span_count);

1258

1259

1260

1263 size_t span_count) {

1265 "Span master pointer and/or flag mismatch");

1266 if (subspan != master) {

1272 }

1274}

1275

1276

1277

1279 size_t span_count) {

1280

1285

1287 span_count);

1289 _rpmalloc_stat_inc(&heap->span_use[span_count - 1].spans_from_reserved);

1290

1291 return span;

1292}

1293

1294

1295

1298 ? span_count

1302 request_count +=

1304 return request_count;

1305}

1306

1307

1309 size_t span_count, size_t align_offset) {

1315}

1316

1318

1319

1320

1322 size_t span_count) {

1323

1324

1325

1327 size_t align_offset = 0;

1330 if (!span)

1331 return 0;

1335 _rpmalloc_stat_inc(&heap->span_use[span_count - 1].spans_map_calls);

1336 if (aligned_span_count > span_count) {

1337 span_t *reserved_spans =

1339 size_t reserved_count = aligned_span_count - span_count;

1344 }

1346

1347

1349 "Global spin lock not held as expected");

1359 }

1361 }

1363 reserved_count);

1364 }

1365 return span;

1366}

1367

1368

1369

1371 if (span_count <= heap->spans_reserved)

1374 int use_global_reserve =

1377 if (use_global_reserve) {

1378

1382 size_t reserve_count =

1387 if (span) {

1388 if (reserve_count > span_count) {

1392 reserved_span,

1393 reserve_count - span_count);

1394 }

1395

1397 }

1398 }

1399 }

1400 if (!span)

1402 if (use_global_reserve)

1404 return span;

1405}

1406

1407

1408

1412 "Span flag corrupted");

1415 "Span flag corrupted");

1416

1419 is_master ? span

1424 "Span flag corrupted");

1426

1427 size_t span_count = span->span_count;

1428 if (!is_master) {

1429

1430

1434 } else {

1435

1436

1440 }

1441

1443

1444

1447 "Span flag corrupted");

1448 size_t unmap_count = master->span_count;

1456 }

1457}

1458

1459

1460

1464 "Invalid span size class");

1466#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS

1468#endif

1477 } else {

1479 }

1480}

1481

1482

1483

//! Initialize a (partial) free list up to the next system memory page, while
//! reserving the first block as allocated, returning number of blocks in list
static uint32_t free_list_partial_init(void **list, void **first_block,
                                       void *page_start, void *block_start,
                                       uint32_t block_count,
                                       uint32_t block_size) {
  rpmalloc_assert(block_count, "Internal failure");
  *first_block = block_start;
  if (block_count > 1) {
    void *free_block = pointer_offset(block_start, block_size);
    void *block_end =
        pointer_offset(block_start, (size_t)block_size * block_count);
    // If block size is less than half a memory page, bound the initialization
    // to the next memory page boundary
    if (block_size < (_memory_page_size >> 1)) {
      void *page_end = pointer_offset(page_start, _memory_page_size);
      if (page_end < block_end)
        block_end = page_end;
    }
    *list = free_block;
    block_count = 2;
    void *next_block = pointer_offset(free_block, block_size);
    while (next_block < block_end) {
      *((void **)free_block) = next_block;
      free_block = next_block;
      ++block_count;
      next_block = pointer_offset(next_block, block_size);
    }
    *((void **)free_block) = 0;
  } else {
    *list = 0;
  }
  return block_count;
}
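
/* Illustrative sketch (not part of the original file): consuming the in-place
 * free list built above. Each free block stores the pointer to the next free
 * block in its first word, so popping a block is a single load. The demo
 * function is hypothetical and excluded from the build. */
#if 0
static void *demo_pop_block(void **free_list) {
  void *block = *free_list;
  if (block)
    *free_list = *((void **)block); /* advance to the next free block */
  return block;
}
#endif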

1516

1517

1518

1525 span->heap = heap;

1526 span->flags &= ~SPAN_FLAG_ALIGNED_BLOCKS;

1532

1533

1534

1540

1541

1545 } else {

1546#if RPMALLOC_FIRST_CLASS_HEAPS

1548#endif

1551 }

1553}

1554

1556

1557

1558

1559 do {

1566}

1567

1570 "Span free list corrupted");

1572}

1573

1575 span_t **list_head) {

1578 if (span == class_span) {

1579

1581 void *last_block = 0;

1583 last_block = block;

1585 }

1587 block = free_list;

1589 ++free_count;

1591 }

1592 if (last_block) {

1593 *((void **)last_block) = free_list;

1594 } else {

1596 }

1599 }

1600

1605

1606 if (list_head)

1609 return 1;

1610 }

1611 return 0;

1612}

1613

1614

1615

1616

1617

1618

1619

1620#if ENABLE_GLOBAL_CACHE

1621

1622

1623static void _rpmalloc_global_cache_finalize(global_cache_t *cache) {

1626

1627 for (size_t ispan = 0; ispan < cache->count; ++ispan)

1629 cache->count = 0;

1630

1635 }

1636

1638}

1639

1640static void _rpmalloc_global_cache_insert_spans(span_t **span,

1641 size_t span_count,

1642 size_t count) {

1643 const size_t cache_limit =

1647

1648 global_cache_t *cache = &_memory_span_cache[span_count - 1];

1649

1650 size_t insert_count = count;

1653

1654#if ENABLE_STATISTICS

1655 cache->insert_count += count;

1656#endif

1657 if ((cache->count + insert_count) > cache_limit)

1658 insert_count = cache_limit - cache->count;

1659

1660 memcpy(cache->span + cache->count, span, sizeof(span_t *) * insert_count);

1662

1663#if ENABLE_UNLIMITED_CACHE

1664 while (insert_count < count) {

1665#else

1666

1667

1668

1670#endif

1671 span_t *current_span = span[insert_count++];

1673 cache->overflow = current_span;

1674 }

1676

1678 for (size_t ispan = insert_count; ispan < count; ++ispan) {

1679 span_t *current_span = span[ispan];

1680

1683 (int32_t)current_span->span_count)) {

1684 current_span->next = keep;

1685 keep = current_span;

1686 } else {

1688 }

1689 }

1690

1691 if (keep) {

1694

1695 size_t islot = 0;

1696 while (keep) {

1697 for (; islot < cache->count; ++islot) {

1698 span_t *current_span = cache->span[islot];

1702 (int32_t)current_span->span_count))) {

1704 cache->span[islot] = keep;

1705 break;

1706 }

1707 }

1708 if (islot == cache->count)

1709 break;

1710 keep = keep->next;

1711 }

1712

1713 if (keep) {

1714 span_t *tail = keep;

1715 while (tail->next)

1716 tail = tail->next;

1719 }

1720

1722 }

1723}

1724

1725static size_t _rpmalloc_global_cache_extract_spans(span_t **span,

1726 size_t span_count,

1727 size_t count) {

1728 global_cache_t *cache = &_memory_span_cache[span_count - 1];

1729

1730 size_t extract_count = 0;

1733

1734#if ENABLE_STATISTICS

1735 cache->extract_count += count;

1736#endif

1737 size_t want = count - extract_count;

1738 if (want > cache->count)

1739 want = cache->count;

1740

1741 memcpy(span + extract_count, cache->span + (cache->count - want),

1742 sizeof(span_t *) * want);

1744 extract_count += want;

1745

1746 while ((extract_count < count) && cache->overflow) {

1748 span[extract_count++] = current_span;

1750 }

1751

1752#if ENABLE_ASSERTS

1753 for (size_t ispan = 0; ispan < extract_count; ++ispan) {

1755 "Global cache span count mismatch");

1756 }

1757#endif

1758

1760

1761 return extract_count;

1762}

1763

1764#endif

1765

1766

1767

1768

1769

1770

1771

1773

1774

1777 size_t reserve_span_count) {

1781}

1782

1783

1784

1786 span_t **single_span) {

1789 while (span) {

1796#if RPMALLOC_FIRST_CLASS_HEAPS

1798 span);

1799#endif

1802 if (single_span && !*single_span)

1803 *single_span = span;

1804 else

1806 } else {

1809 } else {

1811 "Span size class invalid");

1814#if RPMALLOC_FIRST_CLASS_HEAPS

1816#endif

1820 if (!idx && single_span && !*single_span)

1821 *single_span = span;

1822 else

1824 }

1825 }

1826 span = next_span;

1827 }

1828}

1829

1835 }

1836 } else {

1839 }

1840 }

1841}

1842

1846 return;

1847 }

1848

1850

1851#if ENABLE_THREAD_CACHE

1854 if (!iclass)

1855 span_cache = &heap->span_cache;

1856 else

1857 span_cache = (span_cache_t *)(heap->span_large_cache + (iclass - 1));

1858 for (size_t ispan = 0; ispan < span_cache->count; ++ispan)

1860 span_cache->count = 0;

1861 }

1862#endif

1863

1866 return;

1867 }

1868

1869 for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {

1873 return;

1874 }

1875 }

1876

1879 if (list_heap == heap) {

1881 } else {

1882 while (list_heap->next_heap != heap)

1883 list_heap = list_heap->next_heap;

1885 }

1886

1888}

1889

1890

1891

1896 return;

1897 }

1898#if ENABLE_THREAD_CACHE

1899 size_t span_count = span->span_count;

1901 if (span_count == 1) {

1902 span_cache_t *span_cache = &heap->span_cache;

1903 span_cache->span[span_cache->count++] = span;

1905 const size_t remain_count =

1907#if ENABLE_GLOBAL_CACHE

1912 _rpmalloc_global_cache_insert_spans(span_cache->span + remain_count,

1913 span_count,

1915#else

1918#endif

1919 span_cache->count = remain_count;

1920 }

1921 } else {

1922 size_t cache_idx = span_count - 2;

1923 span_large_cache_t *span_cache = heap->span_large_cache + cache_idx;

1924 span_cache->span[span_cache->count++] = span;

1925 const size_t cache_limit =

1927 if (span_cache->count == cache_limit) {

1928 const size_t transfer_limit = 2 + (cache_limit >> 2);

1929 const size_t transfer_count =

1932 : transfer_limit);

1933 const size_t remain_count = cache_limit - transfer_count;

1934#if ENABLE_GLOBAL_CACHE

1938 transfer_count);

1939 _rpmalloc_global_cache_insert_spans(span_cache->span + remain_count,

1940 span_count, transfer_count);

1941#else

1942 for (size_t ispan = 0; ispan < transfer_count; ++ispan)

1944#endif

1945 span_cache->count = remain_count;

1946 }

1947 }

1948#else

1949 (void)sizeof(heap);

1951#endif

1952}

1953

1954

1956 size_t span_count) {

1958#if ENABLE_THREAD_CACHE

1960 if (span_count == 1)

1961 span_cache = &heap->span_cache;

1962 else

1963 span_cache = (span_cache_t *)(heap->span_large_cache + (span_count - 2));

1964 if (span_cache->count) {

1965 _rpmalloc_stat_inc(&heap->span_use[span_count - 1].spans_from_cache);

1966 return span_cache->span[--span_cache->count];

1967 }

1968#endif

1969 return span;

1970}

1971

1973 size_t span_count) {

1975 if (span_count == 1) {

1977 } else {

1980 }

1981 return span;

1982}

1983

1985 size_t span_count) {

1988 return 0;

1989}

1990

1991

1993 size_t span_count) {

1994#if ENABLE_GLOBAL_CACHE

1995#if ENABLE_THREAD_CACHE

1997 size_t wanted_count;

1998 if (span_count == 1) {

1999 span_cache = &heap->span_cache;

2001 } else {

2002 span_cache = (span_cache_t *)(heap->span_large_cache + (span_count - 2));

2004 }

2005 span_cache->count = _rpmalloc_global_cache_extract_spans(

2006 span_cache->span, span_count, wanted_count);

2007 if (span_cache->count) {

2010 _rpmalloc_stat_add(&heap->span_use[span_count - 1].spans_from_global,

2011 span_cache->count);

2012 return span_cache->span[--span_cache->count];

2013 }

2014#else

2016 size_t count = _rpmalloc_global_cache_extract_spans(&span, span_count, 1);

2017 if (count) {

2020 _rpmalloc_stat_add(&heap->span_use[span_count - 1].spans_from_global,

2021 count);

2022 return span;

2023 }

2024#endif

2025#endif

2026 (void)sizeof(heap);

2027 (void)sizeof(span_count);

2028 return 0;

2029}

2030

2033 (void)sizeof(heap);

2034 (void)sizeof(span_count);

2035 (void)sizeof(class_idx);

2036#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS

2040 if (current_count > (uint32_t)atomic_load32(&heap->span_use[idx].high))

2041 atomic_store32(&heap->span_use[idx].high, (int32_t)current_count);

2043 heap->size_class_use[class_idx].spans_peak);

2044#endif

2045}

2046

2047

2048

2052 size_t span_count, uint32_t class_idx) {

2054#if ENABLE_THREAD_CACHE

2055 if (heap_size_class && heap_size_class->cache) {

2056 span = heap_size_class->cache;

2057 heap_size_class->cache =

2058 (heap->span_cache.count

2059 ? heap->span_cache.span[--heap->span_cache.count]

2060 : 0);

2062 return span;

2063 }

2064#endif

2065 (void)sizeof(class_idx);

2066

2067 size_t base_span_count = span_count;

2068 size_t limit_span_count =

2069 (span_count > 2) ? (span_count + (span_count >> 1)) : span_count;

2072 do {

2075 _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_from_cache);

2077 return span;

2078 }

2081 _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_from_cache);

2083 return span;

2084 }

2087 _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_from_cache);

2089 return span;

2090 }

2093 _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_from_reserved);

2095 return span;

2096 }

2097 ++span_count;

2098 } while (span_count <= limit_span_count);

2099

2102 _rpmalloc_stat_inc(&heap->size_class_use[class_idx].spans_map_calls);

2103 return span;

2104}

2105

2108

2110

2111

2115}

2116

2119#if RPMALLOC_FIRST_CLASS_HEAPS

2120 heap_t **heap_list =

2122#else

2123 (void)sizeof(first_class);

2125#endif

2127 *heap_list = heap;

2128}

2129

2130

2132

2133

2134

2135

2136 size_t heap_size = sizeof(heap_t);

2137 size_t aligned_heap_size = 16 * ((heap_size + 15) / 16);

2138 size_t request_heap_count = 16;

2139 size_t heap_span_count = ((aligned_heap_size * request_heap_count) +

2143 size_t span_count = heap_span_count;

2145

2148 }

2149 if (!span) {

2153

2154

2155 size_t possible_heap_count =

2156 (block_size - sizeof(span_t)) / aligned_heap_size;

2157 if (possible_heap_count >= (request_heap_count * 16))

2158 request_heap_count *= 16;

2159 else if (possible_heap_count < request_heap_count)

2160 request_heap_count = possible_heap_count;

2161 heap_span_count = ((aligned_heap_size * request_heap_count) +

2164 }

2165

2166 size_t align_offset = 0;

2168 if (!span)

2169 return 0;

2170

2171

2174 }

2175

2179

2180

2181 size_t num_heaps = remain_size / aligned_heap_size;

2182 if (num_heaps < request_heap_count)

2183 num_heaps = request_heap_count;

2184 atomic_store32(&heap->child_count, (int32_t)num_heaps - 1);

2186 while (num_heaps > 1) {

2191 --num_heaps;

2192 }

2193

2194 if (span_count > heap_span_count) {

2195

2196 size_t remain_count = span_count - heap_span_count;

2197 size_t reserve_count =

2199 : remain_count);

2200 span_t *remain_span =

2203

2204 if (remain_count > reserve_count) {

2205

2208 reserve_count = remain_count - reserve_count;

2210 }

2211 }

2212

2213 return heap;

2214}

2215

2217 heap_t *heap = *heap_list;

2218 *heap_list = (heap ? heap->next_orphan : 0);

2219 return heap;

2220}

2221

2222

2227 if (first_class == 0)

2229#if RPMALLOC_FIRST_CLASS_HEAPS

2230 if (!heap)

2232#endif

2233 if (!heap)

2236 if (heap)

2238 return heap;

2239}

2240

2242 int release_cache) {

2244 if (!heap)

2245 return;

2246

2248 if (release_cache || heap->finalize) {

2249#if ENABLE_THREAD_CACHE

2252 if (!iclass)

2253 span_cache = &heap->span_cache;

2254 else

2255 span_cache = (span_cache_t *)(heap->span_large_cache + (iclass - 1));

2256 if (!span_cache->count)

2257 continue;

2258#if ENABLE_GLOBAL_CACHE

2260 for (size_t ispan = 0; ispan < span_cache->count; ++ispan)

2262 } else {

2264 (iclass + 1) *

2267 span_cache->count);

2268 _rpmalloc_global_cache_insert_spans(span_cache->span, iclass + 1,

2269 span_cache->count);

2270 }

2271#else

2272 for (size_t ispan = 0; ispan < span_cache->count; ++ispan)

2274#endif

2275 span_cache->count = 0;

2276 }

2277#endif

2278 }

2279

2282

2283#if ENABLE_STATISTICS

2285 rpmalloc_assert(atomic_load32(&_memory_active_heaps) >= 0,

2286 "Still active heaps during finalization");

2287#endif

2288

2289

2290

2294 }

2297}

2298

2301}

2302

2305}

2306

2312 }

2313

2315

2316 for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {

2321 while (span) {

2325 span = next;

2326 }

2327

2329 span_t *class_span =

2333#if RPMALLOC_FIRST_CLASS_HEAPS

2334 list = &heap->full_span[iclass];

2335#endif

2338 if (list)

2342 }

2343 }

2344 }

2345

2346#if ENABLE_THREAD_CACHE

2349 if (!iclass)

2350 span_cache = &heap->span_cache;

2351 else

2352 span_cache = (span_cache_t *)(heap->span_large_cache + (iclass - 1));

2353 for (size_t ispan = 0; ispan < span_cache->count; ++ispan)

2355 span_cache->count = 0;

2356 }

2357#endif

2359 "Heaps still active during finalization");

2360}

2361

2362

2363

2364

2365

2366

2367

2368

2370 void *block = *list;

2371 *list = *((void **)block);

2373}

2374

2375

2383 "Span block count corrupted");

2385 "Internal failure");

2388

2392 } else {

2393

2394

2403 }

2405 "Span block count corrupted");

2407

2408

2411

2412

2413

2416

2417

2418

2420 span);

2421#if RPMALLOC_FIRST_CLASS_HEAPS

2423#endif

2426 }

2427

2428

2431

2433 class_idx);

2434 }

2435

2436 return 0;

2437}

2438

2439

2442

2450 class_idx);

2451}

2452

2453

2456

2457

2467 class_idx);

2468}

2469

2470

2473

2474

2475

2479 ++span_count;

2480

2481

2484 if (!span)

2485 return span;

2486

2487

2490 span->heap = heap;

2491

2492#if RPMALLOC_FIRST_CLASS_HEAPS

2494#endif

2496

2498}

2499

2500

2507 ++num_pages;

2508 size_t align_offset = 0;

2511 if (!span)

2512 return span;

2513

2514

2518 span->heap = heap;

2520

2521#if RPMALLOC_FIRST_CLASS_HEAPS

2523#endif

2525

2527}

2528

2529

2539}

2540

2542 size_t size) {

2545

2546#if ENABLE_VALIDATE_ARGS

2547 if ((size + alignment) < size) {

2548 errno = EINVAL;

2549 return 0;

2550 }

2551 if (alignment & (alignment - 1)) {

2552 errno = EINVAL;

2553 return 0;

2554 }

2555#endif

2556

2559

2560

2561

2562 size_t multiple_size = size ? (size + (SPAN_HEADER_SIZE - 1)) &

2566 "Failed alignment calculation");

2567 if (multiple_size <= (size + alignment))

2569 }

2570

2571 void *ptr = 0;

2572 size_t align_mask = alignment - 1;

2575 if ((uintptr_t)ptr & align_mask) {

2576 ptr = (void *)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);

2577

2580 }

2581 return ptr;

2582 }

2583

2584

2585

2586

2587

2588

2589

2590

2591

2592

2593 if (alignment & align_mask) {

2594 errno = EINVAL;

2595 return 0;

2596 }

2598 errno = EINVAL;

2599 return 0;

2600 }

2601

2603

2604

2607 ++num_pages;

2608

2609 if (extra_pages > num_pages)

2610 num_pages = 1 + extra_pages;

2611

2612 size_t original_pages = num_pages;

2614 if (limit_pages < (original_pages * 2))

2615 limit_pages = original_pages * 2;

2616

2617 size_t mapped_size, align_offset;

2619

2620retry:

2621 align_offset = 0;

2623

2625 if (!span) {

2626 errno = ENOMEM;

2627 return 0;

2628 }

2630

2631 if ((uintptr_t)ptr & align_mask)

2632 ptr = (void *)(((uintptr_t)ptr & ~(uintptr_t)align_mask) + alignment);

2633

2637 _rpmalloc_unmap(span, mapped_size, align_offset, mapped_size);

2638 ++num_pages;

2639 if (num_pages > limit_pages) {

2640 errno = EINVAL;

2641 return 0;

2642 }

2643 goto retry;

2644 }

2645

2646

2650 span->heap = heap;

2652

2653#if RPMALLOC_FIRST_CLASS_HEAPS

2655#endif

2657

2659

2660 return ptr;

2661}

2662

2663

2664

2665

2666

2667

2668

2669

2670

2676 "Internal failure");

2677

2680#if RPMALLOC_FIRST_CLASS_HEAPS

2682 span);

2683#endif

2687 }

2692

2693

2695

2696

2697

2698 void *free_list;

2699 do {

2704 }

2708 }

2709}

2710

2714

2715 do {

2718}

2719

2720

2723

2724

2725

2726

2727

2728 void *free_list;

2729 do {

2730 free_list =

2733 *((void **)block) = free_list;

2735 int all_deferred_free = (free_count == span->block_count);

2737 if (all_deferred_free) {

2738

2739

2740

2742 }

2743}

2744

2748

2752 }

2753

2754#if RPMALLOC_FIRST_CLASS_HEAPS

2755 int defer =

2758#else

2759 int defer =

2761#endif

2762 if (!defer)

2764 else

2766}

2767

2768

2773 "Span flag corrupted");

2776 "Span flag corrupted");

2777

2778

2779#if RPMALLOC_FIRST_CLASS_HEAPS

2780 int defer =

2783#else

2784 int defer =

2786#endif

2787 if (defer) {

2789 return;

2790 }

2793#if RPMALLOC_FIRST_CLASS_HEAPS

2795#endif

2796#if ENABLE_ADAPTIVE_THREAD_CACHE || ENABLE_STATISTICS

2797

2800#endif

2803#if ENABLE_THREAD_CACHE

2804 const int set_as_reserved =

2805 ((span->span_count > 1) && (heap->span_cache.count == 0) &&

2807#else

2808 const int set_as_reserved =

2810#endif

2811 if (set_as_reserved) {

2816 } else {

2818 span,

2824 "Master span count corrupted");

2825 }

2827 } else {

2828

2830 }

2831}

2832

2833

2836#if RPMALLOC_FIRST_CLASS_HEAPS

2837 int defer =

2840#else

2841 int defer =

2843#endif

2844 if (defer) {

2846 return;

2847 }

2850#if RPMALLOC_FIRST_CLASS_HEAPS

2852#endif

2853

2854

2855 size_t num_pages = span->span_count;

2859}

2860

2861

2864

2867 return;

2872 else

2874}

2875

2876

2877

2878

2879

2880

2881

2883

2884

2886 size_t oldsize, unsigned int flags) {

2887 if (p) {

2888

2891

2898 if (!oldsize)

2899 oldsize =

2901 if ((size_t)span->block_size >= size) {

2902

2903

2905 memmove(block, p, oldsize);

2907 }

2909

2913 ++num_spans;

2914 size_t current_spans = span->span_count;

2916 if (!oldsize)

2919 if ((current_spans >= num_spans) && (total_size >= (oldsize / 2))) {

2920

2921

2923 memmove(block, p, oldsize);

2925 }

2926 } else {

2927

2931 ++num_pages;

2932

2933 size_t current_pages = span->span_count;

2935 if (!oldsize)

2938 if ((current_pages >= num_pages) && (num_pages >= (current_pages / 2))) {

2939

2940

2942 memmove(block, p, oldsize);

2944 }

2945 }

2946 } else {

2947 oldsize = 0;

2948 }

2949

2951 return 0;

2952

2953

2954

2955

2956 size_t lower_bound = oldsize + (oldsize >> 2) + (oldsize >> 3);

2957 size_t new_size =

2958 (size > lower_bound) ? size : ((size > oldsize) ? lower_bound : size);

2960 if (p && block) {

2962 memcpy(block, p, oldsize < new_size ? oldsize : new_size);

2964 }

2965

2967}

2968

2970 size_t alignment, size_t size,

2971 size_t oldsize, unsigned int flags) {

2974

2977 if ((usablesize >= size) && !((uintptr_t)ptr & (alignment - 1))) {

2978 if (no_alloc || (size >= (usablesize / 2)))

2979 return ptr;

2980 }

2981

2986 if (!oldsize)

2987 oldsize = usablesize;

2988 memcpy(block, ptr, oldsize < size ? oldsize : size);

2989 }

2991 }

2993}

2994

2995

2996

2997

2998

2999

3000

3001

3003

3006

3010 }

3012

3013 size_t current_spans = span->span_count;

3015 }

3016

3017 size_t current_pages = span->span_count;

3019}

3020

3021

3025

3028

3029

3031 size_t prevclass = iclass;

3032 while (prevclass > 0) {

3033 --prevclass;

3034

3040 else

3041 break;

3042 }

3043 }

3044}

3045

3046

3050 return 0;

3051 }

3053}

3054

3058 return 0;

3059 }

3061

3062 if (config)

3064 else

3066

3070 }

3071

3072#if PLATFORM_WINDOWS

3073 SYSTEM_INFO system_info;

3074 memset(&system_info, 0, sizeof(system_info));

3075 GetSystemInfo(&system_info);

3077#else

3079#endif

3080

3081#if RPMALLOC_CONFIGURABLE

3083#else

3085#endif

3088#if PLATFORM_WINDOWS

3090#else

3093#if defined(__linux__)

3094 size_t huge_page_size = 0;

3095 FILE *meminfo = fopen("/proc/meminfo", "r");

3096 if (meminfo) {

3097 char line[128];

3098 while (!huge_page_size && fgets(line, sizeof(line) - 1, meminfo)) {

3099 line[sizeof(line) - 1] = 0;

3100 if (strstr(line, "Hugepagesize:"))

3101 huge_page_size = (size_t)strtol(line + 13, 0, 10) * 1024;

3102 }

3103 fclose(meminfo);

3104 }

3105 if (huge_page_size) {

3109 }

3110#elif defined(__FreeBSD__)

3111 int rc;

3112 size_t sz = sizeof(rc);

3113

3114 if (sysctlbyname("vm.pmap.pg_ps_enabled", &rc, &sz, NULL, 0) == 0 &&

3115 rc == 1) {

3116 static size_t defsize = 2 * 1024 * 1024;

3117 int nsize = 0;

3118 size_t sizes[4] = {0};

3121 if ((nsize = getpagesizes(sizes, 4)) >= 2) {

3122 nsize--;

3123 for (size_t csize = sizes[nsize]; nsize >= 0 && csize;

3124 --nsize, csize = sizes[nsize]) {

3125

3126 rpmalloc_assert(!(csize & (csize - 1)) && !(csize % 1024),

3127 "Invalid page size");

3128 if (defsize < csize) {

3130 break;

3131 }

3132 }

3133 }

3135 }

3136#elif defined(__APPLE__) || defined(__NetBSD__)

3140#endif

3141 }

3142#endif

3143 } else {

3146 }

3147

3148#if PLATFORM_WINDOWS

3150 HANDLE token = 0;

3151 size_t large_page_minimum = GetLargePageMinimum();

3152 if (large_page_minimum)

3153 OpenProcessToken(GetCurrentProcess(),

3154 TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);

3155 if (token) {

3156 LUID luid;

3157 if (LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &luid)) {

3158 TOKEN_PRIVILEGES token_privileges;

3159 memset(&token_privileges, 0, sizeof(token_privileges));

3160 token_privileges.PrivilegeCount = 1;

3161 token_privileges.Privileges[0].Luid = luid;

3162 token_privileges.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

3163 if (AdjustTokenPrivileges(token, FALSE, &token_privileges, 0, 0, 0)) {

3164 if (GetLastError() == ERROR_SUCCESS)

3166 }

3167 }

3168 CloseHandle(token);

3169 }

3175 }

3176 }

3177#endif

3178

3179 size_t min_span_size = 256;

3180 size_t max_page_size;

3181#if UINTPTR_MAX > 0xFFFFFFFF

3182 max_page_size = 4096ULL * 1024ULL * 1024ULL;

3183#else

3184 max_page_size = 4 * 1024 * 1024;

3185#endif

3192 while (page_size_bit != 1) {

3194 page_size_bit >>= 1;

3195 }

3197

3198#if RPMALLOC_CONFIGURABLE

3203 } else {

3205 if (span_size > (256 * 1024))

3206 span_size = (256 * 1024);

3212 }

3214 }

3215#endif

3216

3228

3233

3234#if ((defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD) || \

3235 defined(__TINYC__)

3237 return -1;

3238#endif

3239#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)

3240 fls_key = FlsAlloc(&_rpmalloc_thread_destructor);

3241#endif

3242

3243

3244 size_t iclass = 0;

3251 }

3252

3261 break;

3262 }

3265 }

3266

3268#if RPMALLOC_FIRST_CLASS_HEAPS

3269 _memory_first_class_orphan_heaps = 0;

3270#endif

3271#if ENABLE_STATISTICS

3274 _mapped_pages_peak = 0;

3280 _huge_pages_peak = 0;

3281#endif

3284

3286

3287

3289 return 0;

3290}

3291

3292

3295

3296

3303 }

3305

3306

3307 for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {

3309 while (heap) {

3313 heap = next_heap;

3314 }

3315 }

3316

3317#if ENABLE_GLOBAL_CACHE

3318

3320 _rpmalloc_global_cache_finalize(&_memory_span_cache[iclass]);

3321#endif

3322

3323#if (defined(__APPLE__) || defined(__HAIKU__)) && ENABLE_PRELOAD

3324 pthread_key_delete(_memory_thread_heap);

3325#endif

3326#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)

3327 FlsFree(fls_key);

3328 fls_key = 0;

3329#endif

3330#if ENABLE_STATISTICS

3331

3332

3333 rpmalloc_assert(atomic_load32(&_mapped_pages) == 0, "Memory leak detected");

3335 "Memory leak detected");

3336#endif

3337

3339}

3340

3341

3345 if (heap) {

3348#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)

3349 FlsSetValue(fls_key, heap);

3350#endif

3351 }

3352 }

3353}

3354

3355

3358 if (heap)

3361#if defined(_WIN32) && (!defined(BUILD_DYNAMIC_LINK) || !BUILD_DYNAMIC_LINK)

3362 FlsSetValue(fls_key, 0);

3363#endif

3364}

3365

3368}

3369

3371

3372

3373

3375#if ENABLE_VALIDATE_ARGS

3376 if (size >= MAX_ALLOC_SIZE) {

3377 errno = EINVAL;

3378 return 0;

3379 }

3380#endif

3383}

3384

3386

3388 size_t total;

3389#if ENABLE_VALIDATE_ARGS

3390#if PLATFORM_WINDOWS

3391 int err = SizeTMult(num, size, &total);

3392 if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {

3393 errno = EINVAL;

3394 return 0;

3395 }

3396#else

3397 int err = __builtin_umull_overflow(num, size, &total);

3398 if (err || (total >= MAX_ALLOC_SIZE)) {

3399 errno = EINVAL;

3400 return 0;

3401 }

3402#endif

3403#else

3404 total = num * size;

3405#endif

3409 memset(block, 0, total);

3411}

3412

3414#if ENABLE_VALIDATE_ARGS

3415 if (size >= MAX_ALLOC_SIZE) {

3416 errno = EINVAL;

3417 return ptr;

3418 }

3419#endif

3422}

3423

3425 size_t size, size_t oldsize,

3426 unsigned int flags) {

3427#if ENABLE_VALIDATE_ARGS

3428 if ((size + alignment < size) || (alignment > _memory_page_size)) {

3429 errno = EINVAL;

3430 return 0;

3431 }

3432#endif

3435 flags);

3436}

3437

3441}

3442

3445 size_t total;

3446#if ENABLE_VALIDATE_ARGS

3447#if PLATFORM_WINDOWS

3448 int err = SizeTMult(num, size, &total);

3449 if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {

3450 errno = EINVAL;

3451 return 0;

3452 }

3453#else

3454 int err = __builtin_umull_overflow(num, size, &total);

3455 if (err || (total >= MAX_ALLOC_SIZE)) {

3456 errno = EINVAL;

3457 return 0;

3458 }

3459#endif

3460#else

3461 total = num * size;

3462#endif

3465 memset(block, 0, total);

3467}

3468

3470 size_t size) {

3472}

3473

3475 size_t size) {

3476 if (memptr)

3478 else

3479 return EINVAL;

3480 return *memptr ? 0 : ENOMEM;

3481}
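
/* Illustrative usage sketch (not part of the original file) of the public
 * entry points this file implements, as declared in rpmalloc.h. Error
 * handling is kept minimal, and the block is excluded from the build. */
#if 0
static int demo_use_rpmalloc(void) {
  if (rpmalloc_initialize())
    return -1;
  void *p = rpmalloc(128);
  void *aligned = 0;
  int err = rpposix_memalign(&aligned, 64, 256);
  rpfree(p);
  if (!err)
    rpfree(aligned);
  rpmalloc_finalize();
  return 0;
}
#endif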

3482

3485}

3486

3488

3492 if (!heap)

3493 return;

3494

3495 for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {

3498 while (span) {

3499 size_t free_count = span->list_size;

3500 size_t block_count = size_class->block_count;

3503 free_count += (block_count - span->used_count);

3504 stats->sizecache += free_count * size_class->block_size;

3505 span = span->next;

3506 }

3507 }

3508

3509#if ENABLE_THREAD_CACHE

3512 if (!iclass)

3513 span_cache = &heap->span_cache;

3514 else

3515 span_cache = (span_cache_t *)(heap->span_large_cache + (iclass - 1));

3517 }

3518#endif

3519

3521 while (deferred) {

3525 }

3526

3527#if ENABLE_STATISTICS

3528 stats->thread_to_global = (size_t)atomic_load64(&heap->thread_to_global);

3529 stats->global_to_thread = (size_t)atomic_load64(&heap->global_to_thread);

3530

3532 stats->span_use[iclass].current =

3533 (size_t)atomic_load32(&heap->span_use[iclass].current);

3534 stats->span_use[iclass].peak =

3535 (size_t)atomic_load32(&heap->span_use[iclass].high);

3536 stats->span_use[iclass].to_global =

3537 (size_t)atomic_load32(&heap->span_use[iclass].spans_to_global);

3538 stats->span_use[iclass].from_global =

3539 (size_t)atomic_load32(&heap->span_use[iclass].spans_from_global);

3540 stats->span_use[iclass].to_cache =

3541 (size_t)atomic_load32(&heap->span_use[iclass].spans_to_cache);

3542 stats->span_use[iclass].from_cache =

3543 (size_t)atomic_load32(&heap->span_use[iclass].spans_from_cache);

3544 stats->span_use[iclass].to_reserved =

3545 (size_t)atomic_load32(&heap->span_use[iclass].spans_to_reserved);

3546 stats->span_use[iclass].from_reserved =

3547 (size_t)atomic_load32(&heap->span_use[iclass].spans_from_reserved);

3548 stats->span_use[iclass].map_calls =

3549 (size_t)atomic_load32(&heap->span_use[iclass].spans_map_calls);

3550 }

3551 for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {

3552 stats->size_use[iclass].alloc_current =

3553 (size_t)atomic_load32(&heap->size_class_use[iclass].alloc_current);

3554 stats->size_use[iclass].alloc_peak =

3555 (size_t)heap->size_class_use[iclass].alloc_peak;

3556 stats->size_use[iclass].alloc_total =

3557 (size_t)atomic_load32(&heap->size_class_use[iclass].alloc_total);

3558 stats->size_use[iclass].free_total =

3559 (size_t)atomic_load32(&heap->size_class_use[iclass].free_total);

3560 stats->size_use[iclass].spans_to_cache =

3561 (size_t)atomic_load32(&heap->size_class_use[iclass].spans_to_cache);

3562 stats->size_use[iclass].spans_from_cache =

3563 (size_t)atomic_load32(&heap->size_class_use[iclass].spans_from_cache);

3564 stats->size_use[iclass].spans_from_reserved = (size_t)atomic_load32(

3565 &heap->size_class_use[iclass].spans_from_reserved);

3566 stats->size_use[iclass].map_calls =

3567 (size_t)atomic_load32(&heap->size_class_use[iclass].spans_map_calls);

3568 }

3569#endif

3570}

3571

3574#if ENABLE_STATISTICS

3577 stats->mapped_total =

3579 stats->unmapped_total =

3581 stats->huge_alloc =

3582 (size_t)atomic_load32(&_huge_pages_current) * _memory_page_size;

3584#endif

3585#if ENABLE_GLOBAL_CACHE

3587 global_cache_t *cache = &_memory_span_cache[iclass];

3591#if ENABLE_UNLIMITED_CACHE

3593 while (current_span) {

3594 ++count;

3595 current_span = current_span->next;

3596 }

3597#endif

3600 }

3601#endif

3602}

3603

3604#if ENABLE_STATISTICS

3605

3606static void _memory_heap_dump_statistics(heap_t *heap, void *file) {

3607 fprintf(file, "Heap %d stats:\n", heap->id);

3608 fprintf(file, "Class CurAlloc PeakAlloc TotAlloc TotFree BlkSize "

3609 "BlkCount SpansCur SpansPeak PeakAllocMiB ToCacheMiB "

3610 "FromCacheMiB FromReserveMiB MmapCalls\n");

3611 for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {

3612 if (!atomic_load32(&heap->size_class_use[iclass].alloc_total))

3613 continue;

3614 fprintf(

3616 "%3u: %10u %10u %10u %10u %8u %8u %8d %9d %13zu %11zu %12zu %14zu "

3617 "%9u\n",

3619 atomic_load32(&heap->size_class_use[iclass].alloc_current),

3620 heap->size_class_use[iclass].alloc_peak,

3621 atomic_load32(&heap->size_class_use[iclass].alloc_total),

3622 atomic_load32(&heap->size_class_use[iclass].free_total),

3625 atomic_load32(&heap->size_class_use[iclass].spans_current),

3626 heap->size_class_use[iclass].spans_peak,

3627 ((size_t)heap->size_class_use[iclass].alloc_peak *

3629 (size_t)(1024 * 1024),

3630 ((size_t)atomic_load32(&heap->size_class_use[iclass].spans_to_cache) *

3632 (size_t)(1024 * 1024),

3633 ((size_t)atomic_load32(&heap->size_class_use[iclass].spans_from_cache) *

3635 (size_t)(1024 * 1024),

3636 ((size_t)atomic_load32(

3637 &heap->size_class_use[iclass].spans_from_reserved) *

3639 (size_t)(1024 * 1024),

3640 atomic_load32(&heap->size_class_use[iclass].spans_map_calls));

3641 }

3642 fprintf(file, "Spans Current Peak Deferred PeakMiB Cached ToCacheMiB "

3643 "FromCacheMiB ToReserveMiB FromReserveMiB ToGlobalMiB "

3644 "FromGlobalMiB MmapCalls\n");

3645 for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
3646 if (!atomic_load32(&heap->span_use[iclass].high) &&
3647 !atomic_load32(&heap->span_use[iclass].spans_map_calls))
3648 continue;
3649 fprintf(
3650 file,
3651 "%4u: %8d %8u %8u %8zu %7u %11zu %12zu %12zu %14zu %11zu %13zu %10u\n",
3652 (uint32_t)(iclass + 1), atomic_load32(&heap->span_use[iclass].current),
3653 atomic_load32(&heap->span_use[iclass].high),
3654 atomic_load32(&heap->span_use[iclass].spans_deferred),
3655 ((size_t)atomic_load32(&heap->span_use[iclass].high) *
3656 (size_t)_memory_span_size * (iclass + 1)) /
3657 (size_t)(1024 * 1024),
3658#if ENABLE_THREAD_CACHE
3659 (unsigned int)(!iclass ? heap->span_cache.count
3660 : heap->span_large_cache[iclass - 1].count),
3661 ((size_t)atomic_load32(&heap->span_use[iclass].spans_to_cache) *
3662 (iclass + 1) * _memory_span_size) /
3663 (size_t)(1024 * 1024),
3664 ((size_t)atomic_load32(&heap->span_use[iclass].spans_from_cache) *
3665 (iclass + 1) * _memory_span_size) /
3666 (size_t)(1024 * 1024),
3667#else
3668 0, (size_t)0, (size_t)0,
3669#endif
3670 ((size_t)atomic_load32(&heap->span_use[iclass].spans_to_reserved) *
3671 (iclass + 1) * _memory_span_size) /
3672 (size_t)(1024 * 1024),
3673 ((size_t)atomic_load32(&heap->span_use[iclass].spans_from_reserved) *
3674 (iclass + 1) * _memory_span_size) /
3675 (size_t)(1024 * 1024),
3676 ((size_t)atomic_load32(&heap->span_use[iclass].spans_to_global) *
3677 (size_t)_memory_span_size * (iclass + 1)) /
3678 (size_t)(1024 * 1024),
3679 ((size_t)atomic_load32(&heap->span_use[iclass].spans_from_global) *
3680 (size_t)_memory_span_size * (iclass + 1)) /
3681 (size_t)(1024 * 1024),
3682 atomic_load32(&heap->span_use[iclass].spans_map_calls));
3683 }

3685 fprintf(file, "ThreadToGlobalMiB GlobalToThreadMiB\n");

3686 fprintf(

3687 file, "%17zu %17zu\n",

3688 (size_t)atomic_load64(&heap->thread_to_global) / (size_t)(1024 * 1024),

3689 (size_t)atomic_load64(&heap->global_to_thread) / (size_t)(1024 * 1024));

3690}
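// Editorial note on the conversions above: every *MiB column multiplies a raw
// counter by the byte size it represents and divides by (size_t)(1024 * 1024).
// For example, with the default 64KiB span size, a spans_to_cache counter of
// 4096 is reported as (4096 * 65536) / 1048576 = 256 MiB.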

3691

3692#endif

3693

3694void rpmalloc_dump_statistics(void *file) {
3695#if ENABLE_STATISTICS
3696 for (size_t list_idx = 0; list_idx < HEAP_ARRAY_SIZE; ++list_idx) {
3697 heap_t *heap = _memory_heaps[list_idx];
3698 while (heap) {
3699 int need_dump = 0;
3700 for (size_t iclass = 0; !need_dump && (iclass < SIZE_CLASS_COUNT);
3701 ++iclass) {
3702 if (!atomic_load32(&heap->size_class_use[iclass].alloc_total)) {
3703 rpmalloc_assert(
3704 !atomic_load32(&heap->size_class_use[iclass].free_total),
3705 "Heap statistics counter mismatch");
3706 rpmalloc_assert(
3707 !atomic_load32(&heap->size_class_use[iclass].spans_map_calls),
3708 "Heap statistics counter mismatch");
3709 continue;
3710 }
3711 need_dump = 1;
3712 }
3713 for (size_t iclass = 0; !need_dump && (iclass < LARGE_CLASS_COUNT);
3714 ++iclass) {
3715 if (!atomic_load32(&heap->span_use[iclass].high) &&
3716 !atomic_load32(&heap->span_use[iclass].spans_map_calls))
3717 continue;
3718 need_dump = 1;
3719 }
3720 if (need_dump)
3721 _memory_heap_dump_statistics(heap, file);
3722 heap = heap->next_heap;
3723 }
3724 }

3725 fprintf(file, "Global stats:\n");

3726 size_t huge_current =

3727 (size_t)atomic_load32(&_huge_pages_current) * _memory_page_size;

3728 size_t huge_peak = (size_t)_huge_pages_peak * _memory_page_size;

3729 fprintf(file, "HugeCurrentMiB HugePeakMiB\n");

3730 fprintf(file, "%14zu %11zu\n", huge_current / (size_t)(1024 * 1024),

3731 huge_peak / (size_t)(1024 * 1024));

3732

3733#if ENABLE_GLOBAL_CACHE

3734 fprintf(file, "GlobalCacheMiB\n");
3735 for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
3736 global_cache_t *cache = _memory_span_cache + iclass;
3737 size_t global_cache =
3738 (size_t)cache->count * (iclass + 1) * _memory_span_size;
3739 size_t global_overflow_cache = 0;
3740 span_t *span = cache->overflow;
3741 while (span) {
3742 global_overflow_cache += (iclass + 1) * _memory_span_size;
3743 span = span->next;
3744 }

3745 if (global_cache || global_overflow_cache || cache->insert_count ||

3746 cache->extract_count)

3747 fprintf(file,

3748 "%4zu: %8zuMiB (%8zuMiB overflow) %14zu insert %14zu extract\n",

3749 iclass + 1, global_cache / (size_t)(1024 * 1024),

3750 global_overflow_cache / (size_t)(1024 * 1024),

3751 cache->insert_count, cache->extract_count);

3752 }

3753#endif

3754

3755 size_t mapped = (size_t)atomic_load32(&_mapped_pages) * _memory_page_size;

3756 size_t mapped_os =
3757 (size_t)atomic_load32(&_mapped_pages_os) * _memory_page_size;
3758 size_t mapped_peak = (size_t)_mapped_pages_peak * _memory_page_size;
3759 size_t mapped_total =
3760 (size_t)atomic_load32(&_mapped_total) * _memory_page_size;
3761 size_t unmapped_total =
3762 (size_t)atomic_load32(&_unmapped_total) * _memory_page_size;
3763 fprintf(
3764 file,
3765 "MappedMiB MappedOSMiB MappedPeakMiB MappedTotalMiB UnmappedTotalMiB\n");

3766 fprintf(file, "%9zu %11zu %13zu %14zu %16zu\n",

3767 mapped / (size_t)(1024 * 1024), mapped_os / (size_t)(1024 * 1024),

3768 mapped_peak / (size_t)(1024 * 1024),

3769 mapped_total / (size_t)(1024 * 1024),

3770 unmapped_total / (size_t)(1024 * 1024));

3771

3772 fprintf(file, "\n");

3773#if 0

3774 int64_t allocated = atomic_load64(&_allocation_counter);

3775 int64_t deallocated = atomic_load64(&_deallocation_counter);

3776 fprintf(file, "Allocation count: %lli\n", allocated);

3777 fprintf(file, "Deallocation count: %lli\n", deallocated);

3778 fprintf(file, "Current allocations: %lli\n", (allocated - deallocated));

3779 fprintf(file, "Master spans: %d\n", atomic_load32(&_master_spans));

3780 fprintf(file, "Dangling master spans: %d\n", atomic_load32(&_unmapped_master_spans));

3781#endif

3782#endif

3783 (void)sizeof(file);

3784}
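// Illustrative usage sketch (editorial): the tables above are only populated
// when the allocator is compiled with ENABLE_STATISTICS=1, and the void*
// argument is expected to be a FILE*.
#if 0
#include <stdio.h>
#include "rpmalloc.h"

int main(void) {
  rpmalloc_initialize();
  void *p = rpmalloc(1024);
  rpfree(p);
  rpmalloc_dump_statistics(stdout); // per-heap tables plus global summary
  rpmalloc_finalize();
  return 0;
}
#endif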

3785

3786#if RPMALLOC_FIRST_CLASS_HEAPS

3787

3788extern inline rpmalloc_heap_t *rpmalloc_heap_acquire(void) {
3789 // Must be a pristine heap from newly mapped memory pages, or else memory
3790 // blocks could already be allocated from the heap which would (wrongly)
3791 // be released when heap is cleared with rpmalloc_heap_free_all(). Also
3792 // heaps guaranteed to be pristine from the dedicated orphan list can be used.
3793 heap_t *heap = _rpmalloc_heap_allocate(1);
3794 rpmalloc_assume(heap != 0);
3795 heap->owner_thread = 0;
3796 _rpmalloc_stat_inc(&_memory_active_heaps);
3797 return heap;
3798}
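// Illustrative lifecycle sketch (editorial): a first-class heap is independent
// of the per-thread heap, so it can be used as an arena and torn down in one
// call. Assumes the library was built with RPMALLOC_FIRST_CLASS_HEAPS=1 and
// that rpmalloc_initialize() has already been called.
#if 0
#include "rpmalloc.h"

static void heap_arena_example(void) {
  rpmalloc_heap_t *heap = rpmalloc_heap_acquire();
  void *a = rpmalloc_heap_alloc(heap, 128);
  void *b = rpmalloc_heap_calloc(heap, 16, 64);
  (void)a;
  (void)b;
  rpmalloc_heap_free_all(heap); // releases every block owned by this heap
  rpmalloc_heap_release(heap);  // returns the heap itself to the allocator
}
#endif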

3799

3800extern inline void rpmalloc_heap_release(rpmalloc_heap_t *heap) {

3801 if (heap)
3802 _rpmalloc_heap_release(heap, 1, 1);
3803}

3804

3805extern inline RPMALLOC_ALLOCATOR void *
3806rpmalloc_heap_alloc(rpmalloc_heap_t *heap, size_t size) {

3807#if ENABLE_VALIDATE_ARGS

3808 if (size >= MAX_ALLOC_SIZE) {

3809 errno = EINVAL;

3810 return 0;

3811 }

3812#endif
3813 return _rpmalloc_allocate(heap, size);
3814}

3815

3816extern inline RPMALLOC_ALLOCATOR void *
3817rpmalloc_heap_aligned_alloc(rpmalloc_heap_t *heap, size_t alignment,

3818 size_t size) {

3819#if ENABLE_VALIDATE_ARGS

3820 if (size >= MAX_ALLOC_SIZE) {

3821 errno = EINVAL;

3822 return 0;

3823 }

3824#endif
3825 return _rpmalloc_aligned_allocate(heap, alignment, size);
3826}

3827

3828extern inline RPMALLOC_ALLOCATOR void *
3829rpmalloc_heap_calloc(rpmalloc_heap_t *heap, size_t num, size_t size) {

3830 return rpmalloc_heap_aligned_calloc(heap, 0, num, size);

3831}

3832

3833extern inline RPMALLOC_ALLOCATOR void *
3834rpmalloc_heap_aligned_calloc(rpmalloc_heap_t *heap, size_t alignment,

3835 size_t num, size_t size) {

3836 size_t total;

3837#if ENABLE_VALIDATE_ARGS

3838#if PLATFORM_WINDOWS

3839 int err = SizeTMult(num, size, &total);

3840 if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {

3841 errno = EINVAL;

3842 return 0;

3843 }

3844#else

3845 int err = __builtin_umull_overflow(num, size, &total);

3846 if (err || (total >= MAX_ALLOC_SIZE)) {

3847 errno = EINVAL;

3848 return 0;

3849 }

3850#endif

3851#else

3852 total = num * size;

3853#endif
3854 void *block = _rpmalloc_aligned_allocate(heap, alignment, total);
3855 if (block)
3856 memset(block, 0, total);
3857 return block;
3858}
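// Editorial sketch of the overflow check performed by the validation path
// above: num * size must be rejected when the product cannot be represented
// in size_t. A portable equivalent of __builtin_umull_overflow / SizeTMult
// could look like this (hypothetical helper, not part of rpmalloc):
#if 0
#include <stddef.h>

static int checked_mul_size(size_t num, size_t size, size_t *total) {
  if (size && (num > ((size_t)-1) / size))
    return 1; // would overflow
  *total = num * size;
  return 0;
}
#endif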

3859

3860extern inline RPMALLOC_ALLOCATOR void *
3861rpmalloc_heap_realloc(rpmalloc_heap_t *heap, void *ptr, size_t size,

3862 unsigned int flags) {

3863#if ENABLE_VALIDATE_ARGS

3864 if (size >= MAX_ALLOC_SIZE) {

3865 errno = EINVAL;

3866 return ptr;

3867 }

3868#endif
3869 return _rpmalloc_reallocate(heap, ptr, size, 0, flags);
3870}

3871

3872extern inline RPMALLOC_ALLOCATOR void *
3873rpmalloc_heap_aligned_realloc(rpmalloc_heap_t *heap, void *ptr,

3874 size_t alignment, size_t size,

3875 unsigned int flags) {

3876#if ENABLE_VALIDATE_ARGS

3877 if ((size + alignment < size) || (alignment > _memory_page_size)) {

3878 errno = EINVAL;

3879 return 0;

3880 }

3881#endif
3882 return _rpmalloc_aligned_reallocate(heap, ptr, alignment, size, 0, flags);
3883}

3884

3885extern inline void rpmalloc_heap_free(rpmalloc_heap_t *heap, void *ptr) {

3886 (void)sizeof(heap);
3887 _rpmalloc_deallocate(ptr);
3888}

3889

3890extern inline void rpmalloc_heap_free_all(rpmalloc_heap_t *heap) {
3891 span_t *span;
3892 span_t *next_span;
3893
3894 _rpmalloc_heap_cache_adopt_deferred(heap, 0);
3895
3896 for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {
3897 span = heap->size_class[iclass].partial_span;
3898 while (span) {
3899 next_span = span->next;
3900 _rpmalloc_heap_cache_insert(heap, span);
3901 span = next_span;

3902 }

3903 heap->size_class[iclass].partial_span = 0;

3904 span = heap->full_span[iclass];

3905 while (span) {

3906 next_span = span->next;
3907 _rpmalloc_heap_cache_insert(heap, span);
3908 span = next_span;

3909 }

3910

3911 span = heap->size_class[iclass].cache;

3912 if (span)
3913 _rpmalloc_heap_cache_insert(heap, span);
3914 heap->size_class[iclass].cache = 0;

3915 }

3916 memset(heap->size_class, 0, sizeof(heap->size_class));

3917 memset(heap->full_span, 0, sizeof(heap->full_span));

3918

3919 span = heap->large_huge_span;

3920 while (span) {

3921 next_span = span->next;
3922 if (UNEXPECTED(span->size_class == SIZE_CLASS_HUGE))
3923 _rpmalloc_deallocate_huge(span);
3924 else
3925 _rpmalloc_heap_cache_insert(heap, span);
3926 span = next_span;

3927 }

3928 heap->large_huge_span = 0;

3929 heap->full_span_count = 0;

3930

3931#if ENABLE_THREAD_CACHE
3932 span_cache_t *span_cache;
3933 for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
3934 if (!iclass)

3935 span_cache = &heap->span_cache;

3936 else

3937 span_cache = (span_cache_t *)(heap->span_large_cache + (iclass - 1));

3938 if (!span_cache->count)

3939 continue;

3940#if ENABLE_GLOBAL_CACHE
3941 _rpmalloc_stat_add64(&heap->thread_to_global,
3942 span_cache->count * (iclass + 1) * _memory_span_size);
3943 _rpmalloc_stat_add(&heap->span_use[iclass].spans_to_global,
3944 span_cache->count);

3945 _rpmalloc_global_cache_insert_spans(span_cache->span, iclass + 1,

3946 span_cache->count);

3947#else

3948 for (size_t ispan = 0; ispan < span_cache->count; ++ispan)
3949 _rpmalloc_span_unmap(span_cache->span[ispan]);
3950#endif

3951 span_cache->count = 0;

3952 }

3953#endif

3954

3955#if ENABLE_STATISTICS

3956 for (size_t iclass = 0; iclass < SIZE_CLASS_COUNT; ++iclass) {

3957 atomic_store32(&heap->size_class_use[iclass].alloc_current, 0);

3958 atomic_store32(&heap->size_class_use[iclass].spans_current, 0);

3959 }
3960 for (size_t iclass = 0; iclass < LARGE_CLASS_COUNT; ++iclass) {
3961 atomic_store32(&heap->span_use[iclass].current, 0);
3962 }

3963#endif

3964}
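// Editorial note: rpmalloc_heap_free_all() recycles every span owned by the
// heap back into the thread/global caches, so all pointers previously
// returned from this heap become invalid, while the heap itself remains
// usable until rpmalloc_heap_release() is called. A minimal sketch:
#if 0
#include "rpmalloc.h"

static void reuse_after_free_all(rpmalloc_heap_t *heap) {
  void *old = rpmalloc_heap_alloc(heap, 256);
  (void)old;
  rpmalloc_heap_free_all(heap);                 // 'old' is now invalid
  void *fresh = rpmalloc_heap_alloc(heap, 256); // heap is still usable
  rpmalloc_heap_free(heap, fresh);
}
#endif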

3965

3966extern inline void rpmalloc_heap_thread_set_current(rpmalloc_heap_t *heap) {
3967 heap_t *prev_heap = get_thread_heap_raw();
3968 if (prev_heap != heap) {
3969 set_thread_heap(heap);
3970 if (prev_heap)

3971 rpmalloc_heap_release(prev_heap);

3972 }

3973}

3974

3975extern inline rpmalloc_heap_t *rpmalloc_get_heap_for_ptr(void *ptr) {
3976 // Grab the span, and then the heap from the span
3977 span_t *span = (span_t *)((uintptr_t)ptr & _memory_span_mask);
3978 if (span) {

3979 return span->heap;

3980 }

3981 return 0;

3982}
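// Editorial sketch: because every block lives inside a span aligned to
// _memory_span_mask, the owning heap can be recovered from nothing but the
// pointer, e.g. to verify ownership before a cross-heap operation:
#if 0
#include "rpmalloc.h"

static int owned_by(rpmalloc_heap_t *heap, void *ptr) {
  return rpmalloc_get_heap_for_ptr(ptr) == heap;
}
#endif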

3983

3984#endif

3985

3986#if ENABLE_PRELOAD || ENABLE_OVERRIDE

3987

3988#include "malloc.c"
3989

3990#endif

3991
