LLVM: include/llvm/Support/Allocator.h Source File
//===- Allocator.h - Simple memory allocation abstraction ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM Exceptions
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file defines the BumpPtrAllocator interface.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

namespace llvm {

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
LLVM_ABI void printBumpPtrAllocatorStats(unsigned NumSlabs,
                                         size_t BytesAllocated,
                                         size_t TotalMemory);

} // end namespace detail

/// Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
///
/// The GrowthDelay specifies after how many allocated slabs the allocator
/// increases the size of the slabs.
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private detail::AllocatorHolder<AllocatorT> {
  using AllocTy = detail::AllocatorHolder<AllocatorT>;
public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
                "GrowthDelay must be at least 1, which already increases the "
                "slab size after each allocated slab.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocTy(std::forward<T &&>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocTy(std::move(Old.getAllocator())), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocTy::operator=(std::move(RHS.getAllocator()));

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }
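
  // Illustrative (not in the original header): Reset() keeps the first slab,
  // so pointers obtained before the call dangle but the memory is reused.
  // `Pool` is a hypothetical instance:
  //
  //   void *A = Pool.Allocate(16, llvm::Align(8));
  //   Pool.Reset();                                // A now dangles
  //   void *B = Pool.Allocate(16, llvm::Align(8)); // may return A's address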

  /// Allocate space at the specified alignment.
  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    uintptr_t AlignedPtr = alignAddr(CurPtr, Alignment);

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    uintptr_t AllocEndPtr = AlignedPtr + SizeToAllocate;
    assert(AllocEndPtr >= uintptr_t(CurPtr) &&
           "Alignment + Size must not overflow");

    // Check if we have enough space.
    if (LLVM_LIKELY(AllocEndPtr <= uintptr_t(End)
                    // We can't return nullptr even for a zero-sized allocation!
                    && CurPtr != nullptr)) {
      CurPtr = reinterpret_cast<char *>(AllocEndPtr);
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(reinterpret_cast<char *>(AlignedPtr), Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(reinterpret_cast<char *>(AlignedPtr), Size);
      return reinterpret_cast<char *>(AlignedPtr);
    }

    return AllocateSlow(Size, SizeToAllocate, Alignment);
  }

  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_NOINLINE void *
  AllocateSlow(size_t Size, size_t SizeToAllocate, Align Alignment) {
    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          this->getAllocator().Allocate(PaddedSize, alignof(std::max_align_t));
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method. So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char *)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char *)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  inline LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
                                                       size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
    return Allocate(Size, Align(Alignment));
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage; and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// The returned value is negative iff the object is inside a custom-size
  /// slab.
  /// Returns an empty optional if the pointer is not found in the allocator.
  std::optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative index to denote custom sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (const auto &Slab : CustomSizedSlabs) {
      const char *S = static_cast<const char *>(Slab.first);
      size_t Size = Slab.second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return std::nullopt;
  }
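
  // Illustrative (not in the original header): indices from regular slabs are
  // the non-negative running byte offsets (slab 0 first, then slab 1, ...);
  // custom-sized slabs yield negative indices starting at -1, so the sign of
  // the result alone tells which kind of slab contains the object.
  //
  //   std::optional<int64_t> Idx = Pool.identifyObject(Ptr); // Pool, Ptr
  //                                                          // hypothetical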

  /// A wrapper around identifyObject that additionally asserts that
  /// the object is indeed within the allocator.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    std::optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }

  /// A wrapper around identifyKnownObject. Accepts type information
  /// instead of a raw alignment and returns the offset expressed in
  /// multiples of alignof(T), which makes the result usable as an index
  /// into an array of T.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (const auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }
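
  // Illustrative (not in the original header): the difference between the two
  // counters approximates the space lost to alignment padding and slab tails:
  //
  //   size_t Wasted = Pool.getTotalMemory() - Pool.getBytesAllocated();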

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double
    // the allocated size to reduce allocation frequency, but saturate at
    // multiplying the slab size by 2^30.
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }
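
  // Illustrative (not in the original header): with the defaults
  // (SlabSize = 4096, GrowthDelay = 128), slabs 0..127 are 4096 bytes,
  // slabs 128..255 are 8192 bytes, slabs 256..383 are 16384 bytes, and so
  // on; the shift saturates at 30, capping a slab at SlabSize * 2^30 bytes.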

  /// Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = this->getAllocator().Allocate(AllocatedSlabSize,
                                                  alignof(std::max_align_t));
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method. So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      this->getAllocator().Deallocate(*I, AllocatedSlabSize,
                                      alignof(std::max_align_t));
    }
  }

  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      this->getAllocator().Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
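
// Illustrative usage sketch (not part of the original header); `MyNode` is a
// hypothetical type:
//
//   llvm::BumpPtrAllocator Alloc;
//   int *Ints = Alloc.Allocate<int>(16); // uninitialized array of 16 ints
//   MyNode *N = new (Alloc) MyNode();    // via the operator new defined below
//   Alloc.Reset();                       // no per-object free; release all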

/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator
/// is destroyed.
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object and deallocate all but the
  /// current slab and reset the current pointer to the beginning of it,
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// Returns an empty optional if the pointer is not found in the allocator.
  std::optional<int64_t> identifyObject(const void *Ptr) {
    return Allocator.identifyObject(Ptr);
  }
};
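
// Illustrative usage sketch (not part of the original header); `MyNode` is a
// hypothetical type with a non-trivial destructor:
//
//   llvm::SpecificBumpPtrAllocator<MyNode> NodeAlloc;
//   MyNode *N = new (NodeAlloc.Allocate()) MyNode();
//   NodeAlloc.DestroyAll(); // ~MyNode() runs for every allocated object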

} // end namespace llvm

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void *
operator new(size_t Size,
             llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
                                        GrowthDelay> &Allocator) {
  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
                                           alignof(std::max_align_t)));
}
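
// Illustrative (not part of the original header): this placement form lets
// clients write `new (Alloc) T(...)`. The alignment passed to Allocate is
// NextPowerOf2(Size) capped at alignof(std::max_align_t), a conservative
// guess since this operator only sees the size, not the type.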

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H