Auto merge of #116370 - nnethercote:more-arena-stuff, r= · rust-lang/rust@39fc71a
```diff
@@ -15,7 +15,6 @@
 #![feature(dropck_eyepatch)]
 #![feature(new_uninit)]
 #![feature(maybe_uninit_slice)]
-#![feature(min_specialization)]
 #![feature(decl_macro)]
 #![feature(pointer_byte_offsets)]
 #![feature(rustc_attrs)]
```
```diff
@@ -44,23 +43,6 @@ fn outline<F: FnOnce() -> R, R>(f: F) -> R {
     f()
 }
 
-/// An arena that can hold objects of only one type.
-pub struct TypedArena<T> {
-    /// A pointer to the next object to be allocated.
-    ptr: Cell<*mut T>,
-
-    /// A pointer to the end of the allocated area. When this pointer is
-    /// reached, a new chunk is allocated.
-    end: Cell<*mut T>,
-
-    /// A vector of arena chunks.
-    chunks: RefCell<Vec<ArenaChunk<T>>>,
-
-    /// Marker indicating that dropping the arena causes its owned
-    /// instances of `T` to be dropped.
-    _own: PhantomData<T>,
-}
-
 struct ArenaChunk<T = u8> {
     /// The raw storage for the arena chunk.
     storage: NonNull<[MaybeUninit<T>]>,
```
```diff
@@ -130,6 +112,23 @@ impl<T> ArenaChunk<T> {
 const PAGE: usize = 4096;
 const HUGE_PAGE: usize = 2 * 1024 * 1024;
 
+/// An arena that can hold objects of only one type.
+pub struct TypedArena<T> {
+    /// A pointer to the next object to be allocated.
+    ptr: Cell<*mut T>,
+
+    /// A pointer to the end of the allocated area. When this pointer is
+    /// reached, a new chunk is allocated.
+    end: Cell<*mut T>,
+
+    /// A vector of arena chunks.
+    chunks: RefCell<Vec<ArenaChunk<T>>>,
+
+    /// Marker indicating that dropping the arena causes its owned
+    /// instances of `T` to be dropped.
+    _own: PhantomData<T>,
+}
+
 impl<T> Default for TypedArena<T> {
     /// Creates a new `TypedArena`.
     fn default() -> TypedArena<T> {
```
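The doc comments on the relocated struct describe a classic bump allocator: `ptr` marks the next free slot, `end` marks the end of the current chunk, and hitting `end` triggers allocation of a new chunk. Below is a minimal sketch of that bump check; the names `BumpRegion` and `try_bump` are illustrative and not part of `rustc_arena`.

```rust
use std::cell::Cell;

/// Illustrative sketch only: a simplified bump check in the spirit of the
/// `ptr`/`end` fields documented above. The real `TypedArena` allocates a new
/// chunk when `ptr` reaches `end`; here we just report whether a bump fits.
struct BumpRegion {
    ptr: Cell<*mut u8>,
    end: Cell<*mut u8>,
}

impl BumpRegion {
    /// Try to reserve `size` bytes by bumping `ptr` forward; returns the old
    /// `ptr` on success, or `None` when the region is exhausted (the point at
    /// which the real arena would grow by allocating a fresh chunk).
    fn try_bump(&self, size: usize) -> Option<*mut u8> {
        let start = self.ptr.get();
        let remaining = self.end.get() as usize - start as usize;
        if size <= remaining {
            // SAFETY: `start + size` stays within the backing allocation.
            self.ptr.set(unsafe { start.add(size) });
            Some(start)
        } else {
            None
        }
    }
}

fn main() {
    let mut backing = [0u8; 64];
    let start = backing.as_mut_ptr();
    let region = BumpRegion {
        ptr: Cell::new(start),
        // SAFETY: forming a one-past-the-end pointer is allowed.
        end: Cell::new(unsafe { start.add(backing.len()) }),
    };
    assert!(region.try_bump(48).is_some());
    assert!(region.try_bump(32).is_none()); // would trigger a new chunk
}
```

The real arena grows by pushing a fresh `ArenaChunk` instead of returning `None`, but the `ptr`/`end` comparison is the same idea.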
```diff
@@ -144,77 +143,6 @@ impl<T> Default for TypedArena<T> {
     }
 }
 
-trait IterExt<T> {
-    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T];
-}
-
-impl<I, T> IterExt<T> for I
-where
-    I: IntoIterator<Item = T>,
-{
-    // This default collects into a `SmallVec` and then allocates by copying
-    // from it. The specializations below for types like `Vec` are more
-    // efficient, copying directly without the intermediate collecting step.
-    // This default could be made more efficient, like
-    // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
-    #[inline]
-    default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
-        let vec: SmallVec<[_; 8]> = self.into_iter().collect();
-        vec.alloc_from_iter(arena)
-    }
-}
-
-impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
-    #[inline]
-    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            mem::forget(self);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
-impl<T> IterExt<T> for Vec<T> {
-    #[inline]
-    fn alloc_from_iter(mut self, arena: &TypedArena<T>) -> &mut [T] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            self.set_len(0);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
-impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
-    #[inline]
-    fn alloc_from_iter(mut self, arena: &TypedArena<A::Item>) -> &mut [A::Item] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            self.set_len(0);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
 impl<T> TypedArena<T> {
     /// Allocates an object in the `TypedArena`, returning a reference to it.
     #[inline]
```
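With the `IterExt` trait and its specialized impls removed (which is what lets the first hunk drop `min_specialization`), every iterator shape funnels through the single generic `alloc_from_iter` rewritten in the next hunk. A hypothetical set of call sites follows, assuming `rustc_arena` is importable (e.g. in a `rustc_private` consumer); these examples are illustrative and not taken from the PR.

```rust
// Hypothetical usage sketch, not part of the PR.
use rustc_arena::TypedArena;

fn main() {
    let arena: TypedArena<String> = TypedArena::default();

    // Each of these iterator shapes previously matched a dedicated `IterExt`
    // impl (`std::array::IntoIter`, `Vec`, the generic default); they now all
    // take the same SmallVec-based path inside `alloc_from_iter`.
    let from_array: &mut [String] = arena.alloc_from_iter(["a".to_owned(), "b".to_owned()]);
    let from_vec: &mut [String] = arena.alloc_from_iter(vec!["c".to_owned()]);
    let from_iter: &mut [String] = arena.alloc_from_iter((0..3).map(|i| i.to_string()));

    assert_eq!(from_array.len() + from_vec.len() + from_iter.len(), 6);
}
```

The public API is unchanged; only the internal dispatch machinery goes away.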
```diff
@@ -270,8 +198,35 @@ impl<T> TypedArena<T> {
 
     #[inline]
     pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
+        // This implementation is entirely separate to
+        // `DroplessIterator::alloc_from_iter`, even though conceptually they
+        // are the same.
+        //
+        // `DroplessIterator` (in the fast case) writes elements from the
+        // iterator one at a time into the allocated memory. That's easy
+        // because the elements don't implement `Drop`. But for `TypedArena`
+        // they do implement `Drop`, which means that if the iterator panics we
+        // could end up with some allocated-but-uninitialized elements, which
+        // will then cause UB in `TypedArena::drop`.
+        //
+        // Instead we use an approach where any iterator panic will occur
+        // before the memory is allocated. This function is much less hot than
+        // `DroplessArena::alloc_from_iter`, so it doesn't need to be
+        // hyper-optimized.
         assert!(mem::size_of::<T>() != 0);
-        iter.alloc_from_iter(self)
+
+        let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
+        if vec.is_empty() {
+            return &mut [];
+        }
+        // Move the content to the arena by copying and then forgetting it.
+        let len = vec.len();
+        let start_ptr = self.alloc_raw_slice(len);
+        unsafe {
+            vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+            vec.set_len(0);
+            slice::from_raw_parts_mut(start_ptr, len)
+        }
     }
 
     /// Grows the arena.
```
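The comment block in the hunk above is the heart of the change: the iterator must be exhausted before any arena memory is reserved, so a panicking iterator can never leave reserved-but-uninitialized `T` slots for `TypedArena::drop` to walk over. Below is a standalone sketch of that ordering, using a plain `Vec<T>` as a stand-in for the arena's raw slice; the name `collect_then_copy` is illustrative and not part of `rustc_arena`.

```rust
use std::ptr;

/// Illustrative only: exhaust the iterator *first*, so a panicking iterator
/// never leaves reserved-but-uninitialized `T` slots behind for a destructor
/// to walk over. This mirrors the ordering in the new `alloc_from_iter`.
fn collect_then_copy<T, I: IntoIterator<Item = T>>(iter: I, storage: &mut Vec<T>) -> usize {
    // Step 1: run the iterator to completion. A panic here happens before any
    // destination memory has been reserved.
    let mut buf: Vec<T> = iter.into_iter().collect();
    let len = buf.len();

    // Step 2: only now reserve space and move the elements across in bulk.
    storage.reserve(len);
    unsafe {
        let dst = storage.as_mut_ptr().add(storage.len());
        ptr::copy_nonoverlapping(buf.as_ptr(), dst, len);
        storage.set_len(storage.len() + len);
        // The source buffer forgets its elements so they are not dropped twice.
        buf.set_len(0);
    }
    len
}

fn main() {
    let mut storage: Vec<String> = Vec::new();
    let n = collect_then_copy((0..3).map(|i| format!("item {i}")), &mut storage);
    assert_eq!(n, 3);
    assert_eq!(storage[2], "item 2");
}
```

The `set_len(0)` on the source buffer plays the same role as `vec.set_len(0)` in the new `alloc_from_iter`: ownership of the elements has moved to the destination, so only their old shells must be forgotten.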
`