Rollup merge of #129674 - matthewpipie:rc-arc-new-cyclic-in, r=dtolnay · qinheping/verify-rust-std@6893990
```diff
@@ -450,54 +450,7 @@ impl<T> Arc<T> {
     where
         F: FnOnce(&Weak<T>) -> T,
     {
-        // Construct the inner in the "uninitialized" state with a single
-        // weak reference.
-        let uninit_ptr: NonNull<_> = Box::leak(Box::new(ArcInner {
-            strong: atomic::AtomicUsize::new(0),
-            weak: atomic::AtomicUsize::new(1),
-            data: mem::MaybeUninit::<T>::uninit(),
-        }))
-        .into();
-        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
-
-        let weak = Weak { ptr: init_ptr, alloc: Global };
-
-        // It's important we don't give up ownership of the weak pointer, or
-        // else the memory might be freed by the time `data_fn` returns. If
-        // we really wanted to pass ownership, we could create an additional
-        // weak pointer for ourselves, but this would result in additional
-        // updates to the weak reference count which might not be necessary
-        // otherwise.
-        let data = data_fn(&weak);
-
-        // Now we can properly initialize the inner value and turn our weak
-        // reference into a strong reference.
-        let strong = unsafe {
-            let inner = init_ptr.as_ptr();
-            ptr::write(ptr::addr_of_mut!((*inner).data), data);
-
-            // The above write to the data field must be visible to any threads which
-            // observe a non-zero strong count. Therefore we need at least "Release" ordering
-            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
-            //
-            // "Acquire" ordering is not required. When considering the possible behaviours
-            // of `data_fn` we only need to look at what it could do with a reference to a
-            // non-upgradeable `Weak`:
-            // - It can clone the `Weak`, increasing the weak reference count.
-            // - It can drop those clones, decreasing the weak reference count (but never to zero).
-            //
-            // These side effects do not impact us in any way, and no other side effects are
-            // possible with safe code alone.
-            let prev_value = (*inner).strong.fetch_add(1, Release);
-            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
-
-            Arc::from_inner(init_ptr)
-        };
-
-        // Strong references should collectively own a shared weak reference,
-        // so don't run the destructor for our old weak reference.
-        mem::forget(weak);
-        strong
+        Self::new_cyclic_in(data_fn, Global)
     }
 
     /// Constructs a new `Arc` with uninitialized contents.
```
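This refactor does not change `new_cyclic`'s observable contract: the closure still receives a `Weak` that cannot be upgraded until construction finishes, exactly as the deleted inline implementation guaranteed. A minimal sketch of that contract on stable Rust (the `Node` type is illustrative and not part of this change):

```rust
use std::sync::{Arc, Weak};

struct Node {
    // Weak self-reference; a strong one would form a leak-inducing cycle.
    me: Weak<Node>,
}

fn main() {
    let node = Arc::new_cyclic(|me| {
        // The value is not constructed yet, so the weak pointer cannot
        // be upgraded from inside the closure.
        assert!(me.upgrade().is_none());
        Node { me: me.clone() }
    });
    // Once construction completes, upgrading succeeds.
    assert!(node.me.upgrade().is_some());
}
```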
```diff
@@ -781,6 +734,98 @@ impl<T, A: Allocator> Arc<T, A> {
         }
     }
 
+    /// Constructs a new `Arc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
+    /// to allow you to construct a `T` which holds a weak pointer to itself.
+    ///
+    /// Generally, a structure circularly referencing itself, either directly or
+    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
+    /// Using this function, you get access to the weak pointer during the
+    /// initialization of `T`, before the `Arc<T, A>` is created, such that you can
+    /// clone and store it inside the `T`.
+    ///
+    /// `new_cyclic_in` first allocates the managed allocation for the `Arc<T, A>`,
+    /// then calls your closure, giving it a `Weak<T, A>` to this allocation,
+    /// and only afterwards completes the construction of the `Arc<T, A>` by placing
+    /// the `T` returned from your closure into the allocation.
+    ///
+    /// Since the new `Arc<T, A>` is not fully-constructed until `Arc<T, A>::new_cyclic_in`
+    /// returns, calling [`upgrade`] on the weak reference inside your closure will
+    /// fail and result in a `None` value.
+    ///
+    /// # Panics
+    ///
+    /// If `data_fn` panics, the panic is propagated to the caller, and the
+    /// temporary [`Weak<T>`] is dropped normally.
+    ///
+    /// # Example
+    ///
+    /// See [`new_cyclic`]
+    ///
+    /// [`new_cyclic`]: Arc::new_cyclic
+    /// [`upgrade`]: Weak::upgrade
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    #[unstable(feature = "allocator_api", issue = "32838")]
+    pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Arc<T, A>
+    where
+        F: FnOnce(&Weak<T, A>) -> T,
+    {
+        // Construct the inner in the "uninitialized" state with a single
+        // weak reference.
+        let (uninit_raw_ptr, alloc) = Box::into_raw_with_allocator(Box::new_in(
+            ArcInner {
+                strong: atomic::AtomicUsize::new(0),
+                weak: atomic::AtomicUsize::new(1),
+                data: mem::MaybeUninit::<T>::uninit(),
+            },
+            alloc,
+        ));
+        let uninit_ptr: NonNull<_> = (unsafe { &mut *uninit_raw_ptr }).into();
+        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
+
+        let weak = Weak { ptr: init_ptr, alloc: alloc };
+
+        // It's important we don't give up ownership of the weak pointer, or
+        // else the memory might be freed by the time `data_fn` returns. If
+        // we really wanted to pass ownership, we could create an additional
+        // weak pointer for ourselves, but this would result in additional
+        // updates to the weak reference count which might not be necessary
+        // otherwise.
+        let data = data_fn(&weak);
+
+        // Now we can properly initialize the inner value and turn our weak
+        // reference into a strong reference.
+        let strong = unsafe {
+            let inner = init_ptr.as_ptr();
+            ptr::write(ptr::addr_of_mut!((*inner).data), data);
+
+            // The above write to the data field must be visible to any threads which
+            // observe a non-zero strong count. Therefore we need at least "Release" ordering
+            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
+            //
+            // "Acquire" ordering is not required. When considering the possible behaviours
+            // of `data_fn` we only need to look at what it could do with a reference to a
+            // non-upgradeable `Weak`:
+            // - It can clone the `Weak`, increasing the weak reference count.
+            // - It can drop those clones, decreasing the weak reference count (but never to zero).
+            //
+            // These side effects do not impact us in any way, and no other side effects are
+            // possible with safe code alone.
+            let prev_value = (*inner).strong.fetch_add(1, Release);
+            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
+
+            // Strong references should collectively own a shared weak reference,
+            // so don't run the destructor for our old weak reference.
+            // Calling into_raw_with_allocator has the double effect of giving us back the allocator,
+            // and forgetting the weak reference.
+            let alloc = weak.into_raw_with_allocator().1;
+
+            Arc::from_inner_in(init_ptr, alloc)
+        };
+
+        strong
+    }
+
     /// Constructs a new `Pin<Arc<T, A>>` in the provided allocator. If `T` does not implement `Unpin`,
     /// then `data` will be pinned in memory and unable to be moved.
     #[cfg(not(no_global_oom_handling))]
```
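The new method's `# Example` section defers to [`new_cyclic`]; for completeness, here is a hypothetical usage sketch of `new_cyclic_in` itself, assuming a nightly toolchain with the unstable `allocator_api` feature and passing the standard `Global` allocator explicitly (the `Gadget` type is made up for illustration):

```rust
#![feature(allocator_api)] // `new_cyclic_in` is unstable (tracking issue #32838)

use std::alloc::Global;
use std::sync::{Arc, Weak};

struct Gadget {
    // The weak back-reference is generic over the allocator as well.
    me: Weak<Gadget, Global>,
}

fn main() {
    let gadget: Arc<Gadget, Global> = Arc::new_cyclic_in(
        |me| {
            // The allocation is not fully constructed yet, so `upgrade`
            // returns `None` inside the closure, as documented.
            assert!(me.upgrade().is_none());
            Gadget { me: me.clone() }
        },
        Global,
    );
    // After `new_cyclic_in` returns, the stored weak pointer upgrades.
    assert!(gadget.me.upgrade().is_some());
}
```

Passing `Global` here reproduces what the refactored `new_cyclic` now does internally via `Self::new_cyclic_in(data_fn, Global)`.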