libstdc++: shared_ptr_atomic.h Source File
// shared_ptr atomic access -*- C++ -*-

/** @file bits/shared_ptr_atomic.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{memory}
 */
#ifndef _SHARED_PTR_ATOMIC_H
#define _SHARED_PTR_ATOMIC_H 1

#include <bits/shared_ptr.h>

// Annotations for the custom locking in _Sp_atomic.
#if defined _GLIBCXX_TSAN && __has_include(<sanitizer/tsan_interface.h>)
#include <sanitizer/tsan_interface.h>
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X) \
  __tsan_mutex_destroy(X, __tsan_mutex_not_static)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X) \
  __tsan_mutex_pre_lock(X, __tsan_mutex_not_static|__tsan_mutex_try_lock)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X) __tsan_mutex_post_lock(X, \
    __tsan_mutex_not_static|__tsan_mutex_try_lock_failed, 0)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X) \
  __tsan_mutex_post_lock(X, __tsan_mutex_not_static, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X) __tsan_mutex_pre_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X) __tsan_mutex_post_unlock(X, 0)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X) __tsan_mutex_pre_signal(X, 0)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X) __tsan_mutex_post_signal(X, 0)
#else
#define _GLIBCXX_TSAN_MUTEX_DESTROY(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK(X)
#define _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(X)
#define _GLIBCXX_TSAN_MUTEX_LOCKED(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(X)
#define _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(X)
#define _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(X)
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /// @cond undocumented

  // Locks one or two mutexes chosen from a small fixed pool by hashing
  // the address(es) passed to the constructor. Used to serialize access
  // to a shared_ptr (or a pair of them) in the functions below when
  // lock-free access is not possible.
  struct _Sp_locker
  {
    _Sp_locker(const _Sp_locker&) = delete;
    _Sp_locker& operator=(const _Sp_locker&) = delete;

#ifdef __GTHREADS
    explicit
    _Sp_locker(const void*) noexcept;
    _Sp_locker(const void*, const void*) noexcept;
    ~_Sp_locker();

  private:
    unsigned char _M_key1;
    unsigned char _M_key2;
#else
    explicit _Sp_locker(const void*, const void* = nullptr) { }
#endif
  };

  /// @endcond

  /**
   *  @brief  Report whether shared_ptr atomic operations are lock-free.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return True if atomic access to @c *__p is lock-free, false otherwise.
   */
  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline bool
    atomic_is_lock_free(const __shared_ptr<_Tp, _Lp>*)
    {
#ifdef __GTHREADS
      return __gthread_active_p() == 0;
#else
      return true;
#endif
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline bool
    atomic_is_lock_free(const shared_ptr<_Tp>* __p)
    { return std::atomic_is_lock_free<_Tp, __default_lock_policy>(__p); }

  /**
   *  @brief  Atomic load for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @return @c *__p
   *
   *  The memory order shall not be @c memory_order_release or
   *  @c memory_order_acq_rel.
   */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline shared_ptr<_Tp>
    atomic_load_explicit(const shared_ptr<_Tp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline shared_ptr<_Tp>
    atomic_load(const shared_ptr<_Tp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_load_explicit(const __shared_ptr<_Tp, _Lp>* __p, memory_order)
    {
      _Sp_locker __lock{__p};
      return *__p;
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_load(const __shared_ptr<_Tp, _Lp>* __p)
    { return std::atomic_load_explicit(__p, memory_order_seq_cst); }

  /**
   *  @brief  Atomic store for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r The value to store.
   *
   *  The memory order shall not be @c memory_order_acquire or
   *  @c memory_order_acq_rel.
   */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline void
    atomic_store_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // XXX pass memory order to swap?
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline void
    atomic_store(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline void
    atomic_store_explicit(__shared_ptr<_Tp, _Lp>* __p,
                          __shared_ptr<_Tp, _Lp> __r,
                          memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r); // XXX pass memory order to swap?
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline void
    atomic_store(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    { std::atomic_store_explicit(__p, std::move(__r), memory_order_seq_cst); }

  /**
   *  @brief  Atomic exchange for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __r New value to store in @c *__p.
   *  @return The original value of @c *__p
   */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline shared_ptr<_Tp>
    atomic_exchange_explicit(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline shared_ptr<_Tp>
    atomic_exchange(shared_ptr<_Tp>* __p, shared_ptr<_Tp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange_explicit(__shared_ptr<_Tp, _Lp>* __p,
                             __shared_ptr<_Tp, _Lp> __r,
                             memory_order)
    {
      _Sp_locker __lock{__p};
      __p->swap(__r);
      return __r;
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline __shared_ptr<_Tp, _Lp>
    atomic_exchange(__shared_ptr<_Tp, _Lp>* __p, __shared_ptr<_Tp, _Lp> __r)
    {
      return std::atomic_exchange_explicit(__p, std::move(__r),
                                           memory_order_seq_cst);
    }

  /**
   *  @brief  Atomic compare-and-swap for shared_ptr objects.
   *  @param  __p A non-null pointer to a shared_ptr object.
   *  @param  __v A non-null pointer to a shared_ptr object.
   *  @param  __w A value to store in @c *__p on success.
   *  @return True if @c *__p was equivalent to @c *__v, false otherwise.
   *
   *  The memory order for failure shall not be @c memory_order_release or
   *  @c memory_order_acq_rel.
   */
  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    bool
    atomic_compare_exchange_strong_explicit(shared_ptr<_Tp>* __p,
                                            shared_ptr<_Tp>* __v,
                                            shared_ptr<_Tp> __w,
                                            memory_order,
                                            memory_order)
    {
      shared_ptr<_Tp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<shared_ptr<_Tp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline bool
    atomic_compare_exchange_strong(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                   shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline bool
    atomic_compare_exchange_weak_explicit(shared_ptr<_Tp>* __p,
                                          shared_ptr<_Tp>* __v,
                                          shared_ptr<_Tp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline bool
    atomic_compare_exchange_weak(shared_ptr<_Tp>* __p, shared_ptr<_Tp>* __v,
                                 shared_ptr<_Tp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    bool
    atomic_compare_exchange_strong_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                            __shared_ptr<_Tp, _Lp>* __v,
                                            __shared_ptr<_Tp, _Lp> __w,
                                            memory_order,
                                            memory_order)
    {
      __shared_ptr<_Tp, _Lp> __x; // goes out of scope after __lock
      _Sp_locker __lock{__p, __v};
      owner_less<__shared_ptr<_Tp, _Lp>> __less;
      if (*__p == *__v && !__less(*__p, *__v) && !__less(*__v, *__p))
        {
          __x = std::move(*__p);
          *__p = std::move(__w);
          return true;
        }
      __x = std::move(*__v);
      *__v = *__p;
      return false;
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline bool
    atomic_compare_exchange_strong(__shared_ptr<_Tp, _Lp>* __p,
                                   __shared_ptr<_Tp, _Lp>* __v,
                                   __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline bool
    atomic_compare_exchange_weak_explicit(__shared_ptr<_Tp, _Lp>* __p,
                                          __shared_ptr<_Tp, _Lp>* __v,
                                          __shared_ptr<_Tp, _Lp> __w,
                                          memory_order __success,
                                          memory_order __failure)
    {
      return std::atomic_compare_exchange_strong_explicit(__p, __v,
          std::move(__w), __success, __failure);
    }

  template<typename _Tp, _Lock_policy _Lp>
    _GLIBCXX20_DEPRECATED_SUGGEST("std::atomic<std::shared_ptr>")
    inline bool
    atomic_compare_exchange_weak(__shared_ptr<_Tp, _Lp>* __p,
                                 __shared_ptr<_Tp, _Lp>* __v,
                                 __shared_ptr<_Tp, _Lp> __w)
    {
      return std::atomic_compare_exchange_weak_explicit(__p, __v,
          std::move(__w), memory_order_seq_cst, memory_order_seq_cst);
    }

#ifdef __glibcxx_atomic_shared_ptr
  template<typename _Tp>
    struct atomic;

  // Common implementation of atomic<shared_ptr<_Tp>> and
  // atomic<weak_ptr<_Tp>>. The owned pointer and the reference-count
  // pointer are updated together under a spin lock stored in the low
  // bit of the reference-count pointer, so no separate mutex is needed.

  template<typename _Tp>
    class _Sp_atomic
    {
      using value_type = _Tp;

      friend struct atomic<_Tp>;

      // An atomic version of __shared_count<> and __weak_count<>.
      // Stores a _Sp_counted_base<>* but uses the LSB as a lock.
      struct _Atomic_count
      {
        // Either __shared_count<_S_atomic> or __weak_count<_S_atomic>.
        using __count_type = decltype(_Tp::_M_refcount);
        using uintptr_t = __UINTPTR_TYPE__;

        // _Sp_counted_base<_S_atomic>*
        using pointer = decltype(__count_type::_M_pi);

        // Ensure we can use the LSB as the lock bit.
        static_assert(alignof(remove_pointer_t<pointer>) > 1);

        constexpr _Atomic_count() noexcept = default;

        explicit
        _Atomic_count(__count_type&& __c) noexcept
        : _M_val(reinterpret_cast<uintptr_t>(__c._M_pi))
        {
          __c._M_pi = nullptr;
        }

        ~_Atomic_count()
        {
          auto __val = _M_val.load(memory_order_relaxed);
          _GLIBCXX_TSAN_MUTEX_DESTROY(&_M_val);
          __glibcxx_assert(!(__val & _S_lock_bit));
          if (auto __pi = reinterpret_cast<pointer>(__val))
            {
              if constexpr (__is_shared_ptr<_Tp>)
                __pi->_M_release();
              else
                __pi->_M_weak_release();
            }
        }

        _Atomic_count(const _Atomic_count&) = delete;
        _Atomic_count& operator=(const _Atomic_count&) = delete;

        // Precondition: caller does not hold the lock!
        // Returns the raw pointer value without the lock bit set.
        pointer
        lock(memory_order __o) const noexcept
        {
          // To acquire the lock we flip the LSB from 0 to 1.

          auto __current = _M_val.load(memory_order_relaxed);
          while (__current & _S_lock_bit)
            {
#if __glibcxx_atomic_wait
              __detail::__thread_relax();
#endif
              __current = _M_val.load(memory_order_relaxed);
            }

          _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);

          while (!_M_val.compare_exchange_strong(__current,
                                                 __current | _S_lock_bit,
                                                 __o,
                                                 memory_order_relaxed))
            {
              _GLIBCXX_TSAN_MUTEX_TRY_LOCK_FAILED(&_M_val);
#if __glibcxx_atomic_wait
              __detail::__thread_relax();
#endif
              __current = __current & ~_S_lock_bit;
              _GLIBCXX_TSAN_MUTEX_TRY_LOCK(&_M_val);
            }
          _GLIBCXX_TSAN_MUTEX_LOCKED(&_M_val);
          return reinterpret_cast<pointer>(__current);
        }

        // Precondition: caller holds lock!
        void
        unlock(memory_order __o) const noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
          _M_val.fetch_sub(1, __o);
          _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
        }

        // Swaps the values of *this and __c, and unlocks *this.
        // Precondition: caller holds lock!
        void
        _M_swap_unlock(__count_type& __c, memory_order __o) noexcept
        {
          if (__o != memory_order_seq_cst)
            __o = memory_order_release;
          auto __x = reinterpret_cast<uintptr_t>(__c._M_pi);
          _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
          __x = _M_val.exchange(__x, __o);
          _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
          __c._M_pi = reinterpret_cast<pointer>(__x & ~_S_lock_bit);
        }

#if __glibcxx_atomic_wait
        // Precondition: caller holds lock!
        void
        _M_wait_unlock(memory_order __o) const noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_UNLOCK(&_M_val);
          auto __v = _M_val.fetch_sub(1, memory_order_relaxed);
          _GLIBCXX_TSAN_MUTEX_POST_UNLOCK(&_M_val);
          _M_val.wait(__v & ~_S_lock_bit, __o);
        }

        void
        notify_one() noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
          _M_val.notify_one();
          _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
        }

        void
        notify_all() noexcept
        {
          _GLIBCXX_TSAN_MUTEX_PRE_SIGNAL(&_M_val);
          _M_val.notify_all();
          _GLIBCXX_TSAN_MUTEX_POST_SIGNAL(&_M_val);
        }
#endif

      private:
        mutable __atomic_base<uintptr_t> _M_val{0};
        static constexpr uintptr_t _S_lock_bit{1};
      };

      typename _Tp::element_type* _M_ptr = nullptr;
      _Atomic_count _M_refcount;

      static typename _Atomic_count::pointer
      _S_add_ref(typename _Atomic_count::pointer __p)
      {
        if (__p)
          {
            if constexpr (__is_shared_ptr<_Tp>)
              __p->_M_add_ref_copy();
            else
              __p->_M_weak_add_ref();
          }
        return __p;
      }

      constexpr _Sp_atomic() noexcept = default;

      explicit
      _Sp_atomic(value_type __r) noexcept
      : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
      { }

      ~_Sp_atomic() = default;

      _Sp_atomic(const _Sp_atomic&) = delete;
      void operator=(const _Sp_atomic&) = delete;

      value_type
      load(memory_order __o) const noexcept
      {
        __glibcxx_assert(__o != memory_order_release
                         && __o != memory_order_acq_rel);
        // Ensure that the correct value of _M_ptr is visible after locking,
        // by upgrading relaxed or consume to acquire.
        if (__o != memory_order_seq_cst)
          __o = memory_order_acquire;

        value_type __ret;
        auto __pi = _M_refcount.lock(__o);
        __ret._M_ptr = _M_ptr;
        __ret._M_refcount._M_pi = _S_add_ref(__pi);
        _M_refcount.unlock(memory_order_relaxed);
        return __ret;
      }

      void
      swap(value_type& __r, memory_order __o) noexcept
      {
        _M_refcount.lock(memory_order_acquire);
        std::swap(_M_ptr, __r._M_ptr);
        _M_refcount._M_swap_unlock(__r._M_refcount, __o);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        bool __result = true;
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __expected._M_ptr
            && __pi == __expected._M_refcount._M_pi)
          {
            _M_ptr = __desired._M_ptr;
            _M_refcount._M_swap_unlock(__desired._M_refcount, __o);
          }
        else
          {
            _Tp __sink = std::move(__expected);
            __expected._M_ptr = _M_ptr;
            __expected._M_refcount._M_pi = _S_add_ref(__pi);
            _M_refcount.unlock(__o2);
            __result = false;
          }
        return __result;
      }

#if __glibcxx_atomic_wait
      void
      wait(value_type __old, memory_order __o) const noexcept
      {
        auto __pi = _M_refcount.lock(memory_order_acquire);
        if (_M_ptr == __old._M_ptr && __pi == __old._M_refcount._M_pi)
          _M_refcount._M_wait_unlock(__o);
        else
          _M_refcount.unlock(memory_order_relaxed);
      }

      void
      notify_one() noexcept
      {
        _M_refcount.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_refcount.notify_all();
      }
#endif
    };

  template<typename _Tp>
    struct atomic<shared_ptr<_Tp>>
    {
    public:
      using value_type = shared_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3661. constinit atomic<shared_ptr<T>> a(nullptr); should work
      constexpr atomic(nullptr_t) noexcept : atomic() { }

      atomic(shared_ptr<_Tp> __r) noexcept
      : _M_impl(std::move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      shared_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator shared_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(shared_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(shared_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 3893. LWG 3661 broke atomic<shared_ptr<T>> a; a = nullptr;
      void
      operator=(nullptr_t) noexcept
      { store(nullptr); }

      shared_ptr<_Tp>
      exchange(shared_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(shared_ptr<_Tp>& __expected,
                              shared_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired, __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
        {
        case memory_order_acq_rel:
          __o2 = memory_order_acquire;
          break;
        case memory_order_release:
          __o2 = memory_order_relaxed;
          break;
        default:
          __o2 = __o;
        }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __glibcxx_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      {
        _M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
        _M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<shared_ptr<_Tp>> _M_impl;
    };

  template<typename _Tp>
    struct atomic<weak_ptr<_Tp>>
    {
    public:
      using value_type = weak_ptr<_Tp>;

      static constexpr bool is_always_lock_free = false;

      bool
      is_lock_free() const noexcept
      { return false; }

      constexpr atomic() noexcept = default;

      atomic(weak_ptr<_Tp> __r) noexcept
      : _M_impl(move(__r))
      { }

      atomic(const atomic&) = delete;
      void operator=(const atomic&) = delete;

      weak_ptr<_Tp>
      load(memory_order __o = memory_order_seq_cst) const noexcept
      { return _M_impl.load(__o); }

      operator weak_ptr<_Tp>() const noexcept
      { return _M_impl.load(memory_order_seq_cst); }

      void
      store(weak_ptr<_Tp> __desired,
            memory_order __o = memory_order_seq_cst) noexcept
      { _M_impl.swap(__desired, __o); }

      void
      operator=(weak_ptr<_Tp> __desired) noexcept
      { _M_impl.swap(__desired, memory_order_seq_cst); }

      weak_ptr<_Tp>
      exchange(weak_ptr<_Tp> __desired,
               memory_order __o = memory_order_seq_cst) noexcept
      {
        _M_impl.swap(__desired, __o);
        return __desired;
      }

      bool
      compare_exchange_strong(weak_ptr<_Tp>& __expected,
                              weak_ptr<_Tp> __desired,
                              memory_order __o, memory_order __o2) noexcept
      {
        return _M_impl.compare_exchange_strong(__expected, __desired, __o, __o2);
      }

      bool
      compare_exchange_strong(value_type& __expected, value_type __desired,
                              memory_order __o = memory_order_seq_cst) noexcept
      {
        memory_order __o2;
        switch (__o)
        {
        case memory_order_acq_rel:
          __o2 = memory_order_acquire;
          break;
        case memory_order_release:
          __o2 = memory_order_relaxed;
          break;
        default:
          __o2 = __o;
        }
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o, memory_order __o2) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired),
                                       __o, __o2);
      }

      bool
      compare_exchange_weak(value_type& __expected, value_type __desired,
                            memory_order __o = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__expected, std::move(__desired), __o);
      }

#if __glibcxx_atomic_wait
      void
      wait(value_type __old,
           memory_order __o = memory_order_seq_cst) const noexcept
      {
        _M_impl.wait(std::move(__old), __o);
      }

      void
      notify_one() noexcept
      {
        _M_impl.notify_one();
      }

      void
      notify_all() noexcept
      {
        _M_impl.notify_all();
      }
#endif

    private:
      _Sp_atomic<weak_ptr<_Tp>> _M_impl;
    };

#endif // __glibcxx_atomic_shared_ptr

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _SHARED_PTR_ATOMIC_H
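
A minimal usage sketch (not part of the header itself), assuming a C++20 toolchain where __glibcxx_atomic_shared_ptr is defined. It exercises the atomic<shared_ptr<T>> specialization implemented above, plus one of the deprecated free functions it replaces:

#include <atomic>
#include <memory>

int main()
{
  // C++20 interface, backed by _Sp_atomic: the low bit of the stored
  // reference-count pointer serves as the lock, so no mutex is needed.
  std::atomic<std::shared_ptr<int>> ap{std::make_shared<int>(1)};
  std::shared_ptr<int> expected = ap.load();
  ap.store(std::make_shared<int>(2));            // via _Sp_atomic::swap
  // Fails here (ap no longer holds the value in expected) and reloads
  // expected with the current value, like any compare-exchange.
  ap.compare_exchange_strong(expected, std::make_shared<int>(3));

  // Deprecated since C++20: the free functions above, serialized by
  // _Sp_locker when the object cannot be accessed lock-free.
  std::shared_ptr<int> sp = std::make_shared<int>(4);
  std::shared_ptr<int> old = std::atomic_exchange(&sp, std::make_shared<int>(5));
  (void) old;
}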