#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#if __cplusplus > 201703L
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#if __cplusplus > 201703L
  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#endif
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }
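  // Illustrative note (not part of the original header): the mapping above is
  // what the single-order compare_exchange overloads rely on.  A sketch of
  // the effective failure orders, assuming the usual six memory orders:
  //
  //   compare_exchange(..., memory_order_acq_rel)  -> failure: acquire
  //   compare_exchange(..., memory_order_release)  -> failure: relaxed
  //   compare_exchange(..., memory_order_seq_cst)  -> failure: seq_cst
  //
  // i.e. the failure order drops any release semantics, since a failed
  // compare-exchange performs only a load.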
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
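  // Illustrative example (not part of the original header): a release fence
  // paired with relaxed stores, assuming user-side 'data' and 'ready' atomics:
  //
  //   data.store(42, std::memory_order_relaxed);
  //   std::atomic_thread_fence(std::memory_order_release);
  //   ready.store(true, std::memory_order_relaxed);
  //
  // A reader that observes ready == true after an acquire fence (or an
  // acquire load of 'ready') is then guaranteed to observe the store to
  // 'data'.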
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

#if __cplusplus <= 201703L
# define _GLIBCXX20_INIT(I)
#else
# define __cpp_lib_atomic_value_initialization 201911L
# define _GLIBCXX20_INIT(I) = I
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }
#if __cplusplus > 201703L
#define __cpp_lib_atomic_flag_test 201907L

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }
#if __cpp_lib_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old = false,
         memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
        = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
          [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() const noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() const noexcept
    { std::__atomic_notify_address(&_M_i, true); }
#endif // __cpp_lib_atomic_wait

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }
    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }
  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
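  // Illustrative example (not part of the original header): a minimal
  // spin-lock built on atomic_flag, assuming user code includes <atomic>:
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //
  //   void with_lock(void (*critical)())
  //   {
  //     while (lock.test_and_set(std::memory_order_acquire))
  //       { /* spin until the flag is clear */ }
  //     critical();
  //     lock.clear(std::memory_order_release);
  //   }
  //
  // test_and_set with acquire ordering makes lock acquisition a
  // synchronization point; clear with release ordering publishes the writes
  // made inside the critical section.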
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }
      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_i, __old,
                           [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_i, true); }
#endif // __cpp_lib_atomic_wait
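      // Illustrative example (not part of the original header): blocking on a
      // value change with the C++20 wait/notify members, assuming two threads
      // share a std::atomic<int> named 'state':
      //
      //   // consumer
      //   state.wait(0);              // blocks while state == 0
      //   use(state.load());
      //
      //   // producer
      //   state.store(1);
      //   state.notify_one();         // wakes a waiting consumer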
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
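  // Illustrative example (not part of the original header): a relaxed event
  // counter built on the fetch_add member above, via std::atomic<unsigned>:
  //
  //   std::atomic<unsigned> hits{0};
  //
  //   void record_hit()
  //   { hits.fetch_add(1, std::memory_order_relaxed); }
  //
  //   unsigned snapshot()
  //   { return hits.load(std::memory_order_relaxed); }
  //
  // Relaxed ordering is sufficient here because the counter does not publish
  // any other data; the operators (++, +=, ...) use seq_cst by default.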
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }
      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        memory_order __b2 = __m2 & __memory_order_mask;
        memory_order __b1 = __m1 & __memory_order_mask;
        __glibcxx_assert(__b2 != memory_order_release);
        __glibcxx_assert(__b2 != memory_order_acq_rel);
        __glibcxx_assert(__b2 <= __b1);

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_p, __old,
                                     [__m, this]
                                     { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
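  // Illustrative example (not part of the original header): pointer atomics
  // scale the operand by sizeof(*ptr), exactly like ordinary pointer
  // arithmetic.  Assuming user code such as:
  //
  //   int buffer[64];
  //   std::atomic<int*> cursor{buffer};
  //
  //   int* claim_slot()
  //   { return cursor.fetch_add(1); }   // advances by one int, not one byte
  //
  // each caller of claim_slot() receives a distinct element of 'buffer'.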
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, std::__addressof(__desired), __dest,
                          int(__m));
        return *__dest;
      }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), true,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), false,
                                         int(__success), int(__failure));
      }
#if __cpp_lib_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old, memory_order __m) noexcept
      {
        std::__atomic_wait_address_v(__ptr, __old,
            [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
#endif // __cpp_lib_atomic_wait

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }
    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
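  // Illustrative note (not part of the original header): the floating-point
  // helpers above use the standard compare-exchange retry loop, because there
  // is no native atomic add for floating-point types.  The same idiom works
  // for any read-modify-write a user wants to build, e.g. an atomic maximum
  // over a std::atomic<double> named 'peak':
  //
  //   double cur = peak.load(std::memory_order_relaxed);
  //   while (cur < sample
  //          && !peak.compare_exchange_weak(cur, sample))
  //     { /* 'cur' was reloaded by the failed CAS; retry */ }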
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;
      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }
      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
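  // Illustrative example (not part of the original header): accumulating into
  // a std::atomic<double>, which is backed by __atomic_float in C++20 mode:
  //
  //   std::atomic<double> total{0.0};
  //
  //   void add_sample(double x)
  //   { total.fetch_add(x, std::memory_order_relaxed); }
  //
  // Because most targets have no native atomic floating-point add, each call
  // may loop internally (see __fetch_add_flt above) until its
  // compare-exchange succeeds.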
#undef _GLIBCXX20_INIT

  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;
  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
        ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;
      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }
      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

    private:
      _Tp* _M_ptr;
    };

  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }
      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }
      value_type
      exchange(value_type __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }
      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
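  // Illustrative example (not part of the original header): std::atomic_ref
  // (built on __atomic_ref) performs atomic operations on a plain object that
  // other code may still access non-atomically outside the concurrent phase:
  //
  //   int counter = 0;            // ordinary int, suitably aligned
  //
  //   void worker()
  //   {
  //     std::atomic_ref<int> ref(counter);
  //     ref.fetch_add(1, std::memory_order_relaxed);
  //   }
  //
  // All concurrent accesses must go through atomic_ref; required_alignment
  // above states the alignment the referenced object must satisfy.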
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;
      _Fp
      operator=(_Fp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }
      value_type
      exchange(value_type __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free
        = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;
      _Tp*
      operator=(_Tp* __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }
      value_type
      exchange(value_type __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
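  // Illustrative example (not part of the original header): atomic_ref over a
  // plain pointer variable, advancing a shared cursor through a buffer:
  //
  //   char buf[4096];
  //   char* next = buf;              // ordinary pointer object
  //
  //   char* claim(std::size_t n)
  //   {
  //     std::atomic_ref<char*> cur(next);
  //     return cur.fetch_add(static_cast<std::ptrdiff_t>(n));
  //   }
  //
  // fetch_add scales by sizeof(char) via _S_type_size, mirroring ordinary
  // pointer arithmetic.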
#endif // C++20

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H