#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <new>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
# include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    { relaxed, consume, acquire, release, acq_rel, seq_cst };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#endif
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) | int(__mod)); }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  { return memory_order(int(__m) & int(__mod)); }

  // Drop release ordering as per [atomics.types.operations.req]/21.
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }
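  // Illustrative sketch (not part of the original header): how a failure
  // ordering is derived from a caller-supplied success ordering.  If
  // uncommented, these would hold, since the helpers above are constexpr:
  //
  //   static_assert(__cmpexch_failure_order(memory_order_acq_rel)
  //                 == memory_order_acquire);
  //   static_assert(__cmpexch_failure_order(memory_order_release)
  //                 == memory_order_relaxed);
  //   static_assert(__cmpexch_failure_order(memory_order_seq_cst)
  //                 == memory_order_seq_cst);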
  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }
#if __cplusplus >= 202002L
# define __cpp_lib_atomic_value_initialization 201911L
#endif

#if __cpp_lib_atomic_value_initialization
# define _GLIBCXX20_INIT(I) = I
#else
# define _GLIBCXX20_INIT(I)
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    { return __atomic_test_and_set(&_M_i, int(__m)); }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    { return __atomic_test_and_set(&_M_i, int(__m)); }

#if __cplusplus > 201703L
#define __cpp_lib_atomic_flag_test 201907L

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

#if __cpp_lib_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old, memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
        = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
          [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }
#endif // __cpp_lib_atomic_wait
#endif // C++20

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear(&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear(&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
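  // Illustrative sketch (not part of the original header): a minimal
  // test-and-set spinlock built on std::atomic_flag, assuming the public
  // <atomic> interface that wraps this base class.  Names are for
  // exposition only.
  //
  //   struct spinlock
  //   {
  //     std::atomic_flag _flag = ATOMIC_FLAG_INIT;
  //
  //     void lock()
  //     { while (_flag.test_and_set(std::memory_order_acquire)) { /* spin */ } }
  //
  //     void unlock()
  //     { _flag.clear(std::memory_order_release); }
  //   };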
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_exchange_n(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_exchange_n(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_i, __old,
                                     [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
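  // Illustrative sketch (not part of the original header): these are the
  // operations std::atomic<integral> forwards to.  A typical increment and a
  // CAS-update loop over the public interface look like this (names are for
  // exposition only):
  //
  //   std::atomic<int> counter{0};
  //   counter.fetch_add(1, std::memory_order_relaxed);
  //
  //   int expected = counter.load(std::memory_order_relaxed);
  //   while (!counter.compare_exchange_weak(expected, expected * 2,
  //                                         std::memory_order_acq_rel,
  //                                         std::memory_order_relaxed))
  //     { /* on failure, expected is reloaded with the current value */ }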
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__((__unused__)) = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_exchange_n(&_M_p, __p, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_exchange_n(&_M_p, __p, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_p, __old,
                                     [__m, this]
                                     { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
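  // Illustrative sketch (not part of the original header): fetch_add on an
  // atomic pointer advances by whole elements, because _M_type_size scales
  // the ptrdiff_t argument by sizeof(_PTp) before calling the builtin.
  // Names are for exposition only.
  //
  //   int buf[4] = {0, 1, 2, 3};
  //   std::atomic<int*> p{buf};
  //   int* old = p.fetch_add(2);   // old == buf, p now points at buf + 2
  //   p += 1;                      // p now points at buf + 3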
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));

        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), true,
                                         int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));

        return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
                                         std::__addressof(__desired), false,
                                         int(__success), int(__failure));
      }

#if __cpp_lib_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old, memory_order __m) noexcept
      {
        std::__atomic_wait_address_v(__ptr, __old,
            [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
#endif // __cpp_lib_atomic_wait

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
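  // Illustrative sketch (not part of the original header): the floating-point
  // helpers above use a load + compare_exchange_weak retry loop because the
  // __atomic builtins only provide integer fetch-and-op.  The same idiom works
  // for any read-modify-write that lacks a builtin, for example an atomic
  // maximum written against the public std::atomic interface (names are for
  // exposition only):
  //
  //   double fetch_max(std::atomic<double>& __a, double __arg)
  //   {
  //     double __old = __a.load(std::memory_order_relaxed);
  //     while (__old < __arg
  //            && !__a.compare_exchange_weak(__old, __arg,
  //                                          std::memory_order_seq_cst,
  //                                          std::memory_order_relaxed))
  //       { /* __old now holds the freshly observed value; retry */ }
  //     return __old;
  //   }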
  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
#undef _GLIBCXX20_INIT
  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
        ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

    private:
      _Tp* _M_ptr;
    };
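  // Illustrative sketch (not part of the original header): std::atomic_ref is
  // the public wrapper over these __atomic_ref base classes.  It provides
  // atomic access to an ordinary object for as long as any atomic_ref to it
  // exists (names are for exposition only):
  //
  //   int counter = 0;                        // plain, non-atomic object
  //   {
  //     std::atomic_ref<int> ref(counter);    // object must satisfy required_alignment
  //     ref.fetch_add(1, std::memory_order_relaxed);
  //     ref.store(42);
  //   }
  //   // counter may be accessed non-atomically again once no atomic_ref
  //   // to it is outstanding.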
  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };

#endif // C++20
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H