#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <new> // For placement new
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif
  /// @cond undocumented
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };
  /// @endcond

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod) noexcept
  { return memory_order(int(__m) | int(__mod)); }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod) noexcept
  { return memory_order(int(__m) & int(__mod)); }

  // Drop release ordering as per [atomics.types.operations.req]/21.
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }
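  // Illustrative note (not part of the original header): the single-order
  // compare_exchange overloads below derive their failure ordering with
  // __cmpexch_failure_order, e.g.
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed
  // so the failure ordering is never stronger than the success ordering and
  // never contains a release component.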
  // Base class template for atomic integral types.
  template<typename _IntTp>
    struct __atomic_base;

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
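  // Example (illustrative only, not part of this header): pairing a release
  // fence with an acquire fence around relaxed operations, assuming two
  // hypothetical atomics `__data` and `__ready`:
  //
  //   // writer:
  //   __data.store(42, std::memory_order_relaxed);
  //   std::atomic_thread_fence(std::memory_order_release);
  //   __ready.store(true, std::memory_order_relaxed);
  //
  //   // reader:
  //   if (__ready.load(std::memory_order_relaxed))
  //     {
  //       std::atomic_thread_fence(std::memory_order_acquire);
  //       int __v = __data.load(std::memory_order_relaxed); // sees 42
  //     }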
  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

#if __glibcxx_atomic_value_initialization
# define _GLIBCXX20_INIT(I) = I
#else
# define _GLIBCXX20_INIT(I)
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }
  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  // Base type for atomic_flag.
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#ifdef __glibcxx_atomic_flag_test
    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }
#endif

#if __glibcxx_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old, memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
        = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
          [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
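  // Example (illustrative only, not part of this header): the public
  // std::atomic_flag interface defined above used as a minimal spin lock.
  //
  //   std::atomic_flag __lock = ATOMIC_FLAG_INIT;
  //
  //   void __with_lock()
  //   {
  //     while (__lock.test_and_set(std::memory_order_acquire))
  //       { } // spin until the flag was previously clear
  //     // ... critical section ...
  //     __lock.clear(std::memory_order_release);
  //   }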
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_i, __old,
                                     [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
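  // Example (illustrative only, not part of this header): the public
  // std::atomic<int> interface built on __atomic_base, using a
  // compare-exchange loop for a read-modify-write that has no dedicated
  // fetch_* operation (here: atomic maximum).
  //
  //   void __atomic_max(std::atomic<int>& __a, int __v)
  //   {
  //     int __cur = __a.load(std::memory_order_relaxed);
  //     while (__cur < __v
  //            && !__a.compare_exchange_weak(__cur, __v,
  //                                          std::memory_order_release,
  //                                          std::memory_order_relaxed))
  //       { } // __cur was reloaded by the failed compare_exchange_weak
  //   }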
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_p, __old,
                                     [__m, this]
                                     { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }
#endif // __glibcxx_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
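  // Example (illustrative only, not part of this header): fetch_add on an
  // atomic pointer advances by whole elements; _M_type_size scales the
  // ptrdiff_t argument by sizeof(_PTp) before calling __atomic_fetch_add.
  //
  //   int __buf[8];
  //   std::atomic<int*> __cursor{__buf};
  //   int* __prev = __cursor.fetch_add(2);   // __prev == __buf,
  //                                          // __cursor == __buf + 2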
  namespace __atomic_impl
  {
    // Implementation details of atomic padding handling.

    template<typename _Tp>
      constexpr bool
      __maybe_has_padding()
      {
#if ! __has_builtin(__builtin_clear_padding)
        return false;
#elif __has_builtin(__has_unique_object_representations)
        return !__has_unique_object_representations(_Tp)
          && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
#else
        return true;
#endif
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp*
      __clear_padding(_Tp& __val) noexcept
      {
        auto* __ptr = std::__addressof(__val);
#if __has_builtin(__builtin_clear_padding)
        if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
          __builtin_clear_padding(__ptr);
#endif
        return __ptr;
      }

    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = typename remove_volatile<_Tp>::type;

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions"

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
                         bool __is_weak,
                         memory_order __s, memory_order __f) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));

        using _Vp = _Val<_Tp>;
        _Tp* const __pval = std::__addressof(__val);

        if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
          return __atomic_compare_exchange(__pval, std::__addressof(__e),
                                           std::__addressof(__i), __is_weak,
                                           int(__s), int(__f));
        else if constexpr (!_AtomicRef) // std::atomic<T>
          {
            // Clear padding of the value we want to store:
            _Vp* const __pi = __atomic_impl::__clear_padding(__i);

            // Only allowed to modify __e on failure, so work on a copy
            // with cleared padding:
            _Vp __exp = __e;
            _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

            // For std::atomic<T> the stored value always has zeroed padding,
            // so a bitwise compare-exchange is sufficient.
            if (__atomic_compare_exchange(__pval, __pexp, __pi,
                                          __is_weak, int(__s), int(__f)))
              return true;

            // Value bits must be different, copy from __exp back to __e:
            __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
            return false;
          }
        else // std::atomic_ref<T> where T might have padding bits.
          {
            // Clear padding of the value we want to store:
            _Vp* const __pi = __atomic_impl::__clear_padding(__i);

            // Only allowed to modify __e on failure, so work on a copy
            // with cleared padding:
            _Vp __exp = __e;
            _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

            // The referenced object may contain non-zero padding, so a
            // bitwise compare can fail even when the value bits are equal.
            // Retry while only the padding bits differ.
            while (true)
              {
                // Copy of the expected value so we can compare value bits
                // after a failed compare-exchange:
                _Vp __orig = __exp;

                if (__atomic_compare_exchange(__pval, __pexp, __pi,
                                              __is_weak, int(__s), int(__f)))
                  return true;

                _Vp __curr = __exp;
                if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
                                     __atomic_impl::__clear_padding(__curr),
                                     sizeof(_Vp)))
                  {
                    // Value bits differ, this is a genuine failure:
                    __builtin_memcpy(std::__addressof(__e), __pexp,
                                     sizeof(_Vp));
                    return false;
                  }
              }
          }
      }
#pragma GCC diagnostic pop
  } // namespace __atomic_impl
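  // Illustrative note (not part of the original header): padding matters
  // because __atomic_compare_exchange compares whole object representations.
  // For a hypothetical type such as
  //
  //   struct _Padded { char _M_c; /* 3 bytes of padding */ int _M_i; };
  //
  // two objects with equal members can still differ in their padding bytes,
  // so __compare_exchange above clears padding (via __builtin_clear_padding)
  // and, in the atomic_ref case, retries while only padding bits differ.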
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Like _Val<T> above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
                          __dest, int(__m));
        return *__dest;
      }

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure,
                            bool __check_padding = false) noexcept
      {
        return __atomic_impl::__compare_exchange<_AtomicRef>(
                 *__ptr, __expected, __desired, true, __success, __failure);
      }

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure,
                              bool __ignore_padding = false) noexcept
      {
        return __atomic_impl::__compare_exchange<_AtomicRef>(
                 *__ptr, __expected, __desired, false, __success, __failure);
      }
#if __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
           memory_order __m = memory_order_seq_cst) noexcept
      {
        std::__atomic_wait_address_v(__ptr, __old,
            [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
#endif // __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval + __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval + __i;
        return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
        _Val<_Tp> __newval = __oldval - __i;
        while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
          __newval = __oldval - __i;
        return __newval;
      }
  } // namespace __atomic_impl
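  // Illustrative note (not part of the original header): there is no
  // __atomic_fetch_add builtin for floating-point types, so the *_flt
  // helpers above emulate it with a compare_exchange_weak loop; a failed
  // compare_exchange_weak reloads __oldval, and the new value is recomputed
  // before the next attempt.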
  // Base class for atomic<floating-point-type>.
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { __atomic_impl::__clear_padding(_M_fp); }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __glibcxx_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };

#undef _GLIBCXX20_INIT
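  // Example (illustrative only, not part of this header): __atomic_float is
  // the base of std::atomic<double>, whose fetch_add goes through the
  // compare-exchange loop in __atomic_impl::__fetch_add_flt.
  //
  //   std::atomic<double> __sum{0.0};
  //   __sum.fetch_add(1.5);               // __sum == 1.5
  //   __sum += 2.5;                       // __sum == 4.0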
  template<typename _Tp,
           bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // Base class for non-integral, non-floating-point, non-pointer types.
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
        ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

    private:
      _Tp* _M_ptr;
    };
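  // Example (illustrative only, not part of this header): std::atomic_ref is
  // implemented in terms of __atomic_ref; it performs atomic operations on an
  // object it does not own, provided the object is suitably aligned.
  //
  //   struct _Pt { double _M_x, _M_y; };
  //   alignas(std::atomic_ref<_Pt>::required_alignment) _Pt __p{0, 0};
  //
  //   std::atomic_ref<_Pt> __r(__p);
  //   _Pt __expected = __r.load();
  //   __r.compare_exchange_strong(__expected, _Pt{1, 2});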
  // Base class for atomic_ref<integral-type>.
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
  // Base class for atomic_ref<floating-point-type>.
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
  // Base class for atomic_ref<pointer-type>.
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };

#endif // C++2a
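  // Example (illustrative only, not part of this header): fetch_add on
  // std::atomic_ref<T*> is scaled by sizeof(T) via _S_type_size, mirroring
  // the __atomic_base<_PTp*> specialization above.
  //
  //   long __arr[4];
  //   long* __it = __arr;
  //   std::atomic_ref<long*> __ref(__it);
  //   __ref.fetch_add(3);                 // __it == __arr + 3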
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H