3 // Copyright (C) 2008, 2009, 2010
4 // Free Software Foundation, Inc.
6 // This file is part of the GNU ISO C++ Library. This library is free
7 // software; you can redistribute it and/or modify it under the
8 // terms of the GNU General Public License as published by the
9 // Free Software Foundation; either version 3, or (at your option)
12 // This library is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 // GNU General Public License for more details.
17 // Under Section 7 of GPL version 3, you are granted additional
18 // permissions described in the GCC Runtime Library Exception, version
19 // 3.1, as published by the Free Software Foundation.
21 // You should have received a copy of the GNU General Public License and
22 // a copy of the GCC Runtime Library Exception along with this program;
23 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 // <http://www.gnu.org/licenses/>.
26 /** @file bits/atomic_0.h
27 * This is an internal header file, included by other library headers.
28 * Do not attempt to use it directly. @headername{atomic}
31 #ifndef _GLIBCXX_ATOMIC_0_H
32 #define _GLIBCXX_ATOMIC_0_H 1
34 #pragma GCC system_header
36 namespace std _GLIBCXX_VISIBILITY(default)
38 _GLIBCXX_BEGIN_NAMESPACE_VERSION
40 // 0 == __atomic0 == Never lock-free
// C-linkage helpers that back the __atomic0 ("never lock-free")
// implementation: every atomic object is guarded by an
// __atomic_flag_base lock selected from the object's address.
// NOTE(review): the return-type lines of the first two declarations
// are not visible in this excerpt; the declarations are fragmentary.
43 _GLIBCXX_BEGIN_EXTERN_C
// Release the guard flag with the given memory order.
46 atomic_flag_clear_explicit(__atomic_flag_base*, memory_order)
// Spin until the guard flag is acquired with the given memory order.
50 __atomic_flag_wait_explicit(__atomic_flag_base*, memory_order)
// Map an object address to its guard flag.  _GLIBCXX_CONST marks the
// result as a pure function of the address, so repeated lookups for
// the same object yield the same lock.
53 _GLIBCXX_CONST __atomic_flag_base*
54 __atomic_flag_for_address(const volatile void* __z) _GLIBCXX_NOTHROW;
58 // Implementation specific defines.
// _ATOMIC_MEMBER_ names the data member that stores the value inside
// each atomic type defined below (the _M_i member of atomic_address
// and __atomic_base).
59 #define _ATOMIC_MEMBER_ _M_i
// Lock-based operation macros.  Each is a GNU statement expression
// that: (1) looks up the guard flag for the member's address with
// __atomic_flag_for_address, (2) spin-acquires it with
// __atomic_flag_wait_explicit, (3) performs the plain (non-atomic)
// load/store/modify on *__p, and (4) releases the flag with
// atomic_flag_clear_explicit.
//
// _ATOMIC_LOAD_(__a, __x)            -- read, yielding __r
// _ATOMIC_STORE_(__a, __m, __x)      -- write __m
// _ATOMIC_MODIFY_(__a, __o, __m, __x) -- apply op-token __o (+=, -=,
//                                       &=, |=, ^=, or plain =) with
//                                       operand __m
// _ATOMIC_CMPEXCHNG_(__a, __e, __m, __x) -- CAS: compare *__p against
//                                       *__e, store __m on match,
//                                       else write back into *__e
//
// NOTE(review): several interior/closing lines of these macros (e.g.
// the final "__r; })" of _ATOMIC_LOAD_, the "*__p = ..." of
// _ATOMIC_STORE_, and the comparison branch of _ATOMIC_CMPEXCHNG_)
// are not visible in this excerpt; the bodies below are fragmentary.
// No comments may be placed between the continuation lines without
// corrupting the macros.
61 // Lock-protected implementations of the atomic operations.
62 #define _ATOMIC_LOAD_(__a, __x) \
63 ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type; \
64 __i_type* __p = &_ATOMIC_MEMBER_; \
65 __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
66 __atomic_flag_wait_explicit(__g, __x); \
67 __i_type __r = *__p; \
68 atomic_flag_clear_explicit(__g, __x); \
71 #define _ATOMIC_STORE_(__a, __m, __x) \
72 ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type; \
73 __i_type* __p = &_ATOMIC_MEMBER_; \
74 __typeof__(__m) __v = (__m); \
75 __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
76 __atomic_flag_wait_explicit(__g, __x); \
78 atomic_flag_clear_explicit(__g, __x); \
81 #define _ATOMIC_MODIFY_(__a, __o, __m, __x) \
82 ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type; \
83 __i_type* __p = &_ATOMIC_MEMBER_; \
84 __typeof__(__m) __v = (__m); \
85 __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
86 __atomic_flag_wait_explicit(__g, __x); \
87 __i_type __r = *__p; \
89 atomic_flag_clear_explicit(__g, __x); \
92 #define _ATOMIC_CMPEXCHNG_(__a, __e, __m, __x) \
93 ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type; \
94 __i_type* __p = &_ATOMIC_MEMBER_; \
95 __typeof__(__e) __q = (__e); \
96 __typeof__(__m) __v = (__m); \
98 __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
99 __atomic_flag_wait_explicit(__g, __x); \
100 __i_type __t = *__p; \
103 *__p = (__i_type)__v; \
106 else { *__q = __t; __r = false; } \
107 atomic_flag_clear_explicit(__g, __x); \
// atomic_flag: the one type required to be lock-free-capable even in
// this fallback implementation.  Inherits its flag storage from
// __atomic_flag_base.  NOTE(review): the struct body is fragmentary in
// this excerpt -- return types of test_and_set/clear and the closing
// brace are not visible.
112 struct atomic_flag : public __atomic_flag_base
// Non-copyable, per the C++0x atomics specification; default
// construction/destruction are trivial.
114 atomic_flag() = default;
115 ~atomic_flag() = default;
116 atomic_flag(const atomic_flag&) = delete;
117 atomic_flag& operator=(const atomic_flag&) = delete;
118 atomic_flag& operator=(const atomic_flag&) volatile = delete;
// Conversion to ATOMIC_FLAG_INIT, i.e. aggregate-initializes the base
// flag from a bool.
120 // Conversion to ATOMIC_FLAG_INIT.
121 atomic_flag(bool __i): __atomic_flag_base({ __i }) { }
// Atomically set the flag and return its previous value; provided in
// plain and volatile-qualified overloads.
124 test_and_set(memory_order __m = memory_order_seq_cst);
127 test_and_set(memory_order __m = memory_order_seq_cst) volatile;
// Atomically clear the flag; plain and volatile overloads.
130 clear(memory_order __m = memory_order_seq_cst);
133 clear(memory_order __m = memory_order_seq_cst) volatile;
// atomic_address: lock-based atomic void* (the C++0x-draft pointer
// atomic).  Every operation serializes through the per-address guard
// flag via the _ATOMIC_* macros, so is_lock_free() is always false.
// NOTE(review): return-type lines, the _M_i member declaration, and
// most braces are not visible in this excerpt; the struct body below
// is fragmentary.
138 struct atomic_address
// Non-copyable; trivially constructed/destroyed.
144 atomic_address() = default;
145 ~atomic_address() = default;
146 atomic_address(const atomic_address&) = delete;
147 atomic_address& operator=(const atomic_address&) = delete;
148 atomic_address& operator=(const atomic_address&) volatile = delete;
// Constant-initialization from a pointer value.
150 constexpr atomic_address(void* __v): _M_i (__v) { }
// This implementation never uses lock-free hardware primitives.
153 is_lock_free() const { return false; }
156 is_lock_free() const volatile { return false; }
// store: a store may not use acquire, acq_rel, or consume ordering
// ([atomics.types.operations]); enforced here by assertion.
159 store(void* __v, memory_order __m = memory_order_seq_cst)
161 __glibcxx_assert(__m != memory_order_acquire);
162 __glibcxx_assert(__m != memory_order_acq_rel);
163 __glibcxx_assert(__m != memory_order_consume);
164 _ATOMIC_STORE_(this, __v, __m);
168 store(void* __v, memory_order __m = memory_order_seq_cst) volatile
170 __glibcxx_assert(__m != memory_order_acquire);
171 __glibcxx_assert(__m != memory_order_acq_rel);
172 __glibcxx_assert(__m != memory_order_consume);
173 _ATOMIC_STORE_(this, __v, __m);
// load: may not use release or acq_rel ordering.
177 load(memory_order __m = memory_order_seq_cst) const
179 __glibcxx_assert(__m != memory_order_release);
180 __glibcxx_assert(__m != memory_order_acq_rel);
181 return _ATOMIC_LOAD_(this, __m);
185 load(memory_order __m = memory_order_seq_cst) const volatile
187 __glibcxx_assert(__m != memory_order_release);
188 __glibcxx_assert(__m != memory_order_acq_rel);
189 return _ATOMIC_LOAD_(this, __m);
// exchange: plain assignment ("=") as the modify op-token returns the
// previous value under the lock.
193 exchange(void* __v, memory_order __m = memory_order_seq_cst)
194 { return _ATOMIC_MODIFY_(this, =, __v, __m); }
197 exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
198 { return _ATOMIC_MODIFY_(this, =, __v, __m); }
// compare_exchange_weak, (void*&, void*) overloads with explicit
// success/failure orders.  The failure order __m2 may not be release
// or acq_rel and may not be stronger than the success order __m1.
201 compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
204 __glibcxx_assert(__m2 != memory_order_release);
205 __glibcxx_assert(__m2 != memory_order_acq_rel);
206 __glibcxx_assert(__m2 <= __m1);
207 return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
211 compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
212 memory_order __m2) volatile
214 __glibcxx_assert(__m2 != memory_order_release);
215 __glibcxx_assert(__m2 != memory_order_acq_rel);
216 __glibcxx_assert(__m2 <= __m1);
217 return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
// Single-order convenience overloads: derive the failure order from
// the success order via __calculate_memory_order.
221 compare_exchange_weak(void*& __v1, void* __v2,
222 memory_order __m = memory_order_seq_cst)
224 return compare_exchange_weak(__v1, __v2, __m,
225 __calculate_memory_order(__m));
229 compare_exchange_weak(void*& __v1, void* __v2,
230 memory_order __m = memory_order_seq_cst) volatile
232 return compare_exchange_weak(__v1, __v2, __m,
233 __calculate_memory_order(__m));
// const void* overloads of compare_exchange_weak, same contract.
237 compare_exchange_weak(const void*& __v1, const void* __v2,
238 memory_order __m1, memory_order __m2)
240 __glibcxx_assert(__m2 != memory_order_release);
241 __glibcxx_assert(__m2 != memory_order_acq_rel);
242 __glibcxx_assert(__m2 <= __m1);
243 return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
247 compare_exchange_weak(const void*& __v1, const void* __v2,
248 memory_order __m1, memory_order __m2) volatile
250 __glibcxx_assert(__m2 != memory_order_release);
251 __glibcxx_assert(__m2 != memory_order_acq_rel);
252 __glibcxx_assert(__m2 <= __m1);
253 return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
257 compare_exchange_weak(const void*& __v1, const void* __v2,
258 memory_order __m = memory_order_seq_cst)
260 return compare_exchange_weak(__v1, __v2, __m,
261 __calculate_memory_order(__m));
265 compare_exchange_weak(const void*& __v1, const void* __v2,
266 memory_order __m = memory_order_seq_cst) volatile
268 return compare_exchange_weak(__v1, __v2, __m,
269 __calculate_memory_order(__m));
// compare_exchange_strong: identical bodies to the weak forms -- the
// lock-based CAS cannot fail spuriously, so weak and strong coincide.
273 compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
276 __glibcxx_assert(__m2 != memory_order_release);
277 __glibcxx_assert(__m2 != memory_order_acq_rel);
278 __glibcxx_assert(__m2 <= __m1);
279 return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
283 compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
284 memory_order __m2) volatile
286 __glibcxx_assert(__m2 != memory_order_release);
287 __glibcxx_assert(__m2 != memory_order_acq_rel);
288 __glibcxx_assert(__m2 <= __m1);
289 return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
293 compare_exchange_strong(void*& __v1, void* __v2,
294 memory_order __m = memory_order_seq_cst)
296 return compare_exchange_strong(__v1, __v2, __m,
297 __calculate_memory_order(__m));
301 compare_exchange_strong(void*& __v1, void* __v2,
302 memory_order __m = memory_order_seq_cst) volatile
304 return compare_exchange_strong(__v1, __v2, __m,
305 __calculate_memory_order(__m));
309 compare_exchange_strong(const void*& __v1, const void* __v2,
310 memory_order __m1, memory_order __m2)
312 __glibcxx_assert(__m2 != memory_order_release);
313 __glibcxx_assert(__m2 != memory_order_acq_rel);
314 __glibcxx_assert(__m2 <= __m1);
315 return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
319 compare_exchange_strong(const void*& __v1, const void* __v2,
320 memory_order __m1, memory_order __m2) volatile
322 __glibcxx_assert(__m2 != memory_order_release);
323 __glibcxx_assert(__m2 != memory_order_acq_rel);
324 __glibcxx_assert(__m2 <= __m1);
325 return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
329 compare_exchange_strong(const void*& __v1, const void* __v2,
330 memory_order __m = memory_order_seq_cst)
332 return compare_exchange_strong(__v1, __v2, __m,
333 __calculate_memory_order(__m));
337 compare_exchange_strong(const void*& __v1, const void* __v2,
338 memory_order __m = memory_order_seq_cst) volatile
340 return compare_exchange_strong(__v1, __v2, __m,
341 __calculate_memory_order(__m));
// fetch_add: open-coded (not via _ATOMIC_MODIFY_, since void* has no
// +=).  Byte-offset arithmetic is done through a char* cast under the
// same acquire-flag/release-flag protocol as the macros.
// NOTE(review): the line returning the old value is not visible here.
345 fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
347 void** __p = &(_M_i);
348 __atomic_flag_base* __g = __atomic_flag_for_address(__p);
349 __atomic_flag_wait_explicit(__g, __m);
351 *__p = (void*)((char*)(*__p) + __d);
352 atomic_flag_clear_explicit(__g, __m);
357 fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
359 void* volatile* __p = &(_M_i);
360 __atomic_flag_base* __g = __atomic_flag_for_address(__p);
361 __atomic_flag_wait_explicit(__g, __m);
363 *__p = (void*)((char*)(*__p) + __d);
364 atomic_flag_clear_explicit(__g, __m);
// fetch_sub: mirror of fetch_add with byte subtraction.
369 fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
371 void** __p = &(_M_i);
372 __atomic_flag_base* __g = __atomic_flag_for_address(__p);
373 __atomic_flag_wait_explicit(__g, __m);
375 *__p = (void*)((char*)(*__p) - __d);
376 atomic_flag_clear_explicit(__g, __m);
381 fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
383 void* volatile* __p = &(_M_i);
384 __atomic_flag_base* __g = __atomic_flag_for_address(__p);
385 __atomic_flag_wait_explicit(__g, __m);
387 *__p = (void*)((char*)(*__p) - __d);
388 atomic_flag_clear_explicit(__g, __m);
// Conversion and assignment operators (bodies not visible in this
// excerpt).
392 operator void*() const
395 operator void*() const volatile
407 operator=(void* __v) volatile
// Compound assignment: performs the atomic fetch-op, then re-applies
// the delta to the fetched (old) value to yield the new value.
414 operator+=(ptrdiff_t __d)
415 { return fetch_add(__d) + __d; }
418 operator+=(ptrdiff_t __d) volatile
419 { return fetch_add(__d) + __d; }
422 operator-=(ptrdiff_t __d)
423 { return fetch_sub(__d) - __d; }
426 operator-=(ptrdiff_t __d) volatile
427 { return fetch_sub(__d) - __d; }
431 /// Base class for atomic integrals.
433 // For each of the integral types, define atomic_[integral type] struct
437 // atomic_schar signed char
438 // atomic_uchar unsigned char
439 // atomic_short short
440 // atomic_ushort unsigned short
442 // atomic_uint unsigned int
444 // atomic_ulong unsigned long
445 // atomic_llong long long
446 // atomic_ullong unsigned long long
447 // atomic_char16_t char16_t
448 // atomic_char32_t char32_t
449 // atomic_wchar_t wchar_t
// __atomic_base<_ITp>: lock-based atomic integral, the common
// implementation behind the atomic_[integral] structs listed above.
// NOTE(review): the class-head line, the _M_i member declaration,
// return-type lines, and most braces are not visible in this excerpt;
// the template body below is fragmentary.
452 // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
453 // since that is what GCC built-in functions for atomic memory access work on.
454 template<typename _ITp>
// Underlying integral value type.
458 typedef _ITp __int_type;
// Non-copyable; trivially constructed/destroyed.
463 __atomic_base() = default;
464 ~__atomic_base() = default;
465 __atomic_base(const __atomic_base&) = delete;
466 __atomic_base& operator=(const __atomic_base&) = delete;
467 __atomic_base& operator=(const __atomic_base&) volatile = delete;
469 // Requires __int_type convertible to _M_base._M_i.
470 constexpr __atomic_base(__int_type __i): _M_i (__i) { }
// Implicit conversion reads the value atomically (bodies not visible).
472 operator __int_type() const
475 operator __int_type() const volatile
// Assignment stores atomically (bodies not visible).
479 operator=(__int_type __i)
486 operator=(__int_type __i) volatile
// Post-increment/decrement return the OLD value (bare fetch result);
// the non-volatile operator++(int)/operator--(int) header lines are
// not visible in this excerpt.
494 { return fetch_add(1); }
497 operator++(int) volatile
498 { return fetch_add(1); }
502 { return fetch_sub(1); }
505 operator--(int) volatile
506 { return fetch_sub(1); }
// Pre-increment/decrement return the NEW value: fetch result plus or
// minus the applied delta.
510 { return fetch_add(1) + 1; }
513 operator++() volatile
514 { return fetch_add(1) + 1; }
518 { return fetch_sub(1) - 1; }
521 operator--() volatile
522 { return fetch_sub(1) - 1; }
// Compound assignment: atomic fetch-op, then re-apply the operand to
// the fetched (old) value to produce the stored result.
525 operator+=(__int_type __i)
526 { return fetch_add(__i) + __i; }
529 operator+=(__int_type __i) volatile
530 { return fetch_add(__i) + __i; }
533 operator-=(__int_type __i)
534 { return fetch_sub(__i) - __i; }
537 operator-=(__int_type __i) volatile
538 { return fetch_sub(__i) - __i; }
541 operator&=(__int_type __i)
542 { return fetch_and(__i) & __i; }
545 operator&=(__int_type __i) volatile
546 { return fetch_and(__i) & __i; }
549 operator|=(__int_type __i)
550 { return fetch_or(__i) | __i; }
553 operator|=(__int_type __i) volatile
554 { return fetch_or(__i) | __i; }
557 operator^=(__int_type __i)
558 { return fetch_xor(__i) ^ __i; }
561 operator^=(__int_type __i) volatile
562 { return fetch_xor(__i) ^ __i; }
// __atomic0 is never lock-free (body not visible here).
569 is_lock_free() const volatile
// store: acquire/acq_rel/consume are invalid orders for a store.
573 store(__int_type __i, memory_order __m = memory_order_seq_cst)
575 __glibcxx_assert(__m != memory_order_acquire);
576 __glibcxx_assert(__m != memory_order_acq_rel);
577 __glibcxx_assert(__m != memory_order_consume);
578 _ATOMIC_STORE_(this, __i, __m);
582 store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
584 __glibcxx_assert(__m != memory_order_acquire);
585 __glibcxx_assert(__m != memory_order_acq_rel);
586 __glibcxx_assert(__m != memory_order_consume);
587 _ATOMIC_STORE_(this, __i, __m);
// load: release/acq_rel are invalid orders for a load.
591 load(memory_order __m = memory_order_seq_cst) const
593 __glibcxx_assert(__m != memory_order_release);
594 __glibcxx_assert(__m != memory_order_acq_rel);
595 return _ATOMIC_LOAD_(this, __m);
599 load(memory_order __m = memory_order_seq_cst) const volatile
601 __glibcxx_assert(__m != memory_order_release);
602 __glibcxx_assert(__m != memory_order_acq_rel);
603 return _ATOMIC_LOAD_(this, __m);
// exchange: plain "=" as the modify op-token swaps in the new value
// and yields the old one.
607 exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
608 { return _ATOMIC_MODIFY_(this, =, __i, __m); }
611 exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
612 { return _ATOMIC_MODIFY_(this, =, __i, __m); }
// compare_exchange_weak with explicit success (__m1) / failure (__m2)
// orders; __m2 may not be release or acq_rel, nor stronger than __m1.
615 compare_exchange_weak(__int_type& __i1, __int_type __i2,
616 memory_order __m1, memory_order __m2)
618 __glibcxx_assert(__m2 != memory_order_release);
619 __glibcxx_assert(__m2 != memory_order_acq_rel);
620 __glibcxx_assert(__m2 <= __m1);
621 return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
625 compare_exchange_weak(__int_type& __i1, __int_type __i2,
626 memory_order __m1, memory_order __m2) volatile
628 __glibcxx_assert(__m2 != memory_order_release);
629 __glibcxx_assert(__m2 != memory_order_acq_rel);
630 __glibcxx_assert(__m2 <= __m1);
631 return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
// Single-order convenience forms derive the failure order via
// __calculate_memory_order.
635 compare_exchange_weak(__int_type& __i1, __int_type __i2,
636 memory_order __m = memory_order_seq_cst)
638 return compare_exchange_weak(__i1, __i2, __m,
639 __calculate_memory_order(__m));
643 compare_exchange_weak(__int_type& __i1, __int_type __i2,
644 memory_order __m = memory_order_seq_cst) volatile
646 return compare_exchange_weak(__i1, __i2, __m,
647 __calculate_memory_order(__m));
// compare_exchange_strong: same bodies as the weak forms -- the
// lock-based CAS never fails spuriously.
651 compare_exchange_strong(__int_type& __i1, __int_type __i2,
652 memory_order __m1, memory_order __m2)
654 __glibcxx_assert(__m2 != memory_order_release);
655 __glibcxx_assert(__m2 != memory_order_acq_rel);
656 __glibcxx_assert(__m2 <= __m1);
657 return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
661 compare_exchange_strong(__int_type& __i1, __int_type __i2,
662 memory_order __m1, memory_order __m2) volatile
664 __glibcxx_assert(__m2 != memory_order_release);
665 __glibcxx_assert(__m2 != memory_order_acq_rel);
666 __glibcxx_assert(__m2 <= __m1);
667 return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
671 compare_exchange_strong(__int_type& __i1, __int_type __i2,
672 memory_order __m = memory_order_seq_cst)
674 return compare_exchange_strong(__i1, __i2, __m,
675 __calculate_memory_order(__m));
679 compare_exchange_strong(__int_type& __i1, __int_type __i2,
680 memory_order __m = memory_order_seq_cst) volatile
682 return compare_exchange_strong(__i1, __i2, __m,
683 __calculate_memory_order(__m));
// fetch_* family: pass the compound-assignment operator token itself
// into _ATOMIC_MODIFY_; each returns the value prior to modification.
687 fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
688 { return _ATOMIC_MODIFY_(this, +=, __i, __m); }
691 fetch_add(__int_type __i,
692 memory_order __m = memory_order_seq_cst) volatile
693 { return _ATOMIC_MODIFY_(this, +=, __i, __m); }
696 fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
697 { return _ATOMIC_MODIFY_(this, -=, __i, __m); }
700 fetch_sub(__int_type __i,
701 memory_order __m = memory_order_seq_cst) volatile
702 { return _ATOMIC_MODIFY_(this, -=, __i, __m); }
705 fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
706 { return _ATOMIC_MODIFY_(this, &=, __i, __m); }
709 fetch_and(__int_type __i,
710 memory_order __m = memory_order_seq_cst) volatile
711 { return _ATOMIC_MODIFY_(this, &=, __i, __m); }
714 fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
715 { return _ATOMIC_MODIFY_(this, |=, __i, __m); }
718 fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
719 { return _ATOMIC_MODIFY_(this, |=, __i, __m); }
722 fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
723 { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
726 fetch_xor(__int_type __i,
727 memory_order __m = memory_order_seq_cst) volatile
728 { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
// Tear down the implementation-detail operation macros so they do not
// leak into every translation unit that includes <atomic>.
// Fix: _ATOMIC_LOAD_ is defined above together with the other three
// operation macros but was missing from this #undef list, leaving it
// visible to user code; it is now undefined as well.
// NOTE(review): _ATOMIC_MEMBER_ is deliberately left defined here --
// confirm that downstream consumers (e.g. the library's atomic.cc)
// still rely on it before adding it to this list.
#undef _ATOMIC_LOAD_
732 #undef _ATOMIC_STORE_
733 #undef _ATOMIC_MODIFY_
734 #undef _ATOMIC_CMPEXCHNG_
735 } // namespace __atomic0
737 _GLIBCXX_END_NAMESPACE_VERSION