// -*- C++ -*- header.

// Copyright (C) 2008, 2009, 2010
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
/** @file bits/atomic_0.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */
#ifndef _GLIBCXX_ATOMIC_0_H
#define _GLIBCXX_ATOMIC_0_H 1

#pragma GCC system_header

_GLIBCXX_BEGIN_NAMESPACE(std)

// 0 == __atomic0 == Never lock-free
namespace __atomic0
{
  _GLIBCXX_BEGIN_EXTERN_C
  void
  atomic_flag_clear_explicit(__atomic_flag_base*, memory_order)
  _GLIBCXX_NOTHROW;

  void
  __atomic_flag_wait_explicit(__atomic_flag_base*, memory_order)
  _GLIBCXX_NOTHROW;

  _GLIBCXX_CONST __atomic_flag_base*
  __atomic_flag_for_address(const volatile void* __z) _GLIBCXX_NOTHROW;

  _GLIBCXX_END_EXTERN_C
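
  // These three helpers implement the locking protocol used throughout
  // this header: __atomic_flag_for_address maps an object's address to a
  // lock flag, __atomic_flag_wait_explicit spins until that flag is
  // acquired, and atomic_flag_clear_explicit releases it.  A minimal
  // sketch of the pattern (illustrative only, not library code):
  //
  //   __atomic_flag_base* __g = __atomic_flag_for_address(&__obj);
  //   __atomic_flag_wait_explicit(__g, __m);   // acquire per-address lock
  //   /* ...read or modify __obj... */
  //   atomic_flag_clear_explicit(__g, __m);    // release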
  // Implementation specific defines.
#define _ATOMIC_MEMBER_ _M_i

  // Locking macros used to implement the atomic operations below.
#define _ATOMIC_LOAD_(__a, __x)						\
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type;			\
    __i_type* __p = &_ATOMIC_MEMBER_;					\
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);		\
    __atomic_flag_wait_explicit(__g, __x);				\
    __i_type __r = *__p;						\
    atomic_flag_clear_explicit(__g, __x);				\
    __r; })
#define _ATOMIC_STORE_(__a, __m, __x)					\
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type;			\
    __i_type* __p = &_ATOMIC_MEMBER_;					\
    __typeof__(__m) __v = (__m);					\
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);		\
    __atomic_flag_wait_explicit(__g, __x);				\
    *__p = __v;								\
    atomic_flag_clear_explicit(__g, __x);				\
    __v; })
#define _ATOMIC_MODIFY_(__a, __o, __m, __x)				\
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type;			\
    __i_type* __p = &_ATOMIC_MEMBER_;					\
    __typeof__(__m) __v = (__m);					\
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);		\
    __atomic_flag_wait_explicit(__g, __x);				\
    __i_type __r = *__p;						\
    *__p __o __v;							\
    atomic_flag_clear_explicit(__g, __x);				\
    __r; })
#define _ATOMIC_CMPEXCHNG_(__a, __e, __m, __x)				\
  ({typedef __typeof__(_ATOMIC_MEMBER_) __i_type;			\
    __i_type* __p = &_ATOMIC_MEMBER_;					\
    __typeof__(__e) __q = (__e);					\
    __typeof__(__m) __v = (__m);					\
    bool __r;								\
    __atomic_flag_base* __g = __atomic_flag_for_address(__p);		\
    __atomic_flag_wait_explicit(__g, __x);				\
    __i_type __t = *__p;						\
    if (*__q == __t)							\
      {									\
	*__p = (__i_type)__v;						\
	__r = true;							\
      }									\
    else { *__q = __t; __r = false; }					\
    atomic_flag_clear_explicit(__g, __x);				\
    __r; })
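
  // As used below, e.g. fetch_add expands _ATOMIC_MODIFY_(this, +=, __i, __m)
  // to roughly the following (a sketch under the locking protocol above,
  // assuming an int payload for _M_i):
  //
  //   ({ int* __p = &_M_i;
  //      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
  //      __atomic_flag_wait_explicit(__g, __m);     // lock
  //      int __r = *__p;                            // old value
  //      *__p += __i;                               // apply __o (here +=)
  //      atomic_flag_clear_explicit(__g, __m);      // unlock
  //      __r; })                                    // yield old value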
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst);

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile;

    void
    clear(memory_order __m = memory_order_seq_cst);

    void
    clear(memory_order __m = memory_order_seq_cst) volatile;
  };
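
  // Example (illustrative only; user code should include the public
  // <atomic> header rather than this internal one): atomic_flag as a
  // minimal spin lock.
  //
  //   std::atomic_flag __lock = ATOMIC_FLAG_INIT;
  //
  //   void __locked_work()
  //   {
  //     while (__lock.test_and_set(std::memory_order_acquire))
  //       { }                                  // spin until flag was clear
  //     // ...critical section...
  //     __lock.clear(std::memory_order_release);
  //   }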
  /// atomic_address
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) volatile = delete;

    constexpr atomic_address(void* __v): _M_i (__v) { }

    bool
    is_lock_free() const { return false; }

    bool
    is_lock_free() const volatile { return false; }
    void
    store(void* __v, memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);
      _ATOMIC_STORE_(this, __v, __m);
    }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);
      _ATOMIC_STORE_(this, __v, __m);
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);
      return _ATOMIC_LOAD_(this, __m);
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);
      return _ATOMIC_LOAD_(this, __m);
    }
    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst)
    { return _ATOMIC_MODIFY_(this, =, __v, __m); }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    { return _ATOMIC_MODIFY_(this, =, __v, __m); }
    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
			  memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
			  memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
			  memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_weak(__v1, __v2, __m,
				   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
			  memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
				   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
			  memory_order __m1, memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
			  memory_order __m1, memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
			  memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_weak(__v1, __v2, __m,
				   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
			  memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
				   __calculate_memory_order(__m));
    }
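
    // NB: In this never-lock-free implementation both the weak and strong
    // compare-exchange forms expand to the same _ATOMIC_CMPEXCHNG_ under
    // the per-address lock, so the weak form cannot fail spuriously.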
    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
			    memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
			    memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
			    memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_strong(__v1, __v2, __m,
				     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
			    memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
				     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
			    memory_order __m1, memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
			    memory_order __m1, memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
			    memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_strong(__v1, __v2, __m,
				     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
			    memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
				     __calculate_memory_order(__m));
    }
    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    {
      void** __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) + __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) + __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    {
      void** __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) - __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) - __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }
    operator void*() const
    { return load(); }

    operator void*() const volatile
    { return load(); }

    void*
    operator=(void* __v)
    {
      store(__v);
      return __v;
    }

    void*
    operator=(void* __v) volatile
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d)
    { return fetch_add(__d) + __d; }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return fetch_add(__d) + __d; }

    void*
    operator-=(ptrdiff_t __d)
    { return fetch_sub(__d) - __d; }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return fetch_sub(__d) - __d; }
  };
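
  // Example (illustrative only): atomic_address performs arithmetic on the
  // stored pointer at byte granularity, via the (char*) cast in fetch_add
  // and fetch_sub above.
  //
  //   char __buf[16];
  //   std::atomic_address __a(__buf);
  //   void* __old = __a.fetch_add(4);   // returns __buf, stores __buf + 4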
  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work
  // on.
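
  // Illustrative use of the base template (assumed; the atomic_[type]
  // structs are defined in terms of it elsewhere in the library):
  //
  //   __atomic_base<int> __i(0);
  //   __i.fetch_add(5);                 // returns 0, leaves 5 stored
  //   int __n = __i.load();             // __n == 5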
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp	__int_type;

      __int_type	_M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_base._M_i.
      constexpr __atomic_base(__int_type __i): _M_i (__i) { }
      operator __int_type() const
      { return load(); }

      operator __int_type() const volatile
      { return load(); }

      __int_type
      operator=(__int_type __i)
      {
	store(__i);
	return __i;
      }

      __int_type
      operator=(__int_type __i) volatile
      {
	store(__i);
	return __i;
      }
      __int_type
      operator++(int)
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile
      { return fetch_add(1); }

      __int_type
      operator--(int)
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __int_type
      operator++()
      { return fetch_add(1) + 1; }

      __int_type
      operator++() volatile
      { return fetch_add(1) + 1; }

      __int_type
      operator--()
      { return fetch_sub(1) - 1; }

      __int_type
      operator--() volatile
      { return fetch_sub(1) - 1; }
      __int_type
      operator+=(__int_type __i)
      { return fetch_add(__i) + __i; }

      __int_type
      operator+=(__int_type __i) volatile
      { return fetch_add(__i) + __i; }

      __int_type
      operator-=(__int_type __i)
      { return fetch_sub(__i) - __i; }

      __int_type
      operator-=(__int_type __i) volatile
      { return fetch_sub(__i) - __i; }

      __int_type
      operator&=(__int_type __i)
      { return fetch_and(__i) & __i; }

      __int_type
      operator&=(__int_type __i) volatile
      { return fetch_and(__i) & __i; }

      __int_type
      operator|=(__int_type __i)
      { return fetch_or(__i) | __i; }

      __int_type
      operator|=(__int_type __i) volatile
      { return fetch_or(__i) | __i; }

      __int_type
      operator^=(__int_type __i)
      { return fetch_xor(__i) ^ __i; }

      __int_type
      operator^=(__int_type __i) volatile
      { return fetch_xor(__i) ^ __i; }
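
      // NB: Each compound-assignment operator above returns the updated
      // value, recomputed locally from the old value that fetch_* returns;
      // e.g. (__x += 3) on a stored value of 2 yields 5 and stores 5.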
      bool
      is_lock_free() const
      { return false; }

      bool
      is_lock_free() const volatile
      { return false; }
      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
	__glibcxx_assert(__m != memory_order_acquire);
	__glibcxx_assert(__m != memory_order_acq_rel);
	__glibcxx_assert(__m != memory_order_consume);
	_ATOMIC_STORE_(this, __i, __m);
      }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      {
	__glibcxx_assert(__m != memory_order_acquire);
	__glibcxx_assert(__m != memory_order_acq_rel);
	__glibcxx_assert(__m != memory_order_consume);
	_ATOMIC_STORE_(this, __i, __m);
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const
      {
	__glibcxx_assert(__m != memory_order_release);
	__glibcxx_assert(__m != memory_order_acq_rel);
	return _ATOMIC_LOAD_(this, __m);
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
	__glibcxx_assert(__m != memory_order_release);
	__glibcxx_assert(__m != memory_order_acq_rel);
	return _ATOMIC_LOAD_(this, __m);
      }
      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }

      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }
      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2)
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2) volatile
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst)
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst) volatile
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __calculate_memory_order(__m));
      }
      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2)
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2) volatile
      {
	__glibcxx_assert(__m2 != memory_order_release);
	__glibcxx_assert(__m2 != memory_order_acq_rel);
	__glibcxx_assert(__m2 <= __m1);
	return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst)
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst) volatile
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __calculate_memory_order(__m));
      }
      __int_type
      fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __int_type
      fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __int_type
      fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __int_type
      fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __int_type
      fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __int_type
      fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }

      __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
    };

#undef _ATOMIC_LOAD_
#undef _ATOMIC_STORE_
#undef _ATOMIC_MODIFY_
#undef _ATOMIC_CMPEXCHNG_
} // namespace __atomic0

_GLIBCXX_END_NAMESPACE

#endif