3 // Copyright (C) 2008, 2009
4 // Free Software Foundation, Inc.
6 // This file is part of the GNU ISO C++ Library. This library is free
7 // software; you can redistribute it and/or modify it under the
8 // terms of the GNU General Public License as published by the
9 // Free Software Foundation; either version 3, or (at your option)
12 // This library is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 // GNU General Public License for more details.
17 // Under Section 7 of GPL version 3, you are granted additional
18 // permissions described in the GCC Runtime Library Exception, version
19 // 3.1, as published by the Free Software Foundation.
21 // You should have received a copy of the GNU General Public License and
22 // a copy of the GCC Runtime Library Exception along with this program;
23 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 // <http://www.gnu.org/licenses/>.
27 * This is a Standard C++ Library header.
30 // Based on "C++ Atomic Types and Operations" by Hans Boehm and Lawrence Crowl.
31 // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html
33 #ifndef _GLIBCXX_ATOMIC
34 #define _GLIBCXX_ATOMIC 1
36 #pragma GCC system_header
38 #ifndef __GXX_EXPERIMENTAL_CXX0X__
39 # include <c++0x_warning.h>
42 #include <bits/atomic_base.h>
45 _GLIBCXX_BEGIN_NAMESPACE(std)
// [29.3] kill_dependency: ends a memory_order_consume dependency chain
// by laundering the value through a by-value parameter and returning it.
// NOTE(review): return type and body are not visible in this chunk.
53 template<typename _Tp>
55 kill_dependency(_Tp __y)
// Derive the failure memory order for the two-argument
// compare_exchange_{weak,strong} overloads from the success order:
// release -> relaxed and acq_rel -> acquire (a failed exchange performs
// no store, so any release component is dropped); all other orders are
// passed through unchanged.
62 __calculate_memory_order(memory_order __m)
64 const bool __cond1 = __m == memory_order_release;
65 const bool __cond2 = __m == memory_order_acq_rel;
66 memory_order __mo1(__cond1 ? memory_order_relaxed : __m);
67 memory_order __mo2(__cond2 ? memory_order_acquire : __mo1);
72 // Three nested namespaces for atomic implementation details.
74 // The nested namespace inlined into std:: is determined by the value
75 // of the _GLIBCXX_ATOMIC_PROPERTY macro and the resulting
76 // ATOMIC_*_LOCK_FREE macros. See file atomic_base.h.
78 // 0 == __atomic0 == Never lock-free
79 // 1 == __atomic1 == Best available, sometimes lock-free
80 // 2 == __atomic2 == Always lock-free
81 #include <bits/atomic_0.h>
82 #include <bits/atomic_2.h>
85 /// 29.4.3, Generic atomic type, primary class template.
86 template<typename _Tp>
// Atomics are neither copyable nor copy-assignable.
95 atomic(const atomic&) = delete;
96 atomic& operator=(const atomic&) volatile = delete;
// Converting constructor: initializes the contained value non-atomically.
98 atomic(_Tp __i) : _M_i(__i) { }
// Implicit conversion performs a seq_cst load of the stored value.
100 operator _Tp() const;
// Assignment stores __i (seq_cst) and returns the assigned value.
103 operator=(_Tp __i) { store(__i); return __i; }
106 is_lock_free() const volatile;
// Basic atomic operations; every memory_order parameter defaults to
// sequentially consistent ordering.
109 store(_Tp, memory_order = memory_order_seq_cst) volatile;
112 load(memory_order = memory_order_seq_cst) const volatile;
115 exchange(_Tp __i, memory_order = memory_order_seq_cst) volatile;
// Four-argument forms take distinct success/failure orders; the
// two-argument forms derive the failure order from the success order.
118 compare_exchange_weak(_Tp&, _Tp, memory_order, memory_order) volatile;
121 compare_exchange_strong(_Tp&, _Tp, memory_order, memory_order) volatile;
124 compare_exchange_weak(_Tp&, _Tp,
125 memory_order = memory_order_seq_cst) volatile;
128 compare_exchange_strong(_Tp&, _Tp,
129 memory_order = memory_order_seq_cst) volatile;
133 /// Partial specialization for pointer types.
134 template<typename _Tp>
135 struct atomic<_Tp*> : public atomic_address
// Atomics are neither copyable nor copy-assignable.
139 atomic(const atomic&) = delete;
140 atomic& operator=(const atomic&) volatile = delete;
142 atomic(_Tp* __v) : atomic_address(__v) { }
// Typed wrappers over the untyped void* operations of atomic_address;
// definitions follow after the explicit specializations.
145 store(_Tp*, memory_order = memory_order_seq_cst);
148 load(memory_order = memory_order_seq_cst) const;
151 exchange(_Tp*, memory_order = memory_order_seq_cst);
154 compare_exchange_weak(_Tp*&, _Tp*, memory_order, memory_order);
157 compare_exchange_strong(_Tp*&, _Tp*, memory_order, memory_order);
160 compare_exchange_weak(_Tp*&, _Tp*, memory_order = memory_order_seq_cst);
163 compare_exchange_strong(_Tp*&, _Tp*, memory_order = memory_order_seq_cst);
// fetch_add/fetch_sub scale __d by sizeof(_Tp), like ordinary pointer
// arithmetic.
166 fetch_add(ptrdiff_t, memory_order = memory_order_seq_cst);
169 fetch_sub(ptrdiff_t, memory_order = memory_order_seq_cst);
171 operator _Tp*() const
// Increment/decrement and compound assignment expressed in terms of
// fetch_add/fetch_sub (seq_cst); the pre-forms return the new value,
// the post-forms the old one.
182 operator++(int) { return fetch_add(1); }
185 operator--(int) { return fetch_sub(1); }
188 operator++() { return fetch_add(1) + 1; }
191 operator--() { return fetch_sub(1) - 1; }
194 operator+=(ptrdiff_t __d)
195 { return fetch_add(__d) + __d; }
198 operator-=(ptrdiff_t __d)
199 { return fetch_sub(__d) - __d; }
203 /// Explicit specialization for void*
205 struct atomic<void*> : public atomic_address
207 typedef void* __integral_type;
208 typedef atomic_address __base_type;
// Atomics are neither copyable nor copy-assignable.
212 atomic(const atomic&) = delete;
213 atomic& operator=(const atomic&) volatile = delete;
215 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
217 using __base_type::operator __integral_type;
218 using __base_type::operator=;
221 /// Explicit specialization for bool.
223 struct atomic<bool> : public atomic_bool
225 typedef bool __integral_type;
226 typedef atomic_bool __base_type;
// Atomics are neither copyable nor copy-assignable.
230 atomic(const atomic&) = delete;
231 atomic& operator=(const atomic&) volatile = delete;
233 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
235 using __base_type::operator __integral_type;
236 using __base_type::operator=;
239 /// Explicit specialization for char.
241 struct atomic<char> : public atomic_char
243 typedef char __integral_type;
244 typedef atomic_char __base_type;
// Atomics are neither copyable nor copy-assignable.
248 atomic(const atomic&) = delete;
249 atomic& operator=(const atomic&) volatile = delete;
251 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
253 using __base_type::operator __integral_type;
254 using __base_type::operator=;
257 /// Explicit specialization for signed char.
259 struct atomic<signed char> : public atomic_schar
261 typedef signed char __integral_type;
262 typedef atomic_schar __base_type;
// Atomics are neither copyable nor copy-assignable.
266 atomic(const atomic&) = delete;
267 atomic& operator=(const atomic&) volatile = delete;
269 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
271 using __base_type::operator __integral_type;
272 using __base_type::operator=;
275 /// Explicit specialization for unsigned char.
277 struct atomic<unsigned char> : public atomic_uchar
279 typedef unsigned char __integral_type;
280 typedef atomic_uchar __base_type;
// Atomics are neither copyable nor copy-assignable.
284 atomic(const atomic&) = delete;
285 atomic& operator=(const atomic&) volatile = delete;
287 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
289 using __base_type::operator __integral_type;
290 using __base_type::operator=;
293 /// Explicit specialization for short.
295 struct atomic<short> : public atomic_short
297 typedef short __integral_type;
298 typedef atomic_short __base_type;
// Atomics are neither copyable nor copy-assignable.
302 atomic(const atomic&) = delete;
303 atomic& operator=(const atomic&) volatile = delete;
305 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
307 using __base_type::operator __integral_type;
308 using __base_type::operator=;
311 /// Explicit specialization for unsigned short.
313 struct atomic<unsigned short> : public atomic_ushort
315 typedef unsigned short __integral_type;
316 typedef atomic_ushort __base_type;
// Atomics are neither copyable nor copy-assignable.
320 atomic(const atomic&) = delete;
321 atomic& operator=(const atomic&) volatile = delete;
323 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
325 using __base_type::operator __integral_type;
326 using __base_type::operator=;
329 /// Explicit specialization for int.
331 struct atomic<int> : public atomic_int
333 typedef int __integral_type;
334 typedef atomic_int __base_type;
// Atomics are neither copyable nor copy-assignable.
338 atomic(const atomic&) = delete;
339 atomic& operator=(const atomic&) volatile = delete;
341 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
343 using __base_type::operator __integral_type;
344 using __base_type::operator=;
347 /// Explicit specialization for unsigned int.
349 struct atomic<unsigned int> : public atomic_uint
351 typedef unsigned int __integral_type;
352 typedef atomic_uint __base_type;
// Atomics are neither copyable nor copy-assignable.
356 atomic(const atomic&) = delete;
357 atomic& operator=(const atomic&) volatile = delete;
359 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
361 using __base_type::operator __integral_type;
362 using __base_type::operator=;
365 /// Explicit specialization for long.
367 struct atomic<long> : public atomic_long
369 typedef long __integral_type;
370 typedef atomic_long __base_type;
// Atomics are neither copyable nor copy-assignable.
374 atomic(const atomic&) = delete;
375 atomic& operator=(const atomic&) volatile = delete;
377 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
379 using __base_type::operator __integral_type;
380 using __base_type::operator=;
383 /// Explicit specialization for unsigned long.
385 struct atomic<unsigned long> : public atomic_ulong
387 typedef unsigned long __integral_type;
388 typedef atomic_ulong __base_type;
// Atomics are neither copyable nor copy-assignable.
392 atomic(const atomic&) = delete;
393 atomic& operator=(const atomic&) volatile = delete;
395 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
397 using __base_type::operator __integral_type;
398 using __base_type::operator=;
401 /// Explicit specialization for long long.
403 struct atomic<long long> : public atomic_llong
405 typedef long long __integral_type;
406 typedef atomic_llong __base_type;
// Atomics are neither copyable nor copy-assignable.
410 atomic(const atomic&) = delete;
411 atomic& operator=(const atomic&) volatile = delete;
413 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
415 using __base_type::operator __integral_type;
416 using __base_type::operator=;
419 /// Explicit specialization for unsigned long long.
421 struct atomic<unsigned long long> : public atomic_ullong
423 typedef unsigned long long __integral_type;
424 typedef atomic_ullong __base_type;
// Atomics are neither copyable nor copy-assignable.
428 atomic(const atomic&) = delete;
429 atomic& operator=(const atomic&) volatile = delete;
431 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
433 using __base_type::operator __integral_type;
434 using __base_type::operator=;
437 /// Explicit specialization for wchar_t.
439 struct atomic<wchar_t> : public atomic_wchar_t
441 typedef wchar_t __integral_type;
442 typedef atomic_wchar_t __base_type;
// Atomics are neither copyable nor copy-assignable.
446 atomic(const atomic&) = delete;
447 atomic& operator=(const atomic&) volatile = delete;
449 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
451 using __base_type::operator __integral_type;
452 using __base_type::operator=;
455 /// Explicit specialization for char16_t.
457 struct atomic<char16_t> : public atomic_char16_t
459 typedef char16_t __integral_type;
460 typedef atomic_char16_t __base_type;
// Atomics are neither copyable nor copy-assignable.
464 atomic(const atomic&) = delete;
465 atomic& operator=(const atomic&) volatile = delete;
467 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
469 using __base_type::operator __integral_type;
470 using __base_type::operator=;
473 /// Explicit specialization for char32_t.
475 struct atomic<char32_t> : public atomic_char32_t
477 typedef char32_t __integral_type;
478 typedef atomic_char32_t __base_type;
// Atomics are neither copyable nor copy-assignable.
482 atomic(const atomic&) = delete;
483 atomic& operator=(const atomic&) volatile = delete;
485 atomic(__integral_type __i) : __base_type(__i) { }
// Conversion-to-value and assignment come from the base type.
487 using __base_type::operator __integral_type;
488 using __base_type::operator=;
// Delegate to the untyped base-class load and cast the result back to
// the typed pointer.
492 template<typename _Tp>
494 atomic<_Tp*>::load(memory_order __m) const
495 { return static_cast<_Tp*>(atomic_address::load(__m)); }
// Delegate to the untyped base-class exchange; __v converts implicitly
// to void*, and the previous value is cast back to _Tp*.
497 template<typename _Tp>
499 atomic<_Tp*>::exchange(_Tp* __v, memory_order __m)
500 { return static_cast<_Tp*>(atomic_address::exchange(__v, __m)); }
502 template<typename _Tp>
504 atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v, memory_order __m1,
// View the expected-value slot as a void* so the untyped base class can
// rewrite it in place when the comparison fails.
507 void** __vr = reinterpret_cast<void**>(&__r);
508 void* __vv = static_cast<void*>(__v);
509 return atomic_address::compare_exchange_weak(*__vr, __vv, __m1, __m2);
512 template<typename _Tp>
514 atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
// View the expected-value slot as a void* so the untyped base class can
// rewrite it in place when the comparison fails.
518 void** __vr = reinterpret_cast<void**>(&__r);
519 void* __vv = static_cast<void*>(__v);
520 return atomic_address::compare_exchange_strong(*__vr, __vv, __m1, __m2);
// Two-argument form: derive the failure order from the success order
// via __calculate_memory_order and forward to the four-argument form.
523 template<typename _Tp>
525 atomic<_Tp*>::compare_exchange_weak(_Tp*& __r, _Tp* __v,
528 return compare_exchange_weak(__r, __v, __m,
529 __calculate_memory_order(__m));
// Two-argument form: derive the failure order from the success order
// via __calculate_memory_order and forward to the four-argument form.
532 template<typename _Tp>
534 atomic<_Tp*>::compare_exchange_strong(_Tp*& __r, _Tp* __v,
537 return compare_exchange_strong(__r, __v, __m,
538 __calculate_memory_order(__m));
// Scale the element count by sizeof(_Tp) and perform the byte-wise add
// through the atomic_address free function; returns the previous value.
541 template<typename _Tp>
543 atomic<_Tp*>::fetch_add(ptrdiff_t __d, memory_order __m)
545 void* __p = atomic_fetch_add_explicit(this, sizeof(_Tp) * __d, __m);
546 return static_cast<_Tp*>(__p);
// Scale the element count by sizeof(_Tp) and perform the byte-wise
// subtract through the atomic_address free function; returns the
// previous value.
549 template<typename _Tp>
551 atomic<_Tp*>::fetch_sub(ptrdiff_t __d, memory_order __m)
553 void* __p = atomic_fetch_sub_explicit(this, sizeof(_Tp) * __d, __m);
554 return static_cast<_Tp*>(__p);
557 // Convenience function definitions, atomic_flag.
// C-style free functions forwarding to the corresponding atomic_flag
// members with an explicit memory order.
559 atomic_flag_test_and_set_explicit(atomic_flag* __a, memory_order __m)
560 { return __a->test_and_set(__m); }
563 atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m)
564 { return __a->clear(__m); }
567 // Convenience function definitions, atomic_address.
// C-style free functions forwarding to atomic_address members.  The
// non-_explicit forms use the member default (memory_order_seq_cst, or
// explicit seq_cst for the compare-exchange pair).
569 atomic_is_lock_free(const atomic_address* __a)
570 { return __a->is_lock_free(); }
573 atomic_store(atomic_address* __a, void* __v)
577 atomic_store_explicit(atomic_address* __a, void* __v, memory_order __m)
578 { __a->store(__v, __m); }
581 atomic_load(const atomic_address* __a)
582 { return __a->load(); }
585 atomic_load_explicit(const atomic_address* __a, memory_order __m)
586 { return __a->load(__m); }
589 atomic_exchange(atomic_address* __a, void* __v)
590 { return __a->exchange(__v); }
593 atomic_exchange_explicit(atomic_address* __a, void* __v, memory_order __m)
594 { return __a->exchange(__v, __m); }
// __v1 points at the expected value, updated in place on failure.
597 atomic_compare_exchange_weak(atomic_address* __a, void** __v1, void* __v2)
599 return __a->compare_exchange_weak(*__v1, __v2, memory_order_seq_cst,
600 memory_order_seq_cst);
604 atomic_compare_exchange_strong(atomic_address* __a,
605 void** __v1, void* __v2)
607 return __a->compare_exchange_strong(*__v1, __v2, memory_order_seq_cst,
608 memory_order_seq_cst);
612 atomic_compare_exchange_weak_explicit(atomic_address* __a,
613 void** __v1, void* __v2,
614 memory_order __m1, memory_order __m2)
615 { return __a->compare_exchange_weak(*__v1, __v2, __m1, __m2); }
618 atomic_compare_exchange_strong_explicit(atomic_address* __a,
619 void** __v1, void* __v2,
620 memory_order __m1, memory_order __m2)
621 { return __a->compare_exchange_strong(*__v1, __v2, __m1, __m2); }
// Note: __d here is a raw byte offset; the typed atomic<_Tp*> members
// perform the sizeof scaling before calling these.
624 atomic_fetch_add_explicit(atomic_address* __a, ptrdiff_t __d,
626 { return __a->fetch_add(__d, __m); }
629 atomic_fetch_add(atomic_address* __a, ptrdiff_t __d)
630 { return __a->fetch_add(__d); }
633 atomic_fetch_sub_explicit(atomic_address* __a, ptrdiff_t __d,
635 { return __a->fetch_sub(__d, __m); }
638 atomic_fetch_sub(atomic_address* __a, ptrdiff_t __d)
639 { return __a->fetch_sub(__d); }
642 // Convenience function definitions, atomic_bool.
// C-style free functions forwarding to atomic_bool members.  The
// non-_explicit forms use the member default (memory_order_seq_cst, or
// explicit seq_cst for the compare-exchange pair).
644 atomic_is_lock_free(const atomic_bool* __a)
645 { return __a->is_lock_free(); }
648 atomic_store(atomic_bool* __a, bool __i)
652 atomic_store_explicit(atomic_bool* __a, bool __i, memory_order __m)
653 { __a->store(__i, __m); }
656 atomic_load(const atomic_bool* __a)
657 { return __a->load(); }
660 atomic_load_explicit(const atomic_bool* __a, memory_order __m)
661 { return __a->load(__m); }
664 atomic_exchange(atomic_bool* __a, bool __i)
665 { return __a->exchange(__i); }
668 atomic_exchange_explicit(atomic_bool* __a, bool __i, memory_order __m)
669 { return __a->exchange(__i, __m); }
// __i1 points at the expected value, updated in place on failure.
672 atomic_compare_exchange_weak(atomic_bool* __a, bool* __i1, bool __i2)
674 return __a->compare_exchange_weak(*__i1, __i2, memory_order_seq_cst,
675 memory_order_seq_cst);
679 atomic_compare_exchange_strong(atomic_bool* __a, bool* __i1, bool __i2)
681 return __a->compare_exchange_strong(*__i1, __i2, memory_order_seq_cst,
682 memory_order_seq_cst);
686 atomic_compare_exchange_weak_explicit(atomic_bool* __a, bool* __i1,
687 bool __i2, memory_order __m1,
689 { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
692 atomic_compare_exchange_strong_explicit(atomic_bool* __a,
693 bool* __i1, bool __i2,
694 memory_order __m1, memory_order __m2)
695 { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
699 // Free standing functions. Template argument should be constrained
700 // to integral types as specified in the standard.
// Free-standing _explicit operations on the shared integral base
// class template; each simply forwards to the corresponding member.
701 template<typename _ITp>
703 atomic_store_explicit(__atomic_base<_ITp>* __a, _ITp __i, memory_order __m)
704 { __a->store(__i, __m); }
706 template<typename _ITp>
708 atomic_load_explicit(const __atomic_base<_ITp>* __a, memory_order __m)
709 { return __a->load(__m); }
711 template<typename _ITp>
713 atomic_exchange_explicit(__atomic_base<_ITp>* __a, _ITp __i,
715 { return __a->exchange(__i, __m); }
// __i1 points at the expected value, updated in place on failure.
717 template<typename _ITp>
719 atomic_compare_exchange_weak_explicit(__atomic_base<_ITp>* __a,
720 _ITp* __i1, _ITp __i2,
721 memory_order __m1, memory_order __m2)
722 { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }
724 template<typename _ITp>
726 atomic_compare_exchange_strong_explicit(__atomic_base<_ITp>* __a,
727 _ITp* __i1, _ITp __i2,
730 { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }
// Read-modify-write operations; each returns the previous value.
732 template<typename _ITp>
734 atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i,
736 { return __a->fetch_add(__i, __m); }
738 template<typename _ITp>
740 atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i,
742 { return __a->fetch_sub(__i, __m); }
744 template<typename _ITp>
746 atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i,
748 { return __a->fetch_and(__i, __m); }
750 template<typename _ITp>
752 atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i,
754 { return __a->fetch_or(__i, __m); }
756 template<typename _ITp>
758 atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i,
760 { return __a->fetch_xor(__i, __m); }
762 template<typename _ITp>
764 atomic_is_lock_free(const __atomic_base<_ITp>* __a)
765 { return __a->is_lock_free(); }
// Non-_explicit forms: forward to the _explicit counterparts with
// memory_order_seq_cst, the default ordering.
767 template<typename _ITp>
769 atomic_store(__atomic_base<_ITp>* __a, _ITp __i)
770 { atomic_store_explicit(__a, __i, memory_order_seq_cst); }
772 template<typename _ITp>
774 atomic_load(const __atomic_base<_ITp>* __a)
775 { return atomic_load_explicit(__a, memory_order_seq_cst); }
777 template<typename _ITp>
779 atomic_exchange(__atomic_base<_ITp>* __a, _ITp __i)
780 { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }
// __i1 points at the expected value, updated in place on failure.
782 template<typename _ITp>
784 atomic_compare_exchange_weak(__atomic_base<_ITp>* __a,
785 _ITp* __i1, _ITp __i2)
787 return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
788 memory_order_seq_cst,
789 memory_order_seq_cst);
792 template<typename _ITp>
794 atomic_compare_exchange_strong(__atomic_base<_ITp>* __a,
795 _ITp* __i1, _ITp __i2)
797 return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
798 memory_order_seq_cst,
799 memory_order_seq_cst);
// Read-modify-write operations; each returns the previous value.
802 template<typename _ITp>
804 atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i)
805 { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }
807 template<typename _ITp>
809 atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i)
810 { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }
812 template<typename _ITp>
814 atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i)
815 { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }
817 template<typename _ITp>
819 atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i)
820 { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }
822 template<typename _ITp>
824 atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i)
825 { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
829 _GLIBCXX_END_NAMESPACE