// -*- C++ -*- header.

// Copyright (C) 2008, 2009, 2010
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_2.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1

#pragma GCC system_header

_GLIBCXX_BEGIN_NAMESPACE(std)

// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
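//
// Illustrative sketch, not part of this header: "always lock-free" means
// every operation below maps straight onto a __sync builtin for a 1-, 2-,
// 4- or 8-byte operand, with no lock-based fallback, e.g.
//
//   int __x = 0;
//   __sync_fetch_and_add(&__x, 1);              // atomic add, full barrier
//   __sync_val_compare_and_swap(&__x, 1, 2);    // CAS, returns old value
//   __sync_lock_test_and_set(&__x, 3);          // swap, acquire barrier only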
namespace __atomic2
{
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst)
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    void
    clear(memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }

    void
    clear(memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }
  };
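
  // Illustrative usage sketch, not part of this header: atomic_flag as a
  // minimal spin lock (hypothetical names; assumes the user includes
  // <atomic> rather than this internal header):
  //
  //   std::atomic_flag __lock = ATOMIC_FLAG_INIT;
  //
  //   void __with_lock()
  //   {
  //     while (__lock.test_and_set(std::memory_order_acquire))
  //       { }                                // spin until previously clear
  //     // ... critical section ...
  //     __lock.clear(std::memory_order_release);
  //   }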

  /// atomic_address
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) volatile = delete;

    constexpr atomic_address(void* __v): _M_i (__v) { }

    bool
    is_lock_free() const
    { return true; }

    bool
    is_lock_free() const volatile
    { return true; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __v;
      else
        {
          // write_mem_barrier();
          _M_i = __v;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __v;
      else
        {
          // write_mem_barrier();
          _M_i = __v;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      void* __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      void* __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }
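
    // Note on the load sequence above: the first __sync_synchronize()
    // orders the load after all earlier operations, the second keeps later
    // operations from moving before it; together they approximate a
    // sequentially consistent load using only full-barrier builtins.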

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst)
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __v);
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __v);
    }
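
    // Illustrative sketch, not part of this header: if stronger-than-acquire
    // semantics were required here, the acquire-only builtin could be
    // preceded by a full barrier (hypothetical helper, for exposition only):
    //
    //   void*
    //   __exchange_seq_cst(void* __v)
    //   {
    //     __sync_synchronize();                 // upgrade to a full barrier
    //     return __sync_lock_test_and_set(&_M_i, __v);
    //   }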

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2)
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
                          memory_order __m1, memory_order __m2)
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
                          memory_order __m1, memory_order __m2) volatile
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
                          memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_weak(const void*& __v1, const void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      void* __v1o = __v1;
      void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      void* __v1o = __v1;
      void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
                            memory_order __m1, memory_order __m2)
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      const void* __v1o = __v1;
      const void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
                            memory_order __m1, memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      const void* __v1o = __v1;
      const void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
                            memory_order __m = memory_order_seq_cst)
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(const void*& __v1, const void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }
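
    // Illustrative usage sketch, not part of this header: the classic
    // compare-and-swap retry loop (hypothetical names __a, __desired):
    //
    //   void* __expected = __a.load();
    //   while (!__a.compare_exchange_weak(__expected, __desired))
    //     { }  // on failure, __expected is refreshed with the current value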

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_add(&_M_i, __d); }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_add(&_M_i, __d); }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
    { return __sync_fetch_and_sub(&_M_i, __d); }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_sub(&_M_i, __d); }
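
    // Note: arithmetic on the stored void* relies on the GCC extension
    // that treats sizeof(void) as 1, so __d is a byte offset, e.g.
    //
    //   atomic_address __a(__p);   // __p is a hypothetical pointer
    //   __a.fetch_add(4);          // advances the pointer by 4 bytes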

    operator void*() const
    { return load(); }

    operator void*() const volatile
    { return load(); }

    // XXX as specified but won't compile as store takes void*,
    // invalid conversion from const void* to void*
    // CD1 had this signature:
    //   const void*
    //   operator=(const void* __v)
    //   { store(__v); return __v; }
    void*
    operator=(void* __v)
    { store(__v); return __v; }

    // XXX as specified but won't compile as store takes void*,
    // invalid conversion from const void* to void*
    // CD1 had this signature, but store and this could both be const void*?
    //   const void*
    //   operator=(const void* __v) volatile
    //   { store(__v); return __v; }
    void*
    operator=(void* __v) volatile
    { store(__v); return __v; }

    void*
    operator+=(ptrdiff_t __d)
    { return __sync_add_and_fetch(&_M_i, __d); }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return __sync_add_and_fetch(&_M_i, __d); }

    void*
    operator-=(ptrdiff_t __d)
    { return __sync_sub_and_fetch(&_M_i, __d); }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return __sync_sub_and_fetch(&_M_i, __d); }
  };

  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  // (An illustrative usage sketch follows the class below.)
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __int_type;

      __int_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i): _M_i (__i) { }

      operator __int_type() const
      { return load(); }

      operator __int_type() const volatile
      { return load(); }

      __int_type
      operator=(__int_type __i)
      { store(__i); return __i; }

      __int_type
      operator=(__int_type __i) volatile
      { store(__i); return __i; }

      __int_type
      operator++(int)
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile
      { return fetch_add(1); }

      __int_type
      operator--(int)
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __int_type
      operator++()
      { return __sync_add_and_fetch(&_M_i, 1); }

      __int_type
      operator++() volatile
      { return __sync_add_and_fetch(&_M_i, 1); }

      __int_type
      operator--()
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __int_type
      operator--() volatile
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __int_type
      operator+=(__int_type __i)
      { return __sync_add_and_fetch(&_M_i, __i); }

      __int_type
      operator+=(__int_type __i) volatile
      { return __sync_add_and_fetch(&_M_i, __i); }

      __int_type
      operator-=(__int_type __i)
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __int_type
      operator-=(__int_type __i) volatile
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __int_type
      operator&=(__int_type __i)
      { return __sync_and_and_fetch(&_M_i, __i); }

      __int_type
      operator&=(__int_type __i) volatile
      { return __sync_and_and_fetch(&_M_i, __i); }

      __int_type
      operator|=(__int_type __i)
      { return __sync_or_and_fetch(&_M_i, __i); }

      __int_type
      operator|=(__int_type __i) volatile
      { return __sync_or_and_fetch(&_M_i, __i); }

      __int_type
      operator^=(__int_type __i)
      { return __sync_xor_and_fetch(&_M_i, __i); }

      __int_type
      operator^=(__int_type __i) volatile
      { return __sync_xor_and_fetch(&_M_i, __i); }

      bool
      is_lock_free() const
      { return true; }

      bool
      is_lock_free() const volatile
      { return true; }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __int_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __int_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }

      __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2)
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2)
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __int_type __i1o = __i1;
        __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __int_type __i1o = __i1;
        __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __int_type
      fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_add(&_M_i, __i); }

      __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_i, __i); }

      __int_type
      fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __int_type
      fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_and(&_M_i, __i); }

      __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_and(&_M_i, __i); }

      __int_type
      fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_or(&_M_i, __i); }

      __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_or(&_M_i, __i); }

      __int_type
      fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_xor(&_M_i, __i); }

      __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_xor(&_M_i, __i); }
    };
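
  // Illustrative usage sketch, not part of this header: the library-level
  // integral atomics are thin wrappers over this base; a hypothetical
  // direct use would look like
  //
  //   __atomic2::__atomic_base<int> __counter(0);
  //   __counter.fetch_add(5);                    // atomically add 5
  //   int __snapshot = __counter.load();         // seq_cst load
  //   __counter.compare_exchange_strong(__snapshot, 0,
  //                                     memory_order_acq_rel,
  //                                     memory_order_acquire);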
} // namespace __atomic2

_GLIBCXX_END_NAMESPACE

#endif