// -*- C++ -*- header.

// Copyright (C) 2008, 2009
// Free Software Foundation, Inc.

// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this library; see the file COPYING.  If not, write to
// the Free Software Foundation, 51 Franklin Street, Fifth Floor,
// Boston, MA 02110-1301, USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file bits/atomic_2.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1

#pragma GCC system_header

#include <cassert> // XXX static_assert vs. constant-expression PR38502

_GLIBCXX_BEGIN_NAMESPACE(std)

  // 2 == __atomic2 == Always lock-free
  // Assumed:
  // _GLIBCXX_ATOMIC_BUILTINS_1
  // _GLIBCXX_ATOMIC_BUILTINS_2
  // _GLIBCXX_ATOMIC_BUILTINS_4
  // _GLIBCXX_ATOMIC_BUILTINS_8
namespace __atomic2
{
  /// atomic_flag
  struct atomic_flag : private __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;

    atomic_flag(bool __i) { _M_i = __i; } // XXX deleted copy ctor != agg

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    void
    clear(memory_order __m = memory_order_seq_cst) volatile
    {
      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }
  };

  /// 29.4.2, address types
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;

    atomic_address(void* __v) { _M_i = __v; }

    bool
    is_lock_free() const volatile
    { return true; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      assert(__m != memory_order_acquire);
      assert(__m != memory_order_acq_rel);
      assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __v;
      else
        {
          // write_mem_barrier();
          _M_i = __v;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      assert(__m != memory_order_release);
      assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      void* __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __v);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      assert(__m2 != memory_order_release);
      assert(__m2 != memory_order_acq_rel);
      assert(__m2 <= __m1);

      void* __v1o = __v1;
      void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_add(&_M_i, __d); }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_sub(&_M_i, __d); }

    operator void*() const volatile
    { return load(); }

    void*
    operator=(void* __v) // XXX volatile
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return __sync_add_and_fetch(&_M_i, __d); }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return __sync_sub_and_fetch(&_M_i, __d); }
  };
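
  // Illustrative sketch only, not part of this header: the usual
  // compare-exchange retry loop over atomic_address, here publishing list
  // nodes.  The names __node, __head, and __push are hypothetical.
  //
  //   struct __node { __node* _M_next; };
  //   atomic_address __head(0);
  //
  //   void
  //   __push(__node* __n)
  //   {
  //     void* __old = __head.load(memory_order_relaxed);
  //     do
  //       __n->_M_next = static_cast<__node*>(__old);
  //     while (!__head.compare_exchange_weak(__old, __n,
  //                                          memory_order_release,
  //                                          memory_order_relaxed));
  //   }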

  // 29.3.1 atomic integral types
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t

  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __integral_type;

      __integral_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;

      // Requires __integral_type convertible to _M_i.
      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const volatile
      { return load(); }

      __integral_type
      operator=(__integral_type __i) // XXX volatile
      {
        store(__i);
        return __i;
      }

      __integral_type
      operator++(int) volatile
      { return fetch_add(1); }

      __integral_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __integral_type
      operator++() volatile
      { return __sync_add_and_fetch(&_M_i, 1); }

      __integral_type
      operator--() volatile
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __integral_type
      operator+=(__integral_type __i) volatile
      { return __sync_add_and_fetch(&_M_i, __i); }

      __integral_type
      operator-=(__integral_type __i) volatile
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __integral_type
      operator&=(__integral_type __i) volatile
      { return __sync_and_and_fetch(&_M_i, __i); }

      __integral_type
      operator|=(__integral_type __i) volatile
      { return __sync_or_and_fetch(&_M_i, __i); }

      __integral_type
      operator^=(__integral_type __i) volatile
      { return __sync_xor_and_fetch(&_M_i, __i); }
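
      // Note (illustrative only): the post-forms return the value before the
      // modification, the pre-forms the value after it.  For a hypothetical
      // __atomic_base<int> __a(5):
      //
      //   int __old = __a++; // __old == 5, __a holds 6
      //   int __cur = ++__a; // __cur == 7, __a holds 7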

      bool
      is_lock_free() const volatile
      { return true; }

      void
      store(__integral_type __i,
            memory_order __m = memory_order_seq_cst) volatile
      {
        assert(__m != memory_order_acquire);
        assert(__m != memory_order_acq_rel);
        assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __integral_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        assert(__m != memory_order_release);
        assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __integral_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __integral_type
      exchange(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        assert(__m2 != memory_order_release);
        assert(__m2 != memory_order_acq_rel);
        assert(__m2 <= __m1);

        __integral_type __i1o = __i1;
        __integral_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __integral_type
      fetch_add(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_i, __i); }

      __integral_type
      fetch_sub(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __integral_type
      fetch_and(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_and(&_M_i, __i); }

      __integral_type
      fetch_or(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_or(&_M_i, __i); }

      __integral_type
      fetch_xor(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_xor(&_M_i, __i); }
    };
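
  // Illustrative sketch only, not part of this header: fetch operations
  // return the value held before the modification, so a bit can be set and
  // tested in one atomic step.  __flags and __dirty_bit are hypothetical.
  //
  //   __atomic_base<unsigned int> __flags(0);
  //   const unsigned int __dirty_bit = 1U;
  //
  //   // True only for the first thread to set the bit.
  //   bool __won = (__flags.fetch_or(__dirty_bit) & __dirty_bit) == 0;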

  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool> _M_base;

  public:
    atomic_bool() = default;
    ~atomic_bool() = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) = delete;

    atomic_bool(bool __i) : _M_base(__i) { }

    bool
    operator=(bool __i) // XXX volatile
    { return _M_base.operator=(__i); }

    operator bool() const volatile
    { return _M_base.load(); }

    bool
    is_lock_free() const volatile
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const volatile
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };
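
  // Illustrative sketch only, not part of this header: atomic_bool as a
  // one-shot "done" flag between a producer and a spinning consumer.
  //
  //   atomic_bool __done(false);
  //
  //   // Producer thread:
  //   __done.store(true);
  //
  //   // Consumer thread:
  //   while (!__done.load())
  //     { }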

} // namespace __atomic2

_GLIBCXX_END_NAMESPACE

#endif