// Copyright (C) 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_2.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1

#pragma GCC system_header

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
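
// "Always lock-free" is visible to user code through is_lock_free().  A
// minimal sketch (plain user code, not part of this header, assuming the
// __atomic2 implementation has been selected):
//
//   #include <atomic>
//   #include <cassert>
//
//   int main()
//   {
//     std::atomic<int> a(0);
//     assert(a.is_lock_free());   // always true in the __atomic2 layer
//   }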

namespace __atomic2
{
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst)
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    void
    clear(memory_order __m = memory_order_seq_cst)
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }

    void
    clear(memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }
  };
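
  // A common use of atomic_flag is a simple spin lock.  Illustrative
  // user-side sketch only (lock_flag, lock and unlock are assumptions of
  // the example, not names defined in this header):
  //
  //   #include <atomic>
  //
  //   std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;
  //
  //   void lock()
  //   {
  //     // test_and_set returns the previous value; spin until it was clear.
  //     while (lock_flag.test_and_set(std::memory_order_acquire))
  //       { }
  //   }
  //
  //   void unlock()
  //   { lock_flag.clear(std::memory_order_release); }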

  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
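  //
  // Stated as a compile-time check (illustrative only; no such assertion
  // appears in this header):
  //
  //   static_assert(sizeof(_ITp) == 1 || sizeof(_ITp) == 2
  //                 || sizeof(_ITp) == 4 || sizeof(_ITp) == 8,
  //                 "_ITp must be 1, 2, 4 or 8 bytes for __sync built-ins");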
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp      __int_type;

      __int_type        _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i): _M_i (__i) { }

      operator __int_type() const
      { return load(); }

      operator __int_type() const volatile
      { return load(); }

      __int_type
      operator=(__int_type __i)
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int)
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile
      { return fetch_add(1); }

      __int_type
      operator--(int)
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __int_type
      operator++()
      { return __sync_add_and_fetch(&_M_i, 1); }

      __int_type
      operator++() volatile
      { return __sync_add_and_fetch(&_M_i, 1); }

      __int_type
      operator--()
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __int_type
      operator--() volatile
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __int_type
      operator+=(__int_type __i)
      { return __sync_add_and_fetch(&_M_i, __i); }

      __int_type
      operator+=(__int_type __i) volatile
      { return __sync_add_and_fetch(&_M_i, __i); }

      __int_type
      operator-=(__int_type __i)
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __int_type
      operator-=(__int_type __i) volatile
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __int_type
      operator&=(__int_type __i)
      { return __sync_and_and_fetch(&_M_i, __i); }

      __int_type
      operator&=(__int_type __i) volatile
      { return __sync_and_and_fetch(&_M_i, __i); }

      __int_type
      operator|=(__int_type __i)
      { return __sync_or_and_fetch(&_M_i, __i); }

      __int_type
      operator|=(__int_type __i) volatile
      { return __sync_or_and_fetch(&_M_i, __i); }

      __int_type
      operator^=(__int_type __i)
      { return __sync_xor_and_fetch(&_M_i, __i); }

      __int_type
      operator^=(__int_type __i) volatile
      { return __sync_xor_and_fetch(&_M_i, __i); }

      bool
      is_lock_free() const
      { return true; }

      bool
      is_lock_free() const volatile
      { return true; }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __int_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __int_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }

      __int_type
      exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }
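
      // Note: the __sync built-ins provide no compare-and-swap that may
      // fail spuriously, so the weak forms below simply forward to
      // compare_exchange_strong.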

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2)
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2)
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __int_type __i1o = __i1;
        __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __int_type __i1o = __i1;
        __int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst)
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __int_type
      fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_add(&_M_i, __i); }

      __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_i, __i); }

      __int_type
      fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __int_type
      fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_and(&_M_i, __i); }

      __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_and(&_M_i, __i); }

      __int_type
      fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_or(&_M_i, __i); }

      __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_or(&_M_i, __i); }

      __int_type
      fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_xor(&_M_i, __i); }

      __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_xor(&_M_i, __i); }
    };
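
  // Illustrative user-side view of the integral operations above, assuming
  // std::atomic<int> ultimately dispatches to this base class when the
  // __atomic2 layer is selected (sketch only, not part of this header):
  //
  //   #include <atomic>
  //
  //   std::atomic<int> counter(0);
  //
  //   void hit()
  //   { counter.fetch_add(1); }      // __sync_fetch_and_add, full barrier
  //
  //   int snapshot()
  //   { return counter.load(); }     // barrier, plain read, barrier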

  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp*     __pointer_type;

      __pointer_type    _M_p;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p): _M_p (__p) { }

      operator __pointer_type() const
      { return load(); }

      operator __pointer_type() const volatile
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p)
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int)
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile
      { return fetch_add(1); }

      __pointer_type
      operator--(int)
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __pointer_type
      operator++()
      { return fetch_add(1) + 1; }

      __pointer_type
      operator++() volatile
      { return fetch_add(1) + 1; }

      __pointer_type
      operator--()
      { return fetch_sub(1) - 1; }

      __pointer_type
      operator--() volatile
      { return fetch_sub(1) - 1; }

      __pointer_type
      operator+=(ptrdiff_t __d)
      { return fetch_add(__d) + __d; }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile
      { return fetch_add(__d) + __d; }

      __pointer_type
      operator-=(ptrdiff_t __d)
      { return fetch_sub(__d) - __d; }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile
      { return fetch_sub(__d) - __d; }

      bool
      is_lock_free() const
      { return true; }

      bool
      is_lock_free() const volatile
      { return true; }

      void
      store(__pointer_type __p, memory_order __m = memory_order_seq_cst)
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_p = __p;
        else
          {
            // write_mem_barrier();
            _M_p = __p;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_p = __p;
        else
          {
            // write_mem_barrier();
            _M_p = __p;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __pointer_type
      load(memory_order __m = memory_order_seq_cst) const
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __pointer_type __ret = _M_p;
        __sync_synchronize();
        return __ret;
      }

      __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __pointer_type __ret = _M_p;
        __sync_synchronize();
        return __ret;
      }

      __pointer_type
      exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_p, __p);
      }

      __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_p, __p);
      }

      bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1, memory_order __m2)
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __pointer_type __p1o = __p1;
        __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);

        // Assume extra stores (of same value) allowed in true case.
        __p1 = __p1n;
        return __p1o == __p1n;
      }

      bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __pointer_type __p1o = __p1;
        __pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);

        // Assume extra stores (of same value) allowed in true case.
        __p1 = __p1n;
        return __p1o == __p1n;
      }

      __pointer_type
      fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_add(&_M_p, __d); }

      __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_p, __d); }

      __pointer_type
      fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
      { return __sync_fetch_and_sub(&_M_p, __d); }

      __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_p, __d); }
    };
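
  // Sketch of a typical compare-and-swap loop on an atomic pointer
  // (illustrative user code only; Node, head and push are assumptions of
  // the example, not names defined in this header):
  //
  //   #include <atomic>
  //
  //   struct Node { Node* next; };
  //
  //   std::atomic<Node*> head(nullptr);
  //
  //   void push(Node* n)
  //   {
  //     Node* old = head.load();
  //     do
  //       n->next = old;
  //     while (!head.compare_exchange_strong(old, n));
  //     // on failure, compare_exchange_strong writes the observed value
  //     // back into old, so the loop retries with fresh data
  //   }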

} // namespace __atomic2

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif