libstdc++
atomic_base.h
1// -*- C++ -*- header.
2
3// Copyright (C) 2008-2024 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/atomic_base.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{atomic}
28 */
29
30#ifndef _GLIBCXX_ATOMIC_BASE_H
31#define _GLIBCXX_ATOMIC_BASE_H 1
32
33#ifdef _GLIBCXX_SYSHDR
34#pragma GCC system_header
35#endif
36
37#include <bits/c++config.h>
38#include <new> // For placement new
40#include <bits/move.h>
41
42#if __cplusplus > 201703L && _GLIBCXX_HOSTED
43#include <bits/atomic_wait.h>
44#endif
45
46#ifndef _GLIBCXX_ALWAYS_INLINE
47#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
48#endif
49
50#include <bits/version.h>
51
52namespace std _GLIBCXX_VISIBILITY(default)
53{
54_GLIBCXX_BEGIN_NAMESPACE_VERSION
55
56 /**
57 * @defgroup atomics Atomics
58 *
59 * Components for performing atomic operations.
60 * @{
61 */
62
63 /// Enumeration for memory_order
64#if __cplusplus > 201703L
65 enum class memory_order : int
66 {
67 relaxed,
68 consume,
69 acquire,
70 release,
71 acq_rel,
72 seq_cst
73 };
74
75 inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
76 inline constexpr memory_order memory_order_consume = memory_order::consume;
77 inline constexpr memory_order memory_order_acquire = memory_order::acquire;
78 inline constexpr memory_order memory_order_release = memory_order::release;
79 inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
80 inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
81#else
82 enum memory_order : int
83 {
84 memory_order_relaxed,
85 memory_order_consume,
86 memory_order_acquire,
87 memory_order_release,
88 memory_order_acq_rel,
89 memory_order_seq_cst
90 };
91#endif
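  // Editorial note, not part of the original header: a minimal sketch of how
  // these orderings are typically paired in user code (assumes <atomic> and
  // <thread> are included; the names are illustrative only):
  //
  //   int data = 0;
  //   std::atomic<bool> ready{false};
  //
  //   // producer thread
  //   data = 42;
  //   ready.store(true, std::memory_order_release);      // publish
  //
  //   // consumer thread
  //   while (!ready.load(std::memory_order_acquire)) { }  // synchronize
  //   int v = data;   // guaranteed to observe 42, no data race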
92
93 /// @cond undocumented
94 enum __memory_order_modifier
95 {
96 __memory_order_mask = 0x0ffff,
97 __memory_order_modifier_mask = 0xffff0000,
98 __memory_order_hle_acquire = 0x10000,
99 __memory_order_hle_release = 0x20000
100 };
101 /// @endcond
102
103 constexpr memory_order
104 operator|(memory_order __m, __memory_order_modifier __mod) noexcept
105 {
106 return memory_order(int(__m) | int(__mod));
107 }
108
109 constexpr memory_order
110 operator&(memory_order __m, __memory_order_modifier __mod) noexcept
111 {
112 return memory_order(int(__m) & int(__mod));
113 }
114
115 /// @cond undocumented
116
117 // Drop release ordering as per [atomics.types.operations.req]/21
118 constexpr memory_order
119 __cmpexch_failure_order2(memory_order __m) noexcept
120 {
121 return __m == memory_order_acq_rel ? memory_order_acquire
122 : __m == memory_order_release ? memory_order_relaxed : __m;
123 }
124
125 constexpr memory_order
126 __cmpexch_failure_order(memory_order __m) noexcept
127 {
128 return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
129 | __memory_order_modifier(__m & __memory_order_modifier_mask));
130 }
131
132 constexpr bool
133 __is_valid_cmpexch_failure_order(memory_order __m) noexcept
134 {
135 return (__m & __memory_order_mask) != memory_order_release
136 && (__m & __memory_order_mask) != memory_order_acq_rel;
137 }
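  // Editorial note, not part of the original header: the mapping above means a
  // single-order compare_exchange call derives its failure order as follows:
  //   __cmpexch_failure_order(memory_order_seq_cst) == memory_order_seq_cst
  //   __cmpexch_failure_order(memory_order_acq_rel) == memory_order_acquire
  //   __cmpexch_failure_order(memory_order_release) == memory_order_relaxed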
138
139 // Base types for atomics.
140 template<typename _IntTp>
141 struct __atomic_base;
142
143 /// @endcond
144
145 _GLIBCXX_ALWAYS_INLINE void
146 atomic_thread_fence(memory_order __m) noexcept
147 { __atomic_thread_fence(int(__m)); }
148
149 _GLIBCXX_ALWAYS_INLINE void
150 atomic_signal_fence(memory_order __m) noexcept
151 { __atomic_signal_fence(int(__m)); }
152
153 /// kill_dependency
154 template<typename _Tp>
155 inline _Tp
156 kill_dependency(_Tp __y) noexcept
157 {
158 _Tp __ret(__y);
159 return __ret;
160 }
161
162/// @cond undocumented
163#if __glibcxx_atomic_value_initialization
164# define _GLIBCXX20_INIT(I) = I
165#else
166# define _GLIBCXX20_INIT(I)
167#endif
168/// @endcond
169
170#define ATOMIC_VAR_INIT(_VI) { _VI }
171
172 template<typename _Tp>
173 struct atomic;
174
175 template<typename _Tp>
176 struct atomic<_Tp*>;
177
178 /* The target's "set" value for test-and-set may not be exactly 1. */
179#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
180 typedef bool __atomic_flag_data_type;
181#else
182 typedef unsigned char __atomic_flag_data_type;
183#endif
184
185 /// @cond undocumented
186
187 /*
188 * Base type for atomic_flag.
189 *
190 * Base type is POD with data, allowing atomic_flag to derive from
191 * it and meet the standard layout type requirement. In addition to
192 * compatibility with a C interface, this allows different
193 * implementations of atomic_flag to use the same atomic operation
194 * functions, via a standard conversion to the __atomic_flag_base
195 * argument.
196 */
197 _GLIBCXX_BEGIN_EXTERN_C
198
199 struct __atomic_flag_base
200 {
201 __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
202 };
203
204 _GLIBCXX_END_EXTERN_C
205
206 /// @endcond
207
208#define ATOMIC_FLAG_INIT { 0 }
209
210 /// atomic_flag
211 struct atomic_flag : public __atomic_flag_base
212 {
213 atomic_flag() noexcept = default;
214 ~atomic_flag() noexcept = default;
215 atomic_flag(const atomic_flag&) = delete;
216 atomic_flag& operator=(const atomic_flag&) = delete;
217 atomic_flag& operator=(const atomic_flag&) volatile = delete;
218
 219 // Construction from bool, as used by ATOMIC_FLAG_INIT.
220 constexpr atomic_flag(bool __i) noexcept
221 : __atomic_flag_base{ _S_init(__i) }
222 { }
223
224 _GLIBCXX_ALWAYS_INLINE bool
225 test_and_set(memory_order __m = memory_order_seq_cst) noexcept
226 {
227 return __atomic_test_and_set (&_M_i, int(__m));
228 }
229
230 _GLIBCXX_ALWAYS_INLINE bool
231 test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
232 {
233 return __atomic_test_and_set (&_M_i, int(__m));
234 }
235
236#ifdef __glibcxx_atomic_flag_test // C++ >= 20
237 _GLIBCXX_ALWAYS_INLINE bool
238 test(memory_order __m = memory_order_seq_cst) const noexcept
239 {
240 __atomic_flag_data_type __v;
241 __atomic_load(&_M_i, &__v, int(__m));
242 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
243 }
244
245 _GLIBCXX_ALWAYS_INLINE bool
246 test(memory_order __m = memory_order_seq_cst) const volatile noexcept
247 {
248 __atomic_flag_data_type __v;
249 __atomic_load(&_M_i, &__v, int(__m));
250 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
251 }
252#endif
253
254#if __glibcxx_atomic_wait // C++ >= 20 && (linux_futex || gthread)
255 _GLIBCXX_ALWAYS_INLINE void
256 wait(bool __old,
257 memory_order __m = memory_order_seq_cst) const noexcept
258 {
259 const __atomic_flag_data_type __v
260 = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;
261
262 std::__atomic_wait_address_v(&_M_i, __v,
263 [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
264 }
265
266 // TODO add const volatile overload
267
268 _GLIBCXX_ALWAYS_INLINE void
269 notify_one() noexcept
270 { std::__atomic_notify_address(&_M_i, false); }
271
272 // TODO add const volatile overload
273
274 _GLIBCXX_ALWAYS_INLINE void
275 notify_all() noexcept
276 { std::__atomic_notify_address(&_M_i, true); }
277
278 // TODO add const volatile overload
279#endif // __glibcxx_atomic_wait
280
281 _GLIBCXX_ALWAYS_INLINE void
282 clear(memory_order __m = memory_order_seq_cst) noexcept
283 {
284 memory_order __b __attribute__ ((__unused__))
285 = __m & __memory_order_mask;
286 __glibcxx_assert(__b != memory_order_consume);
287 __glibcxx_assert(__b != memory_order_acquire);
288 __glibcxx_assert(__b != memory_order_acq_rel);
289
290 __atomic_clear (&_M_i, int(__m));
291 }
292
293 _GLIBCXX_ALWAYS_INLINE void
294 clear(memory_order __m = memory_order_seq_cst) volatile noexcept
295 {
296 memory_order __b __attribute__ ((__unused__))
297 = __m & __memory_order_mask;
298 __glibcxx_assert(__b != memory_order_consume);
299 __glibcxx_assert(__b != memory_order_acquire);
300 __glibcxx_assert(__b != memory_order_acq_rel);
301
302 __atomic_clear (&_M_i, int(__m));
303 }
304
305 private:
306 static constexpr __atomic_flag_data_type
307 _S_init(bool __i)
308 { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
309 };
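  // Editorial example, not part of the original header: atomic_flag used as a
  // minimal spinlock (assumes user code includes <atomic>; names are
  // illustrative only):
  //
  //   std::atomic_flag lock = ATOMIC_FLAG_INIT;
  //
  //   void critical_section()
  //   {
  //     while (lock.test_and_set(std::memory_order_acquire))
  //       { /* spin until the current holder clears the flag */ }
  //     // ... exclusive access here ...
  //     lock.clear(std::memory_order_release);
  //   }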
310
311 /// @cond undocumented
312
313 /// Base class for atomic integrals.
314 //
315 // For each of the integral types, define atomic_[integral type] struct
316 //
317 // atomic_bool bool
318 // atomic_char char
319 // atomic_schar signed char
320 // atomic_uchar unsigned char
321 // atomic_short short
322 // atomic_ushort unsigned short
323 // atomic_int int
324 // atomic_uint unsigned int
325 // atomic_long long
326 // atomic_ulong unsigned long
327 // atomic_llong long long
328 // atomic_ullong unsigned long long
329 // atomic_char8_t char8_t
330 // atomic_char16_t char16_t
331 // atomic_char32_t char32_t
332 // atomic_wchar_t wchar_t
333 //
334 // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
335 // 8 bytes, since that is what GCC built-in functions for atomic
336 // memory access expect.
337 template<typename _ITp>
338 struct __atomic_base
339 {
340 using value_type = _ITp;
341 using difference_type = value_type;
342
343 private:
344 typedef _ITp __int_type;
345
346 static constexpr int _S_alignment =
347 sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
348
349 alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
350
351 public:
352 __atomic_base() noexcept = default;
353 ~__atomic_base() noexcept = default;
354 __atomic_base(const __atomic_base&) = delete;
355 __atomic_base& operator=(const __atomic_base&) = delete;
356 __atomic_base& operator=(const __atomic_base&) volatile = delete;
357
358 // Requires __int_type convertible to _M_i.
359 constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
360
361 operator __int_type() const noexcept
362 { return load(); }
363
364 operator __int_type() const volatile noexcept
365 { return load(); }
366
367 __int_type
368 operator=(__int_type __i) noexcept
369 {
370 store(__i);
371 return __i;
372 }
373
374 __int_type
375 operator=(__int_type __i) volatile noexcept
376 {
377 store(__i);
378 return __i;
379 }
380
381 __int_type
382 operator++(int) noexcept
383 { return fetch_add(1); }
384
385 __int_type
386 operator++(int) volatile noexcept
387 { return fetch_add(1); }
388
389 __int_type
390 operator--(int) noexcept
391 { return fetch_sub(1); }
392
393 __int_type
394 operator--(int) volatile noexcept
395 { return fetch_sub(1); }
396
397 __int_type
398 operator++() noexcept
399 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
400
401 __int_type
402 operator++() volatile noexcept
403 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
404
405 __int_type
406 operator--() noexcept
407 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
408
409 __int_type
410 operator--() volatile noexcept
411 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
412
413 __int_type
414 operator+=(__int_type __i) noexcept
415 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
416
417 __int_type
418 operator+=(__int_type __i) volatile noexcept
419 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
420
421 __int_type
422 operator-=(__int_type __i) noexcept
423 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
424
425 __int_type
426 operator-=(__int_type __i) volatile noexcept
427 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
428
429 __int_type
430 operator&=(__int_type __i) noexcept
431 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
432
433 __int_type
434 operator&=(__int_type __i) volatile noexcept
435 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
436
437 __int_type
438 operator|=(__int_type __i) noexcept
439 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
440
441 __int_type
442 operator|=(__int_type __i) volatile noexcept
443 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
444
445 __int_type
446 operator^=(__int_type __i) noexcept
447 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
448
449 __int_type
450 operator^=(__int_type __i) volatile noexcept
451 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
452
453 bool
454 is_lock_free() const noexcept
455 {
456 // Use a fake, minimally aligned pointer.
457 return __atomic_is_lock_free(sizeof(_M_i),
458 reinterpret_cast<void *>(-_S_alignment));
459 }
460
461 bool
462 is_lock_free() const volatile noexcept
463 {
464 // Use a fake, minimally aligned pointer.
465 return __atomic_is_lock_free(sizeof(_M_i),
466 reinterpret_cast<void *>(-_S_alignment));
467 }
468
469 _GLIBCXX_ALWAYS_INLINE void
470 store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
471 {
472 memory_order __b __attribute__ ((__unused__))
473 = __m & __memory_order_mask;
474 __glibcxx_assert(__b != memory_order_acquire);
475 __glibcxx_assert(__b != memory_order_acq_rel);
476 __glibcxx_assert(__b != memory_order_consume);
477
478 __atomic_store_n(&_M_i, __i, int(__m));
479 }
480
481 _GLIBCXX_ALWAYS_INLINE void
482 store(__int_type __i,
483 memory_order __m = memory_order_seq_cst) volatile noexcept
484 {
485 memory_order __b __attribute__ ((__unused__))
486 = __m & __memory_order_mask;
487 __glibcxx_assert(__b != memory_order_acquire);
488 __glibcxx_assert(__b != memory_order_acq_rel);
489 __glibcxx_assert(__b != memory_order_consume);
490
491 __atomic_store_n(&_M_i, __i, int(__m));
492 }
493
494 _GLIBCXX_ALWAYS_INLINE __int_type
495 load(memory_order __m = memory_order_seq_cst) const noexcept
496 {
497 memory_order __b __attribute__ ((__unused__))
498 = __m & __memory_order_mask;
499 __glibcxx_assert(__b != memory_order_release);
500 __glibcxx_assert(__b != memory_order_acq_rel);
501
502 return __atomic_load_n(&_M_i, int(__m));
503 }
504
505 _GLIBCXX_ALWAYS_INLINE __int_type
506 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
507 {
508 memory_order __b __attribute__ ((__unused__))
509 = __m & __memory_order_mask;
510 __glibcxx_assert(__b != memory_order_release);
511 __glibcxx_assert(__b != memory_order_acq_rel);
512
513 return __atomic_load_n(&_M_i, int(__m));
514 }
515
516 _GLIBCXX_ALWAYS_INLINE __int_type
517 exchange(__int_type __i,
518 memory_order __m = memory_order_seq_cst) noexcept
519 {
520 return __atomic_exchange_n(&_M_i, __i, int(__m));
521 }
522
523
524 _GLIBCXX_ALWAYS_INLINE __int_type
525 exchange(__int_type __i,
526 memory_order __m = memory_order_seq_cst) volatile noexcept
527 {
528 return __atomic_exchange_n(&_M_i, __i, int(__m));
529 }
530
531 _GLIBCXX_ALWAYS_INLINE bool
532 compare_exchange_weak(__int_type& __i1, __int_type __i2,
533 memory_order __m1, memory_order __m2) noexcept
534 {
535 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
536
537 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
538 int(__m1), int(__m2));
539 }
540
541 _GLIBCXX_ALWAYS_INLINE bool
542 compare_exchange_weak(__int_type& __i1, __int_type __i2,
543 memory_order __m1,
544 memory_order __m2) volatile noexcept
545 {
546 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
547
548 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
549 int(__m1), int(__m2));
550 }
551
552 _GLIBCXX_ALWAYS_INLINE bool
553 compare_exchange_weak(__int_type& __i1, __int_type __i2,
554 memory_order __m = memory_order_seq_cst) noexcept
555 {
556 return compare_exchange_weak(__i1, __i2, __m,
557 __cmpexch_failure_order(__m));
558 }
559
560 _GLIBCXX_ALWAYS_INLINE bool
561 compare_exchange_weak(__int_type& __i1, __int_type __i2,
562 memory_order __m = memory_order_seq_cst) volatile noexcept
563 {
564 return compare_exchange_weak(__i1, __i2, __m,
565 __cmpexch_failure_order(__m));
566 }
567
568 _GLIBCXX_ALWAYS_INLINE bool
569 compare_exchange_strong(__int_type& __i1, __int_type __i2,
570 memory_order __m1, memory_order __m2) noexcept
571 {
572 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
573
574 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
575 int(__m1), int(__m2));
576 }
577
578 _GLIBCXX_ALWAYS_INLINE bool
579 compare_exchange_strong(__int_type& __i1, __int_type __i2,
580 memory_order __m1,
581 memory_order __m2) volatile noexcept
582 {
583 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
584
585 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
586 int(__m1), int(__m2));
587 }
588
589 _GLIBCXX_ALWAYS_INLINE bool
590 compare_exchange_strong(__int_type& __i1, __int_type __i2,
591 memory_order __m = memory_order_seq_cst) noexcept
592 {
593 return compare_exchange_strong(__i1, __i2, __m,
594 __cmpexch_failure_order(__m));
595 }
596
597 _GLIBCXX_ALWAYS_INLINE bool
598 compare_exchange_strong(__int_type& __i1, __int_type __i2,
599 memory_order __m = memory_order_seq_cst) volatile noexcept
600 {
601 return compare_exchange_strong(__i1, __i2, __m,
602 __cmpexch_failure_order(__m));
603 }
604
605#if __glibcxx_atomic_wait
606 _GLIBCXX_ALWAYS_INLINE void
607 wait(__int_type __old,
608 memory_order __m = memory_order_seq_cst) const noexcept
609 {
610 std::__atomic_wait_address_v(&_M_i, __old,
611 [__m, this] { return this->load(__m); });
612 }
613
614 // TODO add const volatile overload
615
616 _GLIBCXX_ALWAYS_INLINE void
617 notify_one() noexcept
618 { std::__atomic_notify_address(&_M_i, false); }
619
620 // TODO add const volatile overload
621
622 _GLIBCXX_ALWAYS_INLINE void
623 notify_all() noexcept
624 { std::__atomic_notify_address(&_M_i, true); }
625
626 // TODO add const volatile overload
627#endif // __glibcxx_atomic_wait
628
629 _GLIBCXX_ALWAYS_INLINE __int_type
630 fetch_add(__int_type __i,
631 memory_order __m = memory_order_seq_cst) noexcept
632 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
633
634 _GLIBCXX_ALWAYS_INLINE __int_type
635 fetch_add(__int_type __i,
636 memory_order __m = memory_order_seq_cst) volatile noexcept
637 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
638
639 _GLIBCXX_ALWAYS_INLINE __int_type
640 fetch_sub(__int_type __i,
641 memory_order __m = memory_order_seq_cst) noexcept
642 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
643
644 _GLIBCXX_ALWAYS_INLINE __int_type
645 fetch_sub(__int_type __i,
646 memory_order __m = memory_order_seq_cst) volatile noexcept
647 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
648
649 _GLIBCXX_ALWAYS_INLINE __int_type
650 fetch_and(__int_type __i,
651 memory_order __m = memory_order_seq_cst) noexcept
652 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
653
654 _GLIBCXX_ALWAYS_INLINE __int_type
655 fetch_and(__int_type __i,
656 memory_order __m = memory_order_seq_cst) volatile noexcept
657 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
658
659 _GLIBCXX_ALWAYS_INLINE __int_type
660 fetch_or(__int_type __i,
661 memory_order __m = memory_order_seq_cst) noexcept
662 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
663
664 _GLIBCXX_ALWAYS_INLINE __int_type
665 fetch_or(__int_type __i,
666 memory_order __m = memory_order_seq_cst) volatile noexcept
667 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
668
669 _GLIBCXX_ALWAYS_INLINE __int_type
670 fetch_xor(__int_type __i,
671 memory_order __m = memory_order_seq_cst) noexcept
672 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
673
674 _GLIBCXX_ALWAYS_INLINE __int_type
675 fetch_xor(__int_type __i,
676 memory_order __m = memory_order_seq_cst) volatile noexcept
677 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
678 };
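  // Editorial example, not part of the original header: this integral base
  // class is what gives std::atomic<int> its fetch_* operations and the
  // compare_exchange retry idiom (assumes <atomic> is included; names are
  // illustrative only):
  //
  //   std::atomic<int> counter{0};
  //   counter.fetch_add(1, std::memory_order_relaxed);   // relaxed increment
  //
  //   // read-modify-write of a derived value via compare_exchange_weak:
  //   int expected = counter.load(std::memory_order_relaxed);
  //   while (!counter.compare_exchange_weak(expected, expected * 2,
  //                                         std::memory_order_acq_rel,
  //                                         std::memory_order_relaxed))
  //     { /* expected was refreshed by the failed CAS; loop retries */ }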
679
680
681 /// Partial specialization for pointer types.
682 template<typename _PTp>
683 struct __atomic_base<_PTp*>
684 {
685 private:
686 typedef _PTp* __pointer_type;
687
688 __pointer_type _M_p _GLIBCXX20_INIT(nullptr);
689
690 static constexpr ptrdiff_t
691 _S_type_size(ptrdiff_t __d)
692 { return __d * sizeof(_PTp); }
693
694 public:
695 __atomic_base() noexcept = default;
696 ~__atomic_base() noexcept = default;
697 __atomic_base(const __atomic_base&) = delete;
698 __atomic_base& operator=(const __atomic_base&) = delete;
699 __atomic_base& operator=(const __atomic_base&) volatile = delete;
700
701 // Requires __pointer_type convertible to _M_p.
702 constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
703
704 operator __pointer_type() const noexcept
705 { return load(); }
706
707 operator __pointer_type() const volatile noexcept
708 { return load(); }
709
710 __pointer_type
711 operator=(__pointer_type __p) noexcept
712 {
713 store(__p);
714 return __p;
715 }
716
717 __pointer_type
718 operator=(__pointer_type __p) volatile noexcept
719 {
720 store(__p);
721 return __p;
722 }
723
724 __pointer_type
725 operator++(int) noexcept
726 { return fetch_add(1); }
727
728 __pointer_type
729 operator++(int) volatile noexcept
730 { return fetch_add(1); }
731
732 __pointer_type
733 operator--(int) noexcept
734 { return fetch_sub(1); }
735
736 __pointer_type
737 operator--(int) volatile noexcept
738 { return fetch_sub(1); }
739
740 __pointer_type
741 operator++() noexcept
742 { return __atomic_add_fetch(&_M_p, _S_type_size(1),
743 int(memory_order_seq_cst)); }
744
745 __pointer_type
746 operator++() volatile noexcept
747 { return __atomic_add_fetch(&_M_p, _S_type_size(1),
748 int(memory_order_seq_cst)); }
749
750 __pointer_type
751 operator--() noexcept
752 { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
753 int(memory_order_seq_cst)); }
754
755 __pointer_type
756 operator--() volatile noexcept
757 { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
758 int(memory_order_seq_cst)); }
759
760 __pointer_type
761 operator+=(ptrdiff_t __d) noexcept
762 { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
763 int(memory_order_seq_cst)); }
764
765 __pointer_type
766 operator+=(ptrdiff_t __d) volatile noexcept
767 { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
768 int(memory_order_seq_cst)); }
769
770 __pointer_type
771 operator-=(ptrdiff_t __d) noexcept
772 { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
773 int(memory_order_seq_cst)); }
774
775 __pointer_type
776 operator-=(ptrdiff_t __d) volatile noexcept
777 { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
778 int(memory_order_seq_cst)); }
779
780 bool
781 is_lock_free() const noexcept
782 {
783 // Produce a fake, minimally aligned pointer.
784 return __atomic_is_lock_free(sizeof(_M_p),
785 reinterpret_cast<void *>(-__alignof(_M_p)));
786 }
787
788 bool
789 is_lock_free() const volatile noexcept
790 {
791 // Produce a fake, minimally aligned pointer.
792 return __atomic_is_lock_free(sizeof(_M_p),
793 reinterpret_cast<void *>(-__alignof(_M_p)));
794 }
795
796 _GLIBCXX_ALWAYS_INLINE void
797 store(__pointer_type __p,
798 memory_order __m = memory_order_seq_cst) noexcept
799 {
800 memory_order __b __attribute__ ((__unused__))
801 = __m & __memory_order_mask;
802
803 __glibcxx_assert(__b != memory_order_acquire);
804 __glibcxx_assert(__b != memory_order_acq_rel);
805 __glibcxx_assert(__b != memory_order_consume);
806
807 __atomic_store_n(&_M_p, __p, int(__m));
808 }
809
810 _GLIBCXX_ALWAYS_INLINE void
811 store(__pointer_type __p,
812 memory_order __m = memory_order_seq_cst) volatile noexcept
813 {
814 memory_order __b __attribute__ ((__unused__))
815 = __m & __memory_order_mask;
816 __glibcxx_assert(__b != memory_order_acquire);
817 __glibcxx_assert(__b != memory_order_acq_rel);
818 __glibcxx_assert(__b != memory_order_consume);
819
820 __atomic_store_n(&_M_p, __p, int(__m));
821 }
822
823 _GLIBCXX_ALWAYS_INLINE __pointer_type
824 load(memory_order __m = memory_order_seq_cst) const noexcept
825 {
826 memory_order __b __attribute__ ((__unused__))
827 = __m & __memory_order_mask;
828 __glibcxx_assert(__b != memory_order_release);
829 __glibcxx_assert(__b != memory_order_acq_rel);
830
831 return __atomic_load_n(&_M_p, int(__m));
832 }
833
834 _GLIBCXX_ALWAYS_INLINE __pointer_type
835 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
836 {
837 memory_order __b __attribute__ ((__unused__))
838 = __m & __memory_order_mask;
839 __glibcxx_assert(__b != memory_order_release);
840 __glibcxx_assert(__b != memory_order_acq_rel);
841
842 return __atomic_load_n(&_M_p, int(__m));
843 }
844
845 _GLIBCXX_ALWAYS_INLINE __pointer_type
846 exchange(__pointer_type __p,
847 memory_order __m = memory_order_seq_cst) noexcept
848 {
849 return __atomic_exchange_n(&_M_p, __p, int(__m));
850 }
851
852
853 _GLIBCXX_ALWAYS_INLINE __pointer_type
854 exchange(__pointer_type __p,
855 memory_order __m = memory_order_seq_cst) volatile noexcept
856 {
857 return __atomic_exchange_n(&_M_p, __p, int(__m));
858 }
859
860 _GLIBCXX_ALWAYS_INLINE bool
861 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
862 memory_order __m1,
863 memory_order __m2) noexcept
864 {
865 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
866
867 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
868 int(__m1), int(__m2));
869 }
870
871 _GLIBCXX_ALWAYS_INLINE bool
872 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
873 memory_order __m1,
874 memory_order __m2) volatile noexcept
875 {
876 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
877
878 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
879 int(__m1), int(__m2));
880 }
881
882 _GLIBCXX_ALWAYS_INLINE bool
883 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
884 memory_order __m1,
885 memory_order __m2) noexcept
886 {
887 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
888
889 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
890 int(__m1), int(__m2));
891 }
892
893 _GLIBCXX_ALWAYS_INLINE bool
894 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
895 memory_order __m1,
896 memory_order __m2) volatile noexcept
897 {
898 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
899
900 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
901 int(__m1), int(__m2));
902 }
903
904#if __glibcxx_atomic_wait
905 _GLIBCXX_ALWAYS_INLINE void
906 wait(__pointer_type __old,
907 memory_order __m = memory_order_seq_cst) const noexcept
908 {
909 std::__atomic_wait_address_v(&_M_p, __old,
910 [__m, this]
911 { return this->load(__m); });
912 }
913
914 // TODO add const volatile overload
915
916 _GLIBCXX_ALWAYS_INLINE void
917 notify_one() const noexcept
918 { std::__atomic_notify_address(&_M_p, false); }
919
920 // TODO add const volatile overload
921
922 _GLIBCXX_ALWAYS_INLINE void
923 notify_all() const noexcept
924 { std::__atomic_notify_address(&_M_p, true); }
925
926 // TODO add const volatile overload
927#endif // __glibcxx_atomic_wait
928
929 _GLIBCXX_ALWAYS_INLINE __pointer_type
930 fetch_add(ptrdiff_t __d,
931 memory_order __m = memory_order_seq_cst) noexcept
932 { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }
933
934 _GLIBCXX_ALWAYS_INLINE __pointer_type
935 fetch_add(ptrdiff_t __d,
936 memory_order __m = memory_order_seq_cst) volatile noexcept
937 { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }
938
939 _GLIBCXX_ALWAYS_INLINE __pointer_type
940 fetch_sub(ptrdiff_t __d,
941 memory_order __m = memory_order_seq_cst) noexcept
942 { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
943
944 _GLIBCXX_ALWAYS_INLINE __pointer_type
945 fetch_sub(ptrdiff_t __d,
946 memory_order __m = memory_order_seq_cst) volatile noexcept
947 { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
948 };
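  // Editorial example, not part of the original header: the pointer
  // specialization scales arithmetic by sizeof(_PTp) via _S_type_size, so
  // fetch_add advances by whole elements (names are illustrative only):
  //
  //   int buf[8] = {};
  //   std::atomic<int*> p{buf};
  //   int* old = p.fetch_add(2);   // old == buf, p now points at buf + 2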
949
950 namespace __atomic_impl
951 {
952 // Implementation details of atomic padding handling
953
954 template<typename _Tp>
955 constexpr bool
956 __maybe_has_padding()
957 {
958#if ! __has_builtin(__builtin_clear_padding)
959 return false;
960#elif __has_builtin(__has_unique_object_representations)
961 return !__has_unique_object_representations(_Tp)
962 && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
963#else
964 return true;
965#endif
966 }
967
968 template<typename _Tp>
969 _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
970 __clear_padding(_Tp& __val) noexcept
971 {
972 auto* __ptr = std::__addressof(__val);
973#if __has_builtin(__builtin_clear_padding)
974 if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
975 __builtin_clear_padding(__ptr);
976#endif
977 return __ptr;
978 }
979
980 // Remove volatile and create a non-deduced context for value arguments.
981 template<typename _Tp>
982 using _Val = typename remove_volatile<_Tp>::type;
983
984#pragma GCC diagnostic push
985#pragma GCC diagnostic ignored "-Wc++17-extensions"
986
987 template<bool _AtomicRef = false, typename _Tp>
988 _GLIBCXX_ALWAYS_INLINE bool
989 __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
990 bool __is_weak,
991 memory_order __s, memory_order __f) noexcept
992 {
993 __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));
994
995 using _Vp = _Val<_Tp>;
996 _Tp* const __pval = std::__addressof(__val);
997
998 if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
999 {
1000 return __atomic_compare_exchange(__pval, std::__addressof(__e),
1001 std::__addressof(__i), __is_weak,
1002 int(__s), int(__f));
1003 }
1004 else if constexpr (!_AtomicRef) // std::atomic<T>
1005 {
1006 // Clear padding of the value we want to set:
1007 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1008 // Only allowed to modify __e on failure, so make a copy:
1009 _Vp __exp = __e;
1010 // Clear padding of the expected value:
1011 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1012
1013 // For std::atomic<T> we know that the contained value will already
1014 // have zeroed padding, so trivial memcmp semantics are OK.
1015 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1016 __is_weak, int(__s), int(__f)))
1017 return true;
1018 // Value bits must be different, copy from __exp back to __e:
1019 __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
1020 return false;
1021 }
1022 else // std::atomic_ref<T> where T has padding bits.
1023 {
1024 // Clear padding of the value we want to set:
1025 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1026
1027 // Only allowed to modify __e on failure, so make a copy:
1028 _Vp __exp = __e;
1029 // Optimistically assume that a previous store had zeroed padding
1030 // so that zeroing it in the expected value will match first time.
1031 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1032
1033 // compare_exchange is specified to compare value representations.
1034 // Need to check whether a failure is 'real' or just due to
1035 // differences in padding bits. This loop should run no more than
1036 // three times, because the worst case scenario is:
1037 // First CAS fails because the actual value has non-zero padding.
1038 // Second CAS fails because another thread stored the same value,
1039 // but now with padding cleared. Third CAS succeeds.
1040 // We will never need to loop a fourth time, because any value
1041 // written by another thread (whether via store, exchange or
1042 // compare_exchange) will have had its padding cleared.
1043 while (true)
1044 {
1045 // Copy of the expected value so we can clear its padding.
1046 _Vp __orig = __exp;
1047
1048 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1049 __is_weak, int(__s), int(__f)))
1050 return true;
1051
1052 // Copy of the actual value so we can clear its padding.
1053 _Vp __curr = __exp;
1054
1055 // Compare value representations (i.e. ignoring padding).
1056 if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
1057 __atomic_impl::__clear_padding(__curr),
1058 sizeof(_Vp)))
1059 {
1060 // Value representations compare unequal, real failure.
1061 __builtin_memcpy(std::__addressof(__e), __pexp,
1062 sizeof(_Vp));
1063 return false;
1064 }
1065 }
1066 }
1067 }
1068#pragma GCC diagnostic pop
1069 } // namespace __atomic_impl
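  // Editorial note, not part of the original header: __compare_exchange zeroes
  // padding bits so the underlying bytewise comparison only fails for genuine
  // value differences. A hypothetical type where this matters, assuming a
  // typical ABI with padding between the members:
  //
  //   struct Padded { char c; /* usually 3 padding bytes */ int i; };
  //   // Without __builtin_clear_padding, two Padded objects holding equal
  //   // member values could still differ in their padding bytes, making a
  //   // bytewise compare_exchange fail spuriously.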
1070
1071#if __cplusplus > 201703L
1072 // Implementation details of atomic_ref and atomic<floating-point>.
1073 namespace __atomic_impl
1074 {
1075 // Like _Val<T> above, but for difference_type arguments.
1076 template<typename _Tp>
1077 using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
1078
1079 template<size_t _Size, size_t _Align>
1080 _GLIBCXX_ALWAYS_INLINE bool
1081 is_lock_free() noexcept
1082 {
1083 // Produce a fake, minimally aligned pointer.
1084 return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
1085 }
1086
1087 template<typename _Tp>
1088 _GLIBCXX_ALWAYS_INLINE void
1089 store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
1090 {
1091 __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m));
1092 }
1093
1094 template<typename _Tp>
1095 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1096 load(const _Tp* __ptr, memory_order __m) noexcept
1097 {
1098 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1099 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1100 __atomic_load(__ptr, __dest, int(__m));
1101 return *__dest;
1102 }
1103
1104 template<typename _Tp>
1105 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1106 exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
1107 {
1108 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1109 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1110 __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
1111 __dest, int(__m));
1112 return *__dest;
1113 }
1114
1115 template<bool _AtomicRef = false, typename _Tp>
1116 _GLIBCXX_ALWAYS_INLINE bool
1117 compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
1118 _Val<_Tp> __desired, memory_order __success,
1119 memory_order __failure) noexcept
1120 {
1121 return __atomic_impl::__compare_exchange<_AtomicRef>(
1122 *__ptr, __expected, __desired, true, __success, __failure);
1123 }
1124
1125 template<bool _AtomicRef = false, typename _Tp>
1126 _GLIBCXX_ALWAYS_INLINE bool
1127 compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
1128 _Val<_Tp> __desired, memory_order __success,
1129 memory_order __failure) noexcept
1130 {
1131 return __atomic_impl::__compare_exchange<_AtomicRef>(
1132 *__ptr, __expected, __desired, false, __success, __failure);
1133 }
1134
1135#if __glibcxx_atomic_wait
1136 template<typename _Tp>
1137 _GLIBCXX_ALWAYS_INLINE void
1138 wait(const _Tp* __ptr, _Val<_Tp> __old,
1139 memory_order __m = memory_order_seq_cst) noexcept
1140 {
1141 std::__atomic_wait_address_v(__ptr, __old,
1142 [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
1143 }
1144
1145 // TODO add const volatile overload
1146
1147 template<typename _Tp>
1148 _GLIBCXX_ALWAYS_INLINE void
1149 notify_one(const _Tp* __ptr) noexcept
1150 { std::__atomic_notify_address(__ptr, false); }
1151
1152 // TODO add const volatile overload
1153
1154 template<typename _Tp>
1155 _GLIBCXX_ALWAYS_INLINE void
1156 notify_all(const _Tp* __ptr) noexcept
1157 { std::__atomic_notify_address(__ptr, true); }
1158
1159 // TODO add const volatile overload
1160#endif // __glibcxx_atomic_wait
1161
1162 template<typename _Tp>
1163 _GLIBCXX_ALWAYS_INLINE _Tp
1164 fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1165 { return __atomic_fetch_add(__ptr, __i, int(__m)); }
1166
1167 template<typename _Tp>
1168 _GLIBCXX_ALWAYS_INLINE _Tp
1169 fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1170 { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
1171
1172 template<typename _Tp>
1173 _GLIBCXX_ALWAYS_INLINE _Tp
1174 fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1175 { return __atomic_fetch_and(__ptr, __i, int(__m)); }
1176
1177 template<typename _Tp>
1178 _GLIBCXX_ALWAYS_INLINE _Tp
1179 fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1180 { return __atomic_fetch_or(__ptr, __i, int(__m)); }
1181
1182 template<typename _Tp>
1183 _GLIBCXX_ALWAYS_INLINE _Tp
1184 fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1185 { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
1186
1187 template<typename _Tp>
1188 _GLIBCXX_ALWAYS_INLINE _Tp
1189 __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1190 { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1191
1192 template<typename _Tp>
1193 _GLIBCXX_ALWAYS_INLINE _Tp
1194 __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1195 { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1196
1197 template<typename _Tp>
1198 _GLIBCXX_ALWAYS_INLINE _Tp
1199 __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1200 { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1201
1202 template<typename _Tp>
1203 _GLIBCXX_ALWAYS_INLINE _Tp
1204 __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1205 { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1206
1207 template<typename _Tp>
1208 _GLIBCXX_ALWAYS_INLINE _Tp
1209 __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1210 { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1211
1212 template<typename _Tp>
1213 _Tp
1214 __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1215 {
1216 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1217 _Val<_Tp> __newval = __oldval + __i;
1218 while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1219 memory_order_relaxed))
1220 __newval = __oldval + __i;
1221 return __oldval;
1222 }
1223
1224 template<typename _Tp>
1225 _Tp
1226 __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1227 {
1228 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1229 _Val<_Tp> __newval = __oldval - __i;
1230 while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
1231 memory_order_relaxed))
1232 __newval = __oldval - __i;
1233 return __oldval;
1234 }
1235
1236 template<typename _Tp>
1237 _Tp
1238 __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1239 {
1240 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1241 _Val<_Tp> __newval = __oldval + __i;
1242 while (!compare_exchange_weak(__ptr, __oldval, __newval,
1243 memory_order_seq_cst,
1244 memory_order_relaxed))
1245 __newval = __oldval + __i;
1246 return __newval;
1247 }
1248
1249 template<typename _Tp>
1250 _Tp
1251 __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1252 {
1253 _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
1254 _Val<_Tp> __newval = __oldval - __i;
1255 while (!compare_exchange_weak(__ptr, __oldval, __newval,
1256 memory_order_seq_cst,
1257 memory_order_relaxed))
1258 __newval = __oldval - __i;
1259 return __newval;
1260 }
1261 } // namespace __atomic_impl
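  // Editorial note, not part of the original header: the __fetch_*_flt and
  // __*_fetch_flt helpers above emulate floating-point read-modify-write with
  // a compare_exchange_weak loop, the same pattern user code would write by
  // hand (names are illustrative only):
  //
  //   std::atomic<double> acc{0.0};
  //   double old = acc.load(std::memory_order_relaxed);
  //   while (!acc.compare_exchange_weak(old, old + 1.5,
  //                                     std::memory_order_seq_cst,
  //                                     std::memory_order_relaxed))
  //     { /* old was refreshed by the failed CAS; loop retries */ }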
1262
1263 // base class for atomic<floating-point-type>
1264 template<typename _Fp>
1265 struct __atomic_float
1266 {
1267 static_assert(is_floating_point_v<_Fp>);
1268
1269 static constexpr size_t _S_alignment = __alignof__(_Fp);
1270
1271 public:
1272 using value_type = _Fp;
1273 using difference_type = value_type;
1274
1275 static constexpr bool is_always_lock_free
1276 = __atomic_always_lock_free(sizeof(_Fp), 0);
1277
1278 __atomic_float() = default;
1279
1280 constexpr
1281 __atomic_float(_Fp __t) : _M_fp(__t)
1282 { __atomic_impl::__clear_padding(_M_fp); }
1283
1284 __atomic_float(const __atomic_float&) = delete;
1285 __atomic_float& operator=(const __atomic_float&) = delete;
1286 __atomic_float& operator=(const __atomic_float&) volatile = delete;
1287
1288 _Fp
1289 operator=(_Fp __t) volatile noexcept
1290 {
1291 this->store(__t);
1292 return __t;
1293 }
1294
1295 _Fp
1296 operator=(_Fp __t) noexcept
1297 {
1298 this->store(__t);
1299 return __t;
1300 }
1301
1302 bool
1303 is_lock_free() const volatile noexcept
1304 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1305
1306 bool
1307 is_lock_free() const noexcept
1308 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1309
1310 void
1311 store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
1312 { __atomic_impl::store(&_M_fp, __t, __m); }
1313
1314 void
1315 store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
1316 { __atomic_impl::store(&_M_fp, __t, __m); }
1317
1318 _Fp
1319 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
1320 { return __atomic_impl::load(&_M_fp, __m); }
1321
1322 _Fp
1323 load(memory_order __m = memory_order_seq_cst) const noexcept
1324 { return __atomic_impl::load(&_M_fp, __m); }
1325
1326 operator _Fp() const volatile noexcept { return this->load(); }
1327 operator _Fp() const noexcept { return this->load(); }
1328
1329 _Fp
1330 exchange(_Fp __desired,
1331 memory_order __m = memory_order_seq_cst) volatile noexcept
1332 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1333
1334 _Fp
1335 exchange(_Fp __desired,
1336 memory_order __m = memory_order_seq_cst) noexcept
1337 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1338
1339 bool
1340 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1341 memory_order __success,
1342 memory_order __failure) noexcept
1343 {
1344 return __atomic_impl::compare_exchange_weak(&_M_fp,
1345 __expected, __desired,
1346 __success, __failure);
1347 }
1348
1349 bool
1350 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1351 memory_order __success,
1352 memory_order __failure) volatile noexcept
1353 {
1354 return __atomic_impl::compare_exchange_weak(&_M_fp,
1355 __expected, __desired,
1356 __success, __failure);
1357 }
1358
1359 bool
1360 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1361 memory_order __success,
1362 memory_order __failure) noexcept
1363 {
1364 return __atomic_impl::compare_exchange_strong(&_M_fp,
1365 __expected, __desired,
1366 __success, __failure);
1367 }
1368
1369 bool
1370 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1371 memory_order __success,
1372 memory_order __failure) volatile noexcept
1373 {
1374 return __atomic_impl::compare_exchange_strong(&_M_fp,
1375 __expected, __desired,
1376 __success, __failure);
1377 }
1378
1379 bool
1380 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1381 memory_order __order = memory_order_seq_cst)
1382 noexcept
1383 {
1384 return compare_exchange_weak(__expected, __desired, __order,
1385 __cmpexch_failure_order(__order));
1386 }
1387
1388 bool
1389 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1390 memory_order __order = memory_order_seq_cst)
1391 volatile noexcept
1392 {
1393 return compare_exchange_weak(__expected, __desired, __order,
1394 __cmpexch_failure_order(__order));
1395 }
1396
1397 bool
1398 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1399 memory_order __order = memory_order_seq_cst)
1400 noexcept
1401 {
1402 return compare_exchange_strong(__expected, __desired, __order,
1403 __cmpexch_failure_order(__order));
1404 }
1405
1406 bool
1407 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1408 memory_order __order = memory_order_seq_cst)
1409 volatile noexcept
1410 {
1411 return compare_exchange_strong(__expected, __desired, __order,
1412 __cmpexch_failure_order(__order));
1413 }
1414
1415#if __glibcxx_atomic_wait
1416 _GLIBCXX_ALWAYS_INLINE void
1417 wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1418 { __atomic_impl::wait(&_M_fp, __old, __m); }
1419
1420 // TODO add const volatile overload
1421
1422 _GLIBCXX_ALWAYS_INLINE void
1423 notify_one() const noexcept
1424 { __atomic_impl::notify_one(&_M_fp); }
1425
1426 // TODO add const volatile overload
1427
1428 _GLIBCXX_ALWAYS_INLINE void
1429 notify_all() const noexcept
1430 { __atomic_impl::notify_all(&_M_fp); }
1431
1432 // TODO add const volatile overload
1433#endif // __glibcxx_atomic_wait
1434
1435 value_type
1436 fetch_add(value_type __i,
1437 memory_order __m = memory_order_seq_cst) noexcept
1438 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1439
1440 value_type
1441 fetch_add(value_type __i,
1442 memory_order __m = memory_order_seq_cst) volatile noexcept
1443 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1444
1445 value_type
1446 fetch_sub(value_type __i,
1447 memory_order __m = memory_order_seq_cst) noexcept
1448 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1449
1450 value_type
1451 fetch_sub(value_type __i,
1452 memory_order __m = memory_order_seq_cst) volatile noexcept
1453 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1454
1455 value_type
1456 operator+=(value_type __i) noexcept
1457 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1458
1459 value_type
1460 operator+=(value_type __i) volatile noexcept
1461 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1462
1463 value_type
1464 operator-=(value_type __i) noexcept
1465 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1466
1467 value_type
1468 operator-=(value_type __i) volatile noexcept
1469 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1470
1471 private:
1472 alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
1473 };
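  // Editorial example, not part of the original header: with this base class,
  // C++20 std::atomic<double> supports fetch_add/fetch_sub as well as the
  // += and -= operators (values shown assume no concurrent writers):
  //
  //   std::atomic<double> total{0.0};
  //   total.fetch_add(2.5);   // returns the previous value, 0.0
  //   total += 0.5;           // returns the new value, here 3.0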
1474#undef _GLIBCXX20_INIT
1475
1476 template<typename _Tp,
1477 bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>,
1478 bool = is_floating_point_v<_Tp>>
1479 struct __atomic_ref;
1480
1481 // base class for non-integral, non-floating-point, non-pointer types
1482 template<typename _Tp>
1483 struct __atomic_ref<_Tp, false, false>
1484 {
1485 static_assert(is_trivially_copyable_v<_Tp>);
1486
1487 // 1/2/4/8/16-byte types must be aligned to at least their size.
1488 static constexpr int _S_min_alignment
1489 = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
1490 ? 0 : sizeof(_Tp);
1491
1492 public:
1493 using value_type = _Tp;
1494
1495 static constexpr bool is_always_lock_free
1496 = __atomic_always_lock_free(sizeof(_Tp), 0);
1497
1498 static constexpr size_t required_alignment
1499 = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
1500
1501 __atomic_ref& operator=(const __atomic_ref&) = delete;
1502
1503 explicit
1504 __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
1505 {
1506 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1507 }
1508
1509 __atomic_ref(const __atomic_ref&) noexcept = default;
1510
1511 _Tp
1512 operator=(_Tp __t) const noexcept
1513 {
1514 this->store(__t);
1515 return __t;
1516 }
1517
1518 operator _Tp() const noexcept { return this->load(); }
1519
1520 bool
1521 is_lock_free() const noexcept
1522 { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1523
1524 void
1525 store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1526 { __atomic_impl::store(_M_ptr, __t, __m); }
1527
1528 _Tp
1529 load(memory_order __m = memory_order_seq_cst) const noexcept
1530 { return __atomic_impl::load(_M_ptr, __m); }
1531
1532 _Tp
1533 exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
1534 const noexcept
1535 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1536
1537 bool
1538 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1539 memory_order __success,
1540 memory_order __failure) const noexcept
1541 {
1542 return __atomic_impl::compare_exchange_weak<true>(
1543 _M_ptr, __expected, __desired, __success, __failure);
1544 }
1545
1546 bool
1547 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1548 memory_order __success,
1549 memory_order __failure) const noexcept
1550 {
1551 return __atomic_impl::compare_exchange_strong<true>(
1552 _M_ptr, __expected, __desired, __success, __failure);
1553 }
1554
1555 bool
1556 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1557 memory_order __order = memory_order_seq_cst)
1558 const noexcept
1559 {
1560 return compare_exchange_weak(__expected, __desired, __order,
1561 __cmpexch_failure_order(__order));
1562 }
1563
1564 bool
1565 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1566 memory_order __order = memory_order_seq_cst)
1567 const noexcept
1568 {
1569 return compare_exchange_strong(__expected, __desired, __order,
1570 __cmpexch_failure_order(__order));
1571 }
1572
1573#if __glibcxx_atomic_wait
1574 _GLIBCXX_ALWAYS_INLINE void
1575 wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1576 { __atomic_impl::wait(_M_ptr, __old, __m); }
1577
1578 // TODO add const volatile overload
1579
1580 _GLIBCXX_ALWAYS_INLINE void
1581 notify_one() const noexcept
1582 { __atomic_impl::notify_one(_M_ptr); }
1583
1584 // TODO add const volatile overload
1585
1586 _GLIBCXX_ALWAYS_INLINE void
1587 notify_all() const noexcept
1588 { __atomic_impl::notify_all(_M_ptr); }
1589
1590 // TODO add const volatile overload
1591#endif // __glibcxx_atomic_wait
1592
1593 private:
1594 _Tp* _M_ptr;
1595 };
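  // Editorial example, not part of the original header: this primary
  // specialization backs C++20 std::atomic_ref for trivially copyable class
  // types (names are illustrative only; pt must outlive the atomic_ref and
  // must not be accessed non-atomically while it is referenced):
  //
  //   struct Point { int x, y; };   // trivially copyable
  //   alignas(std::atomic_ref<Point>::required_alignment) Point pt{1, 2};
  //
  //   std::atomic_ref<Point> ref(pt);
  //   Point expected = ref.load();
  //   ref.compare_exchange_strong(expected, Point{3, 4});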
1596
1597 // base class for atomic_ref<integral-type>
1598 template<typename _Tp>
1599 struct __atomic_ref<_Tp, true, false>
1600 {
1601 static_assert(is_integral_v<_Tp>);
1602
1603 public:
1604 using value_type = _Tp;
1605 using difference_type = value_type;
1606
1607 static constexpr bool is_always_lock_free
1608 = __atomic_always_lock_free(sizeof(_Tp), 0);
1609
1610 static constexpr size_t required_alignment
1611 = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
1612
1613 __atomic_ref() = delete;
1614 __atomic_ref& operator=(const __atomic_ref&) = delete;
1615
1616 explicit
1617 __atomic_ref(_Tp& __t) : _M_ptr(&__t)
1618 {
1619 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1620 }
1621
1622 __atomic_ref(const __atomic_ref&) noexcept = default;
1623
1624 _Tp
1625 operator=(_Tp __t) const noexcept
1626 {
1627 this->store(__t);
1628 return __t;
1629 }
1630
1631 operator _Tp() const noexcept { return this->load(); }
1632
1633 bool
1634 is_lock_free() const noexcept
1635 {
1636 return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
1637 }
1638
1639 void
1640 store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1641 { __atomic_impl::store(_M_ptr, __t, __m); }
1642
1643 _Tp
1644 load(memory_order __m = memory_order_seq_cst) const noexcept
1645 { return __atomic_impl::load(_M_ptr, __m); }
1646
1647 _Tp
1648 exchange(_Tp __desired,
1649 memory_order __m = memory_order_seq_cst) const noexcept
1650 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1651
1652 bool
1653 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1654 memory_order __success,
1655 memory_order __failure) const noexcept
1656 {
1657 return __atomic_impl::compare_exchange_weak<true>(
1658 _M_ptr, __expected, __desired, __success, __failure);
1659 }
1660
1661 bool
1662 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1663 memory_order __success,
1664 memory_order __failure) const noexcept
1665 {
1666 return __atomic_impl::compare_exchange_strong<true>(
1667 _M_ptr, __expected, __desired, __success, __failure);
1668 }
1669
1670 bool
1671 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1672 memory_order __order = memory_order_seq_cst)
1673 const noexcept
1674 {
1675 return compare_exchange_weak(__expected, __desired, __order,
1676 __cmpexch_failure_order(__order));
1677 }
1678
1679 bool
1680 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1681 memory_order __order = memory_order_seq_cst)
1682 const noexcept
1683 {
1684 return compare_exchange_strong(__expected, __desired, __order,
1685 __cmpexch_failure_order(__order));
1686 }
1687
1688#if __glibcxx_atomic_wait
1689 _GLIBCXX_ALWAYS_INLINE void
1690 wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1691 { __atomic_impl::wait(_M_ptr, __old, __m); }
1692
1693 // TODO add const volatile overload
1694
1695 _GLIBCXX_ALWAYS_INLINE void
1696 notify_one() const noexcept
1697 { __atomic_impl::notify_one(_M_ptr); }
1698
1699 // TODO add const volatile overload
1700
1701 _GLIBCXX_ALWAYS_INLINE void
1702 notify_all() const noexcept
1703 { __atomic_impl::notify_all(_M_ptr); }
1704
1705 // TODO add const volatile overload
1706#endif // __glibcxx_atomic_wait
1707
1708 value_type
1709 fetch_add(value_type __i,
1710 memory_order __m = memory_order_seq_cst) const noexcept
1711 { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
1712
1713 value_type
1714 fetch_sub(value_type __i,
1715 memory_order __m = memory_order_seq_cst) const noexcept
1716 { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
1717
1718 value_type
1719 fetch_and(value_type __i,
1720 memory_order __m = memory_order_seq_cst) const noexcept
1721 { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
1722
1723 value_type
1724 fetch_or(value_type __i,
1725 memory_order __m = memory_order_seq_cst) const noexcept
1726 { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
1727
1728 value_type
1729 fetch_xor(value_type __i,
1730 memory_order __m = memory_order_seq_cst) const noexcept
1731 { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
1732
1733 _GLIBCXX_ALWAYS_INLINE value_type
1734 operator++(int) const noexcept
1735 { return fetch_add(1); }
1736
1737 _GLIBCXX_ALWAYS_INLINE value_type
1738 operator--(int) const noexcept
1739 { return fetch_sub(1); }
1740
1741 value_type
1742 operator++() const noexcept
1743 { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
1744
1745 value_type
1746 operator--() const noexcept
1747 { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
1748
1749 value_type
1750 operator+=(value_type __i) const noexcept
1751 { return __atomic_impl::__add_fetch(_M_ptr, __i); }
1752
1753 value_type
1754 operator-=(value_type __i) const noexcept
1755 { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
1756
1757 value_type
1758 operator&=(value_type __i) const noexcept
1759 { return __atomic_impl::__and_fetch(_M_ptr, __i); }
1760
1761 value_type
1762 operator|=(value_type __i) const noexcept
1763 { return __atomic_impl::__or_fetch(_M_ptr, __i); }
1764
1765 value_type
1766 operator^=(value_type __i) const noexcept
1767 { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
1768
1769 private:
1770 _Tp* _M_ptr;
1771 };
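  // Editorial example, not part of the original header: the integral
  // atomic_ref base adds the fetch_* operations and arithmetic operators, so a
  // suitably aligned plain integer can be updated atomically in place:
  //
  //   int hits = 0;
  //   std::atomic_ref<int> r(hits);
  //   r.fetch_add(1, std::memory_order_relaxed);
  //   ++r;   // __add_fetch with seq_cst ordering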
1772
1773 // base class for atomic_ref<floating-point-type>
1774 template<typename _Fp>
1775 struct __atomic_ref<_Fp, false, true>
1776 {
1777 static_assert(is_floating_point_v<_Fp>);
1778
1779 public:
1780 using value_type = _Fp;
1781 using difference_type = value_type;
1782
1783 static constexpr bool is_always_lock_free
1784 = __atomic_always_lock_free(sizeof(_Fp), 0);
1785
1786 static constexpr size_t required_alignment = __alignof__(_Fp);
1787
1788 __atomic_ref() = delete;
1789 __atomic_ref& operator=(const __atomic_ref&) = delete;
1790
1791 explicit
1792 __atomic_ref(_Fp& __t) : _M_ptr(&__t)
1793 {
1794 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1795 }
1796
1797 __atomic_ref(const __atomic_ref&) noexcept = default;
1798
1799 _Fp
1800 operator=(_Fp __t) const noexcept
1801 {
1802 this->store(__t);
1803 return __t;
1804 }
1805
1806 operator _Fp() const noexcept { return this->load(); }
1807
1808 bool
1809 is_lock_free() const noexcept
1810 {
1811 return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
1812 }
1813
1814 void
1815 store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
1816 { __atomic_impl::store(_M_ptr, __t, __m); }
1817
1818 _Fp
1819 load(memory_order __m = memory_order_seq_cst) const noexcept
1820 { return __atomic_impl::load(_M_ptr, __m); }
1821
1822 _Fp
1823 exchange(_Fp __desired,
1824 memory_order __m = memory_order_seq_cst) const noexcept
1825 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1826
1827 bool
1828 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1829 memory_order __success,
1830 memory_order __failure) const noexcept
1831 {
1832 return __atomic_impl::compare_exchange_weak<true>(
1833 _M_ptr, __expected, __desired, __success, __failure);
1834 }
1835
1836 bool
1837 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1838 memory_order __success,
1839 memory_order __failure) const noexcept
1840 {
1841 return __atomic_impl::compare_exchange_strong<true>(
1842 _M_ptr, __expected, __desired, __success, __failure);
1843 }
1844
1845 bool
1846 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1847 memory_order __order = memory_order_seq_cst)
1848 const noexcept
1849 {
1850 return compare_exchange_weak(__expected, __desired, __order,
1851 __cmpexch_failure_order(__order));
1852 }
1853
1854 bool
1855 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1856 memory_order __order = memory_order_seq_cst)
1857 const noexcept
1858 {
1859 return compare_exchange_strong(__expected, __desired, __order,
1860 __cmpexch_failure_order(__order));
1861 }
1862
1863#if __glibcxx_atomic_wait
1864 _GLIBCXX_ALWAYS_INLINE void
1865 wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1866 { __atomic_impl::wait(_M_ptr, __old, __m); }
1867
1868 // TODO add const volatile overload
1869
1870 _GLIBCXX_ALWAYS_INLINE void
1871 notify_one() const noexcept
1872 { __atomic_impl::notify_one(_M_ptr); }
1873
1874 // TODO add const volatile overload
1875
1876 _GLIBCXX_ALWAYS_INLINE void
1877 notify_all() const noexcept
1878 { __atomic_impl::notify_all(_M_ptr); }
1879
1880 // TODO add const volatile overload
1881#endif // __glibcxx_atomic_wait
1882
1883 value_type
1884 fetch_add(value_type __i,
1885 memory_order __m = memory_order_seq_cst) const noexcept
1886 { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
1887
1888 value_type
1889 fetch_sub(value_type __i,
1890 memory_order __m = memory_order_seq_cst) const noexcept
1891 { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
1892
1893 value_type
1894 operator+=(value_type __i) const noexcept
1895 { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
1896
1897 value_type
1898 operator-=(value_type __i) const noexcept
1899 { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
1900
1901 private:
1902 _Fp* _M_ptr;
1903 };
1904
1905 // base class for atomic_ref<pointer-type>
1906 template<typename _Tp>
1907 struct __atomic_ref<_Tp*, false, false>
1908 {
1909 public:
1910 using value_type = _Tp*;
1911 using difference_type = ptrdiff_t;
1912
1913 static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1914
1915 static constexpr size_t required_alignment = __alignof__(_Tp*);
1916
1917 __atomic_ref() = delete;
1918 __atomic_ref& operator=(const __atomic_ref&) = delete;
1919
1920 explicit
1921 __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
1922 {
1923 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1924 }
1925
1926 __atomic_ref(const __atomic_ref&) noexcept = default;
1927
1928 _Tp*
1929 operator=(_Tp* __t) const noexcept
1930 {
1931 this->store(__t);
1932 return __t;
1933 }
1934
1935 operator _Tp*() const noexcept { return this->load(); }
1936
1937 bool
1938 is_lock_free() const noexcept
1939 {
1940 return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
1941 }
1942
1943 void
1944 store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
1945 { __atomic_impl::store(_M_ptr, __t, __m); }
1946
1947 _Tp*
1948 load(memory_order __m = memory_order_seq_cst) const noexcept
1949 { return __atomic_impl::load(_M_ptr, __m); }
1950
1951 _Tp*
1952 exchange(_Tp* __desired,
1953 memory_order __m = memory_order_seq_cst) const noexcept
1954 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1955
1956 bool
1957 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1958 memory_order __success,
1959 memory_order __failure) const noexcept
1960 {
1961 return __atomic_impl::compare_exchange_weak<true>(
1962 _M_ptr, __expected, __desired, __success, __failure);
1963 }
1964
1965 bool
1966 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1967 memory_order __success,
1968 memory_order __failure) const noexcept
1969 {
1970 return __atomic_impl::compare_exchange_strong<true>(
1971 _M_ptr, __expected, __desired, __success, __failure);
1972 }
1973
1974 bool
1975 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1976 memory_order __order = memory_order_seq_cst)
1977 const noexcept
1978 {
1979 return compare_exchange_weak(__expected, __desired, __order,
1980 __cmpexch_failure_order(__order));
1981 }
1982
1983 bool
1984 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
1985 memory_order __order = memory_order_seq_cst)
1986 const noexcept
1987 {
1988 return compare_exchange_strong(__expected, __desired, __order,
1989 __cmpexch_failure_order(__order));
1990 }
1991
1992#if __glibcxx_atomic_wait
1993 _GLIBCXX_ALWAYS_INLINE void
1994 wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
1995 { __atomic_impl::wait(_M_ptr, __old, __m); }
1996
1997 // TODO add const volatile overload
1998
1999 _GLIBCXX_ALWAYS_INLINE void
2000 notify_one() const noexcept
2001 { __atomic_impl::notify_one(_M_ptr); }
2002
2003 // TODO add const volatile overload
2004
2005 _GLIBCXX_ALWAYS_INLINE void
2006 notify_all() const noexcept
2007 { __atomic_impl::notify_all(_M_ptr); }
2008
2009 // TODO add const volatile overload
2010#endif // __glibcxx_atomic_wait
2011
2012 _GLIBCXX_ALWAYS_INLINE value_type
2013 fetch_add(difference_type __d,
2014 memory_order __m = memory_order_seq_cst) const noexcept
2015 { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
2016
2017 _GLIBCXX_ALWAYS_INLINE value_type
2018 fetch_sub(difference_type __d,
2019 memory_order __m = memory_order_seq_cst) const noexcept
2020 { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
2021
2022 value_type
2023 operator++(int) const noexcept
2024 { return fetch_add(1); }
2025
2026 value_type
2027 operator--(int) const noexcept
2028 { return fetch_sub(1); }
2029
2030 value_type
2031 operator++() const noexcept
2032 {
2033 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
2034 }
2035
2036 value_type
2037 operator--() const noexcept
2038 {
2039 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
2040 }
2041
2042 value_type
2043 operator+=(difference_type __d) const noexcept
2044 {
2045 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
2046 }
2047
2048 value_type
2049 operator-=(difference_type __d) const noexcept
2050 {
2051 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
2052 }
2053
2054 private:
2055 static constexpr ptrdiff_t
2056 _S_type_size(ptrdiff_t __d) noexcept
2057 {
2058 static_assert(is_object_v<_Tp>);
2059 return __d * sizeof(_Tp);
2060 }
2061
2062 _Tp** _M_ptr;
2063 };
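  // Editorial example, not part of the original header: the pointer atomic_ref
  // base scales arithmetic by sizeof(_Tp), mirroring the __atomic_base<_PTp*>
  // specialization above (names are illustrative only):
  //
  //   int table[4] = {};
  //   int* cursor = table;
  //   std::atomic_ref<int*> r(cursor);
  //   r.fetch_add(1);   // cursor now points at table + 1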
2064#endif // C++2a
2065
2066 /// @endcond
2067
2068 /// @} group atomics
2069
2070_GLIBCXX_END_NAMESPACE_VERSION
2071} // namespace std
2072
2073#endif