// -*- C++ -*- header.

// Copyright (C) 2008-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_base.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{atomic}
 */

#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <stdint.h>
#include <bits/atomic_lockfree_defines.h>
#include <bits/move.h>

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
#include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup atomics Atomics
   *
   * Components for performing atomic operations.
   * @{
   */

  /// Enumeration for memory_order
#if __cplusplus > 201703L
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  typedef enum memory_order
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    } memory_order;
#endif
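
  // Illustrative sketch (not part of the original header): the typical
  // release/acquire pairing these enumerators are used for.  The function
  // names below are hypothetical.
  //
  //   #include <atomic>
  //   std::atomic<int>  data{0};
  //   std::atomic<bool> ready{false};
  //
  //   void producer()
  //   {
  //     data.store(42, std::memory_order_relaxed);
  //     ready.store(true, std::memory_order_release);     // publish
  //   }
  //
  //   void consumer()
  //   {
  //     while (!ready.load(std::memory_order_acquire)) { } // synchronize
  //     int v = data.load(std::memory_order_relaxed);      // guaranteed 42
  //   }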

  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  operator|(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) | int(__mod));
  }

  constexpr memory_order
  operator&(memory_order __m, __memory_order_modifier __mod)
  {
    return memory_order(int(__m) & int(__mod));
  }

  // Drop release ordering as per [atomics.types.operations.req]/21
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
	&& (__m & __memory_order_mask) != memory_order_acq_rel;
  }
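
  // Worked example (illustrative, not part of the original header): the
  // failure order derived by __cmpexch_failure_order simply drops any
  // release component of the success order, which is what the single-order
  // compare_exchange_* overloads below rely on:
  //
  //   static_assert(__cmpexch_failure_order(memory_order_acq_rel)
  //                 == memory_order_acquire);
  //   static_assert(__cmpexch_failure_order(memory_order_release)
  //                 == memory_order_relaxed);
  //   static_assert(__cmpexch_failure_order(memory_order_seq_cst)
  //                 == memory_order_seq_cst);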

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

  // Base types for atomics.
  template<typename _IntTp>
    struct __atomic_base;

#if __cplusplus <= 201703L
# define _GLIBCXX20_INIT(I)
#else
# define __cpp_lib_atomic_value_initialization 201911L
# define _GLIBCXX20_INIT(I) = I
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }

  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  /**
   *  @brief Base type for atomic_flag.
   *
   *  Base type is POD with data, allowing atomic_flag to derive from
   *  it and meet the standard layout type requirement. In addition to
   *  compatibility with a C interface, this allows different
   *  implementations of atomic_flag to use the same atomic operation
   *  functions, via a standard conversion to the __atomic_flag_base
   *  argument.
   */
  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#if __cplusplus > 201703L
#define __cpp_lib_atomic_flag_test 201907L

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

#if __cpp_lib_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old,
	 memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
	= __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
	  [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }

    // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait
#endif // C++20

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
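
  // Illustrative usage sketch (not part of the original header): a minimal
  // spin lock built on atomic_flag.  test_and_set(acquire) takes the lock,
  // clear(release) drops it; the __scoped_spin_lock name is hypothetical.
  //
  //   struct __scoped_spin_lock
  //   {
  //     std::atomic_flag& _M_flag;
  //
  //     explicit __scoped_spin_lock(std::atomic_flag& __f) : _M_flag(__f)
  //     {
  //       while (_M_flag.test_and_set(std::memory_order_acquire))
  //         { /* spin until the previous owner calls clear() */ }
  //     }
  //
  //     ~__scoped_spin_lock()
  //     { _M_flag.clear(std::memory_order_release); }
  //   };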

  /// Base class for atomic integrals.
  //
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char8_t  char8_t
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t
  //
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
  // 8 bytes, since that is what GCC built-in functions for atomic
  // memory access expect.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
	sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __int_type convertible to _M_i.
      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
	store(__i);
	return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Use a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_i),
	    reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
			    memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
		   memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_weak(__i1, __i2, __m,
				     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1, memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
			      memory_order __m = memory_order_seq_cst) noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
		 memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return compare_exchange_strong(__i1, __i2, __m,
				       __cmpexch_failure_order(__m));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	std::__atomic_wait_address_v(&_M_i, __old,
			   [__m, this] { return this->load(__m); });
      }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
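
  // Illustrative sketch (not part of the original header): in this
  // implementation std::atomic<int> is built on __atomic_base<int>, so the
  // operators above map directly onto seq_cst read-modify-write built-ins:
  //
  //   std::atomic<int> __counter{0};
  //   ++__counter;                                         // __atomic_add_fetch, now 1
  //   __counter.fetch_add(5, std::memory_order_relaxed);   // returns old value 1, now 6
  //   int __expected = 6;
  //   __counter.compare_exchange_strong(__expected, 0);    // 6 -> 0 if still unchanged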

  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); }

      constexpr ptrdiff_t
      _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _M_type_size(__d),
				  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
	    reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	std::__atomic_wait_address_v(&_M_p, __old,
				     [__m, this]
				     { return this->load(__m); });
      }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), int(__m)); }
    };
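
  // Illustrative sketch (not part of the original header): _M_type_size
  // scales a ptrdiff_t by sizeof(_PTp), so atomic pointer arithmetic moves
  // by whole elements, matching ordinary pointer arithmetic:
  //
  //   int __arr[4] = { 10, 20, 30, 40 };
  //   std::atomic<int*> __p{__arr};
  //   int* __old = __p.fetch_add(2);   // __old == __arr, __p now __arr + 2
  //   int __v = *__p.load();           // 30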

#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = remove_volatile_t<_Tp>;

    // As above, but for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, std::__addressof(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_load(__ptr, __dest, int(__m));
	return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_exchange(__ptr, std::__addressof(__desired), __dest, int(__m));
	return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
			    _Val<_Tp> __desired, memory_order __success,
			    memory_order __failure) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));

	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
					 std::__addressof(__desired), true,
					 int(__success), int(__failure));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
			      _Val<_Tp> __desired, memory_order __success,
			      memory_order __failure) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__failure));

	return __atomic_compare_exchange(__ptr, std::__addressof(__expected),
					 std::__addressof(__desired), false,
					 int(__success), int(__failure));
      }

#if __cpp_lib_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
	   memory_order __m = memory_order_seq_cst) noexcept
      {
	std::__atomic_wait_address_v(__ptr, __old,
	    [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

      // TODO add const volatile overload

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

      // TODO add const volatile overload

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _Tp
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval + __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
				      memory_order_relaxed))
	  __newval = __oldval + __i;
	return __oldval;
      }

    template<typename _Tp>
      _Tp
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval - __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
				      memory_order_relaxed))
	  __newval = __oldval - __i;
	return __oldval;
      }

    template<typename _Tp>
      _Tp
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval + __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval,
				      memory_order_seq_cst,
				      memory_order_relaxed))
	  __newval = __oldval + __i;
	return __newval;
      }

    template<typename _Tp>
      _Tp
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
	_Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
	_Val<_Tp> __newval = __oldval - __i;
	while (!compare_exchange_weak(__ptr, __oldval, __newval,
				      memory_order_seq_cst,
				      memory_order_relaxed))
	  __newval = __oldval - __i;
	return __newval;
      }
  } // namespace __atomic_impl
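
  // Illustrative sketch (not part of the original header) of the CAS-loop
  // technique used by __fetch_add_flt above: there is no hardware fetch_add
  // built-in for floating-point types, so the failed compare_exchange
  // refreshes the old value and the addition is retried until the exchange
  // succeeds.  A free-standing equivalent, assuming C++20 <atomic>:
  //
  //   double __fetch_add(std::atomic<double>& __a, double __i)
  //   {
  //     double __old = __a.load(std::memory_order_relaxed);
  //     while (!__a.compare_exchange_weak(__old, __old + __i,
  //                                       std::memory_order_seq_cst,
  //                                       std::memory_order_relaxed))
  //       { /* __old was refreshed by the failed CAS; retry */ }
  //     return __old;
  //   }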

  // base class for atomic<floating-point-type>
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
	this->store(__t);
	return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
	this->store(__t);
	return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
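
  // Illustrative sketch (not part of the original header): in C++20,
  // std::atomic<float>/<double> are built on __atomic_float, so fetch_add
  // and the compound operators are available even though they are
  // implemented with the CAS loops above rather than a hardware add:
  //
  //   std::atomic<double> __sum{0.0};
  //   __sum.fetch_add(1.5);     // returns the old value 0.0, __sum == 1.5
  //   __sum += 2.5;             // returns the new value 4.0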

#undef _GLIBCXX20_INIT

  template<typename _Tp,
	   bool = is_integral_v<_Tp>, bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
	= (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
	? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
	= _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak(_M_ptr,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong(_M_ptr,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

    private:
      _Tp* _M_ptr;
    };
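
  // Illustrative sketch (not part of the original header): std::atomic_ref
  // (C++20) layers on these __atomic_ref bases and lets a plain object be
  // accessed atomically, provided it meets required_alignment and no
  // non-atomic access overlaps the atomic ones.  The __pair type below is
  // hypothetical:
  //
  //   struct __pair { int __x; int __y; };   // trivially copyable
  //   alignas(std::atomic_ref<__pair>::required_alignment) __pair __value{1, 2};
  //
  //   std::atomic_ref<__pair> __ref(__value);
  //   __pair __old = __ref.load();
  //   __ref.compare_exchange_strong(__old, __pair{3, 4});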
  1196. // base class for atomic_ref<integral-type>
  1197. template<typename _Tp>
  1198. struct __atomic_ref<_Tp, true, false>
  1199. {
  1200. static_assert(is_integral_v<_Tp>);
  1201. public:
  1202. using value_type = _Tp;
  1203. using difference_type = value_type;
  1204. static constexpr bool is_always_lock_free
  1205. = __atomic_always_lock_free(sizeof(_Tp), 0);
  1206. static constexpr size_t required_alignment
  1207. = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
  1208. __atomic_ref() = delete;
  1209. __atomic_ref& operator=(const __atomic_ref&) = delete;
  1210. explicit
  1211. __atomic_ref(_Tp& __t) : _M_ptr(&__t)
  1212. { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }
  1213. __atomic_ref(const __atomic_ref&) noexcept = default;
  1214. _Tp
  1215. operator=(_Tp __t) const noexcept
  1216. {
  1217. this->store(__t);
  1218. return __t;
  1219. }
  1220. operator _Tp() const noexcept { return this->load(); }
  1221. bool
  1222. is_lock_free() const noexcept
  1223. {
  1224. return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
  1225. }
  1226. void
  1227. store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
  1228. { __atomic_impl::store(_M_ptr, __t, __m); }
  1229. _Tp
  1230. load(memory_order __m = memory_order_seq_cst) const noexcept
  1231. { return __atomic_impl::load(_M_ptr, __m); }
  1232. _Tp
  1233. exchange(_Tp __desired,
  1234. memory_order __m = memory_order_seq_cst) const noexcept
  1235. { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
  1236. bool
  1237. compare_exchange_weak(_Tp& __expected, _Tp __desired,
  1238. memory_order __success,
  1239. memory_order __failure) const noexcept
  1240. {
  1241. return __atomic_impl::compare_exchange_weak(_M_ptr,
  1242. __expected, __desired,
  1243. __success, __failure);
  1244. }
  1245. bool
  1246. compare_exchange_strong(_Tp& __expected, _Tp __desired,
  1247. memory_order __success,
  1248. memory_order __failure) const noexcept
  1249. {
  1250. return __atomic_impl::compare_exchange_strong(_M_ptr,
  1251. __expected, __desired,
  1252. __success, __failure);
  1253. }
      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }
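      // Added note: the post-increment/-decrement forms above return the old
      // value (fetch_add/fetch_sub), while the pre-increment/-decrement and
      // compound-assignment operators below return the updated value through
      // the __add_fetch/__sub_fetch/... helpers.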
      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
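  // Illustrative usage sketch (comment only, not part of the header): through
  // std::atomic_ref<int> the members above act directly on an ordinary int:
  //
  //   #include <atomic>
  //   int counter = 0;
  //   std::atomic_ref<int> ref(counter);
  //   ref.fetch_add(5, std::memory_order_relaxed);
  //   ref |= 0x10;                  // __or_fetch, returns the new value
  //   int now = ref.load();         // counter == 0x15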
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);
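      // Added note: __alignof__ is the GNU extension returning the preferred
      // (hardware) alignment for _Fp, which on some targets is stricter than
      // the minimum alignment reported by alignof.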
      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
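  // Illustrative usage sketch (comment only, not part of the header):
  // floating-point fetch_add/fetch_sub have no direct hardware equivalent on
  // most targets, so the __fetch_*_flt helpers above are compare-exchange
  // loops; usage through std::atomic_ref is unchanged:
  //
  //   #include <atomic>
  //   double total = 0.0;
  //   std::atomic_ref<double> ref(total);
  //   ref.fetch_add(1.5);
  //   ref += 2.5;                   // total == 4.0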
  // base class for atomic_ref<pointer-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((uintptr_t)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
        return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
      }

      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak(_M_ptr,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong(_M_ptr,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __cpp_lib_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __cpp_lib_atomic_wait

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }

      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator--() const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
      }

      value_type
      operator+=(difference_type __d) const noexcept
      {
        return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
      }

      value_type
      operator-=(difference_type __d) const noexcept
      {
        return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
      }

    private:
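      // Added note: the underlying __atomic built-ins do byte-wise arithmetic
      // on pointer operands, so _S_type_size below converts an element count
      // into a byte offset; its static_assert rejects arithmetic on pointers
      // to non-object types (void, functions).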
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
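  // Illustrative usage sketch (comment only, not part of the header): for a
  // std::atomic_ref<T*>, ++/--/+=/-= advance the referenced pointer by whole
  // elements (the byte scaling is handled by _S_type_size above):
  //
  //   #include <atomic>
  //   int arr[4] = {0, 1, 2, 3};
  //   int* cursor = arr;
  //   std::atomic_ref<int*> ref(cursor);
  //   ++ref;                        // cursor == arr + 1
  //   ref.fetch_add(2);             // cursor == arr + 3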
#endif // C++2a

  /// @} group atomics

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif