// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */
#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <bits/chrono.h>
#include <bits/functexcept.h>
#include <bits/move.h>        // move, __exchange
#include <bits/std_mutex.h>   // defer_lock_t

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
# include <condition_variable>
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
#define __cpp_lib_shared_mutex 201505L
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402L
  class shared_timed_mutex;

  /// @cond undocumented

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  static inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
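  // For example, _GLIBCXX_GTHRW(rwlock_rdlock) defines __glibcxx_rwlock_rdlock,
  // which calls pthread_rwlock_rdlock through the __gthrw_ alias when threads
  // are running, and returns 0 (success) in single-threaded programs.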
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  __gthrw(pthread_rwlock_timedrdlock);
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
  __gthrw(pthread_rwlock_timedwrlock);
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
  static inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  static inline int
  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
                                const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif
  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t _M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
      if (__ret == ENOMEM)
        __throw_bad_alloc();
      else if (__ret == EAGAIN)
        __throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
        __throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
        __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
#endif
#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority.  When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable _M_gate2;
    // The write-entered flag and reader count.
    unsigned _M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;
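    // For example, with a 32-bit unsigned int: _S_write_entered == 0x80000000
    // and _S_max_readers == 0x7fffffff, so _M_state == 0x80000003 means a
    // writer holds or is queued for the lock while three reader locks are
    // still outstanding.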
    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
        {
          _M_state = _S_write_entered;
          return true;
        }
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
        return false;
      if (_M_state < _S_max_readers)
        {
          ++_M_state;
          return true;
        }
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
        {
          // Wake the queued writer if there are no more readers.
          if (_M_readers() == 0)
            _M_gate2.notify_one();
          // No need to notify gate1 because we give priority to the queued
          // writer, and that writer will eventually notify gate1 after it
          // clears the write-entered flag.
        }
      else
        {
          // Wake any thread that was blocked on reader overflow.
          if (__prev == _S_max_readers)
            _M_gate1.notify_one();
        }
    }
  };
#endif
  /// @endcond
#if __cplusplus >= 201703L
  /// The standard shared mutex type.
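  ///
  /// An illustrative sketch (not part of the interface): many threads may
  /// run read() concurrently, while write() excludes all other threads.
  /// @code
  /// std::shared_mutex m;
  /// int value = 0;
  ///
  /// int read()   { std::shared_lock<std::shared_mutex> l(m); return value; }
  /// void write() { std::lock_guard<std::shared_mutex> l(m); ++value; }
  /// @endcode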
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // C++17

  /// @cond undocumented
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
  /// @endcond
  /// The standard shared timed mutex type.
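  ///
  /// An illustrative sketch (not part of the interface): the timed members
  /// let a thread give up instead of blocking indefinitely. The 50ms value
  /// is arbitrary.
  /// @code
  /// std::shared_timed_mutex m;
  ///
  /// bool try_read()
  /// {
  ///   using namespace std::chrono_literals;
  ///   std::shared_lock<std::shared_timed_mutex> l(m, 50ms); // try_lock_shared_for
  ///   return l.owns_lock(); // false if the 50ms timeout expired
  /// }
  /// @endcode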
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    using __clock_t = chrono::steady_clock;
#else
    using __clock_t = chrono::system_clock;
#endif

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
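        // duration_cast truncates toward zero, so when the clock's period is
        // coarser than _Period the converted value may be shorter than the
        // requested wait; round up by one tick so we never time out early.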
        if (ratio_greater<__clock_t::period, _Period>())
          ++__rt;
        return try_lock_until(__clock_t::now() + __rt);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
        auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
        if (ratio_greater<__clock_t::period, _Period>())
          ++__rt;
        return try_lock_shared_until(__clock_t::now() + __rt);
      }
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::system_clock,
                     _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::steady_clock,
                     _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
                                               &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
#if __cplusplus > 201703L
        static_assert(chrono::is_clock_v<_Clock>);
#endif
        // The user-supplied clock may not tick at the same rate as
        // steady_clock, so we must loop in order to guarantee that
        // the timeout has expired before returning false.
        typename _Clock::time_point __now = _Clock::now();
        do {
          auto __rtime = __atime - __now;
          if (try_lock_for(__rtime))
            return true;
          __now = _Clock::now();
        } while (__atime > __now);
        return false;
      }
    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::system_clock,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret;
        // Unlike for lock(), we are not allowed to throw an exception so if
        // the maximum number of read locks has been exceeded, or we would
        // deadlock, we just try to acquire the lock again (and will time out
        // eventually).
        // In cases where we would exceed the maximum number of read locks
        // throughout the whole time until the timeout, we will fail to
        // acquire the lock even if it would be logically free; however, this
        // is allowed by the standard, and we made a "strong effort"
        // (see C++14 30.4.1.4p26).
        // For cases where the implementation detects a deadlock we
        // intentionally block and timeout so that an early return isn't
        // mistaken for a spurious failure, which might help users realise
        // there is a deadlock.
        do
          __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
        while (__ret == EAGAIN || __ret == EDEADLK);
        if (__ret == ETIMEDOUT)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
                                               &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        __glibcxx_assert(__ret == 0);
        return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __atime)
      {
#if __cplusplus > 201703L
        static_assert(chrono::is_clock_v<_Clock>);
#endif
        // The user-supplied clock may not tick at the same rate as
        // steady_clock, so we must loop in order to guarantee that
        // the timeout has expired before returning false.
        typename _Clock::time_point __now = _Clock::now();
        do {
          auto __rtime = __atime - __now;
          if (try_lock_shared_for(__rtime))
            return true;
          __now = _Clock::now();
        } while (__atime > __now);
        return false;
      }
#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return !_M_write_entered(); }))
          {
            return false;
          }
        _M_state |= _S_write_entered;
        if (!_M_gate2.wait_until(__lk, __abs_time,
                                 [=]{ return _M_readers() == 0; }))
          {
            _M_state ^= _S_write_entered;
            // Wake all threads blocked while the write-entered flag was set.
            _M_gate1.notify_all();
            return false;
          }
        return true;
      }

    // Shared ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return _M_state < _S_max_readers; }))
          {
            return false;
          }
        ++_M_state;
        return true;
      }
#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
#endif // _GLIBCXX_HAS_GTHREADS
  /// shared_lock
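  ///
  /// A movable RAII wrapper that holds *shared* ownership of a mutex, the
  /// counterpart of std::unique_lock for exclusive ownership. An
  /// illustrative sketch (not part of the interface), assuming a
  /// std::shared_mutex guarding a container shared between threads:
  /// @code
  /// std::shared_mutex m;
  /// std::map<int, int> table;
  ///
  /// int lookup(int k)
  /// {
  ///   std::shared_lock<std::shared_mutex> l(m); // readers may share the lock
  ///   auto it = table.find(k);
  ///   return it == table.end() ? -1 : it->second;
  /// } // unlock_shared() in the destructor
  /// @endcode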
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
        shared_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __abs_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
        shared_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rel_time)
        : _M_pm(std::__addressof(__m)),
          _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
        if (_M_owns)
          _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
        shared_lock(std::move(__sl)).swap(*this);
        return *this;
      }

      void
      lock()
      {
        _M_lockable();
        _M_pm->lock_shared();
        _M_owns = true;
      }

      bool
      try_lock()
      {
        _M_lockable();
        return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
        }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        _M_pm->unlock_shared();
        _M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
        std::swap(_M_pm, __u._M_pm);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
        _M_owns = false;
        return std::__exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
        if (_M_pm == nullptr)
          __throw_system_error(int(errc::operation_not_permitted));
        if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type* _M_pm;
      bool        _M_owns;
    };

  /// Swap specialization for shared_lock
  /// @relates shared_mutex
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }

  /// @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX