// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2020 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus >= 201402L

#include <bits/c++config.h>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @addtogroup mutexes
   * @{
   */

#ifdef _GLIBCXX_HAS_GTHREADS

#if __cplusplus >= 201703L
#define __cpp_lib_shared_mutex 201505
  class shared_mutex;
#endif

#define __cpp_lib_shared_timed_mutex 201402
  class shared_timed_mutex;

  /// @cond undocumented

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
#ifdef __gthrw
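  // Wrappers around the pthread_rwlock_* functions, using the __gthrw
  // weak-reference machinery: when the program is single-threaded
  // (__gthread_active_p() returns false) the pthread call is skipped and
  // the wrapper simply reports success, since no locking is required.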
#define _GLIBCXX_GTHRW(name) \
  __gthrw(pthread_ ## name); \
  static inline int \
  __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
  { \
    if (__gthread_active_p ()) \
      return __gthrw_(pthread_ ## name) (__rwlock); \
    else \
      return 0; \
  }
  _GLIBCXX_GTHRW(rwlock_rdlock)
  _GLIBCXX_GTHRW(rwlock_tryrdlock)
  _GLIBCXX_GTHRW(rwlock_wrlock)
  _GLIBCXX_GTHRW(rwlock_trywrlock)
  _GLIBCXX_GTHRW(rwlock_unlock)
# ifndef PTHREAD_RWLOCK_INITIALIZER
  _GLIBCXX_GTHRW(rwlock_destroy)
  __gthrw(pthread_rwlock_init);
  static inline int
  __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
    else
      return 0;
  }
# endif
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  __gthrw(pthread_rwlock_timedrdlock);
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
    else
      return 0;
  }
  __gthrw(pthread_rwlock_timedwrlock);
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  {
    if (__gthread_active_p ())
      return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
    else
      return 0;
  }
# endif
#else
  static inline int
  __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_rdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_tryrdlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_wrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_trywrlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_unlock (__rwlock); }
  static inline int
  __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_destroy (__rwlock); }
  static inline int
  __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
  { return pthread_rwlock_init (__rwlock, NULL); }
# if _GTHREAD_USE_MUTEX_TIMEDLOCK
  static inline int
  __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
  static inline int
  __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
				const timespec *__ts)
  { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
# endif
#endif

  /// A shared mutex type implemented using pthread_rwlock_t.
  class __shared_mutex_pthread
  {
    friend class shared_timed_mutex;

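    // If the platform defines PTHREAD_RWLOCK_INITIALIZER the rwlock can be
    // statically initialized and needs no explicit destruction; otherwise
    // it must be initialized and destroyed at run time, with init failures
    // mapped onto the corresponding C++ exceptions below.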
#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    __shared_mutex_pthread() = default;
    ~__shared_mutex_pthread() = default;
#else
    pthread_rwlock_t _M_rwlock;

  public:
    __shared_mutex_pthread()
    {
      int __ret = __glibcxx_rwlock_init(&_M_rwlock);
      if (__ret == ENOMEM)
	__throw_bad_alloc();
      else if (__ret == EAGAIN)
	__throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
	__throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    ~__shared_mutex_pthread()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }
#endif

    __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
    __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

    void
    lock()
    {
      int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      __glibcxx_assert(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
	__ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock. Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      __glibcxx_assert(__ret == 0);
      return true;
    }

    void
    unlock_shared()
    {
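      // POSIX has a single unlock operation for both read and write locks,
      // so shared unlock simply reuses unlock().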
      unlock();
    }

    void* native_handle() { return &_M_rwlock; }
  };
#endif

#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
  /// A shared mutex type implemented using std::condition_variable.
  class __shared_mutex_cv
  {
    friend class shared_timed_mutex;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable _M_gate2;
    // The write-entered flag and reader count.
    unsigned _M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;
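    // For example, with a 32-bit unsigned int _S_write_entered is
    // 0x80000000 and _S_max_readers is 0x7fffffff, so a state of
    // 0x80000003 means a writer is queued (blocked on gate2) while three
    // reader locks are still held.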

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    __shared_mutex_cv() : _M_state(0) {}

    ~__shared_mutex_cv()
    {
      __glibcxx_assert( _M_state == 0 );
    }

    __shared_mutex_cv(const __shared_mutex_cv&) = delete;
    __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
	{
	  _M_state = _S_write_entered;
	  return true;
	}
      return false;
    }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
	return false;
      if (_M_state < _S_max_readers)
	{
	  ++_M_state;
	  return true;
	}
      return false;
    }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      __glibcxx_assert( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
	{
	  // Wake the queued writer if there are no more readers.
	  if (_M_readers() == 0)
	    _M_gate2.notify_one();
	  // No need to notify gate1 because we give priority to the queued
	  // writer, and that writer will eventually notify gate1 after it
	  // clears the write-entered flag.
	}
      else
	{
	  // Wake any thread that was blocked on reader overflow.
	  if (__prev == _S_max_readers)
	    _M_gate1.notify_one();
	}
    }
  };
#endif
  /// @endcond

#if __cplusplus > 201402L
  /// The standard shared mutex type.
  class shared_mutex
  {
  public:
    shared_mutex() = default;
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;
    shared_mutex& operator=(const shared_mutex&) = delete;

    // Exclusive ownership

    void lock() { _M_impl.lock(); }
    bool try_lock() { return _M_impl.try_lock(); }
    void unlock() { _M_impl.unlock(); }

    // Shared ownership

    void lock_shared() { _M_impl.lock_shared(); }
    bool try_lock_shared() { return _M_impl.try_lock_shared(); }
    void unlock_shared() { _M_impl.unlock_shared(); }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
    typedef void* native_handle_type;
    native_handle_type native_handle() { return _M_impl.native_handle(); }

  private:
    __shared_mutex_pthread _M_impl;
#else
  private:
    __shared_mutex_cv _M_impl;
#endif
  };
#endif // C++17
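
  // Illustrative usage sketch (an exposition aid, not part of the header):
  // shared_mutex allows any number of concurrent readers or a single
  // exclusive writer. The Cache type below is hypothetical.
  //
  //   #include <map>
  //   #include <shared_mutex>
  //   #include <string>
  //
  //   class Cache
  //   {
  //     mutable std::shared_mutex mtx;
  //     std::map<std::string, std::string> data;
  //   public:
  //     std::string
  //     get(const std::string& key) const
  //     {
  //       std::shared_lock<std::shared_mutex> lk(mtx); // shared: readers
  //       auto it = data.find(key);
  //       return it == data.end() ? std::string() : it->second;
  //     }
  //
  //     void
  //     put(const std::string& key, const std::string& value)
  //     {
  //       std::unique_lock<std::shared_mutex> lk(mtx); // exclusive: writer
  //       data[key] = value;
  //     }
  //   };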

  /// @cond undocumented
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  using __shared_timed_mutex_base = __shared_mutex_pthread;
#else
  using __shared_timed_mutex_base = __shared_mutex_cv;
#endif
  /// @endcond

  /// The standard shared timed mutex type.
  class shared_timed_mutex
  : private __shared_timed_mutex_base
  {
    using _Base = __shared_timed_mutex_base;

    // Must use the same clock as condition_variable for __shared_mutex_cv.
#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    using __clock_t = chrono::steady_clock;
#else
    using __clock_t = chrono::system_clock;
#endif

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void lock() { _Base::lock(); }
    bool try_lock() { return _Base::try_lock(); }
    void unlock() { _Base::unlock(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
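	// duration_cast truncates, so when __clock_t's tick period is
	// coarser than _Period the cast can round down; add one tick so we
	// never wait less than the requested duration.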
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;
	return try_lock_until(__clock_t::now() + __rt);
      }

    // Shared ownership

    void lock_shared() { _Base::lock_shared(); }
    bool try_lock_shared() { return _Base::try_lock_shared(); }
    void unlock_shared() { _Base::unlock_shared(); }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
      {
	auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
	if (ratio_greater<__clock_t::period, _Period>())
	  ++__rt;
	return try_lock_shared_until(__clock_t::now() + __rt);
      }

#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK

    // Exclusive ownership

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::system_clock,
		     _Duration>& __atime)
      {
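	// Split the absolute time into whole seconds plus remaining
	// nanoseconds, the layout pthread expects in a timespec.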
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
	// On self-deadlock, we just fail to acquire the lock. Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<chrono::steady_clock,
		     _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
					       &__ts);
	// On self-deadlock, we just fail to acquire the lock. Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
#if __cplusplus > 201703L
	static_assert(chrono::is_clock_v<_Clock>);
#endif
	// The user-supplied clock may not tick at the same rate as
	// steady_clock, so we must loop in order to guarantee that
	// the timeout has expired before returning false.
	typename _Clock::time_point __now = _Clock::now();
	do {
	  auto __rtime = __atime - __now;
	  if (try_lock_for(__rtime))
	    return true;
	  __now = _Clock::now();
	} while (__atime > __now);
	return false;
      }

    // Shared ownership

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::system_clock,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret;
	// Unlike for lock(), we are not allowed to throw an exception so if
	// the maximum number of read locks has been exceeded, or we would
	// deadlock, we just try to acquire the lock again (and will time out
	// eventually).
	// In cases where we would exceed the maximum number of read locks
	// throughout the whole time until the timeout, we will fail to
	// acquire the lock even if it would be logically free; however, this
	// is allowed by the standard, and we made a "strong effort"
	// (see C++14 30.4.1.4p26).
	// For cases where the implementation detects a deadlock we
	// intentionally block and timeout so that an early return isn't
	// mistaken for a spurious failure, which might help users realise
	// there is a deadlock.
	do
	  __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
	while (__ret == EAGAIN || __ret == EDEADLK);
	if (__ret == ETIMEDOUT)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }

#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
			    _Duration>& __atime)
      {
	auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts =
	  {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
					       &__ts);
	// On self-deadlock, we just fail to acquire the lock. Technically,
	// the program violated the precondition.
	if (__ret == ETIMEDOUT || __ret == EDEADLK)
	  return false;
	// Errors not handled: EINVAL
	__glibcxx_assert(__ret == 0);
	return true;
      }
#endif

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __atime)
      {
#if __cplusplus > 201703L
	static_assert(chrono::is_clock_v<_Clock>);
#endif
	// The user-supplied clock may not tick at the same rate as
	// steady_clock, so we must loop in order to guarantee that
	// the timeout has expired before returning false.
	typename _Clock::time_point __now = _Clock::now();
	do {
	  auto __rtime = __atime - __now;
	  if (try_lock_shared_for(__rtime))
	    return true;
	  __now = _Clock::now();
	} while (__atime > __now);
	return false;
      }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Exclusive ownership

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
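	// The same two-phase protocol as lock(): first wait to set the
	// write-entered flag, then wait for the reader count to drop to
	// zero; either phase gives up at __abs_time.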
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return !_M_write_entered(); }))
	  {
	    return false;
	  }
	_M_state |= _S_write_entered;
	if (!_M_gate2.wait_until(__lk, __abs_time,
				 [=]{ return _M_readers() == 0; }))
	  {
	    _M_state ^= _S_write_entered;
	    // Wake all threads blocked while the write-entered flag was set.
	    _M_gate1.notify_all();
	    return false;
	  }
	return true;
      }

    // Shared ownership

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
			    _Duration>& __abs_time)
      {
	unique_lock<mutex> __lk(_M_mut);
	if (!_M_gate1.wait_until(__lk, __abs_time,
				 [=]{ return _M_state < _S_max_readers; }))
	  {
	    return false;
	  }
	++_M_state;
	return true;
      }

#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
#endif // _GLIBCXX_HAS_GTHREADS
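
  // Illustrative usage sketch (an exposition aid, not part of the header):
  // the timed interfaces of shared_timed_mutex bound how long a thread
  // will wait. try_update() below is a hypothetical function.
  //
  //   #include <chrono>
  //   #include <shared_mutex>
  //
  //   std::shared_timed_mutex mtx;
  //   int shared_value = 0;
  //
  //   bool
  //   try_update(int v)
  //   {
  //     using namespace std::chrono_literals;
  //     if (!mtx.try_lock_for(50ms))   // give up after 50ms
  //       return false;
  //     shared_value = v;
  //     mtx.unlock();
  //     return true;
  //   }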

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m)
      : _M_pm(std::__addressof(__m)), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(std::__addressof(__m)), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(std::__addressof(__m)), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
	shared_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __abs_time)
	: _M_pm(std::__addressof(__m)),
	  _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
	shared_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rel_time)
	: _M_pm(std::__addressof(__m)),
	  _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
	if (_M_owns)
	  _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
	shared_lock(std::move(__sl)).swap(*this);
	return *this;
      }

      void
      lock()
      {
	_M_lockable();
	_M_pm->lock_shared();
	_M_owns = true;
      }

      bool
      try_lock()
      {
	_M_lockable();
	return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
	}

      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
	{
	  _M_lockable();
	  return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
	}

      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	_M_pm->unlock_shared();
	_M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
	std::swap(_M_pm, __u._M_pm);
	std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
	_M_owns = false;
	return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
	if (_M_pm == nullptr)
	  __throw_system_error(int(errc::operation_not_permitted));
	if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type* _M_pm;
      bool _M_owns;
    };

  /// Swap specialization for shared_lock
  /// @relates shared_mutex
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
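
  // Illustrative usage sketch (an exposition aid, not part of the header):
  // shared_lock is to lock_shared()/unlock_shared() what unique_lock is to
  // lock()/unlock(), releasing shared ownership automatically on scope
  // exit. The names below are hypothetical.
  //
  //   #include <shared_mutex>
  //
  //   std::shared_timed_mutex mtx;
  //   int shared_value = 0;
  //
  //   int
  //   read_value()
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(mtx);
  //     return shared_value;        // lock released when lk is destroyed
  //   }
  //
  //   bool
  //   read_value_if_free(int& out)
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(mtx, std::try_to_lock);
  //     if (!lk.owns_lock())
  //       return false;
  //     out = shared_value;
  //     return true;
  //   }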

  /// @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX