1 // <shared_mutex> -*- C++ -*-
3 // Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 /** @file include/shared_mutex
26 * This is a Standard C++ Library header.
29 #ifndef _GLIBCXX_SHARED_MUTEX
30 #define _GLIBCXX_SHARED_MUTEX 1
32 #pragma GCC system_header
34 #if __cplusplus <= 201103L
35 # include <bits/c++14_warning.h>
38 #include <bits/c++config.h>
40 #include <condition_variable>
41 #include <bits/functexcept.h>
43 namespace std _GLIBCXX_VISIBILITY(default)
45 _GLIBCXX_BEGIN_NAMESPACE_VERSION
52 #ifdef _GLIBCXX_USE_C99_STDINT_TR1
53 #ifdef _GLIBCXX_HAS_GTHREADS
55 #define __cpp_lib_shared_timed_mutex 201402
57 /// shared_timed_mutex
58 class shared_timed_mutex
60 // Must use the same clock as condition_variable
61 typedef chrono::system_clock __clock_t;
63 // Based on Howard Hinnant's reference implementation from N2406.
65 // The high bit of _M_state is the write-entered flag which is set to
66 // indicate a writer has taken the lock or is queuing to take the lock.
67 // The remaining bits are the count of reader locks.
69 // To take a reader lock, block on gate1 while the write-entered flag is
70 // set or the maximum number of reader locks is held, then increment the
72 // To release, decrement the count, then if the write-entered flag is set
73 // and the count is zero then signal gate2 to wake a queued writer,
74 // otherwise if the maximum number of reader locks was held signal gate1
77 // To take a writer lock, block on gate1 while the write-entered flag is
78 // set, then set the write-entered flag to start queueing, then block on
79 // gate2 while the number of reader locks is non-zero.
80 // To release, unset the write-entered flag and signal gate1 to wake all
81 // blocked readers and writers.
83 // This means that when no reader locks are held readers and writers get
84 // equal priority. When one or more reader locks is held a writer gets
85 // priority and no more reader locks can be taken while the writer is
88 // Only locked when accessing _M_state or waiting on condition variables.
90 // Used to block while write-entered is set or reader count at maximum.
91 condition_variable _M_gate1;
92 // Used to block queued writers while reader count is non-zero.
93 condition_variable _M_gate2;
94 // The write-entered flag and reader count.
// _S_write_entered isolates the top bit of the state word; all lower
// bits remain available for the reader count (_S_max_readers).
97 static constexpr unsigned _S_write_entered
98 = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
99 static constexpr unsigned _S_max_readers = ~_S_write_entered;
101 // Test whether the write-entered flag is set. _M_mut must be locked.
102 bool _M_write_entered() const { return _M_state & _S_write_entered; }
104 // The number of reader locks currently held. _M_mut must be locked.
105 unsigned _M_readers() const { return _M_state & _S_max_readers; }
// Construct with no writer and no readers (state == 0).
108 shared_timed_mutex() : _M_state(0) {}
110 ~shared_timed_mutex()
// Destroying the mutex while any lock is still held is a bug; the
// debug assert catches it in checked builds.
112 _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
// Not copyable: a mutex's identity is its address.
115 shared_timed_mutex(const shared_timed_mutex&) = delete;
116 shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
118 // Exclusive ownership
// Blocking exclusive lock: first claim the write-entered flag, then
// wait for all existing readers to drain.
123 unique_lock<mutex> __lk(_M_mut);
124 // Wait until we can set the write-entered flag.
125 _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
126 _M_state |= _S_write_entered;
127 // Then wait until there are no more readers.
128 _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
// Non-blocking exclusive attempt: succeeds only if the internal mutex
// is acquired without waiting AND no reader or writer holds the lock.
134 unique_lock<mutex> __lk(_M_mut, try_to_lock);
135 if (__lk.owns_lock() && _M_state == 0)
137 _M_state = _S_write_entered;
// Timed exclusive lock with a relative timeout, expressed in terms of
// try_lock_until using the condition_variable-compatible clock.
143 template<typename _Rep, typename _Period>
145 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
147 return try_lock_until(__clock_t::now() + __rel_time);
150 template<typename _Clock, typename _Duration>
152 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
154 unique_lock<mutex> __lk(_M_mut);
// First wait (bounded by the deadline) to claim the write-entered
// flag ...
155 if (!_M_gate1.wait_until(__lk, __abs_time,
156 [=]{ return !_M_write_entered(); }))
160 _M_state |= _S_write_entered;
// ... then wait (same deadline) for the reader count to reach zero.
161 if (!_M_gate2.wait_until(__lk, __abs_time,
162 [=]{ return _M_readers() == 0; }))
// Timed out with readers still active: roll back the write-entered
// flag so readers and other writers can proceed again.
164 _M_state ^= _S_write_entered;
165 // Wake all threads blocked while the write-entered flag was set.
166 _M_gate1.notify_all();
// Exclusive unlock path: must hold _M_mut while notifying (see below).
175 lock_guard<mutex> __lk(_M_mut);
176 _GLIBCXX_DEBUG_ASSERT( _M_write_entered() );
178 // call notify_all() while mutex is held so that another thread can't
179 // lock and unlock the mutex then destroy *this before we make the call.
180 _M_gate1.notify_all();
// Shared ownership: wait until no writer is active or queued and the
// reader count is below its maximum (predicate: state < max readers).
188 unique_lock<mutex> __lk(_M_mut);
189 _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
// Non-blocking shared attempt: fail immediately if the internal mutex
// is contended or a reader slot is unavailable.
196 unique_lock<mutex> __lk(_M_mut, try_to_lock);
197 if (!__lk.owns_lock())
199 if (_M_state < _S_max_readers)
// Timed shared lock with a relative timeout, delegating to
// try_lock_shared_until.
207 template<typename _Rep, typename _Period>
209 try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
211 return try_lock_shared_until(__clock_t::now() + __rel_time);
214 template <typename _Clock, typename _Duration>
216 try_lock_shared_until(const chrono::time_point<_Clock,
217 _Duration>& __abs_time)
219 unique_lock<mutex> __lk(_M_mut);
// Wait (bounded by the deadline) for a reader slot to become free.
220 if (!_M_gate1.wait_until(__lk, __abs_time,
221 [=]{ return _M_state < _S_max_readers; }))
// Shared unlock path: decrement the reader count and wake whichever
// gate is relevant to the resulting state.
232 lock_guard<mutex> __lk(_M_mut);
233 _GLIBCXX_DEBUG_ASSERT( _M_readers() > 0 );
234 auto __prev = _M_state--;
235 if (_M_write_entered())
237 // Wake the queued writer if there are no more readers.
238 if (_M_readers() == 0)
239 _M_gate2.notify_one();
240 // No need to notify gate1 because we give priority to the queued
241 // writer, and that writer will eventually notify gate1 after it
242 // clears the write-entered flag.
246 // Wake any thread that was blocked on reader overflow.
247 if (__prev == _S_max_readers)
248 _M_gate1.notify_one();
252 #endif // _GLIBCXX_HAS_GTHREADS
/// shared_lock: a movable RAII wrapper owning a shared (reader) lock
/// on a mutex that provides lock_shared / try_lock_shared and the
/// timed variants (e.g. shared_timed_mutex).
255 template<typename _Mutex>
259 typedef _Mutex mutex_type;
// Default: associated with no mutex, owning nothing.
263 shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
// Blocking: acquire a shared lock on __m immediately.
266 shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
267 { __m.lock_shared(); }
// Associate with __m without locking it (lock later via lock()).
269 shared_lock(mutex_type& __m, defer_lock_t) noexcept
270 : _M_pm(&__m), _M_owns(false) { }
// Non-blocking attempt; _M_owns records whether it succeeded.
272 shared_lock(mutex_type& __m, try_to_lock_t)
273 : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }
// Adopt a shared lock the caller already holds on __m.
275 shared_lock(mutex_type& __m, adopt_lock_t)
276 : _M_pm(&__m), _M_owns(true) { }
// Timed attempt bounded by the absolute deadline __abs_time.
278 template<typename _Clock, typename _Duration>
279 shared_lock(mutex_type& __m,
280 const chrono::time_point<_Clock, _Duration>& __abs_time)
281 : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }
// Timed attempt bounded by the relative timeout __rel_time.
283 template<typename _Rep, typename _Period>
284 shared_lock(mutex_type& __m,
285 const chrono::duration<_Rep, _Period>& __rel_time)
286 : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }
// Destructor releases the shared lock when this object owns one.
291 _M_pm->unlock_shared();
// Not copyable; ownership is transferred by move instead.
294 shared_lock(shared_lock const&) = delete;
295 shared_lock& operator=(shared_lock const&) = delete;
// Move construction/assignment transfer the mutex pointer and the
// ownership flag, leaving the source disengaged.
297 shared_lock(shared_lock&& __sl) noexcept : shared_lock()
301 operator=(shared_lock&& __sl) noexcept
// Move-and-swap: the temporary's destructor releases our old state.
303 shared_lock(std::move(__sl)).swap(*this);
// Blocking lock through the associated mutex.
311 _M_pm->lock_shared();
// Non-blocking / timed lock attempts: result is stored in _M_owns and
// also returned to the caller.
319 return _M_owns = _M_pm->try_lock_shared();
322 template<typename _Rep, typename _Period>
324 try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
327 return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
330 template<typename _Clock, typename _Duration>
332 try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
335 return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
// Unlocking a lock that is not owned reports
// errc::resource_deadlock_would_occur rather than invoking UB.
342 __throw_system_error(int(errc::resource_deadlock_would_occur));
343 _M_pm->unlock_shared();
// Exchange state with another shared_lock.
350 swap(shared_lock& __u) noexcept
352 std::swap(_M_pm, __u._M_pm);
353 std::swap(_M_owns, __u._M_owns);
// Dissociate from the mutex without unlocking it; returns the old
// mutex pointer (caller becomes responsible for the lock).
360 return std::exchange(_M_pm, nullptr);
// Observers.
365 bool owns_lock() const noexcept { return _M_owns; }
367 explicit operator bool() const noexcept { return _M_owns; }
369 mutex_type* mutex() const noexcept { return _M_pm; }
// Shared precondition check for the locking members: locking requires
// an associated mutex (operation_not_permitted otherwise) that this
// object does not already own (resource_deadlock_would_occur).
375 if (_M_pm == nullptr)
376 __throw_system_error(int(errc::operation_not_permitted));
378 __throw_system_error(int(errc::resource_deadlock_would_occur));
385 /// Swap specialization for shared_lock
// Free-function swap so std::swap and ADL exchange two shared_locks
// via the member swap rather than move construction.
386 template<typename _Mutex>
388 swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
391 #endif // _GLIBCXX_USE_C99_STDINT_TR1
394 _GLIBCXX_END_NAMESPACE_VERSION
399 #endif // _GLIBCXX_SHARED_MUTEX