libstdc++
shared_mutex
// <shared_mutex> -*- C++ -*-

// Copyright (C) 2013-2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/shared_mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_SHARED_MUTEX
#define _GLIBCXX_SHARED_MUTEX 1

#pragma GCC system_header

#if __cplusplus <= 201103L
# include <bits/c++14_warning.h>
#else

#include <bits/c++config.h>
#include <mutex>
#include <condition_variable>
#include <bits/functexcept.h>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @ingroup mutexes
   * @{
   */

#ifdef _GLIBCXX_USE_C99_STDINT_TR1
#ifdef _GLIBCXX_HAS_GTHREADS

#define __cpp_lib_shared_timed_mutex 201402

  /// shared_timed_mutex
  class shared_timed_mutex
  {
#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
    typedef chrono::system_clock        __clock_t;

#ifdef PTHREAD_RWLOCK_INITIALIZER
    pthread_rwlock_t    _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;

  public:
    shared_timed_mutex() = default;
    ~shared_timed_mutex() = default;
#else
    pthread_rwlock_t    _M_rwlock;

  public:
    shared_timed_mutex()
    {
      int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
      if (__ret == ENOMEM)
        __throw_bad_alloc();
      else if (__ret == EAGAIN)
        __throw_system_error(int(errc::resource_unavailable_try_again));
      else if (__ret == EPERM)
        __throw_system_error(int(errc::operation_not_permitted));
      // Errors not handled: EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    ~shared_timed_mutex()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
      // Errors not handled: EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }
#endif

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      int __ret = pthread_rwlock_wrlock(&_M_rwlock);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    bool
    try_lock()
    {
      int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
      if (__ret == EBUSY) return false;
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_until(__clock_t::now() + __rel_time);
      }

    template<typename _Duration>
      bool
      try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
        // On self-deadlock, we just fail to acquire the lock.  Technically,
        // the program violated the precondition.
        if (__ret == ETIMEDOUT || __ret == EDEADLK)
          return false;
        // Errors not handled: EINVAL
        _GLIBCXX_DEBUG_ASSERT(__ret == 0);
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        // DR 887 - Sync unknown clock to known clock.
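        // For example, if _Clock is steady_clock and __abs_time lies five
        // seconds in the future at entry, the deadline is translated into a
        // system_clock time point roughly five seconds from now, which the
        // __clock_t overload above can hand to the pthreads call.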
        const typename _Clock::time_point __c_entry = _Clock::now();
        const __clock_t::time_point __s_entry = __clock_t::now();
        const auto __delta = __abs_time - __c_entry;
        const auto __s_atime = __s_entry + __delta;
        return try_lock_until(__s_atime);
      }

    void
    unlock()
    {
      int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
      // Errors not handled: EPERM, EBUSY, EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    // Shared ownership

    void
    lock_shared()
    {
      int __ret;
      // We retry if we exceeded the maximum number of read locks supported by
      // the POSIX implementation; this can result in busy-waiting, but this
      // is okay based on the current specification of forward progress
      // guarantees by the standard.
      do
        __ret = pthread_rwlock_rdlock(&_M_rwlock);
      while (__ret == EAGAIN);
      if (__ret == EDEADLK)
        __throw_system_error(int(errc::resource_deadlock_would_occur));
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
    }

    bool
    try_lock_shared()
    {
      int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
      // If the maximum number of read locks has been exceeded, we just fail
      // to acquire the lock.  Unlike for lock(), we are not allowed to throw
      // an exception.
      if (__ret == EBUSY || __ret == EAGAIN) return false;
      // Errors not handled: EINVAL
      _GLIBCXX_DEBUG_ASSERT(__ret == 0);
      return true;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

    template<typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<__clock_t,
                            _Duration>& __atime)
      {
        auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
        auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts =
          {
            static_cast<std::time_t>(__s.time_since_epoch().count()),
            static_cast<long>(__ns.count())
          };

        int __ret;
        // Unlike for lock(), we are not allowed to throw an exception so if
        // the maximum number of read locks has been exceeded, or we would
        // deadlock, we just try to acquire the lock again (and will time out
        // eventually).
        // In cases where we would exceed the maximum number of read locks
        // throughout the whole time until the timeout, we will fail to
        // acquire the lock even if it would be logically free; however, this
        // is allowed by the standard, and we made a "strong effort"
        // (see C++14 30.4.1.4p26).
        // For cases where the implementation detects a deadlock we
        // intentionally block and timeout so that an early return isn't
        // mistaken for a spurious failure, which might help users realise
        // there is a deadlock.
        do
          __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
        while (__ret == EAGAIN || __ret == EDEADLK);
        if (__ret == ETIMEDOUT)
          return false;
        // Errors not handled: EINVAL
        _GLIBCXX_DEBUG_ASSERT(__ret == 0);
        return true;
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                            _Duration>& __abs_time)
      {
        // DR 887 - Sync unknown clock to known clock.
        const typename _Clock::time_point __c_entry = _Clock::now();
        const __clock_t::time_point __s_entry = __clock_t::now();
        const auto __delta = __abs_time - __c_entry;
        const auto __s_atime = __s_entry + __delta;
        return try_lock_shared_until(__s_atime);
      }

    void
    unlock_shared()
    {
      unlock();
    }

#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)

    // Must use the same clock as condition_variable
    typedef chrono::system_clock        __clock_t;

    // Based on Howard Hinnant's reference implementation from N2406.

    // The high bit of _M_state is the write-entered flag which is set to
    // indicate a writer has taken the lock or is queuing to take the lock.
    // The remaining bits are the count of reader locks.
    //
    // To take a reader lock, block on gate1 while the write-entered flag is
    // set or the maximum number of reader locks is held, then increment the
    // reader lock count.
    // To release, decrement the count, then if the write-entered flag is set
    // and the count is zero then signal gate2 to wake a queued writer,
    // otherwise if the maximum number of reader locks was held signal gate1
    // to wake a reader.
    //
    // To take a writer lock, block on gate1 while the write-entered flag is
    // set, then set the write-entered flag to start queueing, then block on
    // gate2 while the number of reader locks is non-zero.
    // To release, unset the write-entered flag and signal gate1 to wake all
    // blocked readers and writers.
    //
    // This means that when no reader locks are held readers and writers get
    // equal priority. When one or more reader locks is held a writer gets
    // priority and no more reader locks can be taken while the writer is
    // queued.
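    //
    // For example, with a 32-bit unsigned int:
    //   _S_write_entered == 0x80000000, _S_max_readers == 0x7fffffff,
    //   and _M_state == 0x80000002 means a writer holds or is queued for
    //   the lock while two reader locks are still outstanding.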

    // Only locked when accessing _M_state or waiting on condition variables.
    mutex               _M_mut;
    // Used to block while write-entered is set or reader count at maximum.
    condition_variable  _M_gate1;
    // Used to block queued writers while reader count is non-zero.
    condition_variable  _M_gate2;
    // The write-entered flag and reader count.
    unsigned            _M_state;

    static constexpr unsigned _S_write_entered
      = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
    static constexpr unsigned _S_max_readers = ~_S_write_entered;

    // Test whether the write-entered flag is set. _M_mut must be locked.
    bool _M_write_entered() const { return _M_state & _S_write_entered; }

    // The number of reader locks currently held. _M_mut must be locked.
    unsigned _M_readers() const { return _M_state & _S_max_readers; }

  public:
    shared_timed_mutex() : _M_state(0) {}

    ~shared_timed_mutex()
    {
      _GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
    }

    shared_timed_mutex(const shared_timed_mutex&) = delete;
    shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;

    // Exclusive ownership

    void
    lock()
    {
      unique_lock<mutex> __lk(_M_mut);
      // Wait until we can set the write-entered flag.
      _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
      _M_state |= _S_write_entered;
      // Then wait until there are no more readers.
      _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
    }

    bool
    try_lock()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (__lk.owns_lock() && _M_state == 0)
        {
          _M_state = _S_write_entered;
          return true;
        }
      return false;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_until(__clock_t::now() + __rel_time);
      }

    template<typename _Clock, typename _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return !_M_write_entered(); }))
          {
            return false;
          }
        _M_state |= _S_write_entered;
        if (!_M_gate2.wait_until(__lk, __abs_time,
                                 [=]{ return _M_readers() == 0; }))
          {
            _M_state ^= _S_write_entered;
            // Wake all threads blocked while the write-entered flag was set.
            _M_gate1.notify_all();
            return false;
          }
        return true;
      }

    void
    unlock()
    {
      lock_guard<mutex> __lk(_M_mut);
      _GLIBCXX_DEBUG_ASSERT( _M_write_entered() );
      _M_state = 0;
      // call notify_all() while mutex is held so that another thread can't
      // lock and unlock the mutex then destroy *this before we make the call.
      _M_gate1.notify_all();
    }

    // Shared ownership

    void
    lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut);
      _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
      ++_M_state;
    }

    bool
    try_lock_shared()
    {
      unique_lock<mutex> __lk(_M_mut, try_to_lock);
      if (!__lk.owns_lock())
        return false;
      if (_M_state < _S_max_readers)
        {
          ++_M_state;
          return true;
        }
      return false;
    }

    template<typename _Rep, typename _Period>
      bool
      try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
      {
        return try_lock_shared_until(__clock_t::now() + __rel_time);
      }

    template <typename _Clock, typename _Duration>
      bool
      try_lock_shared_until(const chrono::time_point<_Clock,
                                                     _Duration>& __abs_time)
      {
        unique_lock<mutex> __lk(_M_mut);
        if (!_M_gate1.wait_until(__lk, __abs_time,
                                 [=]{ return _M_state < _S_max_readers; }))
          {
            return false;
          }
        ++_M_state;
        return true;
      }

    void
    unlock_shared()
    {
      lock_guard<mutex> __lk(_M_mut);
      _GLIBCXX_DEBUG_ASSERT( _M_readers() > 0 );
      auto __prev = _M_state--;
      if (_M_write_entered())
        {
          // Wake the queued writer if there are no more readers.
          if (_M_readers() == 0)
            _M_gate2.notify_one();
          // No need to notify gate1 because we give priority to the queued
          // writer, and that writer will eventually notify gate1 after it
          // clears the write-entered flag.
        }
      else
        {
          // Wake any thread that was blocked on reader overflow.
          if (__prev == _S_max_readers)
            _M_gate1.notify_one();
        }
    }
#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
  };
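
  // A minimal usage sketch (illustrative only; the names used are not part
  // of this header): a writer takes exclusive ownership through
  // lock()/unlock() via lock_guard, while readers take shared ownership
  // through lock_shared()/unlock_shared() via the shared_lock class
  // template defined below.
  //
  //   std::shared_timed_mutex mtx;
  //   int value = 0;
  //
  //   void writer(int v)
  //   {
  //     std::lock_guard<std::shared_timed_mutex> l(mtx);  // exclusive
  //     value = v;
  //   }
  //
  //   int reader()
  //   {
  //     std::shared_lock<std::shared_timed_mutex> l(mtx); // shared
  //     return value;
  //   }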
#endif // _GLIBCXX_HAS_GTHREADS

  /// shared_lock
  template<typename _Mutex>
    class shared_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Shared locking

      shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }

      explicit
      shared_lock(mutex_type& __m) : _M_pm(&__m), _M_owns(true)
      { __m.lock_shared(); }

      shared_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_pm(&__m), _M_owns(false) { }

      shared_lock(mutex_type& __m, try_to_lock_t)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared()) { }

      shared_lock(mutex_type& __m, adopt_lock_t)
      : _M_pm(&__m), _M_owns(true) { }

      template<typename _Clock, typename _Duration>
        shared_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __abs_time)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared_until(__abs_time)) { }

      template<typename _Rep, typename _Period>
        shared_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rel_time)
      : _M_pm(&__m), _M_owns(__m.try_lock_shared_for(__rel_time)) { }

      ~shared_lock()
      {
        if (_M_owns)
          _M_pm->unlock_shared();
      }

      shared_lock(shared_lock const&) = delete;
      shared_lock& operator=(shared_lock const&) = delete;

      shared_lock(shared_lock&& __sl) noexcept : shared_lock()
      { swap(__sl); }

      shared_lock&
      operator=(shared_lock&& __sl) noexcept
      {
        shared_lock(std::move(__sl)).swap(*this);
        return *this;
      }

      void
      lock()
      {
        _M_lockable();
        _M_pm->lock_shared();
        _M_owns = true;
      }

      bool
      try_lock()
      {
        _M_lockable();
        return _M_owns = _M_pm->try_lock_shared();
      }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
        }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
        {
          _M_lockable();
          return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        _M_pm->unlock_shared();
        _M_owns = false;
      }

      // Setters

      void
      swap(shared_lock& __u) noexcept
      {
        std::swap(_M_pm, __u._M_pm);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
        _M_owns = false;
        return std::exchange(_M_pm, nullptr);
      }

      // Getters

      bool owns_lock() const noexcept { return _M_owns; }

      explicit operator bool() const noexcept { return _M_owns; }

      mutex_type* mutex() const noexcept { return _M_pm; }

    private:
      void
      _M_lockable() const
      {
        if (_M_pm == nullptr)
          __throw_system_error(int(errc::operation_not_permitted));
        if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
      }

      mutex_type*       _M_pm;
      bool              _M_owns;
    };

  /// Swap specialization for shared_lock
  template<typename _Mutex>
    void
    swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
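
  // An illustrative sketch of deferred and timed shared locking (the
  // function and variable names below are examples, not part of the
  // library):
  //
  //   std::shared_timed_mutex m;
  //
  //   bool poll_shared_data()
  //   {
  //     std::shared_lock<std::shared_timed_mutex> lk(m, std::defer_lock);
  //     if (!lk.try_lock_for(std::chrono::milliseconds(50)))
  //       return false;              // timed out; lk owns nothing
  //     // ... read the shared data while lk owns the shared lock ...
  //     return true;                 // unlock_shared() runs in ~shared_lock()
  //   }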

#endif // _GLIBCXX_USE_C99_STDINT_TR1

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // C++14

#endif // _GLIBCXX_SHARED_MUTEX