// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#ifndef __GXX_EXPERIMENTAL_CXX0X__
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <chrono>
#include <exception>
#include <type_traits>
#include <functional>
#include <system_error>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/move.h> // for std::swap

#if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  /**
   * @defgroup mutexes Mutexes
   * @ingroup concurrency
   *
   * Classes for mutex support.
   * @{
   */

  /// mutex
  class mutex
  {
    typedef __gthread_mutex_t __native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    constexpr mutex() noexcept : _M_mutex(__GTHREAD_MUTEX_INIT) { }
#else
    mutex() noexcept
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~mutex() { __gthread_mutex_destroy(&_M_mutex); }
#endif

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    void lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    void unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type native_handle()
    { return &_M_mutex; }
  };
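
  // Illustrative usage sketch (editorial, not part of the upstream header):
  // a std::mutex serializing access to a shared counter.  The names
  // `counter', `counter_mutex' and `increment' are hypothetical.
  //
  //   #include <mutex>
  //   int counter = 0;
  //   std::mutex counter_mutex;
  //   void increment()
  //   {
  //     counter_mutex.lock();    // blocks until ownership is acquired
  //     ++counter;
  //     counter_mutex.unlock();  // must be called by the owning thread
  //   }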

#ifndef __GTHREAD_RECURSIVE_MUTEX_INIT
  // FIXME: gthreads doesn't define __gthread_recursive_mutex_destroy
  // so we need to obtain a __gthread_mutex_t to destroy
  class __destroy_recursive_mutex
  {
    template<typename _Mx, typename _Rm>
      static void
      _S_destroy_win32(_Mx* __mx, _Rm const* __rmx)
      {
        __mx->counter = __rmx->counter;
        __mx->sema = __rmx->sema;
        __gthread_mutex_destroy(__mx);
      }

  public:
    // matches a gthr-win32.h recursive mutex
    template<typename _Rm>
      static typename enable_if<sizeof(&_Rm::sema), void>::type
      _S_destroy(_Rm* __mx)
      {
        __gthread_mutex_t __tmp;
        _S_destroy_win32(&__tmp, __mx);
      }

    // matches a recursive mutex with a member 'actual'
    template<typename _Rm>
      static typename enable_if<sizeof(&_Rm::actual), void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(&__mx->actual); }

    // matches when there's only one mutex type
    template<typename _Rm>
      static
      typename enable_if<is_same<_Rm, __gthread_mutex_t>::value, void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(__mx); }
  };
#endif

  /// recursive_mutex
  class recursive_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    recursive_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { }
#else
    recursive_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~recursive_mutex()
    { __destroy_recursive_mutex::_S_destroy(&_M_mutex); }
#endif

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type native_handle()
    { return &_M_mutex; }
  };
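
  // Illustrative usage sketch (editorial, not part of the upstream header):
  // std::recursive_mutex may be re-locked by the thread that already owns
  // it, e.g. when one locking member function calls another.  The type
  // `Registry' and its members are hypothetical.
  //
  //   #include <mutex>
  //   struct Registry
  //   {
  //     std::recursive_mutex m;
  //     int n = 0;
  //     int size()  { std::lock_guard<std::recursive_mutex> g(m); return n; }
  //     bool empty() // locks m again while size() also holds it
  //     { std::lock_guard<std::recursive_mutex> g(m); return size() == 0; }
  //   };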

  /// timed_mutex
  class timed_mutex
  {
    typedef __gthread_mutex_t __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    timed_mutex() : _M_mutex(__GTHREAD_MUTEX_INIT) { }
#else
    timed_mutex()
    { __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex); }

    ~timed_mutex() { __gthread_mutex_destroy(&_M_mutex); }
#endif

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);
        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    void unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type native_handle()
    { return &_M_mutex; }

  private:
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };
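
  // Illustrative usage sketch (editorial, not part of the upstream header):
  // try_lock_for() gives up after a relative timeout instead of blocking
  // indefinitely.  The names `journal' and `append_entry' are hypothetical.
  //
  //   #include <chrono>
  //   #include <mutex>
  //   std::timed_mutex journal;
  //   bool append_entry()
  //   {
  //     if (!journal.try_lock_for(std::chrono::milliseconds(100)))
  //       return false;          // could not acquire within 100ms
  //     // ... write the entry ...
  //     journal.unlock();
  //     return true;
  //   }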

  /// recursive_timed_mutex
  class recursive_timed_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    recursive_timed_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { }
#else
    recursive_timed_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~recursive_timed_mutex()
    { __destroy_recursive_mutex::_S_destroy(&_M_mutex); }
#endif

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);
        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    void unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type native_handle()
    { return &_M_mutex; }

  private:
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };

  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  constexpr defer_lock_t  defer_lock { };
  constexpr try_to_lock_t try_to_lock { };
  constexpr adopt_lock_t  adopt_lock { };

  /// @brief  Scoped lock idiom.
  // Acquire the mutex here with a constructor call, then release with
  // the destructor call in accordance with RAII style.
  template<typename _Mutex>
    class lock_guard
    {
    public:
      typedef _Mutex mutex_type;

      explicit lock_guard(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
      { } // calling thread owns mutex

      ~lock_guard()
      { _M_device.unlock(); }

      lock_guard(const lock_guard&) = delete;
      lock_guard& operator=(const lock_guard&) = delete;

    private:
      mutex_type&  _M_device;
    };
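
  // Illustrative usage sketch (editorial, not part of the upstream header):
  // the guard locks in its constructor and unlocks in its destructor, so
  // the mutex is released on every path out of the block, including
  // exceptions.  The names `cache', `cache_mutex' and `insert' are
  // hypothetical.
  //
  //   #include <map>
  //   #include <mutex>
  //   #include <string>
  //   std::map<std::string, int> cache;
  //   std::mutex cache_mutex;
  //   void insert(const std::string& key, int value)
  //   {
  //     std::lock_guard<std::mutex> guard(cache_mutex);
  //     cache[key] = value;      // unlocked automatically when guard dies
  //   }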

  /// unique_lock
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      unique_lock() noexcept
      : _M_device(0), _M_owns(false)
      { }

      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
        lock();
        _M_owns = true;
      }

      unique_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_device(&__m), _M_owns(false)
      { }

      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      { } // XXX calling thread owns mutex

      template<typename _Clock, typename _Duration>
        unique_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __atime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
        { }

      template<typename _Rep, typename _Period>
        unique_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rtime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
        { }

      ~unique_lock()
      {
        if (_M_owns)
          unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      unique_lock(unique_lock&& __u) noexcept
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
        __u._M_device = 0;
        __u._M_owns = false;
      }

      unique_lock& operator=(unique_lock&& __u) noexcept
      {
        if (_M_owns)
          unlock();

        unique_lock(std::move(__u)).swap(*this);

        __u._M_device = 0;
        __u._M_owns = false;

        return *this;
      }

      void lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_device->lock();
            _M_owns = true;
          }
      }

      bool try_lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_owns = _M_device->try_lock();
            return _M_owns;
          }
      }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_until(__atime);
              return _M_owns;
            }
        }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_for(__rtime);
              return _M_owns;
            }
        }

      void unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_device)
          {
            _M_device->unlock();
            _M_owns = false;
          }
      }

      void swap(unique_lock& __u) noexcept
      {
        std::swap(_M_device, __u._M_device);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type* release() noexcept
      {
        mutex_type* __ret = _M_device;
        _M_device = 0;
        _M_owns = false;
        return __ret;
      }

      bool owns_lock() const noexcept
      { return _M_owns; }

      explicit operator bool() const noexcept
      { return owns_lock(); }

      mutex_type* mutex() const noexcept
      { return _M_device; }

    private:
      mutex_type* _M_device;
      bool        _M_owns; // XXX use atomic_bool
    };

  /// Swap overload for unique_lock objects.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
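
  // Illustrative usage sketch (editorial, not part of the upstream header):
  // unlike lock_guard, unique_lock can defer locking, be unlocked early,
  // and be moved.  The names `a', `b' and `transfer' are hypothetical.
  //
  //   #include <mutex>
  //   std::mutex a, b;
  //   void transfer()
  //   {
  //     std::unique_lock<std::mutex> la(a, std::defer_lock);
  //     std::unique_lock<std::mutex> lb(b, std::defer_lock);
  //     std::lock(la, lb);       // lock both without deadlock
  //     // ... update state guarded by a and b ...
  //   }                          // both released by the destructors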

  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>& __locks)
        {
          std::get<_Idx>(__locks).unlock();
          __unlock_impl<_Idx - 1>::__do_unlock(__locks);
        }
    };

  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>&)
        { }
    };

  template<typename _Lock>
    unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>(__l, try_to_lock); }

  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
                __do_try_lock(__locks, __idx);
              if (__idx == -1)
                __lock.release();
            }
        }
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __idx = -1;
              __lock.release();
            }
        }
    };

  /** @brief Generic try_lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try
      { __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
      __catch(...)
      { }
      return __idx;
    }
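
  // Illustrative usage sketch (editorial, not part of the upstream header):
  // try_lock() either locks every argument or none, and reports which
  // argument failed.  The names `m1', `m2' and `poll_both' are hypothetical.
  //
  //   #include <mutex>
  //   std::mutex m1, m2;
  //   bool poll_both()
  //   {
  //     int failed = std::try_lock(m1, m2);
  //     if (failed != -1)
  //       return false;          // argument `failed' (0-based) was busy
  //     // ... both held here ...
  //     m1.unlock();
  //     m2.unlock();
  //     return true;
  //   }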

  /** @brief Generic lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      while (true)
        {
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              __first.release();
              return;
            }
        }
    }
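
  // Illustrative usage sketch (editorial, not part of the upstream header):
  // std::lock acquires several lockables without deadlock regardless of
  // the order other threads name them; pairing it with adopt_lock hands
  // the already-held mutexes to guards.  The names `from', `to' and
  // `move_item' are hypothetical.
  //
  //   #include <mutex>
  //   std::mutex from, to;
  //   void move_item()
  //   {
  //     std::lock(from, to);
  //     std::lock_guard<std::mutex> g1(from, std::adopt_lock);
  //     std::lock_guard<std::mutex> g2(to, std::adopt_lock);
  //     // ... move data; both unlocked when the guards go out of scope ...
  //   }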

  /// once_flag
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type  _M_once;

  public:
    /// Constructor
    constexpr once_flag() noexcept : _M_once(__GTHREAD_ONCE_INIT) { }

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

#ifdef _GLIBCXX_HAVE_TLS
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  extern "C" void __once_proxy();

  /// call_once
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      auto __bound_functor = std::bind<void>(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = std::bind<void>(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
        __throw_system_error(__e);
    }
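
  // Illustrative usage sketch (editorial, not part of the upstream header):
  // call_once runs the callable exactly once even if several threads reach
  // it concurrently; later callers wait for the first to finish.  The names
  // `init_flag', `load_config' and `ensure_initialized' are hypothetical.
  //
  //   #include <mutex>
  //   std::once_flag init_flag;
  //   void load_config();        // hypothetical, defined elsewhere
  //   void ensure_initialized()
  //   {
  //     std::call_once(init_flag, load_config);
  //   }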

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1

#endif // __GXX_EXPERIMENTAL_CXX0X__

#endif // _GLIBCXX_MUTEX