3 // Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 // Free Software Foundation, Inc.
6 // This file is part of the GNU ISO C++ Library. This library is free
7 // software; you can redistribute it and/or modify it under the
8 // terms of the GNU General Public License as published by the
9 // Free Software Foundation; either version 3, or (at your option)
12 // This library is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 // GNU General Public License for more details.
17 // Under Section 7 of GPL version 3, you are granted additional
18 // permissions described in the GCC Runtime Library Exception, version
19 // 3.1, as published by the Free Software Foundation.
21 // You should have received a copy of the GNU General Public License and
22 // a copy of the GCC Runtime Library Exception along with this program;
23 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 // <http://www.gnu.org/licenses/>.
27 * This is a Standard C++ Library header.
30 #ifndef _GLIBCXX_MUTEX
31 #define _GLIBCXX_MUTEX 1
33 #pragma GCC system_header
35 #ifndef __GXX_EXPERIMENTAL_CXX0X__
36 # include <bits/c++0x_warning.h>
43 #include <type_traits>
45 #include <system_error>
46 #include <bits/functexcept.h>
47 #include <bits/gthr.h>
48 #include <bits/move.h> // for std::swap
50 #if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
55 * @defgroup mutexes Mutexes
56 * @ingroup concurrency
58 * Classes for mutex support.
// std::mutex (fragmentary extract: the enclosing `class mutex` header,
// member-function signatures, #else/#endif lines and the closing brace
// appear elided; stray leading numbers are original-file line numbers
// from the paste). Non-recursive mutual exclusion over the gthreads layer.
65 typedef __gthread_mutex_t __native_type;
// Sole data member: the wrapped gthread mutex object.
66 __native_type _M_mutex;
// native_handle() exposes a pointer to the raw gthread mutex.
69 typedef __native_type* native_handle_type;
// Default construction: error values the gthread init may produce.
73 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
74 #ifdef __GTHREAD_MUTEX_INIT
// A static initializer exists: initialize via a temporary copy.
75 __native_type __tmp = __GTHREAD_MUTEX_INIT;
// Otherwise (elided #else branch) fall back to the runtime init function.
78 __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
// Mutexes are neither copyable nor assignable.
82 mutex(const mutex&) = delete;
83 mutex& operator=(const mutex&) = delete;
// lock(): block until acquired; nonzero gthread result becomes system_error.
88 int __e = __gthread_mutex_lock(&_M_mutex);
90 // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
92 __throw_system_error(__e);
// try_lock(): gthread returns 0 on success, hence the negation.
98 // XXX EINVAL, EAGAIN, EBUSY
99 return !__gthread_mutex_trylock(&_M_mutex);
// unlock(): the XXX lists possible errors; the return value is not checked.
105 // XXX EINVAL, EAGAIN, EPERM
106 __gthread_mutex_unlock(&_M_mutex);
// native_handle(): hand back the underlying gthread mutex.
111 { return &_M_mutex; }
// std::recursive_mutex (fragmentary extract: interior lines, #else/#endif
// and the closing brace appear elided). Same shape as std::mutex but uses
// the recursive gthread primitives, so the owning thread may re-lock.
115 class recursive_mutex
117 typedef __gthread_recursive_mutex_t __native_type;
// Sole data member: the wrapped recursive gthread mutex.
118 __native_type _M_mutex;
121 typedef __native_type* native_handle_type;
// Default construction: error values the gthread init may produce.
125 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
126 #ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
// Static initializer available: initialize via a temporary copy.
127 __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
// Otherwise (elided #else branch) use the runtime init function.
130 __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
// Not copyable, not assignable.
134 recursive_mutex(const recursive_mutex&) = delete;
135 recursive_mutex& operator=(const recursive_mutex&) = delete;
// lock(): block until acquired; nonzero result becomes system_error.
140 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
142 // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
144 __throw_system_error(__e);
// try_lock(): gthread returns 0 on success, hence the negation.
150 // XXX EINVAL, EAGAIN, EBUSY
151 return !__gthread_recursive_mutex_trylock(&_M_mutex);
// unlock(): return value intentionally unchecked here.
157 // XXX EINVAL, EAGAIN, EBUSY
158 __gthread_recursive_mutex_unlock(&_M_mutex);
// native_handle(): expose the underlying gthread mutex.
163 { return &_M_mutex; }
// std::timed_mutex (fragmentary extract: the class header, several
// signatures and closing braces appear elided). Adds deadline-based
// locking (try_lock_for / try_lock_until) on top of the plain mutex.
169 typedef __gthread_mutex_t __native_type;
// Internal reference clock: monotonic when available, otherwise the
// high-resolution clock.
171 #ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
172 typedef chrono::monotonic_clock __clock_t;
174 typedef chrono::high_resolution_clock __clock_t;
177 __native_type _M_mutex;
180 typedef __native_type* native_handle_type;
// Construction: static initializer when present, else runtime init.
184 #ifdef __GTHREAD_MUTEX_INIT
185 __native_type __tmp = __GTHREAD_MUTEX_INIT;
188 __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
// Not copyable, not assignable.
192 timed_mutex(const timed_mutex&) = delete;
193 timed_mutex& operator=(const timed_mutex&) = delete;
// lock(): block until acquired; nonzero result becomes system_error.
198 int __e = __gthread_mutex_lock(&_M_mutex);
200 // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
202 __throw_system_error(__e);
// try_lock(): 0 from gthreads means success, hence the negation.
208 // XXX EINVAL, EAGAIN, EBUSY
209 return !__gthread_mutex_trylock(&_M_mutex);
// try_lock_for(): relative timeout, dispatched to one of the two
// __try_lock_for_impl overloads below.
212 template <class _Rep, class _Period>
214 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
215 { return __try_lock_for_impl(__rtime); }
// try_lock_until(): absolute deadline. Split the time_point into whole
// seconds plus a nanosecond remainder to build the gthread timespec.
217 template <class _Clock, class _Duration>
219 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
221 chrono::time_point<_Clock, chrono::seconds> __s =
222 chrono::time_point_cast<chrono::seconds>(__atime);
224 chrono::nanoseconds __ns =
225 chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
227 __gthread_time_t __ts = {
228 static_cast<std::time_t>(__s.time_since_epoch().count()),
229 static_cast<long>(__ns.count())
232 return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
// unlock(): return value intentionally unchecked here.
238 // XXX EINVAL, EAGAIN, EBUSY
239 __gthread_mutex_unlock(&_M_mutex);
244 { return &_M_mutex; }
// Overload chosen when the reference clock's period is at least as fine
// as _Period: the duration_cast is exact enough, convert and defer to
// try_lock_until.
247 template<typename _Rep, typename _Period>
249 ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
250 __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
252 __clock_t::time_point __atime = __clock_t::now()
253 + chrono::duration_cast<__clock_t::duration>(__rtime);
255 return try_lock_until(__atime);
// Overload chosen when the clock is coarser: duration_cast truncates, so
// the prefix ++ bumps the converted duration by one tick to round up and
// never wait shorter than requested.
258 template <typename _Rep, typename _Period>
260 !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
261 __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
263 __clock_t::time_point __atime = __clock_t::now()
264 + ++chrono::duration_cast<__clock_t::duration>(__rtime);
266 return try_lock_until(__atime);
// std::recursive_timed_mutex (fragmentary extract: interior lines and
// closing braces appear elided). Mirrors timed_mutex but built on the
// recursive gthread primitives, so the owning thread may re-lock.
270 /// recursive_timed_mutex
271 class recursive_timed_mutex
273 typedef __gthread_recursive_mutex_t __native_type;
// Internal reference clock: monotonic when available, else high-resolution.
275 #ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
276 typedef chrono::monotonic_clock __clock_t;
278 typedef chrono::high_resolution_clock __clock_t;
281 __native_type _M_mutex;
284 typedef __native_type* native_handle_type;
286 recursive_timed_mutex()
// Construction: static initializer when present, else runtime init.
288 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
289 #ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
290 __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
293 __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
// Not copyable, not assignable.
297 recursive_timed_mutex(const recursive_timed_mutex&) = delete;
298 recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
// lock(): block until acquired; nonzero result becomes system_error.
303 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
305 // EINVAL, EAGAIN, EBUSY, EDEADLK(may)
307 __throw_system_error(__e);
// try_lock(): 0 from gthreads means success, hence the negation.
313 // XXX EINVAL, EAGAIN, EBUSY
314 return !__gthread_recursive_mutex_trylock(&_M_mutex);
// try_lock_for(): relative timeout, dispatched to an impl overload below.
317 template <class _Rep, class _Period>
319 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
320 { return __try_lock_for_impl(__rtime); }
// try_lock_until(): absolute deadline converted to a gthread timespec
// (whole seconds + nanosecond remainder).
322 template <class _Clock, class _Duration>
324 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
326 chrono::time_point<_Clock, chrono::seconds> __s =
327 chrono::time_point_cast<chrono::seconds>(__atime);
329 chrono::nanoseconds __ns =
330 chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
332 __gthread_time_t __ts = {
333 static_cast<std::time_t>(__s.time_since_epoch().count()),
334 static_cast<long>(__ns.count())
337 return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
// unlock(): return value intentionally unchecked here.
343 // XXX EINVAL, EAGAIN, EBUSY
344 __gthread_recursive_mutex_unlock(&_M_mutex);
349 { return &_M_mutex; }
// Exact-conversion overload: clock period fine enough, plain cast.
352 template<typename _Rep, typename _Period>
354 ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
355 __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
357 __clock_t::time_point __atime = __clock_t::now()
358 + chrono::duration_cast<__clock_t::duration>(__rtime);
360 return try_lock_until(__atime);
// Coarse-clock overload: duration_cast truncates, so prefix ++ rounds the
// converted duration up by one tick to avoid waiting too short.
363 template <typename _Rep, typename _Period>
365 !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
366 __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
368 __clock_t::time_point __atime = __clock_t::now()
369 + ++chrono::duration_cast<__clock_t::duration>(__rtime);
371 return try_lock_until(__atime);
// Tag types used to select the locking policy of lock_guard/unique_lock
// constructors at overload-resolution time; they carry no state.
375 /// Do not acquire ownership of the mutex.
376 struct defer_lock_t { };
378 /// Try to acquire ownership of the mutex without blocking.
379 struct try_to_lock_t { };
381 /// Assume the calling thread has already obtained mutex ownership
383 struct adopt_lock_t { };
// The tag objects themselves; definitions live in the library, these are
// only declarations.
385 extern const defer_lock_t defer_lock;
386 extern const try_to_lock_t try_to_lock;
387 extern const adopt_lock_t adopt_lock;
389 /// @brief Scoped lock idiom.
390 // Acquire the mutex here with a constructor call, then release with
391 // the destructor call in accordance with RAII style.
392 template<typename _Mutex>
396 typedef _Mutex mutex_type;
398 explicit lock_guard(mutex_type& __m) : _M_device(__m)
399 { _M_device.lock(); }
401 lock_guard(mutex_type& __m, adopt_lock_t __a) : _M_device(__m)
402 { _M_device.lock(); }
405 { _M_device.unlock(); }
407 lock_guard(const lock_guard&) = delete;
408 lock_guard& operator=(const lock_guard&) = delete;
411 mutex_type& _M_device;
// std::unique_lock (heavily fragmentary extract: many signatures and the
// `if (!_M_device) ... else if (_M_owns) ...` guards that precede the
// bare __throw_system_error lines appear elided). A movable mutex
// ownership wrapper: _M_device points at the mutex (null when detached),
// _M_owns records whether this object currently owns the lock.
415 template<typename _Mutex>
419 typedef _Mutex mutex_type;
// Default construction: no mutex attached, nothing owned.
422 : _M_device(0), _M_owns(false)
// Locking constructor: attaches __m; the lock() call and the assignment
// of _M_owns = true appear elided from this extract.
425 explicit unique_lock(mutex_type& __m)
426 : _M_device(&__m), _M_owns(false)
// defer_lock: attach without locking.
432 unique_lock(mutex_type& __m, defer_lock_t)
433 : _M_device(&__m), _M_owns(false)
// try_to_lock: attempt a non-blocking acquisition during construction.
436 unique_lock(mutex_type& __m, try_to_lock_t)
437 : _M_device(&__m), _M_owns(_M_device->try_lock())
// adopt_lock: caller already owns the mutex; just record ownership.
440 unique_lock(mutex_type& __m, adopt_lock_t)
441 : _M_device(&__m), _M_owns(true)
443 // XXX calling thread owns mutex
// Timed constructors: ownership reflects the timed acquisition result.
446 template<typename _Clock, typename _Duration>
447 unique_lock(mutex_type& __m,
448 const chrono::time_point<_Clock, _Duration>& __atime)
449 : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
452 template<typename _Rep, typename _Period>
453 unique_lock(mutex_type& __m,
454 const chrono::duration<_Rep, _Period>& __rtime)
455 : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
// Copying is forbidden; moving transfers ownership.
464 unique_lock(const unique_lock&) = delete;
465 unique_lock& operator=(const unique_lock&) = delete;
467 unique_lock(unique_lock&& __u)
468 : _M_device(__u._M_device), _M_owns(__u._M_owns)
// Move assignment via the move-construct-and-swap idiom.
474 unique_lock& operator=(unique_lock&& __u)
479 unique_lock(std::move(__u)).swap(*this);
// lock(): throws operation_not_permitted with no attached mutex,
// resource_deadlock_would_occur when already owning (guards elided).
491 __throw_system_error(int(errc::operation_not_permitted));
493 __throw_system_error(int(errc::resource_deadlock_would_occur));
// try_lock(): same precondition checks, then a non-blocking attempt.
505 __throw_system_error(int(errc::operation_not_permitted));
507 __throw_system_error(int(errc::resource_deadlock_would_occur));
510 _M_owns = _M_device->try_lock();
// try_lock_until(): same checks, then a deadline-bounded attempt.
515 template<typename _Clock, typename _Duration>
517 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
520 __throw_system_error(int(errc::operation_not_permitted));
522 __throw_system_error(int(errc::resource_deadlock_would_occur));
525 _M_owns = _M_device->try_lock_until(__atime);
// try_lock_for(): same checks, then a duration-bounded attempt.
530 template<typename _Rep, typename _Period>
532 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
535 __throw_system_error(int(errc::operation_not_permitted));
537 __throw_system_error(int(errc::resource_deadlock_would_occur));
540 _M_owns = _M_device->try_lock_for(__rtime);
// unlock(): throws when not owning (guard elided).
549 __throw_system_error(int(errc::operation_not_permitted));
// swap(): exchange both the mutex pointer and the ownership flag.
558 swap(unique_lock& __u)
560 std::swap(_M_device, __u._M_device);
561 std::swap(_M_owns, __u._M_owns);
// release(): detach and return the mutex without unlocking (rest elided).
567 mutex_type* __ret = _M_device;
// Observers: owns_lock()/operator bool report ownership, mutex() the
// attached mutex (possibly null).
577 explicit operator bool() const
578 { return owns_lock(); }
582 { return _M_device; }
585 mutex_type* _M_device;
586 bool _M_owns; // XXX use atomic_bool
// Free swap overload for unique_lock; the return type line and the body
// ({ __x.swap(__y); }) appear elided from this extract.
589 template<typename _Mutex>
591 swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y)
// Compile-time recursion helpers for the variadic try_lock below
// (primary template headers and some braces appear elided).
// __unlock_impl<_Idx>: unlocks tuple elements _Idx, _Idx-1, ..., 0.
597 template<typename... _Lock>
599 __do_unlock(tuple<_Lock&...>& __locks)
601 std::get<_Idx>(__locks).unlock();
602 __unlock_impl<_Idx - 1>::__do_unlock(__locks);
// Recursion base case: index -1 unlocks nothing.
607 struct __unlock_impl<-1>
609 template<typename... _Lock>
611 __do_unlock(tuple<_Lock&...>&)
// __try_lock_impl<_Idx, true>: try-lock element _Idx; on success recurse
// to _Idx + 1 (the _Continue flag stops the recursion at the last lock);
// on failure roll back by unlocking everything up to _Idx.
615 template<int _Idx, bool _Continue = true>
616 struct __try_lock_impl
618 template<typename... _Lock>
620 __do_try_lock(tuple<_Lock&...>& __locks)
622 if(std::get<_Idx>(__locks).try_lock())
624 return __try_lock_impl<_Idx + 1,
625 _Idx + 2 < sizeof...(_Lock)>::__do_try_lock(__locks)
629 __unlock_impl<_Idx>::__do_unlock(__locks);
// Terminal specialization for the last lock: try it, and on failure
// unlock all previously acquired locks (success path lines elided).
636 struct __try_lock_impl<_Idx, false>
638 template<typename... _Lock>
640 __do_try_lock(tuple<_Lock&...>& __locks)
642 if(std::get<_Idx>(__locks).try_lock())
646 __unlock_impl<_Idx>::__do_unlock(__locks);
652 /** @brief Generic try_lock.
653 * @param __l1 Meets Mutex requirements (try_lock() may throw).
654 * @param __l2 Meets Mutex requirements (try_lock() may throw).
655 * @param __l3 Meets Mutex requirements (try_lock() may throw).
656 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
657 * a 0-based index corresponding to the argument that returned false.
658 * @post Either all arguments are locked, or none will be.
660 * Sequentially calls try_lock() on each argument.
// Collect the locks into a tuple of references and run the compile-time
// recursion defined above, starting at index 0.
662 template<typename _Lock1, typename _Lock2, typename... _Lock3>
664 try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
666 tuple<_Lock1&, _Lock2&, _Lock3&...> __locks(__l1, __l2, __l3...);
667 return __try_lock_impl<0>::__do_try_lock(__locks);
// Generic deadlock-avoiding lock(); only declared here, defined elsewhere.
671 template<typename _L1, typename _L2, typename ..._L3>
673 lock(_L1&, _L2&, _L3&...);
// std::once_flag (fragmentary extract: the class header, constexpr
// constructor signature and closing brace appear elided). Wraps the
// gthread once-control object used by call_once.
679 typedef __gthread_once_t __native_type;
680 __native_type _M_once;
// Constructor initializes the flag from the gthread static initializer.
685 __native_type __tmp = __GTHREAD_ONCE_INIT;
// A once_flag identifies one "call once" site; it cannot be copied.
689 once_flag(const once_flag&) = delete;
690 once_flag& operator=(const once_flag&) = delete;
// call_once is a friend/related template; declaration only, defined below.
692 template<typename _Callable, typename... _Args>
694 call_once(once_flag& __once, _Callable __f, _Args&&... __args);
// Support machinery for call_once. Two implementations: with TLS the
// pending callable is published through thread-local globals; without
// TLS it is stashed in a process-wide function<void()> guarded by a
// mutex (accessor lines elided in this extract).
697 #ifdef _GLIBCXX_HAVE_TLS
// Thread-local slot for the bound functor and the trampoline that runs it.
698 extern __thread void* __once_callable;
699 extern __thread void (*__once_call)();
// Trampoline instantiated per callable type: casts the stored pointer
// back and invokes it.
701 template<typename _Callable>
705 (*(_Callable*)__once_callable)();
// Non-TLS path: shared pending functor plus a lock handed to the library
// so it can release it around the gthread once call.
708 extern function<void()> __once_functor;
711 __set_once_functor_lock_ptr(unique_lock<mutex>*);
// C-linkage proxy passed to __gthread_once; dispatches to the stored call.
717 extern "C" void __once_proxy();
// call_once: run __f(__args...) exactly once per once_flag, even across
// threads (fragmentary extract: #else/#endif lines and the surrounding
// error-check `if` appear elided).
// NOTE(review): __args... are passed to std::bind by copy, not
// std::forward'ed, despite the _Args&&... signature -- rvalue arguments
// degrade to copies here; later libstdc++ versions forward them. Confirm
// before relying on move semantics through call_once.
720 template<typename _Callable, typename... _Args>
722 call_once(once_flag& __once, _Callable __f, _Args&&... __args)
724 #ifdef _GLIBCXX_HAVE_TLS
// TLS path: publish a pointer to a stack-local bound functor plus its
// typed trampoline; valid only for the duration of this call.
725 auto __bound_functor = std::bind<void>(__f, __args...);
726 __once_callable = &__bound_functor;
727 __once_call = &__once_call_impl<decltype(__bound_functor)>;
// Non-TLS path: store the functor in the shared slot under the once
// mutex and hand the lock to the library for the duration of the call.
729 unique_lock<mutex> __functor_lock(__get_once_mutex());
730 __once_functor = std::bind<void>(__f, __args...);
731 __set_once_functor_lock_ptr(&__functor_lock);
// Run the proxy at most once; __gthread_once returns 0 on success.
734 int __e = __gthread_once(&(__once._M_once), &__once_proxy);
// Non-TLS cleanup: detach the lock pointer again (guard lines elided).
736 #ifndef _GLIBCXX_HAVE_TLS
738 __set_once_functor_lock_ptr(0);
// Nonzero gthread result becomes a system_error (guard `if` elided).
742 __throw_system_error(__e);
748 #endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
750 #endif // __GXX_EXPERIMENTAL_CXX0X__
752 #endif // _GLIBCXX_MUTEX