1 // Support for concurrent programming -*- C++ -*-
3 // Copyright (C) 2003, 2004, 2005, 2006
4 // Free Software Foundation, Inc.
6 // This file is part of the GNU ISO C++ Library. This library is free
7 // software; you can redistribute it and/or modify it under the
8 // terms of the GNU General Public License as published by the
9 // Free Software Foundation; either version 2, or (at your option)
12 // This library is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 // GNU General Public License for more details.
17 // You should have received a copy of the GNU General Public License along
18 // with this library; see the file COPYING. If not, write to the Free
19 // Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
22 // As a special exception, you may use this file as part of a free software
23 // library without restriction. Specifically, if other files instantiate
24 // templates or use macros or inline functions from this file, or you compile
25 // this file and link it with other files to produce an executable, this
26 // file does not by itself cause the resulting executable to be covered by
27 // the GNU General Public License. This exception does not however
28 // invalidate any other reasons why the executable file might be covered by
29 // the GNU General Public License.
31 /** @file concurrence.h
32 * This is an internal header file, included by other library headers.
33 * You should not attempt to use it directly.
*/
36 #ifndef _CONCURRENCE_H
37 #define _CONCURRENCE_H 1
39 #include <bits/gthr.h>
40 #include <bits/functexcept.h>
42 _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
44 // Available locking policies:
45 // _S_single single-threaded code that doesn't need to be locked.
46 // _S_mutex multi-threaded code that requires additional support
47 // from gthr.h or abstraction layers in concurrence.h.
48 // _S_atomic multi-threaded code using atomic operations.
49 enum _Lock_policy { _S_single, _S_mutex, _S_atomic };
51 // Compile time constant that indicates preferred locking policy in
52 // the current configuration.
53 static const _Lock_policy __default_lock_policy =
// NOTE(review): the initializer's value and the matching #else/#endif
// branches are not visible in this chunk; the chosen policy depends on
// whether atomic builtins were detected at configure time.
55 // NB: This macro doesn't actually exist yet in the compiler, but is
56 // set somewhat haphazardly at configure time.
57 #ifdef _GLIBCXX_ATOMIC_BUILTINS
// Plain (non-recursive) mutex wrapper over the gthreads abstraction
// layer. NOTE(review): extraction gap — the enclosing "class __mutex"
// declaration, access specifiers and member-function signatures are
// not visible in this chunk; comments describe only the lines shown.
69 __gthread_mutex_t _M_mutex;
// Copy constructor and copy assignment declared but (presumably) not
// defined — the pre-C++11 idiom for a non-copyable type. TODO confirm
// these are in a private section in the full header.
71 __mutex(const __mutex&);
72 __mutex& operator=(const __mutex&);
// Initialization is performed only when the program is actually
// multi-threaded (__gthread_active_p()).
78 if (__gthread_active_p())
80 #if defined __GTHREAD_MUTEX_INIT
// Static initializer available: initialize via a temporary.
81 __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
// No static initializer: fall back to the runtime init function.
84 __GTHREAD_MUTEX_INIT_FUNCTION(_M_mutex);
// lock(): no-op in single-threaded programs; a nonzero gthreads
// return code is reported by throwing std::runtime_error.
93 if (__gthread_active_p())
95 if (__gthread_mutex_lock(&_M_mutex) != 0)
96 std::__throw_runtime_error("__mutex::lock");
// unlock(): same pattern — no-op when single-threaded, throws
// std::runtime_error if the gthreads call fails.
104 if (__gthread_active_p())
106 if (__gthread_mutex_unlock(&_M_mutex) != 0)
107 std::__throw_runtime_error("__mutex::unlock");
// Recursive mutex wrapper over the gthreads abstraction layer; unlike
// __mutex it may be re-locked by the thread that already owns it.
// NOTE(review): extraction gap — access specifiers, member-function
// signatures and the class's closing brace are not visible here.
113 class __recursive_mutex
116 __gthread_recursive_mutex_t _M_mutex;
// Non-copyable: copy operations declared but (presumably) not defined,
// the pre-C++11 idiom — TODO confirm they are private in the full header.
118 __recursive_mutex(const __recursive_mutex&);
119 __recursive_mutex& operator=(const __recursive_mutex&);
// Initialization only when the program is actually multi-threaded.
125 if (__gthread_active_p())
127 #if defined __GTHREAD_RECURSIVE_MUTEX_INIT
// Static initializer available: initialize via a temporary.
128 __gthread_recursive_mutex_t __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
// Otherwise use the runtime init function.
131 __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(_M_mutex);
// lock(): no-op when single-threaded; nonzero gthreads return code is
// reported by throwing std::runtime_error.
140 if (__gthread_active_p())
142 if (__gthread_recursive_mutex_lock(&_M_mutex) != 0)
143 std::__throw_runtime_error("__recursive_mutex::lock");
// unlock(): same pattern as lock().
151 if (__gthread_active_p())
153 if (__gthread_recursive_mutex_unlock(&_M_mutex) != 0)
154 std::__throw_runtime_error("__recursive_mutex::unlock");
160 /// @brief Scoped lock idiom.
161 // Acquire the mutex here with a constructor call, then release with
162 // the destructor call in accordance with RAII style.
// NOTE(review): extraction gap — the enclosing class declaration and
// access specifiers are not visible in this chunk.
// Locks a plain __mutex (non-recursive) for the object's lifetime.
166 typedef __mutex mutex_type;
// Reference to the mutex being held; bound once in the constructor.
169 mutex_type& _M_device;
// Non-copyable: copy operations declared but (presumably) not defined
// — pre-C++11 idiom; TODO confirm they are private in the full header.
171 __scoped_lock(const __scoped_lock&);
172 __scoped_lock& operator=(const __scoped_lock&);
// explicit prevents accidental implicit conversion from a mutex.
175 explicit __scoped_lock(mutex_type& __name) : _M_device(__name)
176 { _M_device.lock(); }
// Destructor is declared throw(); __mutex::unlock can throw via
// __throw_runtime_error only if gthreads reports failure — NOTE(review):
// presumed unreachable for a mutex this object itself locked; confirm.
178 ~__scoped_lock() throw()
179 { _M_device.unlock(); }
182 _GLIBCXX_END_NAMESPACE