see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
-/*
- This file implements objc_sync_enter() and objc_sync_exit(), the
- two functions required to support @synchronized().
+/* This file implements objc_sync_enter() and objc_sync_exit(), the
+ two functions required to support @synchronized().
- objc_sync_enter(object) needs to get a recursive lock associated
- with 'object', and lock it.
-
- objc_sync_exit(object) needs to get the recursive lock associated
- with 'object', and unlock it.
- */
+ objc_sync_enter(object) needs to get a recursive lock associated
+ with 'object', and lock it.
+
+ objc_sync_exit(object) needs to get the recursive lock associated
+ with 'object', and unlock it. */
/* To avoid the overhead of continuously allocating and deallocating
locks, we implement a pool of locks. When a lock is needed for an
which is already held by the current thread without having to use
any protection lock or synchronization mechanism. It can so detect
recursive locks/unlocks, and transform them into no-ops that
- require no actual locking or synchronization mechanisms at all.
-*/
+ require no actual locking or synchronization mechanisms at all. */
/* You can disable the thread-local cache (most likely to benchmark
the code with and without it) by compiling with
- -DSYNC_CACHE_DISABLE, or commenting out the following line.
- */
+ -DSYNC_CACHE_DISABLE, or commenting out the following line. */
/* #define SYNC_CACHE_DISABLE */
/* If thread-local storage is not available, automatically disable the
- cache.
-*/
+ cache. */
#ifndef HAVE_TLS
# define SYNC_CACHE_DISABLE
#endif
/* We have 32 pools of locks, each of them protected by its own
protection lock. It's tempting to increase this number to reduce
- contention; but in our tests it is high enough.
- */
+ contention; but in our tests it is high enough. */
#define SYNC_NUMBER_OF_POOLS 32
/* Given an object, it determines which pool contains the associated
- lock.
- */
+ lock. */
#define SYNC_OBJECT_HASH(OBJECT) ((((size_t)OBJECT >> 8) ^ (size_t)OBJECT) & (SYNC_NUMBER_OF_POOLS - 1))
/* The locks protecting each pool. */
because in that case you know that node->usage_count can't get to
zero until you release the lock. It is valid to have usage_count
== 0 and object != nil; in that case, the lock is not currently
- being used, but is still currently associated with the object.
- */
+ being used, but is still currently associated with the
+ object. */
id object;
/* This is a counter reserved for use by the thread currently
require any synchronization with other threads, since it's
protected by the node->lock itself) instead of the usage_count
(which requires locking the pool protection lock). And it can
- skip the call to objc_mutex_lock/unlock too.
- */
+ skip the call to objc_mutex_lock/unlock too. */
unsigned int recursive_usage_count;
} *lock_node_ptr;
/* The pools of locks. Each of them is a linked list of lock_nodes.
- In the list we keep both unlocked and locked nodes.
- */
+ In the list we keep both unlocked and locked nodes. */
static lock_node_ptr sync_pool_array[SYNC_NUMBER_OF_POOLS];
#ifndef SYNC_CACHE_DISABLE
/* We store a cache of locks acquired by each thread in thread-local
- storage.
-*/
+ storage. */
static __thread lock_node_ptr *lock_cache = NULL;
/* This is a conservative implementation that uses a static array of
first 8 get the speed benefits of the cache, but the cache remains
always small, fast and predictable.
- SYNC_CACHE_SIZE is the size of the lock cache for each thread.
- */
+ SYNC_CACHE_SIZE is the size of the lock cache for each thread. */
#define SYNC_CACHE_SIZE 8
#endif /* SYNC_CACHE_DISABLE */
lock_node_ptr unused_node;
if (object == nil)
- {
- return OBJC_SYNC_SUCCESS;
- }
+ return OBJC_SYNC_SUCCESS;
#ifndef SYNC_CACHE_DISABLE
if (lock_cache == NULL)
{
/* Note that this calloc only happen only once per thread, the
- very first time a thread does a objc_sync_enter().
- */
+	 very first time a thread does an objc_sync_enter(). */
lock_cache = objc_calloc (SYNC_CACHE_SIZE, sizeof (lock_node_ptr));
}
/* Check the cache to see if we have a record of having already
locked the lock corresponding to this object. While doing so,
- keep track of the first free cache node in case we need it later.
- */
+ keep track of the first free cache node in case we need it
+ later. */
node = NULL;
free_cache_slot = -1;
if (locked_node == NULL)
{
if (free_cache_slot == -1)
- {
- free_cache_slot = i;
- }
+ free_cache_slot = i;
}
else if (locked_node->object == object)
{
if (node != NULL)
{
/* We found the lock. Increase recursive_usage_count, which is
- protected by node->lock, which we already hold.
- */
+ protected by node->lock, which we already hold. */
node->recursive_usage_count++;
/* There is no need to actually lock anything, since we already
hold the lock. Correspondingly, objc_sync_exit() will just
- decrease recursive_usage_count and do nothing to unlock.
- */
+ decrease recursive_usage_count and do nothing to unlock. */
return OBJC_SYNC_SUCCESS;
}
#endif /* SYNC_CACHE_DISABLE */
/* The following is the standard lookup for the lock in the standard
- pool lock. It requires a pool protection lock.
- */
+ pool lock. It requires a pool protection lock. */
hash = SYNC_OBJECT_HASH(object);
/* Search for an existing lock for 'object'. While searching, make
- note of any unused lock if we find any.
- */
+ note of any unused lock if we find any. */
unused_node = NULL;
objc_mutex_lock (sync_pool_protection_locks[hash]);
#ifndef SYNC_CACHE_DISABLE
/* Put it in the cache. */
if (free_cache_slot != -1)
- {
- lock_cache[free_cache_slot] = node;
- }
+ lock_cache[free_cache_slot] = node;
#endif
/* Lock it. */
#ifndef SYNC_CACHE_DISABLE
if (free_cache_slot != -1)
- {
- lock_cache[free_cache_slot] = unused_node;
- }
+ lock_cache[free_cache_slot] = unused_node;
#endif
objc_mutex_lock (unused_node->lock);
#ifndef SYNC_CACHE_DISABLE
if (free_cache_slot != -1)
- {
- lock_cache[free_cache_slot] = new_node;
- }
+ lock_cache[free_cache_slot] = new_node;
#endif
objc_mutex_lock (new_node->lock);
lock_node_ptr node;
if (object == nil)
- {
- return OBJC_SYNC_SUCCESS;
- }
+ return OBJC_SYNC_SUCCESS;
#ifndef SYNC_CACHE_DISABLE
if (lock_cache != NULL)
/* Note that, if a node was found in the cache, the variable i
now holds the index where it was found, which will be used to
remove it from the cache. */
-
if (node != NULL)
{
if (node->recursive_usage_count > 0)
hash = SYNC_OBJECT_HASH(object);
/* TODO: If we had atomic increase/decrease operations
- with memory barriers, we could avoid the lock here!
- */
+ with memory barriers, we could avoid the lock
+ here! */
objc_mutex_lock (sync_pool_protection_locks[hash]);
node->usage_count--;
/* Normally, we do not reset object to nil here. We'll
object from being released. In that case, we remove
it (TODO: maybe we should avoid using the garbage
collector at all ? Nothing is ever deallocated in
- this file).
- */
+ this file). */
#if OBJC_WITH_GC
node->object = nil;
#endif
objc_mutex_unlock (node->lock), the pool is unlocked
so other threads may allocate this same lock to
another object (!). This is not a problem, but it is
- curious.
- */
+ curious. */
objc_mutex_unlock (node->lock);
/* Remove the node from the cache. */
objc_mutex_unlock (node->lock);
/* No need to remove the node from the cache, since it
- wasn't found in the cache when we looked for it!
- */
-
+ wasn't found in the cache when we looked for it! */
return OBJC_SYNC_SUCCESS;
}