1 /* Copyright (C) 2011 Free Software Foundation, Inc.
2 Contributed by Torvald Riegel <triegel@redhat.com>.
4 This file is part of the GNU Transactional Memory Library (libitm).
6 Libitm is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 Under Section 7 of GPL version 3, you are granted additional
17 permissions described in the GCC Runtime Library Exception, version
18 3.1, as published by the Free Software Foundation.
20 You should have received a copy of the GNU General Public License and
21 a copy of the GCC Runtime Library Exception along with this program;
22 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 <http://www.gnu.org/licenses/>. */
31 // This group consists of all TM methods that synchronize via just a single
32 // global lock (or ownership record).
33 struct gl_mg : public method_group
// Method-group state for the global-lock TM methods: a single global
// ownership record (orec) whose most-significant bit is the lock bit and
// whose remaining bits hold a version number / timestamp.
// NOTE(review): the embedded original line numbers jump (34, 37, 42,
// 45-47, 49, 51+ absent), so the struct's braces, access labels, and the
// init() method header are not visible in this dump.
// The lock bit is the most-significant bit of a gtm_word.
35 static const gtm_word LOCK_BIT = (~(gtm_word)0 >> 1) + 1;
36 // We can't use the full bitrange because ~0 in gtm_thread::shared_state has
// a special meaning (the serial lock uses ~0 for inactive transactions --
// see the comments further below), so VERSION_MAX stops one short of the
// largest unlocked value.
38 static const gtm_word VERSION_MAX = (~(gtm_word)0 >> 1) - 1;
// Helpers to test / set / clear the lock bit in an orec value.
39 static bool is_locked(gtm_word l) { return l & LOCK_BIT; }
40 static gtm_word set_locked(gtm_word l) { return l | LOCK_BIT; }
41 static gtm_word clear_locked(gtm_word l) { return l & ~LOCK_BIT; }
43 // The global ownership record.
44 atomic<gtm_word> orec;
// init(): start with an unlocked orec at version 0 (the surrounding lines
// of the method, original lines 45-47 and 49, are elided here).
48 orec.store(0, memory_order_relaxed);
50 virtual void fini() { }
56 // The global lock, write-through TM method.
57 // Acquires the orec eagerly before the first write, and then writes through.
58 // Reads abort if the global orec's version number changed or if it is locked.
59 // Currently, writes require undo-logging to prevent deadlock between the
60 // serial lock and the global orec (writer txn acquires orec, reader txn
61 // upgrades to serial and waits for all other txns, writer tries to upgrade to
62 // serial too but cannot, writer cannot abort either, deadlock). We could
63 // avoid this if the serial lock would allow us to prevent other threads from
64 // going to serial mode, but this probably is too much additional complexity
65 // just to optimize this TM method.
66 // gtm_thread::shared_state is used to store a transaction's current
67 // snapshot time (or commit time). The serial lock uses ~0 for inactive
68 // transactions and 0 for active ones. Thus, we always have a meaningful
69 // timestamp in shared_state that can be used to implement quiescence-based
70 // privatization safety. This even holds if a writing transaction has the
71 // lock bit set in its shared_state because this is fine for both the serial
72 // lock (the value will be smaller than ~0) and privatization safety (we
73 // validate that no other update transaction committed before we acquired the
74 // orec, so we have the most recent timestamp and no other transaction can
75 // commit until we have committed).
76 // However, we therefore cannot use this method for a serial transaction
77 // (because shared_state needs to remain at ~0) and we have to be careful
78 // when switching to serial mode (see the special handling in trycommit() and
80 // ??? This sharing adds some complexity wrt. serial mode. Just use a separate
82 class gl_wt_dispatch : public abi_dispatch
// Dispatch object for the global-lock, write-through TM method described in
// the comment block above: writers eagerly acquire the single global orec
// (o_gl_mg.orec) before their first write; readers validate against the
// orec's version number.
// NOTE(review): the embedded original line numbers are discontinuous
// (e.g. 83-84, 86, 90, 94, 99, 104, 107-112, 130-139, 141-148, 182-185,
// 206-208, 236-240 absent), so braces, access labels, and several
// statements are not visible in this dump; the notes below only describe
// what the visible lines show.
// Called before the first non-WaW write: validates the snapshot and
// acquires the global orec (addr/len are presumably consumed by the elided
// undo-logging code around original line 108 -- TODO confirm against the
// full file).
85 static void pre_write(const void *addr, size_t len)
87 gtm_thread *tx = gtm_thr();
88 gtm_word v = tx->shared_state.load(memory_order_acquire);
// Only acquire the orec if we do not already hold it (lock bit not yet
// set in our published shared_state).
89 if (unlikely(!gl_mg::is_locked(v)))
91 // Check for and handle version number overflow.
92 if (unlikely(v >= gl_mg::VERSION_MAX))
93 tx->restart(RESTART_INIT_METHOD_GROUP);
95 // CAS global orec from our snapshot time to the locked state.
96 // This validates that we have a consistent snapshot, which is also
97 // necessary for making privatization safety work (see the class' comments).
98 gtm_word now = o_gl_mg.orec.load(memory_order_relaxed);
// NOTE(review): the condition guarding the next restart (original line 99,
// presumably comparing `now` against the snapshot `v`) is elided here.
100 tx->restart(RESTART_VALIDATE_WRITE);
101 if (!o_gl_mg.orec.compare_exchange_strong (now, gl_mg::set_locked(now),
102 memory_order_acquire))
103 tx->restart(RESTART_LOCKED_WRITE);
105 // Set shared_state to new value.
106 tx->shared_state.store(gl_mg::set_locked(now), memory_order_release);
109 // TODO Ensure that this gets inlined: Use internal log interface and LTO.
// Checks that the global orec still matches this transaction's published
// snapshot time; restarts the transaction otherwise.
113 static void validate()
115 // Check that snapshot is consistent. The barrier ensures that this
116 // happens after previous data loads. Recall that load cannot itself
117 // have memory_order_release.
118 gtm_thread *tx = gtm_thr();
119 atomic_thread_fence(memory_order_release);
120 gtm_word l = o_gl_mg.orec.load(memory_order_relaxed);
121 if (l != tx->shared_state.load(memory_order_relaxed))
122 tx->restart(RESTART_VALIDATE_READ);
// Transactional load barrier: lock eagerly on read-for-write, and validate
// after the data load unless it is a read-after-write (the actual load and
// return, original lines 130-139, are elided here).
125 template <typename V> static V load(const V* addr, ls_modifier mod)
127 // Read-for-write should be unlikely, but we need to handle it or will
128 // break later WaW optimizations.
129 if (unlikely(mod == RfW))
131 pre_write(addr, sizeof(V));
135 if (likely(mod != RaW))
// Transactional store barrier: go through pre_write() (lock + undo log)
// unless this is a write-after-write (remainder of the signature and the
// actual store, original lines 141-148, are elided here).
140 template <typename V> static void store(V* addr, const V value,
143 if (unlikely(mod != WaW))
144 pre_write(addr, sizeof(V));
// memcpy/memmove barrier; whether pre_write()/validation runs depends on
// the src/dst modifiers (several statements between the visible lines,
// e.g. original lines 155-156, 158, 160, 162-165, are elided here).
149 static void memtransfer_static(void *dst, const void* src, size_t size,
150 bool may_overlap, ls_modifier dst_mod, ls_modifier src_mod)
152 if ((dst_mod != WaW && src_mod != RaW)
153 && (dst_mod != NONTXNAL || src_mod == RfW))
154 pre_write(dst, size);
157 ::memcpy(dst, src, size);
159 ::memmove(dst, src, size);
161 if (src_mod != RfW && src_mod != RaW && src_mod != NONTXNAL
// memset barrier (original lines 167-168, presumably a modifier check
// guarding the pre_write() call, are elided here).
166 static void memset_static(void *dst, int c, size_t size, ls_modifier mod)
169 pre_write(dst, size);
170 ::memset(dst, c, size);
// Starts or restarts a transaction: spins until the global orec is
// unlocked, then publishes the snapshot time via shared_state.
173 virtual gtm_restart_reason begin_or_restart()
175 // We don't need to do anything for nested transactions.
176 gtm_thread *tx = gtm_thr();
177 if (tx->parent_txns.size() > 0)
180 // Spin until global orec is not locked.
181 // TODO This is not necessary if there are no pure loads (check txn props).
// NOTE(review): the declarations of v and i and the loop header (original
// lines 182-185) are elided in this dump.
186 v = o_gl_mg.orec.load(memory_order_acquire);
187 if (!gl_mg::is_locked(v))
189 // TODO need method-specific max spin count
190 if (++i > gtm_spin_count_var)
191 return RESTART_VALIDATE_READ;
195 // Everything is okay, we have a snapshot time.
196 // We don't need to enforce any ordering for the following store. There
197 // are no earlier data loads in this transaction, so the store cannot
198 // become visible before those (which could lead to the violation of
199 // privatization safety). The store can become visible after later loads
200 // but this does not matter because the previous value will have been
201 // smaller or equal (the serial lock will set shared_state to zero when
202 // marking the transaction as active, and restarts enforce immediate
203 // visibility of a smaller or equal value with a barrier (see
205 tx->shared_state.store(v, memory_order_relaxed);
// Commits the transaction; for update transactions, releases the orec with
// an incremented version number (original lines 236-240, presumably the
// privatization-safety handling that sets priv_time, are elided here).
209 virtual bool trycommit(gtm_word& priv_time)
211 gtm_thread* tx = gtm_thr();
212 gtm_word v = tx->shared_state.load(memory_order_acquire);
214 // Special case: If shared_state is ~0, then we have acquired the
215 // serial lock (tx->state is not updated yet). In this case, the previous
216 // value isn't available anymore, so grab it from the global lock, which
217 // must have a meaningful value because no other transactions are active
218 // anymore. In particular, if it is locked, then we are an update
219 // transaction, which is all we care about for commit.
220 if (v == ~(typeof v)0)
221 v = o_gl_mg.orec.load(memory_order_relaxed);
223 // Release the orec but do not reset shared_state, which will be modified
224 // by the serial lock right after our commit anyway. Also, resetting
225 // shared state here would interfere with the serial lock's use of this
227 if (gl_mg::is_locked(v))
229 // Release the global orec, increasing its version number / timestamp.
230 v = gl_mg::clear_locked(v) + 1;
231 o_gl_mg.orec.store(v, memory_order_release);
233 // Need to ensure privatization safety. Every other transaction must
234 // have a snapshot time that is at least as high as our commit time
235 // (i.e., our commit must be visible to them).
// Rolls back the transaction: releases the orec if held and, for
// non-serial transactions, republishes a correct snapshot time.
241 virtual void rollback(gtm_transaction_cp *cp)
243 // We don't do anything for rollbacks of nested transactions.
247 gtm_thread *tx = gtm_thr();
248 gtm_word v = tx->shared_state.load(memory_order_acquire);
249 // Special case: If shared_state is ~0, then we have acquired the
250 // serial lock (tx->state is not updated yet). In this case, the previous
251 // value isn't available anymore, so grab it from the global lock, which
252 // must have a meaningful value because no other transactions are active
253 // anymore. In particular, if it is locked, then we are an update
254 // transaction, which is all we care about for rollback.
255 bool is_serial = v == ~(typeof v)0;
// NOTE(review): the condition guarding the next load (original line 256,
// presumably testing is_serial) is elided in this dump.
257 v = o_gl_mg.orec.load(memory_order_relaxed);
259 // Release lock and increment version number to prevent dirty reads.
260 // Also reset shared state here, so that begin_or_restart() can expect a
261 // value that is correct wrt. privatization safety.
262 if (gl_mg::is_locked(v))
264 // Release the global orec, increasing its version number / timestamp.
265 v = gl_mg::clear_locked(v) + 1;
266 o_gl_mg.orec.store(v, memory_order_release);
268 // Also reset the timestamp published via shared_state.
269 // Special case: Only do this if we are not a serial transaction
270 // because otherwise, we would interfere with the serial lock.
272 tx->shared_state.store(v, memory_order_relaxed);
274 // We need a store-load barrier after this store to prevent it
275 // from becoming visible after later data loads because the
276 // previous value of shared_state has been higher than the actual
277 // snapshot time (the lock bit had been set), which could break
278 // privatization safety. We do not need a barrier before this
279 // store (see pre_write() for an explanation).
280 atomic_thread_fence(memory_order_acq_rel);
// Generate the virtual load/store/mem* entry points from the static
// template barriers above (macros defined elsewhere in libitm).
285 CREATE_DISPATCH_METHODS(virtual, )
286 CREATE_DISPATCH_METHODS_MEM()
// Constructor: passes this method's abi_dispatch capability flags and the
// method group (the constructor body, original lines 289+, is elided here).
288 gl_wt_dispatch() : abi_dispatch(false, true, false, false, &o_gl_mg)
// The singleton dispatch object for this method.
294 static const gl_wt_dispatch o_gl_wt_dispatch;
// Factory: returns the singleton global-lock write-through dispatch
// object. NOTE(review): the function's return type, namespace/brace lines,
// and closing brace (original lines 295-296, 298, 300) are not visible in
// this dump; const_cast is used because the singleton itself is const.
297 GTM::dispatch_gl_wt ()
299 return const_cast<gl_wt_dispatch *>(&o_gl_wt_dispatch);