/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* errno values used below, as defined by the PA-RISC Linux kernel ABI.  */
#define EFAULT  14
#define ENOSYS 251

/* All PA-RISC implementations supported by Linux have strongly
   ordered loads and stores.  Only cache flushes and purges can be
   delayed.  The data cache implementations are all globally
   coherent.  Thus, there is no need to synchronize memory accesses.

   GCC automatically issues an asm memory barrier when it encounters
   a __sync_synchronize builtin.  Thus, we do not need to define this
   builtin.

   We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */
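
/* Illustrative sketch (hypothetical user code, never compiled into libgcc):
   on this target GCC expands the generic __sync builtins into calls to the
   size-suffixed entry points defined in this file, chosen by the width of
   the operand.  */
#if 0
static int
example_counters (int *word_counter, short *sub_counter)
{
  /* 4-byte operand: resolves to __sync_fetch_and_add_4 below.  */
  int old_word = __sync_fetch_and_add (word_counter, 1);
  /* 2-byte operand: resolves to __sync_fetch_and_add_2, which rewrites the
     containing aligned word through the kernel compare-and-exchange helper.  */
  short old_sub = __sync_fetch_and_add (sub_counter, 1);
  return old_word + old_sub;
}
#endif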

/* A privileged instruction to crash a userspace program with SIGILL.  */
#define ABORT_INSTRUCTION asm ("iitlbp %r0,(%sr0, %r0)")

/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(unsigned long) == 4 ? 0 : 1)

/* Kernel helper for compare-and-exchange a 32-bit value.  */
static inline long
__kernel_cmpxchg (int oldval, int newval, int *mem)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register long lws_ret   asm("r28");
  register long lws_errno asm("r21");
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
  asm volatile ( "ble	0xb0(%%sr2, %%r0)	\n\t"
		 "ldi	%5, %%r20		\n\t"
	: "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
	  "=r" (lws_old), "=r" (lws_new)
	: "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );
  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
    ABORT_INSTRUCTION;
  return lws_errno;
}
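
/* The helper returns zero when the new value was stored and a nonzero
   (negative errno) value otherwise; -EFAULT and -ENOSYS are treated as
   fatal above.  Every routine below therefore wraps it in the same retry
   loop.  A minimal sketch of that pattern (illustration only):  */
#if 0
static int
example_atomic_increment (int *p)
{
  int old, failure;
  do
    {
      old = *p;					   /* sample current value     */
      failure = __kernel_cmpxchg (old, old + 1, p); /* try to publish old + 1  */
    }
  while (failure != 0);				   /* lost a race: retry       */
  return old;
}
#endif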

#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu
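
/* Worked illustration of how the subword helpers below use these masks
   (sketch only, never compiled): on big-endian PA the byte at the lowest
   address is the most significant byte of its word, so the shift is the
   byte offset scaled to bits and then XORed with the INVERT_MASK value.
   Byte offset 0 yields shift 24 (bits 31..24); offset 3 yields shift 0
   (bits 7..0).  */
#if 0
static unsigned int
example_mask_for_byte (const unsigned char *ptr)
{
  unsigned int shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_1;
  return MASK_1 << shift;	/* mask selecting that byte within its word  */
}
#endif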

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_fetch_and_##OP##_4 (int *ptr, int val)				\
  {									\
    int failure, tmp;							\
    do {								\
      tmp = *ptr;							\
      failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr);	\
    } while (failure != 0);						\
    return tmp;								\
  }

FETCH_AND_OP_WORD (add,   , +)
FETCH_AND_OP_WORD (sub,   , -)
FETCH_AND_OP_WORD (or,    , |)
FETCH_AND_OP_WORD (and,   , &)
FETCH_AND_OP_WORD (xor,   , ^)
FETCH_AND_OP_WORD (nand, ~, &)

#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH

/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
   subword-sized quantities.  */

#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN)	\
  TYPE HIDDEN								\
  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val)			\
  {									\
    int *wordptr = (int *) ((unsigned long) ptr & ~3);			\
    unsigned int mask, shift, oldval, newval;				\
    int failure;							\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
    do {								\
      oldval = *wordptr;						\
      newval = ((PFX_OP ((oldval & mask) >> shift)			\
		 INF_OP (unsigned int) val) << shift) & mask;		\
      newval |= oldval & ~mask;						\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
    return (RETURN & mask) >> shift;					\
  }

SUBWORD_SYNC_OP (add,   , +, short, 2, oldval)
SUBWORD_SYNC_OP (sub,   , -, short, 2, oldval)
SUBWORD_SYNC_OP (or,    , |, short, 2, oldval)
SUBWORD_SYNC_OP (and,   , &, short, 2, oldval)
SUBWORD_SYNC_OP (xor,   , ^, short, 2, oldval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, oldval)

SUBWORD_SYNC_OP (add,   , +, char, 1, oldval)
SUBWORD_SYNC_OP (sub,   , -, char, 1, oldval)
SUBWORD_SYNC_OP (or,    , |, char, 1, oldval)
SUBWORD_SYNC_OP (and,   , &, char, 1, oldval)
SUBWORD_SYNC_OP (xor,   , ^, char, 1, oldval)
SUBWORD_SYNC_OP (nand, ~, &, char, 1, oldval)

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_##OP##_and_fetch_4 (int *ptr, int val)				\
  {									\
    int tmp, failure;							\
    do {								\
      tmp = *ptr;							\
      failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr);	\
    } while (failure != 0);						\
    return PFX_OP tmp INF_OP val;					\
  }

OP_AND_FETCH_WORD (add,   , +)
OP_AND_FETCH_WORD (sub,   , -)
OP_AND_FETCH_WORD (or,    , |)
OP_AND_FETCH_WORD (and,   , &)
OP_AND_FETCH_WORD (xor,   , ^)
OP_AND_FETCH_WORD (nand, ~, &)
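
/* The two macro families differ only in what they hand back: the
   FETCH_AND_OP_WORD routines return the value observed before the update,
   the OP_AND_FETCH_WORD routines return the freshly computed result.
   Illustration only (hypothetical values, never compiled):  */
#if 0
static void
example_return_values (void)
{
  int v = 5;
  int before = __sync_fetch_and_add_4 (&v, 3);	/* before == 5,  v == 8  */
  int after  = __sync_add_and_fetch_4 (&v, 3);	/* after  == 11, v == 11 */
  (void) before; (void) after;
}
#endif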

SUBWORD_SYNC_OP (add,   , +, short, 2, newval)
SUBWORD_SYNC_OP (sub,   , -, short, 2, newval)
SUBWORD_SYNC_OP (or,    , |, short, 2, newval)
SUBWORD_SYNC_OP (and,   , &, short, 2, newval)
SUBWORD_SYNC_OP (xor,   , ^, short, 2, newval)
SUBWORD_SYNC_OP (nand, ~, &, short, 2, newval)

SUBWORD_SYNC_OP (add,   , +, char, 1, newval)
SUBWORD_SYNC_OP (sub,   , -, char, 1, newval)
SUBWORD_SYNC_OP (or,    , |, char, 1, newval)
SUBWORD_SYNC_OP (and,   , &, char, 1, newval)
SUBWORD_SYNC_OP (xor,   , ^, char, 1, newval)
SUBWORD_SYNC_OP (nand, ~, &, char, 1, newval)

int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int actual_oldval, fail;

  while (1)
    {
      actual_oldval = *ptr;

      if (oldval != actual_oldval)
	return actual_oldval;

      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);

      if (!fail)
	return oldval;
    }
}

#define SUBWORD_VAL_CAS(TYPE, WIDTH)					\
  TYPE HIDDEN								\
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
				       TYPE newval)			\
  {									\
    int *wordptr = (int *)((unsigned long) ptr & ~3), fail;		\
    unsigned int mask, shift, actual_oldval, actual_newval;		\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
    while (1)								\
      {									\
	actual_oldval = *wordptr;					\
	if (((actual_oldval & mask) >> shift) != (unsigned int) oldval)	\
	  return (actual_oldval & mask) >> shift;			\
	actual_newval = (actual_oldval & ~mask)				\
			| (((unsigned int) newval << shift) & mask);	\
	fail = __kernel_cmpxchg (actual_oldval, actual_newval,		\
				 wordptr);				\
	if (!fail)							\
	  return oldval;						\
      }									\
  }

SUBWORD_VAL_CAS (short, 2)
SUBWORD_VAL_CAS (char,  1)

typedef unsigned char bool;

bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int failure = __kernel_cmpxchg (oldval, newval, ptr);
  return (failure == 0);
}

#define SUBWORD_BOOL_CAS(TYPE, WIDTH)					\
  bool HIDDEN								\
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,	\
					TYPE newval)			\
  {									\
    TYPE actual_oldval							\
      = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval);	\
    return (oldval == actual_oldval);					\
  }

SUBWORD_BOOL_CAS (short, 2)
SUBWORD_BOOL_CAS (char,  1)
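
/* Usage sketch (illustration only): the _bool_ form reports whether the
   swap happened, while the _val_ form reports the value that was actually
   seen, which is what lock-free update loops typically want.  */
#if 0
static void
example_increment_with_cas (int *p)
{
  int seen, old;
  do
    {
      seen = *p;
      /* __sync_val_compare_and_swap_4 returns the value it found; the
	 update took effect only when that equals `seen'.  */
      old = __sync_val_compare_and_swap_4 (p, seen, seen + 1);
    }
  while (old != seen);
}
#endif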

int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int failure, oldval;
  do {
    oldval = *ptr;
    failure = __kernel_cmpxchg (oldval, val, ptr);
  } while (failure != 0);
  return oldval;
}

#define SUBWORD_TEST_AND_SET(TYPE, WIDTH)				\
  TYPE HIDDEN								\
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val)		\
  {									\
    int failure;							\
    unsigned int oldval, newval, shift, mask;				\
    int *wordptr = (int *) ((unsigned long) ptr & ~3);			\
    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH;	\
    mask = MASK_##WIDTH << shift;					\
    do {								\
      oldval = *wordptr;						\
      newval = (oldval & ~mask)						\
	       | (((unsigned int) val << shift) & mask);		\
      failure = __kernel_cmpxchg (oldval, newval, wordptr);		\
    } while (failure != 0);						\
    return (oldval & mask) >> shift;					\
  }

SUBWORD_TEST_AND_SET (short, 2)
SUBWORD_TEST_AND_SET (char,  1)

#define SYNC_LOCK_RELEASE(TYPE, WIDTH)					\
  void HIDDEN								\
  __sync_lock_release_##WIDTH (TYPE *ptr)				\
  {									\
    *ptr = 0;								\
  }

SYNC_LOCK_RELEASE (int,   4)
SYNC_LOCK_RELEASE (short, 2)
SYNC_LOCK_RELEASE (char,  1)
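
/* Closing usage sketch (illustration only): __sync_lock_test_and_set_4 and
   __sync_lock_release_4 together give a minimal spinlock.  On this target a
   plain store is enough for the release, as the comment at the top of the
   file explains (loads and stores are strongly ordered).  */
#if 0
static void
example_spinlock (int *lock)
{
  /* Spin until the previous value was 0, i.e. we changed it from 0 to 1.  */
  while (__sync_lock_test_and_set_4 (lock, 1) != 0)
    ;
  /* ...critical section...  */
  __sync_lock_release_4 (lock);	/* stores 0, making the lock free again  */
}
#endif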