#include <string.h>
#define FPSCR_SR (1 << 20)
-#define STORE_FPSCR(x) __asm__ volatile("sts fpscr, %0" : "=r"(x))
-#define LOAD_FPSCR(x) __asm__ volatile("lds %0, fpscr" : : "r"(x))
+#define STORE_FPSCR(x) __asm__ __volatile__("sts fpscr, %0" : "=r"(x))
+#define LOAD_FPSCR(x) __asm__ __volatile__("lds %0, fpscr" : : "r"(x))
static void fpu_optimised_copy_fwd(void *dest, const void *src, size_t len)
{
LOAD_FPSCR(FPSCR_SR);
while (len >= 32) {
- __asm__ volatile ("fmov @%0+,dr0":"+r" (s1));
- __asm__ volatile ("fmov @%0+,dr2":"+r" (s1));
- __asm__ volatile ("fmov @%0+,dr4":"+r" (s1));
- __asm__ volatile ("fmov @%0+,dr6":"+r" (s1));
+ __asm__ __volatile__ ("fmov @%0+,dr0":"+r" (s1));
+ __asm__ __volatile__ ("fmov @%0+,dr2":"+r" (s1));
+ __asm__ __volatile__ ("fmov @%0+,dr4":"+r" (s1));
+ __asm__ __volatile__ ("fmov @%0+,dr6":"+r" (s1));
__asm__
- volatile ("fmov dr0,@%0"::"r"
+ __volatile__ ("fmov dr0,@%0"::"r"
(d1):"memory");
d1 += 2;
__asm__
- volatile ("fmov dr2,@%0"::"r"
+ __volatile__ ("fmov dr2,@%0"::"r"
(d1):"memory");
d1 += 2;
__asm__
- volatile ("fmov dr4,@%0"::"r"
+ __volatile__ ("fmov dr4,@%0"::"r"
(d1):"memory");
d1 += 2;
__asm__
- volatile ("fmov dr6,@%0"::"r"
+ __volatile__ ("fmov dr6,@%0"::"r"
(d1):"memory");
d1 += 2;
len -= 32;
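The loop above depends on locals the excerpt does not show; a sketch of the presumed setup (my assumption, not part of the patch):

  /* FPSCR bit 20, set by LOAD_FPSCR (FPSCR_SR), makes fmov transfer
     64-bit pairs, so each dr move copies 8 bytes and the loop moves
     32 bytes per iteration.  The d1 += 2 stride suggests 4-byte
     element pointers: */
  const float *s1 = (const float *) src;
  float *d1 = (float *) dest;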
__inline_mathcodeNP (floor, __x, \
register long double __value; \
- __volatile unsigned short int __cw; \
- __volatile unsigned short int __cwtmp; \
+ __volatile__ unsigned short int __cw; \
+ __volatile__ unsigned short int __cwtmp; \
__asm__ __volatile__ ("fnstcw %0" : "=m" (__cw)); \
__cwtmp = (__cw & 0xf3ff) | 0x0400; /* rounding down */ \
__asm__ __volatile__ ("fldcw %0" : : "m" (__cwtmp)); \
__inline_mathcodeNP (ceil, __x, \
register long double __value; \
- __volatile unsigned short int __cw; \
- __volatile unsigned short int __cwtmp; \
+ __volatile__ unsigned short int __cw; \
+ __volatile__ unsigned short int __cwtmp; \
__asm__ __volatile__ ("fnstcw %0" : "=m" (__cw)); \
__cwtmp = (__cw & 0xf3ff) | 0x0800; /* rounding up */ \
__asm__ __volatile__ ("fldcw %0" : : "m" (__cwtmp)); \
register long _r15 __asm__ ("r15") = name; \
long _retval; \
LOAD_REGS_##nr \
- __asm __volatile (BREAK_INSN (__IA64_BREAK_SYSCALL) \
+ __asm__ __volatile__ (BREAK_INSN (__IA64_BREAK_SYSCALL) \
: "=r" (_r8), "=r" (_r10), "=r" (_r15) \
ASM_OUTARGS_##nr \
: "2" (_r15) ASM_ARGS_##nr \
{ \
register long __v0 __asm__("$2") ncs_init; \
register long __a3 __asm__("$7"); \
- __asm__ volatile ( \
+ __asm__ __volatile__ ( \
".set\tnoreorder\n\t" \
cs_init \
"syscall\n\t" \
register long __v0 __asm__("$2") ncs_init; \
register long __a0 __asm__("$4") = (long) arg1; \
register long __a3 __asm__("$7"); \
- __asm__ volatile ( \
+ __asm__ __volatile__ ( \
".set\tnoreorder\n\t" \
cs_init \
"syscall\n\t" \
register long __a0 __asm__("$4") = (long) arg1; \
register long __a1 __asm__("$5") = (long) arg2; \
register long __a3 __asm__("$7"); \
- __asm__ volatile ( \
+ __asm__ __volatile__ ( \
".set\tnoreorder\n\t" \
cs_init \
"syscall\n\t" \
register long __a1 __asm__("$5") = (long) arg2; \
register long __a2 __asm__("$6") = (long) arg3; \
register long __a3 __asm__("$7"); \
- __asm__ volatile ( \
+ __asm__ __volatile__ ( \
".set\tnoreorder\n\t" \
cs_init \
"syscall\n\t" \
register long __a1 __asm__("$5") = (long) arg2; \
register long __a2 __asm__("$6") = (long) arg3; \
register long __a3 __asm__("$7") = (long) arg4; \
- __asm__ volatile ( \
+ __asm__ __volatile__ ( \
".set\tnoreorder\n\t" \
cs_init \
"syscall\n\t" \
register long __a1 __asm__("$5") = (long) arg2; \
register long __a2 __asm__("$6") = (long) arg3; \
register long __a3 __asm__("$7") = (long) arg4; \
- __asm__ volatile ( \
+ __asm__ __volatile__ ( \
".set\tnoreorder\n\t" \
"subu\t$29, 32\n\t" \
"sw\t%6, 16($29)\n\t" \
register long __a1 __asm__("$5") = (long) arg2; \
register long __a2 __asm__("$6") = (long) arg3; \
register long __a3 __asm__("$7") = (long) arg4; \
- __asm__ volatile ( \
+ __asm__ __volatile__ ( \
".set\tnoreorder\n\t" \
"subu\t$29, 32\n\t" \
"sw\t%6, 16($29)\n\t" \
register long __a1 __asm__("$5") = (long) arg2; \
register long __a2 __asm__("$6") = (long) arg3; \
register long __a3 __asm__("$7") = (long) arg4; \
- __asm__ volatile ( \
+ __asm__ __volatile__ ( \
".set\tnoreorder\n\t" \
"subu\t$29, 32\n\t" \
"sw\t%6, 16($29)\n\t" \
* So if the build is using -mcpu=[power4,power5,power5+,970] we can
* safely use lwsync.
*/
-# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
+# define atomic_read_barrier() __asm__ ("lwsync" ::: "memory")
/*
* "light weight" sync can also be used for the release barrier.
*/
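In line with that comment, the release-side definition plausibly reads (a sketch; the actual line falls outside this excerpt):

# define __ARCH_REL_INSTR	"lwsync"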
#define __arch_compare_and_exchange_n(mem, newval, oldval, bwl, version) \
({ signed long __result; \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
*/
#define __arch_operate_old_new_n(mem, value, old, new, bwl, oper) \
- (void) ({ __asm __volatile ("\
+ (void) ({ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
mov r15,r1\n\
#define __arch_operate_new_n(mem, value, bwl, oper) \
({ int32_t __value = (value), __new; \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
mov r15,r1\n\
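Aside: the "mova 1f,r0" / "mov r15,r1" preludes above are SH's "gUSA" restartable-atomic-sequence convention. My sketch of the shape (the exact SP offset varies per macro, and this is inferred from the fragments, not a verbatim expansion):

	mova	1f, r0		! r0 = address just past the sequence
	mov	r15, r1		! save SP; the kernel restores it on restart
	mov	#-8, r15	! negative SP marks "inside atomic sequence"
0:	<load, test/modify, store>
1:	mov	r1, r15		! restore SP, sequence complete

If the kernel preempts the task between 0: and 1:, it restarts the sequence from the top rather than letting a half-done update be observed.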
register long int r3 __asm__ ("%r3") = (name); \
SUBSTITUTE_ARGS_##nr(args); \
\
- __asm__ volatile (SYSCALL_INST_STR##nr SYSCALL_INST_PAD \
+ __asm__ __volatile__ (SYSCALL_INST_STR##nr SYSCALL_INST_PAD \
: "=z" (resultvar) \
: "r" (r3) ASMFMT_##nr \
: "memory"); \
unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \
& 63; \
do \
- __asm __volatile ("ldstub %1, %0" \
+ __asm__ __volatile__ ("ldstub %1, %0" \
: "=r" (__old_lock), \
"=m" (__sparc32_atomic_locks[__idx]) \
: "m" (__sparc32_atomic_locks[__idx]) \
{ \
__sparc32_atomic_locks[(((long) addr >> 2) \
^ ((long) addr >> 12)) & 63] = 0; \
- __asm __volatile ("" ::: "memory"); \
+ __asm__ __volatile__ ("" ::: "memory"); \
} \
while (0)
{ \
unsigned int __old_lock; \
do \
- __asm __volatile ("ldstub %1, %0" \
+ __asm__ __volatile__ ("ldstub %1, %0" \
: "=r" (__old_lock), "=m" (*(addr)) \
: "m" (*(addr)) \
: "memory"); \
do \
{ \
*(char *) (addr) = 0; \
- __asm __volatile ("" ::: "memory"); \
+ __asm__ __volatile__ ("" ::: "memory"); \
} \
while (0)
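Stripped of the hashing into __sparc32_atomic_locks, the idiom above is a plain byte spin lock. A minimal standalone sketch (my code, using the same constraints as the fragment):

static void
sparc32_spin_lock (unsigned char *lock)
{
  unsigned char __old_lock;
  do
    __asm__ __volatile__ ("ldstub %1, %0"	/* atomically fetch byte, set it to 0xff */
			  : "=r" (__old_lock), "=m" (*lock)
			  : "m" (*lock)
			  : "memory");
  while (__old_lock != 0);	/* zero meant the lock was free */
}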
#ifndef SHARED
# define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ \
- register __typeof (*(mem)) __acev_tmp __asm ("%g6"); \
- register __typeof (mem) __acev_mem __asm ("%g1") = (mem); \
- register __typeof (*(mem)) __acev_oldval __asm ("%g5"); \
+ register __typeof (*(mem)) __acev_tmp __asm__ ("%g6"); \
+ register __typeof (mem) __acev_mem __asm__ ("%g1") = (mem); \
+ register __typeof (*(mem)) __acev_oldval __asm__ ("%g5"); \
__acev_tmp = (newval); \
__acev_oldval = (oldval); \
/* .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, \
because as will then mark the object file as V8+ arch. */ \
- __asm __volatile (".word 0xcde05005" \
+ __asm__ __volatile__ (".word 0xcde05005" \
: "+r" (__acev_tmp), "=m" (*__acev_mem) \
: "r" (__acev_oldval), "m" (*__acev_mem), \
"r" (__acev_mem) : "memory"); \
*__acev_memp = __acev_newval; \
else \
__sparc32_atomic_do_unlock24 (__acev_memp); \
- __asm __volatile ("" ::: "memory"); \
+ __asm__ __volatile__ ("" ::: "memory"); \
__acev_ret; })
#define __v7_exchange_24_rel(mem, newval) \
__sparc32_atomic_do_lock24 (__acev_memp); \
__acev_ret = *__acev_memp & 0xffffff; \
*__acev_memp = __acev_newval; \
- __asm __volatile ("" ::: "memory"); \
+ __asm__ __volatile__ ("" ::: "memory"); \
__acev_ret; })
#ifdef SHARED
register long __o3 __asm__ ("o3") = (long)(arg4); \
register long __o4 __asm__ ("o4") = (long)(arg5); \
register long __g1 __asm__ ("g1") = __NR_clone; \
- __asm __volatile (__CLONE_SYSCALL_STRING : \
+ __asm__ __volatile__ (__CLONE_SYSCALL_STRING : \
"=r" (__g1), "=r" (__o0), "=r" (__o1) : \
"0" (__g1), "1" (__o0), "2" (__o1), \
"r" (__o2), "r" (__o3), "r" (__o4) : \
long double func##l(long double x) \
{ \
long double st_top; \
- __asm ( \
+ __asm__ ( \
" fldt %1\n" \
" fstpl %1\n" \
" jmp " __stringify(__GI_##func) "\n" \
int func##l(long double x) \
{ \
int ret; \
- __asm ( \
+ __asm__ ( \
" fldt %1\n" \
" fstpl %1\n" \
" jmp " __stringify(__GI_##func) "\n" \
long func##l(long double x) \
{ \
long ret; \
- __asm ( \
+ __asm__ ( \
" fldt %1\n" \
" fstpl %1\n" \
" jmp " __stringify(__GI_##func) "\n" \
long long func##l(long double x) \
{ \
long long ret; \
- __asm ( \
+ __asm__ ( \
" fldt %1\n" \
" fstpl %1\n" \
" jmp " __stringify(__GI_##func) "\n" \
#define math_opt_barrier(x) ({ \
__typeof(x) __x = (x); \
/* "t": load x into top-of-stack fpreg */ \
- __asm ("" : "=t" (__x) : "0" (__x)); \
+ __asm__ ("" : "=t" (__x) : "0" (__x)); \
__x; \
})
#define math_force_eval(x) do { \
__typeof(x) __x = (x); \
if (sizeof(__x) <= sizeof(double)) \
/* "m": store x into a memory location */ \
- __asm __volatile ("" : : "m" (__x)); \
+ __asm__ __volatile__ ("" : : "m" (__x)); \
else /* long double */ \
/* "f": load x into (any) fpreg */ \
- __asm __volatile ("" : : "f" (__x)); \
+ __asm__ __volatile__ ("" : : "f" (__x)); \
} while (0)
#endif
__typeof(x) __x = (x); \
if (sizeof(__x) <= sizeof(double)) \
/* "x": load into XMM SSE register */ \
- __asm ("" : "=x" (__x) : "0" (__x)); \
+ __asm__ ("" : "=x" (__x) : "0" (__x)); \
else /* long double */ \
/* "t": load x into top-of-stack fpreg */ \
- __asm ("" : "=t" (__x) : "0" (__x)); \
+ __asm__ ("" : "=t" (__x) : "0" (__x)); \
__x; \
})
#define math_force_eval(x) do { \
__typeof(x) __x = (x); \
if (sizeof(__x) <= sizeof(double)) \
/* "x": load into XMM SSE register */ \
- __asm __volatile ("" : : "x" (__x)); \
+ __asm__ __volatile__ ("" : : "x" (__x)); \
else /* long double */ \
/* "f": load x into (any) fpreg */ \
- __asm __volatile ("" : : "f" (__x)); \
+ __asm__ __volatile__ ("" : : "f" (__x)); \
} while (0)
#endif
/* Default implementations force store to a memory location */
#ifndef math_opt_barrier
-#define math_opt_barrier(x) ({ __typeof(x) __x = (x); __asm ("" : "+m" (__x)); __x; })
+#define math_opt_barrier(x) ({ __typeof(x) __x = (x); __asm__ ("" : "+m" (__x)); __x; })
#endif
#ifndef math_force_eval
-#define math_force_eval(x) do { __typeof(x) __x = (x); __asm __volatile ("" : : "m" (__x)); } while (0)
+#define math_force_eval(x) do { __typeof(x) __x = (x); __asm__ __volatile__ ("" : : "m" (__x)); } while (0)
#endif
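A usage sketch (my example) showing what the two barriers are for: they keep the compiler from folding away a computation whose only visible effect is the FP exception it raises.

#include <float.h>

static double
raise_overflow (void)
{
  double big = math_opt_barrier (DBL_MAX);  /* value is opaque: no constant folding */
  double res = big * big;                   /* overflows at run time */
  math_force_eval (res);                    /* the multiply must actually happen */
  return res;
}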
following code ahead of the __libc_setup_tls call. This function
will initialize the thread register which is subsequently
used. */
- __asm __volatile ("");
+ __asm__ __volatile__ ("");
#endif
/* Minimal initialization of the thread descriptor. */
/* Macros to load from and store into segment registers. We can use
the 32-bit instructions. */
#define TLS_GET_GS() \
- ({ int __seg; __asm ("movl %%gs, %0" : "=q" (__seg)); __seg; })
+ ({ int __seg; __asm__ ("movl %%gs, %0" : "=q" (__seg)); __seg; })
#define TLS_SET_GS(val) \
- __asm ("movl %0, %%gs" :: "q" (val))
+ __asm__ ("movl %0, %%gs" :: "q" (val))
/* Get the full set of definitions. */
#define __exit_thread_inline(val) \
while (1) { \
if (__builtin_constant_p (val) && (val) == 0) \
- __asm__ volatile ("xorl %%ebx, %%ebx; int $0x80" :: "a" (__NR_exit)); \
+ __asm__ __volatile__ ("xorl %%ebx, %%ebx; int $0x80" :: "a" (__NR_exit)); \
else \
- __asm__ volatile ("movl %1, %%ebx; int $0x80" \
+ __asm__ __volatile__ ("movl %1, %%ebx; int $0x80" \
:: "a" (__NR_exit), "r" (val)); \
}
_segdescr.vals[3] = 0x51; \
\
/* Install the TLS. */ \
- __asm__ volatile (TLS_LOAD_EBX \
+ __asm__ __volatile__ (TLS_LOAD_EBX \
"int $0x80\n\t" \
TLS_LOAD_EBX \
: "=a" (_result), "=m" (_segdescr.desc.entry_number) \
/* Return the thread descriptor for the current thread.
The contained asm must *not* be marked volatile since otherwise
assignments like
pthread_descr self = thread_self();
do not get optimized away. */
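(The THREAD_SELF definition this comment describes is elided from the excerpt; its usual i386 shape, sketched under the assumption that the self pointer sits at a fixed %gs offset, is:)

# define THREAD_SELF \
  ({ struct pthread *__self; \
     __asm__ ("movl %%gs:%c1,%0" : "=r" (__self) \
	      : "i" (offsetof (struct pthread, header.self))); \
     __self; })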
# define THREAD_GETMEM(descr, member) \
({ __typeof (descr->member) __value; \
if (sizeof (__value) == 1) \
- __asm__ volatile ("movb %%gs:%P2,%b0" \
+ __asm__ __volatile__ ("movb %%gs:%P2,%b0" \
: "=q" (__value) \
: "0" (0), "i" (offsetof (struct pthread, member))); \
else if (sizeof (__value) == 4) \
- __asm__ volatile ("movl %%gs:%P1,%0" \
+ __asm__ __volatile__ ("movl %%gs:%P1,%0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member))); \
else \
4 or 8. */ \
abort (); \
\
- __asm__ volatile ("movl %%gs:%P1,%%eax\n\t" \
+ __asm__ __volatile__ ("movl %%gs:%P1,%%eax\n\t" \
"movl %%gs:%P2,%%edx" \
: "=A" (__value) \
: "i" (offsetof (struct pthread, member)), \
# define THREAD_GETMEM_NC(descr, member, idx) \
({ __typeof (descr->member[0]) __value; \
if (sizeof (__value) == 1) \
- __asm__ volatile ("movb %%gs:%P2(%3),%b0" \
+ __asm__ __volatile__ ("movb %%gs:%P2(%3),%b0" \
: "=q" (__value) \
: "0" (0), "i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
else if (sizeof (__value) == 4) \
- __asm__ volatile ("movl %%gs:%P1(,%2,4),%0" \
+ __asm__ __volatile__ ("movl %%gs:%P1(,%2,4),%0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
4 or 8. */ \
abort (); \
\
- __asm__ volatile ("movl %%gs:%P1(,%2,8),%%eax\n\t" \
+ __asm__ __volatile__ ("movl %%gs:%P1(,%2,8),%%eax\n\t" \
"movl %%gs:4+%P1(,%2,8),%%edx" \
: "=&A" (__value) \
: "i" (offsetof (struct pthread, member[0])), \
/* Same as THREAD_SETMEM, but the member offset can be non-constant. */
# define THREAD_SETMEM(descr, member, value) \
({ if (sizeof (descr->member) == 1) \
- __asm__ volatile ("movb %b0,%%gs:%P1" : \
+ __asm__ __volatile__ ("movb %b0,%%gs:%P1" : \
: "iq" (value), \
"i" (offsetof (struct pthread, member))); \
else if (sizeof (descr->member) == 4) \
- __asm__ volatile ("movl %0,%%gs:%P1" : \
+ __asm__ __volatile__ ("movl %0,%%gs:%P1" : \
: "ir" (value), \
"i" (offsetof (struct pthread, member))); \
else \
4 or 8. */ \
abort (); \
\
- __asm__ volatile ("movl %%eax,%%gs:%P1\n\t" \
+ __asm__ __volatile__ ("movl %%eax,%%gs:%P1\n\t" \
"movl %%edx,%%gs:%P2" : \
: "A" (value), \
"i" (offsetof (struct pthread, member)), \
/* Set member of the thread descriptor directly. */
# define THREAD_SETMEM_NC(descr, member, idx, value) \
({ if (sizeof (descr->member[0]) == 1) \
- __asm__ volatile ("movb %b0,%%gs:%P1(%2)" : \
+ __asm__ __volatile__ ("movb %b0,%%gs:%P1(%2)" : \
: "iq" (value), \
"i" (offsetof (struct pthread, member)), \
"r" (idx)); \
else if (sizeof (descr->member[0]) == 4) \
- __asm__ volatile ("movl %0,%%gs:%P1(,%2,4)" : \
+ __asm__ __volatile__ ("movl %0,%%gs:%P1(,%2,4)" : \
: "ir" (value), \
"i" (offsetof (struct pthread, member)), \
"r" (idx)); \
4 or 8. */ \
abort (); \
\
- __asm__ volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
+ __asm__ __volatile__ ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
"movl %%edx,%%gs:4+%P1(,%2,8)" : \
: "A" (value), \
"i" (offsetof (struct pthread, member)), \
({ __typeof (descr->member) __ret; \
__typeof (oldval) __old = (oldval); \
if (sizeof (descr->member) == 4) \
- __asm__ volatile (LOCK_PREFIX "cmpxchgl %2, %%gs:%P3" \
+ __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %%gs:%P3" \
: "=a" (__ret) \
: "0" (__old), "r" (newval), \
"i" (offsetof (struct pthread, member))); \
/* Atomic logical and. */
#define THREAD_ATOMIC_AND(descr, member, val) \
(void) ({ if (sizeof ((descr)->member) == 4) \
- __asm__ volatile (LOCK_PREFIX "andl %1, %%gs:%P0" \
+ __asm__ __volatile__ (LOCK_PREFIX "andl %1, %%gs:%P0" \
:: "i" (offsetof (struct pthread, member)), \
"ir" (val)); \
else \
/* Atomic set bit. */
#define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
(void) ({ if (sizeof ((descr)->member) == 4) \
- __asm__ volatile (LOCK_PREFIX "orl %1, %%gs:%P0" \
+ __asm__ __volatile__ (LOCK_PREFIX "orl %1, %%gs:%P0" \
:: "i" (offsetof (struct pthread, member)), \
"ir" (1 << (bit))); \
else \
#define CALL_THREAD_FCT(descr) \
({ void *__res; \
int __ignore1, __ignore2; \
- __asm__ volatile ("pushl %%eax\n\t" \
+ __asm__ __volatile__ ("pushl %%eax\n\t" \
"pushl %%eax\n\t" \
"pushl %%eax\n\t" \
"pushl %%gs:%P4\n\t" \
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res; \
- __asm__ volatile ("xchgl %0, %%gs:%P1" \
+ __asm__ __volatile__ ("xchgl %0, %%gs:%P1" \
: "=r" (__res) \
: "i" (offsetof (struct pthread, header.gscope_flag)), \
"0" (THREAD_GSCOPE_FLAG_UNUSED)); \
#define SECTION(x) __asm__ (".section " x )
/* Embed an #include to pull in the alignment and .end directives. */
-asm ("\n#include \"defs.h\"");
+__asm__ ("\n#include \"defs.h\"");
/* The initial common code ends here. */
-asm ("\n/*@HEADER_ENDS*/");
+__asm__ ("\n/*@HEADER_ENDS*/");
/* To determine whether we need .end and .align: */
-asm ("\n/*@TESTS_BEGIN*/");
+__asm__ ("\n/*@TESTS_BEGIN*/");
extern void dummy (void (*foo) (void));
void
dummy (void (*foo) (void))
if (foo)
(*foo) ();
}
-asm ("\n/*@TESTS_END*/");
+__asm__ ("\n/*@TESTS_END*/");
/* The beginning of _init: */
-asm ("\n/*@_init_PROLOG_BEGINS*/");
+__asm__ ("\n/*@_init_PROLOG_BEGINS*/");
static void
call_initialize_minimal (void)
/* The very first thing we must do is to set up the registers. */
call_initialize_minimal ();
- asm ("ALIGN");
- asm("END_INIT");
+ __asm__ ("ALIGN");
+ __asm__("END_INIT");
/* Now the epilog. */
- asm ("\n/*@_init_PROLOG_ENDS*/");
- asm ("\n/*@_init_EPILOG_BEGINS*/");
+ __asm__ ("\n/*@_init_PROLOG_ENDS*/");
+ __asm__ ("\n/*@_init_EPILOG_BEGINS*/");
SECTION(".init");
}
-asm ("END_INIT");
+__asm__ ("END_INIT");
/* End of the _init epilog, beginning of the _fini prolog. */
-asm ("\n/*@_init_EPILOG_ENDS*/");
-asm ("\n/*@_fini_PROLOG_BEGINS*/");
+__asm__ ("\n/*@_init_EPILOG_ENDS*/");
+__asm__ ("\n/*@_fini_PROLOG_BEGINS*/");
SECTION (".fini");
extern void __attribute__ ((section (".fini"))) _fini (void);
{
/* End of the _fini prolog. */
- asm ("ALIGN");
- asm ("END_FINI");
- asm ("\n/*@_fini_PROLOG_ENDS*/");
+ __asm__ ("ALIGN");
+ __asm__ ("END_FINI");
+ __asm__ ("\n/*@_fini_PROLOG_ENDS*/");
{
/* Let GCC know that _fini is not a leaf function by having a dummy
}
/* Beginning of the _fini epilog. */
- asm ("\n/*@_fini_EPILOG_BEGINS*/");
+ __asm__ ("\n/*@_fini_EPILOG_BEGINS*/");
SECTION (".fini");
}
-asm ("END_FINI");
+__asm__ ("END_FINI");
/* End of the _fini epilog. Any further generated assembly (e.g. .ident)
is shared between both crt files. */
-asm ("\n/*@_fini_EPILOG_ENDS*/");
-asm ("\n/*@TRAILER_BEGINS*/");
+__asm__ ("\n/*@_fini_EPILOG_ENDS*/");
+__asm__ ("\n/*@TRAILER_BEGINS*/");
/* End of file. */
if (__builtin_expect (libgcc_s_handle != NULL, 1))
{
/* Force gcc to reload all values. */
- __asm__ volatile ("" ::: "memory");
+ __asm__ __volatile__ ("" ::: "memory");
return;
}
unsigned int val;
do
- __asm__ volatile ("tas.b @%1; movt %0"
+ __asm__ __volatile__ ("tas.b @%1; movt %0"
: "=&r" (val)
: "r" (lock)
: "memory");
#define __exit_thread_inline(val) \
while (1) { \
if (__builtin_constant_p (val) && (val) == 0) \
- __asm__ volatile ("mov #0,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
+ __asm__ __volatile__ ("mov #0,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
:: "i" (__NR_exit)); \
else \
- __asm__ volatile ("mov %1,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
+ __asm__ __volatile__ ("mov %1,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
:: "i" (__NR_exit), "r" (val)); \
}
/* Install new dtv for current thread. */
# define INSTALL_NEW_DTV(dtv) \
({ tcbhead_t *__tcbp; \
- __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+ __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
__tcbp->dtv = (dtv);})
/* Return dtv of given thread descriptor. */
special attention since 'errno' is not yet available and if the
operation can cause a failure 'errno' must not be touched. */
# define TLS_INIT_TP(tcbp, secondcall) \
- ({ __asm __volatile ("ldc %0,gbr" : : "r" (tcbp)); 0; })
+ ({ __asm__ __volatile__ ("ldc %0,gbr" : : "r" (tcbp)); 0; })
/* Return the address of the dtv for the current thread. */
# define THREAD_DTV() \
({ tcbhead_t *__tcbp; \
- __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+ __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
__tcbp->dtv;})
/* Return the thread descriptor for the current thread.
do not get optimized away. */
# define THREAD_SELF \
({ struct pthread *__self; \
- __asm ("stc gbr,%0" : "=r" (__self)); \
+ __asm__ ("stc gbr,%0" : "=r" (__self)); \
__self - 1;})
/* Magic for libthread_db to know how to do THREAD_SELF. */
#define THREAD_GET_POINTER_GUARD() \
({ tcbhead_t *__tcbp; \
- __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+ __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
__tcbp->pointer_guard;})
#define THREAD_SET_POINTER_GUARD(value) \
({ tcbhead_t *__tcbp; \
- __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+ __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
__tcbp->pointer_guard = (value);})
#define THREAD_COPY_POINTER_GUARD(descr) \
({ tcbhead_t *__tcbp; \
- __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+ __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp)); \
((tcbhead_t *) (descr + 1))->pointer_guard = __tcbp->pointer_guard;})
/* Get and set the global scope generation counter in struct pthread. */
int
pthread_spin_lock (pthread_spinlock_t *lock)
{
- __asm __volatile
+ __asm__ __volatile__
("1: ldstub [%0], %%g2\n"
" orcc %%g2, 0x0, %%g0\n"
" bne,a 2f\n"
pthread_spin_trylock (pthread_spinlock_t *lock)
{
int res;
- __asm __volatile ("ldstub [%1], %0" : "=r" (res) : "r" (lock) : "memory");
+ __asm__ __volatile__ ("ldstub [%1], %0" : "=r" (res) : "r" (lock) : "memory");
return res == 0 ? 0 : EBUSY;
}
int
pthread_spin_lock (pthread_spinlock_t *lock)
{
- __asm __volatile
+ __asm__ __volatile__
("1: ldstub [%0], %%g2\n"
" brnz,pn %%g2, 2f\n"
" membar #StoreLoad | #StoreStore\n"
int
pthread_spin_lock (pthread_spinlock_t *lock)
{
- __asm __volatile
+ __asm__ __volatile__
("1: ldstub [%0], %%g5\n"
" brnz,pn %%g5, 2f\n"
" membar #StoreLoad | #StoreStore\n"
pthread_spin_trylock (pthread_spinlock_t *lock)
{
int res;
- __asm __volatile
+ __asm__ __volatile__
("ldstub [%1], %0\n"
"membar #StoreLoad | #StoreStore"
: "=r" (res)
int
pthread_spin_unlock (pthread_spinlock_t *lock)
{
- __asm __volatile ("membar #StoreStore | #LoadStore");
+ __asm__ __volatile__ ("membar #StoreStore | #LoadStore");
*lock = 0;
return 0;
}
Do this atomically.
*/
newval = __fork_generation | 1;
- __asm __volatile (
+ __asm__ __volatile__ (
"1: ldl_l %0, %2\n"
" and %0, 2, %1\n"
" bne %1, 2f\n"
/* Thumb-2 has ldrex/strex. However it does not have barrier instructions,
so we still need to use the kernel helper. */
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ register __typeof (oldval) a_oldval asm ("r0"); \
- register __typeof (oldval) a_newval asm ("r1") = (newval); \
- register __typeof (mem) a_ptr asm ("r2") = (mem); \
- register __typeof (oldval) a_tmp asm ("r3"); \
- register __typeof (oldval) a_oldval2 asm ("r4") = (oldval); \
+ ({ register __typeof (oldval) a_oldval __asm__ ("r0"); \
+ register __typeof (oldval) a_newval __asm__ ("r1") = (newval); \
+ register __typeof (mem) a_ptr __asm__ ("r2") = (mem); \
+ register __typeof (oldval) a_tmp __asm__ ("r3"); \
+ register __typeof (oldval) a_oldval2 __asm__ ("r4") = (oldval); \
__asm__ __volatile__ \
("0:\tldr\t%[tmp],[%[ptr]]\n\t" \
"cmp\t%[tmp], %[old2]\n\t" \
a_tmp; })
#else
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ register __typeof (oldval) a_oldval asm ("r0"); \
- register __typeof (oldval) a_newval asm ("r1") = (newval); \
- register __typeof (mem) a_ptr asm ("r2") = (mem); \
- register __typeof (oldval) a_tmp asm ("r3"); \
- register __typeof (oldval) a_oldval2 asm ("r4") = (oldval); \
+ ({ register __typeof (oldval) a_oldval __asm__ ("r0"); \
+ register __typeof (oldval) a_newval __asm__ ("r1") = (newval); \
+ register __typeof (mem) a_ptr __asm__ ("r2") = (mem); \
+ register __typeof (oldval) a_tmp __asm__ ("r3"); \
+ register __typeof (oldval) a_oldval2 __asm__ ("r4") = (oldval); \
__asm__ __volatile__ \
("0:\tldr\t%[tmp],[%[ptr]]\n\t" \
"cmp\t%[tmp], %[old2]\n\t" \
if (__builtin_expect (libgcc_s_handle != NULL, 1))
{
/* Force gcc to reload all values. */
- asm volatile ("" ::: "memory");
+ __asm__ __volatile__ ("" ::: "memory");
return;
}
ARM unwinder relies on register state at entrance. So we write this in
assembly. */
-asm (
+__asm__ (
" .globl _Unwind_Resume\n"
" .type _Unwind_Resume, %function\n"
"_Unwind_Resume:\n"
ARM unwinder relies on register state at entrance. So we write this in
assembly. */
-asm (
+__asm__ (
" .globl _Unwind_Resume\n"
" .type _Unwind_Resume, %function\n"
"_Unwind_Resume:\n"
({ \
int __status; \
register __typeof (val) _val __asm__ ("edx") = (val); \
- __asm__ __volatile (LLL_EBX_LOAD \
+ __asm__ __volatile__ (LLL_EBX_LOAD \
LLL_ENTER_KERNEL \
LLL_EBX_LOAD \
: "=a" (__status) \
do { \
int __ignore; \
register __typeof (nr) _nr __asm__ ("edx") = (nr); \
- __asm__ __volatile (LLL_EBX_LOAD \
+ __asm__ __volatile__ (LLL_EBX_LOAD \
LLL_ENTER_KERNEL \
LLL_EBX_LOAD \
: "=a" (__ignore) \
#define lll_trylock(futex) \
({ int ret; \
- __asm__ __volatile (__lll_trylock_asm \
+ __asm__ __volatile__ (__lll_trylock_asm \
: "=a" (ret), "=m" (futex) \
: "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
"0" (LLL_LOCK_INITIALIZER), \
#define lll_robust_trylock(futex, id) \
({ int ret; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
: "r" (id), "m" (futex), \
"0" (LLL_LOCK_INITIALIZER) \
#define lll_cond_trylock(futex) \
({ int ret; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
: "r" (LLL_LOCK_INITIALIZER_WAITERS), \
"m" (futex), "0" (LLL_LOCK_INITIALIZER) \
(void) \
({ int ignore1, ignore2; \
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
- __asm__ __volatile (__lll_lock_asm_start \
+ __asm__ __volatile__ (__lll_lock_asm_start \
"jnz _L_lock_%=\n\t" \
".subsection 1\n\t" \
".type _L_lock_%=,@function\n" \
else \
{ \
int ignore3; \
- __asm__ __volatile (__lll_lock_asm_start \
+ __asm__ __volatile__ (__lll_lock_asm_start \
"jnz _L_lock_%=\n\t" \
".subsection 1\n\t" \
".type _L_lock_%=,@function\n" \
#define lll_robust_lock(futex, id, private) \
({ int __result, ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
"jnz _L_robust_lock_%=\n\t" \
".subsection 1\n\t" \
".type _L_robust_lock_%=,@function\n" \
#define lll_cond_lock(futex, private) \
(void) \
({ int ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
"jnz _L_cond_lock_%=\n\t" \
".subsection 1\n\t" \
".type _L_cond_lock_%=,@function\n" \
#define lll_robust_cond_lock(futex, id, private) \
({ int __result, ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
"jnz _L_robust_cond_lock_%=\n\t" \
".subsection 1\n\t" \
".type _L_robust_cond_lock_%=,@function\n" \
#define lll_timedlock(futex, timeout, private) \
({ int __result, ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
"jnz _L_timedlock_%=\n\t" \
".subsection 1\n\t" \
".type _L_timedlock_%=,@function\n" \
#define lll_robust_timedlock(futex, timeout, id, private) \
({ int __result, ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
"jnz _L_robust_timedlock_%=\n\t" \
".subsection 1\n\t" \
".type _L_robust_timedlock_%=,@function\n" \
(void) \
({ int ignore; \
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
- __asm__ __volatile (__lll_unlock_asm \
+ __asm__ __volatile__ (__lll_unlock_asm \
"jne _L_unlock_%=\n\t" \
".subsection 1\n\t" \
".type _L_unlock_%=,@function\n" \
else \
{ \
int ignore2; \
- __asm__ __volatile (__lll_unlock_asm \
+ __asm__ __volatile__ (__lll_unlock_asm \
"jne _L_unlock_%=\n\t" \
".subsection 1\n\t" \
".type _L_unlock_%=,@function\n" \
#define lll_robust_unlock(futex, private) \
(void) \
({ int ignore, ignore2; \
- __asm__ __volatile (LOCK_INSTR "andl %3, %0\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "andl %3, %0\n\t" \
"jne _L_robust_unlock_%=\n\t" \
".subsection 1\n\t" \
".type _L_robust_unlock_%=,@function\n" \
(void) \
({ int __ignore; \
register int _nr __asm__ ("edx") = 1; \
- __asm__ __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "orl %5, (%2)\n\t" \
LLL_EBX_LOAD \
LLL_ENTER_KERNEL \
LLL_EBX_LOAD \
int __ignore; \
register __typeof (tid) _tid __asm__ ("edx") = (tid); \
if (_tid != 0) \
- __asm__ __volatile (LLL_EBX_LOAD \
+ __asm__ __volatile__ (LLL_EBX_LOAD \
"1:\tmovl %1, %%eax\n\t" \
LLL_ENTER_KERNEL \
"cmpl $0, (%%ebx)\n\t" \
/* Set *futex to ID if it is 0, atomically. Returns the old value */
#define __lll_robust_trylock(futex, id) \
({ int __val; \
- __asm __volatile ("1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
+ __asm__ __volatile__ ("1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
" cmpwi 0,%0,0\n" \
" bne 2f\n" \
" stwcx. %3,0,%2\n" \
Do this atomically.
*/
newval = __fork_generation | 1;
- __asm __volatile ("1: lwarx %0,0,%3\n"
+ __asm__ __volatile__ ("1: lwarx %0,0,%3\n"
" andi. %1,%0,2\n"
" bne 2f\n"
" stwcx. %4,0,%3\n"
int
pthread_spin_unlock (pthread_spinlock_t *lock)
{
- __asm __volatile (__lll_rel_instr ::: "memory");
+ __asm__ __volatile__ (__lll_rel_instr ::: "memory");
*lock = 0;
return 0;
}
{
struct new_sem *isem = (struct new_sem *) sem;
- __asm __volatile (__lll_rel_instr ::: "memory");
+ __asm__ __volatile__ (__lll_rel_instr ::: "memory");
atomic_increment (&isem->value);
- __asm __volatile (__lll_acq_instr ::: "memory");
+ __asm__ __volatile__ (__lll_acq_instr ::: "memory");
if (isem->nwaiters > 0)
{
int err = lll_futex_wake (&isem->value, 1,
#define lll_trylock(futex) \
({ unsigned char __result; \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
#define lll_robust_trylock(futex, id) \
({ unsigned char __result; \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
#define lll_cond_trylock(futex) \
({ unsigned char __result; \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
#define lll_lock(futex, private) \
(void) ({ int __result, *__futex = &(futex); \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
#define lll_robust_lock(futex, id, private) \
({ int __result, *__futex = &(futex); \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
always wakeup waiters. */
#define lll_cond_lock(futex, private) \
(void) ({ int __result, *__futex = &(futex); \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
#define lll_robust_cond_lock(futex, id, private) \
({ int __result, *__futex = &(futex); \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
#define lll_timedlock(futex, timeout, private) \
({ int __result, *__futex = &(futex); \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
#define lll_robust_timedlock(futex, timeout, id, private) \
({ int __result, *__futex = &(futex); \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
nop\n\
#define lll_unlock(futex, private) \
(void) ({ int __result, *__futex = &(futex); \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
mov r15,r1\n\
#define lll_robust_unlock(futex, private) \
(void) ({ int __result, *__futex = &(futex); \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
mov r15,r1\n\
#define lll_robust_dead(futex, private) \
(void) ({ int __ignore, *__futex = &(futex); \
- __asm __volatile ("\
+ __asm__ __volatile__ ("\
.align 2\n\
mova 1f,r0\n\
mov r15,r1\n\
#define lll_futex_timed_wait(futex, val, timeout, private) \
({ \
int __status; \
- register unsigned long __r3 __asm ("r3") = SYS_futex; \
- register unsigned long __r4 __asm ("r4") = (unsigned long) (futex); \
- register unsigned long __r5 __asm ("r5") \
+ register unsigned long __r3 __asm__ ("r3") = SYS_futex; \
+ register unsigned long __r4 __asm__ ("r4") = (unsigned long) (futex); \
+ register unsigned long __r5 __asm__ ("r5") \
= __lll_private_flag (FUTEX_WAIT, private); \
- register unsigned long __r6 __asm ("r6") = (unsigned long) (val); \
- register unsigned long __r7 __asm ("r7") = (timeout); \
- __asm __volatile (SYSCALL_WITH_INST_PAD \
+ register unsigned long __r6 __asm__ ("r6") = (unsigned long) (val); \
+ register unsigned long __r7 __asm__ ("r7") = (timeout); \
+ __asm__ __volatile__ (SYSCALL_WITH_INST_PAD \
: "=z" (__status) \
: "r" (__r3), "r" (__r4), "r" (__r5), \
"r" (__r6), "r" (__r7) \
#define lll_futex_wake(futex, nr, private) \
do { \
int __ignore; \
- register unsigned long __r3 __asm ("r3") = SYS_futex; \
- register unsigned long __r4 __asm ("r4") = (unsigned long) (futex); \
- register unsigned long __r5 __asm ("r5") \
+ register unsigned long __r3 __asm__ ("r3") = SYS_futex; \
+ register unsigned long __r4 __asm__ ("r4") = (unsigned long) (futex); \
+ register unsigned long __r5 __asm__ ("r5") \
= __lll_private_flag (FUTEX_WAKE, private); \
- register unsigned long __r6 __asm ("r6") = (unsigned long) (nr); \
- register unsigned long __r7 __asm ("r7") = 0; \
- __asm __volatile (SYSCALL_WITH_INST_PAD \
+ register unsigned long __r6 __asm__ ("r6") = (unsigned long) (nr); \
+ register unsigned long __r7 __asm__ ("r7") = 0; \
+ __asm__ __volatile__ (SYSCALL_WITH_INST_PAD \
: "=z" (__ignore) \
: "r" (__r3), "r" (__r4), "r" (__r5), \
"r" (__r6), "r" (__r7) \
register const struct timespec *__to __asm__ ("r10") = timeout; \
int __status; \
register __typeof (val) _val __asm__ ("edx") = (val); \
- __asm__ __volatile ("syscall" \
+ __asm__ __volatile__ ("syscall" \
: "=a" (__status) \
: "0" (SYS_futex), "D" (futex), \
"S" (__lll_private_flag (FUTEX_WAIT, private)), \
do { \
int __ignore; \
register __typeof (nr) _nr __asm__ ("edx") = (nr); \
- __asm__ __volatile ("syscall" \
+ __asm__ __volatile__ ("syscall" \
: "=a" (__ignore) \
: "0" (SYS_futex), "D" (futex), \
"S" (__lll_private_flag (FUTEX_WAKE, private)), \
#define lll_trylock(futex) \
({ int ret; \
- __asm__ __volatile (__lll_trylock_asm \
+ __asm__ __volatile__ (__lll_trylock_asm \
: "=a" (ret), "=m" (futex) \
: "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
"0" (LLL_LOCK_INITIALIZER) \
#define lll_robust_trylock(futex, id) \
({ int ret; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
: "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
#define lll_cond_trylock(futex) \
({ int ret; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
: "r" (LLL_LOCK_INITIALIZER_WAITERS), \
"m" (futex), "0" (LLL_LOCK_INITIALIZER) \
(void) \
({ int ignore1, ignore2, ignore3; \
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
- __asm__ __volatile (__lll_lock_asm_start \
+ __asm__ __volatile__ (__lll_lock_asm_start \
".subsection 1\n\t" \
".type _L_lock_%=, @function\n" \
"_L_lock_%=:\n" \
: "0" (1), "m" (futex), "3" (0) \
: "cx", "r11", "cc", "memory"); \
else \
- __asm__ __volatile (__lll_lock_asm_start \
+ __asm__ __volatile__ (__lll_lock_asm_start \
".subsection 1\n\t" \
".type _L_lock_%=, @function\n" \
"_L_lock_%=:\n" \
#define lll_robust_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
".type _L_robust_lock_%=, @function\n" \
#define lll_cond_lock(futex, private) \
(void) \
({ int ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
".type _L_cond_lock_%=, @function\n" \
#define lll_robust_cond_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
".type _L_robust_cond_lock_%=, @function\n" \
#define lll_timedlock(futex, timeout, private) \
({ int result, ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
".type _L_timedlock_%=, @function\n" \
#define lll_robust_timedlock(futex, timeout, id, private) \
({ int result, ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
".type _L_robust_timedlock_%=, @function\n" \
(void) \
({ int ignore; \
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
- __asm__ __volatile (__lll_unlock_asm_start \
+ __asm__ __volatile__ (__lll_unlock_asm_start \
".subsection 1\n\t" \
".type _L_unlock_%=, @function\n" \
"_L_unlock_%=:\n" \
: "m" (futex) \
: "ax", "cx", "r11", "cc", "memory"); \
else \
- __asm__ __volatile (__lll_unlock_asm_start \
+ __asm__ __volatile__ (__lll_unlock_asm_start \
".subsection 1\n\t" \
".type _L_unlock_%=, @function\n" \
"_L_unlock_%=:\n" \
do \
{ \
int ignore; \
- __asm__ __volatile (LOCK_INSTR "andl %2, %0\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "andl %2, %0\n\t" \
"jne 1f\n\t" \
".subsection 1\n\t" \
".type _L_robust_unlock_%=, @function\n" \
do \
{ \
int ignore; \
- __asm__ __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
+ __asm__ __volatile__ (LOCK_INSTR "orl %3, (%2)\n\t" \
"syscall" \
: "=m" (futex), "=a" (ignore) \
: "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
register int __nr_move __asm__ ("r10") = nr_move; \
register void *__mutex __asm__ ("r8") = mutex; \
register int __val __asm__ ("r9") = val; \
- __asm__ __volatile ("syscall" \
+ __asm__ __volatile__ ("syscall" \
: "=a" (__res) \
: "0" (__NR_futex), "D" ((void *) ftx), \
"S" (__lll_private_flag (FUTEX_CMP_REQUEUE, \
int __ignore; \
register __typeof (tid) _tid __asm__ ("edx") = (tid); \
if (_tid != 0) \
- __asm__ __volatile ("xorq %%r10, %%r10\n\t" \
+ __asm__ __volatile__ ("xorq %%r10, %%r10\n\t" \
"1:\tmovq %2, %%rax\n\t" \
"syscall\n\t" \
"cmpl $0, (%%rdi)\n\t" \
#define RESET_VGETCPU_CACHE() \
do { \
- asm volatile ("movl %0, %%fs:%P1\n\t" \
+ __asm__ __volatile__ ("movl %0, %%fs:%P1\n\t" \
"movl %0, %%fs:%P2" \
: \
: "ir" (0), "i" (offsetof (struct pthread, \
/* While there is no such syscall. */
#define __exit_thread_inline(val) \
- __asm__ volatile ("syscall" :: "a" (__NR_exit), "D" (val))
+ __asm__ __volatile__ ("syscall" :: "a" (__NR_exit), "D" (val))
_head->self = _thrdescr; \
\
/* It is a simple syscall to set the %fs value for the thread. */ \
- __asm__ volatile ("syscall" \
+ __asm__ __volatile__ ("syscall" \
: "=a" (_result) \
: "0" ((unsigned long int) __NR_arch_prctl), \
"D" ((unsigned long int) ARCH_SET_FS), \
/* Return the thread descriptor for the current thread.
The contained asm must *not* be marked volatile since otherwise
assignments like
pthread_descr self = thread_self();
do not get optimized away. */
# define THREAD_GETMEM(descr, member) \
({ __typeof (descr->member) __value; \
if (sizeof (__value) == 1) \
- __asm__ volatile ("movb %%fs:%P2,%b0" \
+ __asm__ __volatile__ ("movb %%fs:%P2,%b0" \
: "=q" (__value) \
: "0" (0), "i" (offsetof (struct pthread, member))); \
else if (sizeof (__value) == 4) \
- __asm__ volatile ("movl %%fs:%P1,%0" \
+ __asm__ __volatile__ ("movl %%fs:%P1,%0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member))); \
else \
4 or 8. */ \
abort (); \
\
- __asm__ volatile ("movq %%fs:%P1,%q0" \
+ __asm__ __volatile__ ("movq %%fs:%P1,%q0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member))); \
} \
# define THREAD_GETMEM_NC(descr, member, idx) \
({ __typeof (descr->member[0]) __value; \
if (sizeof (__value) == 1) \
- __asm__ volatile ("movb %%fs:%P2(%q3),%b0" \
+ __asm__ __volatile__ ("movb %%fs:%P2(%q3),%b0" \
: "=q" (__value) \
: "0" (0), "i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
else if (sizeof (__value) == 4) \
- __asm__ volatile ("movl %%fs:%P1(,%q2,4),%0" \
+ __asm__ __volatile__ ("movl %%fs:%P1(,%q2,4),%0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member[0])), "r" (idx));\
else \
4 or 8. */ \
abort (); \
\
- __asm__ volatile ("movq %%fs:%P1(,%q2,8),%q0" \
+ __asm__ __volatile__ ("movq %%fs:%P1(,%q2,8),%q0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
/* Same as THREAD_SETMEM, but the member offset can be non-constant. */
# define THREAD_SETMEM(descr, member, value) \
({ if (sizeof (descr->member) == 1) \
- __asm__ volatile ("movb %b0,%%fs:%P1" : \
+ __asm__ __volatile__ ("movb %b0,%%fs:%P1" : \
: "iq" (value), \
"i" (offsetof (struct pthread, member))); \
else if (sizeof (descr->member) == 4) \
- __asm__ volatile ("movl %0,%%fs:%P1" : \
+ __asm__ __volatile__ ("movl %0,%%fs:%P1" : \
: IMM_MODE (value), \
"i" (offsetof (struct pthread, member))); \
else \
4 or 8. */ \
abort (); \
\
- __asm__ volatile ("movq %q0,%%fs:%P1" : \
+ __asm__ __volatile__ ("movq %q0,%%fs:%P1" : \
: IMM_MODE ((unsigned long int) value), \
"i" (offsetof (struct pthread, member))); \
}})
/* Set member of the thread descriptor directly. */
# define THREAD_SETMEM_NC(descr, member, idx, value) \
({ if (sizeof (descr->member[0]) == 1) \
- __asm__ volatile ("movb %b0,%%fs:%P1(%q2)" : \
+ __asm__ __volatile__ ("movb %b0,%%fs:%P1(%q2)" : \
: "iq" (value), \
"i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
else if (sizeof (descr->member[0]) == 4) \
- __asm__ volatile ("movl %0,%%fs:%P1(,%q2,4)" : \
+ __asm__ __volatile__ ("movl %0,%%fs:%P1(,%q2,4)" : \
: IMM_MODE (value), \
"i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
4 or 8. */ \
abort (); \
\
- __asm__ volatile ("movq %q0,%%fs:%P1(,%q2,8)" : \
+ __asm__ __volatile__ ("movq %q0,%%fs:%P1(,%q2,8)" : \
: IMM_MODE ((unsigned long int) value), \
"i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
({ __typeof (descr->member) __ret; \
__typeof (oldval) __old = (oldval); \
if (sizeof (descr->member) == 4) \
- __asm__ volatile (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
+ __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3" \
: "=a" (__ret) \
: "0" (__old), "r" (newval), \
"i" (offsetof (struct pthread, member))); \
/* Atomic logical and. */
# define THREAD_ATOMIC_AND(descr, member, val) \
(void) ({ if (sizeof ((descr)->member) == 4) \
- __asm__ volatile (LOCK_PREFIX "andl %1, %%fs:%P0" \
+ __asm__ __volatile__ (LOCK_PREFIX "andl %1, %%fs:%P0" \
:: "i" (offsetof (struct pthread, member)), \
"ir" (val)); \
else \
/* Atomic set bit. */
# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
(void) ({ if (sizeof ((descr)->member) == 4) \
- __asm__ volatile (LOCK_PREFIX "orl %1, %%fs:%P0" \
+ __asm__ __volatile__ (LOCK_PREFIX "orl %1, %%fs:%P0" \
:: "i" (offsetof (struct pthread, member)), \
"ir" (1 << (bit))); \
else \
# define CALL_THREAD_FCT(descr) \
({ void *__res; \
- __asm__ volatile ("movq %%fs:%P2, %%rdi\n\t" \
+ __asm__ __volatile__ ("movq %%fs:%P2, %%rdi\n\t" \
"callq *%%fs:%P1" \
: "=a" (__res) \
: "i" (offsetof (struct pthread, start_routine)), \
# define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res; \
- __asm__ volatile ("xchgl %0, %%fs:%P1" \
+ __asm__ __volatile__ ("xchgl %0, %%fs:%P1" \
: "=r" (__res) \
: "i" (offsetof (struct pthread, header.gscope_flag)), \
"0" (THREAD_GSCOPE_FLAG_UNUSED)); \
static void __attribute__ ((noinline))
clobber_lots_of_regs (void)
{
-#define X1(n) long r##n = 10##n; __asm __volatile ("" : "+r" (r##n));
+#define X1(n) long r##n = 10##n; __asm__ __volatile__ ("" : "+r" (r##n));
#define X2(n) X1(n##0) X1(n##1) X1(n##2) X1(n##3) X1(n##4)
#define X3(n) X2(n##0) X2(n##1) X2(n##2) X2(n##3) X2(n##4)
X3(0) X3(1) X3(2) X3(3) X3(4)
#undef X1
-#define X1(n) __asm __volatile ("" : : "r" (r##n));
+#define X1(n) __asm__ __volatile__ ("" : : "r" (r##n));
X3(0) X3(1) X3(2) X3(3) X3(4)
#undef X1
#undef X2
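Spelled out: each X level multiplies by five, so X3(0) through X3(4) create 125 locals named r000 through r444 (digits 0 to 4 only). A single X1(000) expands to

  long r000 = 10000; __asm__ __volatile__ ("" : "+r" (r000));

pinning the value live across an opaque asm, which forces far more simultaneously live values than the machine has registers.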
{
int *p = &CONCAT (v, N);
/* GCC assumes &var is never NULL, add optimization barrier. */
- __asm __volatile ("" : "+r" (p));
+ __asm__ __volatile__ ("" : "+r" (p));
if (p == NULL || *p != 4)
{
printf ("fail %d %p\n", N, p);
{
int *p = &var;
/* GCC assumes &var is never NULL, add optimization barrier. */
- __asm __volatile ("" : "+r" (p));
+ __asm__ __volatile__ ("" : "+r" (p));
if (p == NULL || *p != 4)
{
printf ("fail %d %p\n", N, p);