1 /* Copyright (C) 1992,1997-2003,2004,2005,2006 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
4 The GNU C Library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU Lesser General Public
6 License as published by the Free Software Foundation; either
7 version 2.1 of the License, or (at your option) any later version.
9 The GNU C Library is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Lesser General Public License for more details.
14 You should have received a copy of the GNU Lesser General Public
15 License along with the GNU C Library; if not, write to the Free
16 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA.  */
19 #ifndef _BITS_SYSCALLS_H
20 #define _BITS_SYSCALLS_H
23 # error "Never use <bits/syscalls.h> directly; include <sys/syscall.h> instead."
/* INLINE_VSYSCALL (name, nr, args...)
   Invoke syscall NAME with NR arguments, preferring the vDSO entry
   point __vdso_##name when one was resolved at startup.  If the vDSO
   call reports an error other than ENOSYS, that result stands; on
   ENOSYS (or when no vDSO symbol is available) fall back to the real
   system call.  On failure errno is set from the syscall result, like
   INLINE_SYSCALL.
   NOTE(review): several continuation lines of this macro body
   (braces, returns) are elided from this listing — do not edit the
   control flow without consulting the full file.  */
31 # define INLINE_VSYSCALL(name, nr, args...) \
35 INTERNAL_SYSCALL_DECL (sc_err); \
38 if (__vdso_##name != NULL) \
40 sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, sc_err, nr, ##args); \
41 if (!INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
43 if (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err) != ENOSYS) \
47 sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, ##args); \
48 if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
51 __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
/* Without vDSO support INLINE_VSYSCALL degrades to the plain
   errno-setting syscall wrapper.  */
58 # define INLINE_VSYSCALL(name, nr, args...) \
59 INLINE_SYSCALL (name, nr, ##args)
/* INTERNAL_VSYSCALL (name, err, nr, args...)
   Like INLINE_VSYSCALL but never touches errno: the raw result and the
   error word ERR are left for the caller to inspect with
   INTERNAL_SYSCALL_ERROR_P / INTERNAL_SYSCALL_ERRNO.  A vDSO result is
   kept unless it is an ENOSYS error, in which case the real syscall is
   issued instead.
   NOTE(review): continuation lines of this macro body are elided from
   this listing.  */
63 # define INTERNAL_VSYSCALL(name, err, nr, args...) \
68 if (__vdso_##name != NULL) \
70 v_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args); \
71 if (!INTERNAL_SYSCALL_ERROR_P (v_ret, err) \
72 || INTERNAL_SYSCALL_ERRNO (v_ret, err) != ENOSYS) \
75 v_ret = INTERNAL_SYSCALL (name, err, nr, ##args); \
/* No vDSO: INTERNAL_VSYSCALL is simply the internal syscall.  */
80 # define INTERNAL_VSYSCALL(name, err, nr, args...) \
81 INTERNAL_SYSCALL (name, err, nr, ##args)
/* INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK (name, err, nr, args...)
   Use the vDSO entry point if it exists, but do NOT fall back to the
   real system call: when no __vdso_##name symbol was resolved the
   result is simply ENOSYS.  errno is never touched.
   NOTE(review): the trailing lines of this macro (returning sc_ret)
   are elided from this listing.  */
84 # define INTERNAL_VSYSCALL_NO_SYSCALL_FALLBACK(name, err, nr, args...) \
86 long int sc_ret = ENOSYS; \
88 if (__vdso_##name != NULL) \
89 sc_ret = INTERNAL_VSYSCALL_NCS (__vdso_##name, err, nr, ##args); \
95 /* List of system calls which are supported as vsyscalls. */
/* These feature flags tell generic glibc code that clock_getres and
   clock_gettime may be routed through the vDSO macros above.  */
96 # define HAVE_CLOCK_GETRES_VSYSCALL 1
97 # define HAVE_CLOCK_GETTIME_VSYSCALL 1
99 /* Define a macro which expands inline into the wrapper code for a VDSO
100 call. This use is for internal calls that do not need to handle errors
101 normally. It will never touch errno.
102 On powerpc a system call basically clobbers the same registers like a
103 function call, with the exception of LR (which is needed for the
104 "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
105 an error return status). */
/* NOTE(review): the asm template itself (original lines ~121-124) and
   part of the operand lists are elided from this listing; only the
   register bindings, output constraints and clobbers are visible.
   FUNCPTR is the vDSO function pointer; LOAD_ARGS_##nr places it and
   the arguments into r0/r3..r8.  "lr" is clobbered here (unlike
   INTERNAL_SYSCALL_NCS below) because the vDSO routine is reached via
   an indirect call.  ERR receives r0 afterwards — presumably the saved
   CR word tested by INTERNAL_SYSCALL_ERROR_P; confirm against the full
   file.  */
106 # define INTERNAL_VSYSCALL_NCS(funcptr, err, nr, args...) \
108 register void *r0 __asm__ ("r0"); \
109 register long int r3 __asm__ ("r3"); \
110 register long int r4 __asm__ ("r4"); \
111 register long int r5 __asm__ ("r5"); \
112 register long int r6 __asm__ ("r6"); \
113 register long int r7 __asm__ ("r7"); \
114 register long int r8 __asm__ ("r8"); \
115 register long int r9 __asm__ ("r9"); \
116 register long int r10 __asm__ ("r10"); \
117 register long int r11 __asm__ ("r11"); \
118 register long int r12 __asm__ ("r12"); \
119 LOAD_ARGS_##nr (funcptr, args); \
120 __asm__ __volatile__ \
125 "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \
126 "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \
128 : "cr0", "ctr", "lr", "memory"); \
129 err = (long int) r0; \
133 /* Define a macro which expands inline into the wrapper code for a system
134 call. This use is for internal calls that do not need to handle errors
135 normally. It will never touch errno.
136 On powerpc a system call basically clobbers the same registers like a
137 function call, with the exception of LR (which is needed for the
138 "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
139 an error return status). */
/* Declares the per-call error word that the *_NCS macros fill in and
   that INTERNAL_SYSCALL_ERROR_P / INTERNAL_SYSCALL_ERRNO inspect.  */
141 # define INTERNAL_SYSCALL_DECL(err) long int err
/* INTERNAL_SYSCALL_NCS (name, err, nr, args...)
   Issue the raw system call NAME (syscall number, not checked against
   a symbolic name) with NR arguments, without touching errno.
   NOTE(review): the asm template (the "sc" sequence, original lines
   ~158-160) and part of the operand lists are elided from this
   listing.  Unlike INTERNAL_VSYSCALL_NCS above, "lr" is NOT in the
   clobber list here — the syscall is direct, no indirect call.  */
143 # define INTERNAL_SYSCALL_NCS(name, err, nr, args...) \
145 register long int r0 __asm__ ("r0"); \
146 register long int r3 __asm__ ("r3"); \
147 register long int r4 __asm__ ("r4"); \
148 register long int r5 __asm__ ("r5"); \
149 register long int r6 __asm__ ("r6"); \
150 register long int r7 __asm__ ("r7"); \
151 register long int r8 __asm__ ("r8"); \
152 register long int r9 __asm__ ("r9"); \
153 register long int r10 __asm__ ("r10"); \
154 register long int r11 __asm__ ("r11"); \
155 register long int r12 __asm__ ("r12"); \
156 LOAD_ARGS_##nr(name, args); \
157 __asm__ __volatile__ \
161 "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \
162 "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \
164 : "cr0", "ctr", "memory"); \
/* True when the syscall failed: tests bit 28 of the saved error word.
   Per the block comments above, the kernel signals failure via CR0.SO,
   which ends up as bit 28 of ERR.  VAL is evaluated (and discarded) so
   the macro has function-like semantics.  */
169 # define INTERNAL_SYSCALL_ERROR_P(val, err) \
170 ((void) (val), __builtin_expect ((err) & (1 << 28), 0))
/* On powerpc the kernel returns the (positive) errno code itself in the
   result register when CR0.SO is set, so VAL is already the errno.  */
172 # define INTERNAL_SYSCALL_ERRNO(val, err) (val)
/* Deliberately undefined functions.  LOAD_ARGS_N below emits a call to
   the matching one whenever a syscall argument is a non-pointer wider
   than 4 bytes; when the compile-time check passes the call is
   unreachable and should be discarded, otherwise the bug surfaces as an
   unresolved reference at link time.  */
174 extern void __illegally_sized_syscall_arg1(void);
175 extern void __illegally_sized_syscall_arg2(void);
176 extern void __illegally_sized_syscall_arg3(void);
177 extern void __illegally_sized_syscall_arg4(void);
178 extern void __illegally_sized_syscall_arg5(void);
179 extern void __illegally_sized_syscall_arg6(void);
/* LOAD_ARGS_N: marshal N syscall arguments.  Each level casts its
   argument to long int, chains to the previous level, and diagnoses
   arguments that are neither pointers (__builtin_classify_type == 5
   identifies a pointer — GCC internal classification) nor at most
   4 bytes wide, via a link-time error (see the extern declarations
   above).  NOTE(review): the line that each level uses to assign its
   value into the target register (r3..r8), and the body of
   LOAD_ARGS_0, are elided from this listing.  */
181 # define LOAD_ARGS_0(name, dummy) \
183 # define LOAD_ARGS_1(name, __arg1) \
184 long int arg1 = (long int) (__arg1); \
185 LOAD_ARGS_0(name, 0); \
186 if (__builtin_classify_type (__arg1) != 5 && sizeof (__arg1) > 4) \
187 __illegally_sized_syscall_arg1 (); \
189 # define LOAD_ARGS_2(name, __arg1, __arg2) \
190 long int arg2 = (long int) (__arg2); \
191 LOAD_ARGS_1(name, __arg1); \
192 if (__builtin_classify_type (__arg2) != 5 && sizeof (__arg2) > 4) \
193 __illegally_sized_syscall_arg2 (); \
195 # define LOAD_ARGS_3(name, __arg1, __arg2, __arg3) \
196 long int arg3 = (long int) (__arg3); \
197 LOAD_ARGS_2(name, __arg1, __arg2); \
198 if (__builtin_classify_type (__arg3) != 5 && sizeof (__arg3) > 4) \
199 __illegally_sized_syscall_arg3 (); \
201 # define LOAD_ARGS_4(name, __arg1, __arg2, __arg3, __arg4) \
202 long int arg4 = (long int) (__arg4); \
203 LOAD_ARGS_3(name, __arg1, __arg2, __arg3); \
204 if (__builtin_classify_type (__arg4) != 5 && sizeof (__arg4) > 4) \
205 __illegally_sized_syscall_arg4 (); \
207 # define LOAD_ARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \
208 long int arg5 = (long int) (__arg5); \
209 LOAD_ARGS_4(name, __arg1, __arg2, __arg3, __arg4); \
210 if (__builtin_classify_type (__arg5) != 5 && sizeof (__arg5) > 4) \
211 __illegally_sized_syscall_arg5 (); \
213 # define LOAD_ARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \
214 long int arg6 = (long int) (__arg6); \
215 LOAD_ARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \
216 if (__builtin_classify_type (__arg6) != 5 && sizeof (__arg6) > 4) \
217 __illegally_sized_syscall_arg6 (); \
/* ASM_INPUT_N builds the inline-asm input operand list for N-argument
   calls by chaining: each level adds one register, tied by matching
   constraint ("0", "1", ...) to the corresponding output operand so
   r0/r3..r8 are both read and written by the syscall sequence.  */
220 # define ASM_INPUT_0 "0" (r0)
221 # define ASM_INPUT_1 ASM_INPUT_0, "1" (r3)
222 # define ASM_INPUT_2 ASM_INPUT_1, "2" (r4)
223 # define ASM_INPUT_3 ASM_INPUT_2, "3" (r5)
224 # define ASM_INPUT_4 ASM_INPUT_3, "4" (r6)
225 # define ASM_INPUT_5 ASM_INPUT_4, "5" (r7)
226 # define ASM_INPUT_6 ASM_INPUT_5, "6" (r8)
228 #endif /* __ASSEMBLER__ */
231 /* Pointer mangling support. */
232 #if defined NOT_IN_libc && defined IS_IN_rtld
233 /* We cannot use the thread descriptor because in ld.so we use setjmp
234 earlier than the descriptor is initialized. */
236 # ifdef __ASSEMBLER__
/* Assembler variants: fetch the pointer guard from POINTER_GUARD(r2)
   (r2 is the TLS pointer on powerpc32) into TMPREG, then XOR it with
   the pointer.  XOR is self-inverse, so PTR_DEMANGLE* simply reuse
   PTR_MANGLE*.  NOTE(review): the xor line of PTR_MANGLE and the
   whole body of PTR_MANGLE2 are elided from this listing, as is the
   '# else' that separates the assembler and C variants below.  */
237 # define PTR_MANGLE(reg, tmpreg) \
238 lwz tmpreg,POINTER_GUARD(r2); \
240 # define PTR_MANGLE2(reg, tmpreg) \
242 # define PTR_MANGLE3(destreg, reg, tmpreg) \
243 lwz tmpreg,POINTER_GUARD(r2); \
244 xor destreg,tmpreg,reg
245 # define PTR_DEMANGLE(reg, tmpreg) PTR_MANGLE (reg, tmpreg)
246 # define PTR_DEMANGLE2(reg, tmpreg) PTR_MANGLE2 (reg, tmpreg)
247 # define PTR_DEMANGLE3(destreg, reg, tmpreg) PTR_MANGLE3 (destreg, reg, tmpreg)
/* C variant: XOR the pointer with the per-thread guard value.  */
249 # define PTR_MANGLE(var) \
250 (var) = (__typeof (var)) ((uintptr_t) (var) ^ THREAD_GET_POINTER_GUARD ())
251 # define PTR_DEMANGLE(var) PTR_MANGLE (var)
255 #endif /* _BITS_SYSCALLS_H */