2 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
4 * Licensed under LGPL v2.1 or later, see the file COPYING.LIB in this tarball.
7 #ifndef _BITS_SYSCALLS_H
8 #define _BITS_SYSCALLS_H
10 #error "Never use <bits/syscalls.h> directly; include <sys/syscall.h> instead."
18 * Fine tuned code for errno handling in syscall wrappers.
20 * 1. __syscall_error(raw_syscall_ret_val) is used to set the errno (vs.
21 * the typical __set_errno). This helps elide the generated code for
22 * GOT fetch for __errno_location pointer etc, in each wrapper.
24 * 2. The call to above is also disguised in inline asm. This elides
25 * unconditional save/restore of a few callee regs which gcc almost
26 * always generates if the call is exposed
* 3. The function can't be hidden because wrappers from librt et al. also
29 * call it. However hidden is not really needed to bypass PLT for
30 * intra-libc calls as the branch insn w/o @plt is sufficient.
34 /* ldso doesn't have real errno */
35 #define ERRNO_ERRANDS(_sys_result)
36 #else /* !IS_IN_rtld */
/* Sets errno from the raw negative syscall return value (takes the raw
   value directly so each wrapper avoids a GOT fetch of __errno_location;
   see note above).  NOTE(review): presumably returns -1 for the wrapper
   to hand back -- confirm against the __syscall_error implementation.  */
extern int __syscall_error (int);
39 /* Inter-libc callers use PLT */
40 #define CALL_ERRNO_SETTER "bl __syscall_error@plt \n\t"
42 /* intra-libc callers, despite PIC can bypass PLT */
43 #define CALL_ERRNO_SETTER "bl __syscall_error \n\t"
46 #define ERRNO_ERRANDS(_sys_result) \
48 "st.a blink, [sp, -4] \n\t" \
50 "ld.ab blink, [sp, 4] \n\t" \
53 :"r1","r2","r3","r4","r5","r6", \
54 "r7","r8","r9","r10","r11","r12" \
57 #endif /* IS_IN_rtld */
59 /* Invoke the syscall and return unprocessed kernel status */
/* Constant-name flavour: translate the symbolic name @nm to a syscall
   number via SYS_ify and delegate to the non-constant (NCS) flavour.  */
#define INTERNAL_SYSCALL(nm, err, nr, args...) \
INTERNAL_SYSCALL_NCS(SYS_ify (nm), err, nr, args)
/* The kernel reports failure with raw values -1 .. -1023.  Viewed as
   unsigned, that error band is exactly the values >= (unsigned int) -1023,
   so a single unsigned compare classifies the return.  @err is unused.  */
#define INTERNAL_SYSCALL_ERROR_P(val, err) \
((unsigned int) (val) >= (unsigned int) -1023)
* Standard syscall wrapper:
69 * -"const" syscall number @nm, sets errno, return success/error-codes
71 #define INLINE_SYSCALL(nm, nr_args, args...) \
73 register int __res __asm__("r0"); \
74 __res = INTERNAL_SYSCALL(nm, , nr_args, args); \
75 if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P ((__res), ), 0)) \
77 ERRNO_ERRANDS(__res); \
82 /* Non const syscall number @nm
83 * Ideally this could be folded within INLINE_SYSCALL with
84 * __builtin_constant_p in INTERNAL_SYSCALL but that fails for syscall.c
86 #define INLINE_SYSCALL_NCS(nm, nr_args, args...) \
88 register int __res __asm__("r0"); \
89 __res = INTERNAL_SYSCALL_NCS(nm, , nr_args, args); \
90 if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P ((__res), ), 0)) \
92 ERRNO_ERRANDS(__res); \
/* Syscall wrapper that never touches errno: issue the syscall and hand
   the raw kernel status back to the caller as a plain int.  */
#define INLINE_SYSCALL_NOERR(name, nr, args...) \
({ unsigned int __sys_res = INTERNAL_SYSCALL (name, , nr, args); \
   (int) __sys_res; })
101 /*-------------------------------------------------------------------------
102 * Mechanics of Trap - specific to ARC700
104 * Note the memory clobber is not strictly needed for intended semantics of
105 * the inline asm. However some of the cases, such as old-style 6 arg mmap
106 * gcc was generating code for inline syscall ahead of buffer packing needed
107 * for syscall itself.
108 *-------------------------------------------------------------------------*/
/* Syscall trap instruction as an asm-string fragment, for splicing into
   the C inline-asm templates below.  */
#define ARC_TRAP_INSN "trap0 \n\t"
112 #define INTERNAL_SYSCALL_NCS(nm, err, nr_args, args...) \
114 /* Per ABI, r0 is 1st arg and return reg */ \
115 register int __ret __asm__("r0"); \
116 register int _sys_num __asm__("r8"); \
118 LOAD_ARGS_##nr_args (nm, args) \
123 : "r"(_sys_num) ASM_ARGS_##nr_args \
129 /* Macros for setting up inline __asm__ input regs */
/* Each level appends one more register input operand.  Args 2..7 live in
   the _argN register variables (r1..r6); arg1 shares r0 with the return
   value, hence ASM_ARGS_1 lists __ret itself as an input.  */
#define ASM_ARGS_1 ASM_ARGS_0, "r" (__ret)
#define ASM_ARGS_2 ASM_ARGS_1, "r" (_arg2)
#define ASM_ARGS_3 ASM_ARGS_2, "r" (_arg3)
#define ASM_ARGS_4 ASM_ARGS_3, "r" (_arg4)
#define ASM_ARGS_5 ASM_ARGS_4, "r" (_arg5)
#define ASM_ARGS_6 ASM_ARGS_5, "r" (_arg6)
#define ASM_ARGS_7 ASM_ARGS_6, "r" (_arg7)
139 /* Macros for converting sys-call wrapper args into sys call args */
#define LOAD_ARGS_0(nm, arg) \
_sys_num = (int) (nm); \
/* Per the ABI, r0 (__ret) doubles as the first syscall argument and the return value. */
#define LOAD_ARGS_1(nm, arg1) \
__ret = (int) (arg1); \
LOAD_ARGS_0 (nm, arg1)
* Note that the use of _tmpX might look superfluous, however it is needed
149 * to ensure that register variables are not clobbered if arg happens to be
150 * a function call itself. e.g. sched_setaffinity() calling getpid() for arg2
152 * Also this specific order of recursive calling is important to segregate
* the tmp args evaluation (function call case described above) and assignment
154 * of register variables
/* Args 2..7 bind to r1..r6.  Each level first evaluates its own arg into
   a plain temporary, then recurses, and only then pins the register
   variable -- this ordering is deliberate; see the note above.  */
#define LOAD_ARGS_2(nm, arg1, arg2) \
int _tmp2 = (int) (arg2); \
LOAD_ARGS_1 (nm, arg1) \
register int _arg2 __asm__ ("r1") = _tmp2;
#define LOAD_ARGS_3(nm, arg1, arg2, arg3) \
int _tmp3 = (int) (arg3); \
LOAD_ARGS_2 (nm, arg1, arg2) \
register int _arg3 __asm__ ("r2") = _tmp3;
#define LOAD_ARGS_4(nm, arg1, arg2, arg3, arg4) \
int _tmp4 = (int) (arg4); \
LOAD_ARGS_3 (nm, arg1, arg2, arg3) \
register int _arg4 __asm__ ("r3") = _tmp4;
#define LOAD_ARGS_5(nm, arg1, arg2, arg3, arg4, arg5) \
int _tmp5 = (int) (arg5); \
LOAD_ARGS_4 (nm, arg1, arg2, arg3, arg4) \
register int _arg5 __asm__ ("r4") = _tmp5;
#define LOAD_ARGS_6(nm, arg1, arg2, arg3, arg4, arg5, arg6) \
int _tmp6 = (int) (arg6); \
LOAD_ARGS_5 (nm, arg1, arg2, arg3, arg4, arg5) \
register int _arg6 __asm__ ("r5") = _tmp6;
#define LOAD_ARGS_7(nm, arg1, arg2, arg3, arg4, arg5, arg6, arg7)\
int _tmp7 = (int) (arg7); \
LOAD_ARGS_6 (nm, arg1, arg2, arg3, arg4, arg5, arg6) \
register int _arg7 __asm__ ("r6") = _tmp7;
/* Assembler sources get the bare mnemonic (no string quoting). */
#define ARC_TRAP_INSN trap0
190 #endif /* __ASSEMBLER__ */
192 #endif /* _BITS_SYSCALLS_H */