1 /* Copyright (c) 2005-2008, Google Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * Author: Markus Gutschke
34 /* This file includes Linux-specific support functions common to the
35 * coredumper and the thread lister; primarily, this is a collection
36 * of direct system calls, and a couple of symbols missing from
37 * standard header files.
38 * There are a few options that the including file can set to control
39 * the behavior of this file:
42 * The entire header file will normally be wrapped in 'extern "C" { }',
43 * making it suitable for compilation as both C and C++ source. If you
44 * do not want to do this, you can set the SYS_CPLUSPLUS macro to inhibit
45 * the wrapping. N.B. doing so will suppress inclusion of all prerequisite
46 * system header files, too. It is the caller's responsibility to provide
47 * the necessary definitions.
50 * All system calls will update "errno" unless overridden by setting the
51 * SYS_ERRNO macro prior to including this file. SYS_ERRNO should be
55 * New symbols will be defined "static inline", unless overridden by
56 * the SYS_INLINE macro.
58 * SYS_LINUX_SYSCALL_SUPPORT_H
59 * This macro is used to avoid multiple inclusions of this header file.
60 * If you need to include this file more than once, make sure to
61 * unset SYS_LINUX_SYSCALL_SUPPORT_H before each inclusion.
64 * New system calls will have a prefix of "sys_" unless overridden by
65 * the SYS_PREFIX macro. Valid values for this macro are [0..9] which
66 * results in prefixes "sys[0..9]_". It is also possible to set this
67 * macro to -1, which avoids all prefixes.
69 * This file defines a few internal symbols that all start with "LSS_".
70 * Do not access these symbols from outside this file. They are not part
71 * of the supported API.
73 * NOTE: This is a stripped down version of the official opensource
74 * version of linux_syscall_support.h, which lives at
75 * http://code.google.com/p/linux-syscall-support/
76 * It includes only the syscalls that are used in perftools, plus a
77 * few extra. Here's the breakdown:
78 * 1) Perftools uses these: grep -rho 'sys_[a-z0-9_A-Z]* *(' src | sort -u
109 * 2) These are used as subroutines of the above:
110 * sys_getpid -- gettid
111 * sys_kill -- ptrace_detach
112 * sys_restore -- sigaction
113 * sys_restore_rt -- sigaction
114 * sys_socketcall -- socket
115 * sys_wait4 -- waitpid
116 * 3) I left these in even though they're not used. They either
117 * complement the above (write vs read) or are variants (rt_sigaction):
130 #ifndef SYS_LINUX_SYSCALL_SUPPORT_H
131 #define SYS_LINUX_SYSCALL_SUPPORT_H
133 /* We currently only support x86-32, x86-64, ARM, MIPS, and PPC on Linux.
134 * Porting to other related platforms should not be difficult.
136 #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
137 defined(__mips__) || defined(__PPC__)) && defined(__linux)
139 #ifndef SYS_CPLUSPLUS
141 /* Some system header files in older versions of gcc neglect to properly
142 * handle being included from C++. As it appears to be harmless to have
143 * multiple nested 'extern "C"' blocks, just add another one here.
152 #include <sys/ptrace.h>
153 #include <sys/resource.h>
154 #include <sys/time.h>
155 #include <sys/types.h>
158 #include <linux/unistd.h>
162 /* Include definitions of the ABI currently in use. */
168 /* As glibc often provides subtly incompatible data structures (and implicit
169 * wrapper functions that convert them), we provide our own kernel data
170 * structures for use by the system calls.
171 * These structures have been developed by using Linux 2.6.23 headers for
172 * reference. Note though, we do not care about exact API compatibility
173 * with the kernel, and in fact the kernel often does not have a single
174 * API that works across architectures. Instead, we try to mimic the glibc
175 * API where reasonable, and only guarantee ABI compatibility with the
177 * Most notably, here are a few changes that were made to the structures
178 * defined by kernel headers:
180 * - we only define structures, but not symbolic names for kernel data
181 * types. For the latter, we directly use the native C datatype
182 * (i.e. "unsigned" instead of "mode_t").
183 * - in a few cases, it is possible to define identical structures for
184 * both 32bit (e.g. i386) and 64bit (e.g. x86-64) platforms by
185 * standardizing on the 64bit version of the data types. In particular,
186 * this means that we use "unsigned" where the 32bit headers say
188 * - overall, we try to minimize the number of cases where we need to
189 * conditionally define different structures.
190 * - the "struct kernel_sigaction" class of structures have been
191 * modified to more closely mimic glibc's API by introducing an
192 * anonymous union for the function pointer.
193 * - a small number of field names had to have an underscore appended to
194 * them, because glibc defines a global macro by the same name.
197 /* include/linux/dirent.h */
198 struct kernel_dirent64 {
199 unsigned long long d_ino;
201 unsigned short d_reclen;
202 unsigned char d_type;
206 /* include/linux/dirent.h */
207 struct kernel_dirent {
210 unsigned short d_reclen;
214 /* include/linux/time.h */
215 struct kernel_timespec {
220 /* include/linux/time.h */
221 struct kernel_timeval {
226 /* include/linux/resource.h */
227 struct kernel_rusage {
228 struct kernel_timeval ru_utime;
229 struct kernel_timeval ru_stime;
246 #if defined(__i386__) || defined(__arm__) || defined(__PPC__)
248 /* include/asm-{arm,i386,mips,ppc}/signal.h */
249 struct kernel_old_sigaction {
251 void (*sa_handler_)(int);
252 void (*sa_sigaction_)(int, siginfo_t *, void *);
254 unsigned long sa_mask;
255 unsigned long sa_flags;
256 void (*sa_restorer)(void);
257 } __attribute__((packed,aligned(4)));
258 #elif (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
259 #define kernel_old_sigaction kernel_sigaction
262 /* Some kernel functions (e.g. sigaction() in 2.6.23) require that the
263 * exactly match the size of the signal set, even though the API was
264 * intended to be extensible. We define our own KERNEL_NSIG to deal with
266 * Please note that glibc provides signals [1.._NSIG-1], whereas the
267 * kernel (and this header) provides the range [1..KERNEL_NSIG]. The
268 * actual number of signals is obviously the same, but the constants
272 #define KERNEL_NSIG 128
274 #define KERNEL_NSIG 64
277 /* include/asm-{arm,i386,mips,x86_64}/signal.h */
278 struct kernel_sigset_t {
279 unsigned long sig[(KERNEL_NSIG + 8*sizeof(unsigned long) - 1)/
280 (8*sizeof(unsigned long))];
283 /* include/asm-{arm,i386,mips,x86_64,ppc}/signal.h */
284 struct kernel_sigaction {
286 unsigned long sa_flags;
288 void (*sa_handler_)(int);
289 void (*sa_sigaction_)(int, siginfo_t *, void *);
291 struct kernel_sigset_t sa_mask;
294 void (*sa_handler_)(int);
295 void (*sa_sigaction_)(int, siginfo_t *, void *);
297 unsigned long sa_flags;
298 void (*sa_restorer)(void);
299 struct kernel_sigset_t sa_mask;
303 /* include/asm-{arm,i386,mips,ppc}/stat.h */
305 #if _MIPS_SIM == _MIPS_SIM_ABI64
308 struct kernel_stat64 {
312 unsigned long long st_ino;
321 unsigned st_atime_nsec_;
323 unsigned st_mtime_nsec_;
325 unsigned st_ctime_nsec_;
328 unsigned long long st_blocks;
330 #elif defined __PPC__
331 struct kernel_stat64 {
332 unsigned long long st_dev;
333 unsigned long long st_ino;
338 unsigned long long st_rdev;
339 unsigned short int __pad2;
344 unsigned long st_atime_nsec_;
346 unsigned long st_mtime_nsec_;
348 unsigned long st_ctime_nsec_;
349 unsigned long __unused4;
350 unsigned long __unused5;
353 struct kernel_stat64 {
354 unsigned long long st_dev;
355 unsigned char __pad0[4];
361 unsigned long long st_rdev;
362 unsigned char __pad3[4];
365 unsigned long long st_blocks;
367 unsigned st_atime_nsec_;
369 unsigned st_mtime_nsec_;
371 unsigned st_ctime_nsec_;
372 unsigned long long st_ino;
376 /* include/asm-{arm,i386,mips,x86_64,ppc}/stat.h */
377 #if defined(__i386__) || defined(__arm__)
379 /* The kernel headers suggest that st_dev and st_rdev should be 32bit
380 * quantities encoding 12bit major and 20bit minor numbers in an interleaved
381 * format. In reality, we do not see useful data in the top bits. So,
382 * we'll leave the padding in here, until we find a better solution.
384 unsigned short st_dev;
387 unsigned short st_mode;
388 unsigned short st_nlink;
389 unsigned short st_uid;
390 unsigned short st_gid;
391 unsigned short st_rdev;
397 unsigned st_atime_nsec_;
399 unsigned st_mtime_nsec_;
401 unsigned st_ctime_nsec_;
405 #elif defined(__x86_64__)
407 unsigned long st_dev;
408 unsigned long st_ino;
409 unsigned long st_nlink;
414 unsigned long st_rdev;
418 unsigned long st_atime_;
419 unsigned long st_atime_nsec_;
420 unsigned long st_mtime_;
421 unsigned long st_mtime_nsec_;
422 unsigned long st_ctime_;
423 unsigned long st_ctime_nsec_;
426 #elif defined(__PPC__)
429 unsigned long st_ino; // ino_t
430 unsigned long st_mode; // mode_t
431 unsigned short st_nlink; // nlink_t
432 unsigned st_uid; // uid_t
433 unsigned st_gid; // gid_t
435 long st_size; // off_t
436 unsigned long st_blksize;
437 unsigned long st_blocks;
438 unsigned long st_atime_;
439 unsigned long st_atime_nsec_;
440 unsigned long st_mtime_;
441 unsigned long st_mtime_nsec_;
442 unsigned long st_ctime_;
443 unsigned long st_ctime_nsec_;
444 unsigned long __unused4;
445 unsigned long __unused5;
447 #elif (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI64)
473 /* Definitions missing from the standard header files */
476 #define O_DIRECTORY 0040000
478 #define O_DIRECTORY 0200000
481 #ifndef PR_GET_DUMPABLE
482 #define PR_GET_DUMPABLE 3
484 #ifndef PR_SET_DUMPABLE
485 #define PR_SET_DUMPABLE 4
488 #define AT_FDCWD (-100)
490 #ifndef AT_SYMLINK_NOFOLLOW
491 #define AT_SYMLINK_NOFOLLOW 0x100
494 #define AT_REMOVEDIR 0x200
497 #define MREMAP_FIXED 2
500 #define SA_RESTORER 0x04000000
503 #if defined(__i386__)
504 #ifndef __NR_rt_sigaction
505 #define __NR_rt_sigaction 174
506 #define __NR_rt_sigprocmask 175
509 #define __NR_stat64 195
512 #define __NR_fstat64 197
514 #ifndef __NR_getdents64
515 #define __NR_getdents64 220
518 #define __NR_gettid 224
521 #define __NR_futex 240
524 #define __NR_openat 295
527 #define __NR_getcpu 318
529 /* End of i386 definitions */
530 #elif defined(__arm__)
532 #if defined(__thumb__) || defined(__ARM_EABI__)
533 #define __SYS_REG(name) register long __sysreg __asm__("r6") = __NR_##name;
534 #define __SYS_REG_LIST(regs...) [sysreg] "r" (__sysreg) , ##regs
535 #define __syscall(name) "swi\t0"
536 #define __syscall_safe(name) \
538 "mov r7,%[sysreg]\n" \
539 __syscall(name)"\n" \
542 #define __SYS_REG(name)
543 #define __SYS_REG_LIST(regs...) regs
544 #define __syscall(name) "swi\t" __sys1(__NR_##name) ""
545 #define __syscall_safe(name) __syscall(name)
548 #ifndef __NR_rt_sigaction
549 #define __NR_rt_sigaction (__NR_SYSCALL_BASE + 174)
550 #define __NR_rt_sigprocmask (__NR_SYSCALL_BASE + 175)
553 #define __NR_stat64 (__NR_SYSCALL_BASE + 195)
556 #define __NR_fstat64 (__NR_SYSCALL_BASE + 197)
558 #ifndef __NR_getdents64
559 #define __NR_getdents64 (__NR_SYSCALL_BASE + 217)
562 #define __NR_gettid (__NR_SYSCALL_BASE + 224)
565 #define __NR_futex (__NR_SYSCALL_BASE + 240)
567 /* End of ARM definitions */
568 #elif defined(__x86_64__)
570 #define __NR_gettid 186
573 #define __NR_futex 202
575 #ifndef __NR_getdents64
576 #define __NR_getdents64 217
579 #define __NR_openat 257
581 /* End of x86-64 definitions */
582 #elif defined(__mips__)
583 #if _MIPS_SIM == _MIPS_SIM_ABI32
584 #ifndef __NR_rt_sigaction
585 #define __NR_rt_sigaction (__NR_Linux + 194)
586 #define __NR_rt_sigprocmask (__NR_Linux + 195)
589 #define __NR_stat64 (__NR_Linux + 213)
592 #define __NR_fstat64 (__NR_Linux + 215)
594 #ifndef __NR_getdents64
595 #define __NR_getdents64 (__NR_Linux + 219)
598 #define __NR_gettid (__NR_Linux + 222)
601 #define __NR_futex (__NR_Linux + 238)
604 #define __NR_openat (__NR_Linux + 288)
607 #define __NR_fstatat (__NR_Linux + 293)
610 #define __NR_getcpu (__NR_Linux + 312)
612 /* End of MIPS (old 32bit API) definitions */
613 #elif _MIPS_SIM == _MIPS_SIM_ABI64
615 #define __NR_gettid (__NR_Linux + 178)
618 #define __NR_futex (__NR_Linux + 194)
621 #define __NR_openat (__NR_Linux + 247)
624 #define __NR_fstatat (__NR_Linux + 252)
627 #define __NR_getcpu (__NR_Linux + 271)
629 /* End of MIPS (64bit API) definitions */
632 #define __NR_gettid (__NR_Linux + 178)
635 #define __NR_futex (__NR_Linux + 194)
638 #define __NR_openat (__NR_Linux + 251)
641 #define __NR_fstatat (__NR_Linux + 256)
644 #define __NR_getcpu (__NR_Linux + 275)
646 /* End of MIPS (new 32bit API) definitions */
648 /* End of MIPS definitions */
649 #elif defined(__PPC__)
650 #ifndef __NR_rt_sigaction
651 #define __NR_rt_sigaction 173
652 #define __NR_rt_sigprocmask 174
655 #define __NR_stat64 195
658 #define __NR_fstat64 197
660 #ifndef __NR_getdents64
661 #define __NR_getdents64 202
664 #define __NR_gettid 207
667 #define __NR_futex 221
670 #define __NR_openat 286
673 #define __NR_getcpu 302
675 /* End of powerpc definitions */
679 /* After forking, we must make sure to only call system calls. */
680 #if __BOUNDED_POINTERS__
681 #error "Need to port invocations of syscalls for bounded ptrs"
683 /* The core dumper and the thread lister get executed after threads
684 * have been suspended. As a consequence, we cannot call any functions
685 * that acquire locks. Unfortunately, libc wraps most system calls
686 * (e.g. in order to implement pthread_atfork, and to make calls
687 * cancellable), which means we cannot call these functions. Instead,
688 * we have to call syscall() directly.
692 /* Allow the including file to override the location of errno. This can
693 * be useful when using clone() with the CLONE_VM option.
695 #define LSS_ERRNO SYS_ERRNO
697 #define LSS_ERRNO errno
702 #define LSS_INLINE SYS_INLINE
704 #define LSS_INLINE static inline
707 /* Allow the including file to override the prefix used for all new
708 * system calls. By default, it will be set to "sys_".
712 #define LSS_NAME(name) sys_##name
714 #define LSS_NAME(name) name
715 #elif SYS_PREFIX == 0
716 #define LSS_NAME(name) sys0_##name
717 #elif SYS_PREFIX == 1
718 #define LSS_NAME(name) sys1_##name
719 #elif SYS_PREFIX == 2
720 #define LSS_NAME(name) sys2_##name
721 #elif SYS_PREFIX == 3
722 #define LSS_NAME(name) sys3_##name
723 #elif SYS_PREFIX == 4
724 #define LSS_NAME(name) sys4_##name
725 #elif SYS_PREFIX == 5
726 #define LSS_NAME(name) sys5_##name
727 #elif SYS_PREFIX == 6
728 #define LSS_NAME(name) sys6_##name
729 #elif SYS_PREFIX == 7
730 #define LSS_NAME(name) sys7_##name
731 #elif SYS_PREFIX == 8
732 #define LSS_NAME(name) sys8_##name
733 #elif SYS_PREFIX == 9
734 #define LSS_NAME(name) sys9_##name
738 #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__))
739 /* Failing system calls return a negative result in the range of
740 * -1..-4095. These are "errno" values with the sign inverted.
742 #define LSS_RETURN(type, res) \
744 if ((unsigned long)(res) >= (unsigned long)(-4095)) { \
745 LSS_ERRNO = -(res); \
748 return (type) (res); \
750 #elif defined(__mips__)
751 /* On MIPS, failing system calls return -1, and set errno in a
752 * separate CPU register.
754 #define LSS_RETURN(type, res, err) \
760 return (type) (res); \
762 #elif defined(__PPC__)
763 /* On PPC, failing system calls return -1, and set errno in a
764 * separate CPU register. See linux/unistd.h.
766 #define LSS_RETURN(type, res, err) \
768 if (err & 0x10000000 ) { \
772 return (type) (res); \
775 #if defined(__i386__)
776 #if defined(NO_FRAME_POINTER) && (100 * __GNUC__ + __GNUC_MINOR__ >= 404)
777 /* This only works for GCC-4.4 and above -- the first version to use
778 .cfi directives for dwarf unwind info. */
779 #define CFI_ADJUST_CFA_OFFSET(adjust) \
780 ".cfi_adjust_cfa_offset " #adjust "\n"
782 #define CFI_ADJUST_CFA_OFFSET(adjust) /**/
785 /* In PIC mode (e.g. when building shared libraries), gcc for i386
786 * reserves ebx. Unfortunately, most distributions ship with implementations
787 * of _syscallX() which clobber ebx.
788 * Also, most definitions of _syscallX() neglect to mark "memory" as being
789 * clobbered. This causes problems with compilers that do a better job
790 * at optimizing across __asm__ calls.
791 * So, we just have to redefine all of the _syscallX() macros.
794 #define LSS_BODY(type,args...) \
796 __asm__ __volatile__("push %%ebx\n" \
797 CFI_ADJUST_CFA_OFFSET(4) \
801 CFI_ADJUST_CFA_OFFSET(-4) \
803 : "esp", "memory"); \
804 LSS_RETURN(type,__res)
806 #define _syscall0(type,name) \
807 type LSS_NAME(name)(void) { \
809 __asm__ volatile("int $0x80" \
811 : "0" (__NR_##name) \
813 LSS_RETURN(type,__res); \
816 #define _syscall1(type,name,type1,arg1) \
817 type LSS_NAME(name)(type1 arg1) { \
820 : "0" (__NR_##name), "ri" ((long)(arg1))); \
823 #define _syscall2(type,name,type1,arg1,type2,arg2) \
824 type LSS_NAME(name)(type1 arg1,type2 arg2) { \
827 : "0" (__NR_##name),"ri" ((long)(arg1)), "c" ((long)(arg2))); \
830 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
831 type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3) { \
834 : "0" (__NR_##name), "ri" ((long)(arg1)), "c" ((long)(arg2)), \
835 "d" ((long)(arg3))); \
838 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
839 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
842 : "0" (__NR_##name), "ri" ((long)(arg1)), "c" ((long)(arg2)), \
843 "d" ((long)(arg3)),"S" ((long)(arg4))); \
846 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
848 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
851 __asm__ __volatile__("push %%ebx\n" \
857 : "i" (__NR_##name), "ri" ((long)(arg1)), \
858 "c" ((long)(arg2)), "d" ((long)(arg3)), \
859 "S" ((long)(arg4)), "D" ((long)(arg5)) \
860 : "esp", "memory"); \
861 LSS_RETURN(type,__res); \
864 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
865 type5,arg5,type6,arg6) \
866 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
867 type5 arg5, type6 arg6) { \
869 struct { long __a1; long __a6; } __s = { (long)arg1, (long) arg6 }; \
870 __asm__ __volatile__("push %%ebp\n" \
872 "movl 4(%2),%%ebp\n" \
873 "movl 0(%2), %%ebx\n" \
879 : "i" (__NR_##name), "0" ((long)(&__s)), \
880 "c" ((long)(arg2)), "d" ((long)(arg3)), \
881 "S" ((long)(arg4)), "D" ((long)(arg5)) \
882 : "esp", "memory"); \
883 LSS_RETURN(type,__res); \
885 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
886 int flags, void *arg, int *parent_tidptr,
887 void *newtls, int *child_tidptr) {
889 __asm__ __volatile__(/* if (fn == NULL)
895 /* if (child_stack == NULL)
901 /* Set up alignment of the child stack:
902 * child_stack = (child_stack & ~0xF) - 20;
907 /* Push "arg" and "fn" onto the stack that will be
911 "movl %%eax,4(%%ecx)\n"
913 "movl %%eax,(%%ecx)\n"
915 /* %eax = syscall(%eax = __NR_clone,
917 * %ecx = child_stack,
918 * %edx = parent_tidptr,
920 * %edi = child_tidptr)
921 * Also, make sure that %ebx gets preserved as it is
933 /* In the parent: restore %ebx
934 * In the child: move "fn" into %ebx
944 /* In the child, now. Terminate frame pointer chain.
948 /* Call "fn". "arg" is already on the stack.
952 /* Call _exit(%ebx). Unfortunately older versions
953 * of gcc restrict the number of arguments that can
954 * be passed to asm(). So, we need to hard-code the
955 * system call number.
965 : "0"(-EINVAL), "i"(__NR_clone),
966 "m"(fn), "m"(child_stack), "m"(flags), "m"(arg),
967 "m"(parent_tidptr), "m"(newtls), "m"(child_tidptr)
968 : "esp", "memory", "ecx", "edx", "esi", "edi");
969 LSS_RETURN(int, __res);
972 LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) {
973 /* On i386, the kernel does not know how to return from a signal
974 * handler. Instead, it relies on user space to provide a
975 * restorer function that calls the {rt_,}sigreturn() system call.
976 * Unfortunately, we cannot just reference the glibc version of this
977 * function, as glibc goes out of its way to make it inaccessible.
980 __asm__ __volatile__("call 2f\n"
987 : "i" (__NR_rt_sigreturn));
990 LSS_INLINE void (*LSS_NAME(restore)(void))(void) {
991 /* On i386, the kernel does not know how to return from a signal
992 * handler. Instead, it relies on user space to provide a
993 * restorer function that calls the {rt_,}sigreturn() system call.
994 * Unfortunately, we cannot just reference the glibc version of this
995 * function, as glibc goes out of its way to make it inaccessible.
998 __asm__ __volatile__("call 2f\n"
1004 "addl $(1b-0b),%0\n"
1006 : "i" (__NR_sigreturn));
1009 #elif defined(__x86_64__)
1010 /* There are no known problems with any of the _syscallX() macros
1011 * currently shipping for x86_64, but we still need to be able to define
1012 * our own version so that we can override the location of the errno
1013 * location (e.g. when using the clone() system call with the CLONE_VM
1017 #define LSS_BODY(type,name, ...) \
1019 __asm__ __volatile__("syscall" : "=a" (__res) : "0" (__NR_##name), \
1020 ##__VA_ARGS__ : "r11", "rcx", "memory"); \
1021 LSS_RETURN(type, __res)
1023 #define _syscall0(type,name) \
1024 type LSS_NAME(name)() { \
1025 LSS_BODY(type, name); \
1028 #define _syscall1(type,name,type1,arg1) \
1029 type LSS_NAME(name)(type1 arg1) { \
1030 LSS_BODY(type, name, "D" ((long)(arg1))); \
1033 #define _syscall2(type,name,type1,arg1,type2,arg2) \
1034 type LSS_NAME(name)(type1 arg1, type2 arg2) { \
1035 LSS_BODY(type, name, "D" ((long)(arg1)), "S" ((long)(arg2))); \
1038 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
1039 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \
1040 LSS_BODY(type, name, "D" ((long)(arg1)), "S" ((long)(arg2)), \
1041 "d" ((long)(arg3))); \
1044 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
1045 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
1047 __asm__ __volatile__("movq %5,%%r10; syscall" : \
1048 "=a" (__res) : "0" (__NR_##name), \
1049 "D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \
1050 "r" ((long)(arg4)) : "r10", "r11", "rcx", "memory"); \
1051 LSS_RETURN(type, __res); \
1054 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
1056 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1059 __asm__ __volatile__("movq %5,%%r10; movq %6,%%r8; syscall" : \
1060 "=a" (__res) : "0" (__NR_##name), \
1061 "D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \
1062 "r" ((long)(arg4)), "r" ((long)(arg5)) : \
1063 "r8", "r10", "r11", "rcx", "memory"); \
1064 LSS_RETURN(type, __res); \
1067 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
1068 type5,arg5,type6,arg6) \
1069 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1070 type5 arg5, type6 arg6) { \
1072 __asm__ __volatile__("movq %5,%%r10; movq %6,%%r8; movq %7,%%r9;" \
1074 "=a" (__res) : "0" (__NR_##name), \
1075 "D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \
1076 "r" ((long)(arg4)), "r" ((long)(arg5)), "r" ((long)(arg6)) : \
1077 "r8", "r9", "r10", "r11", "rcx", "memory"); \
1078 LSS_RETURN(type, __res); \
1080 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
1081 int flags, void *arg, int *parent_tidptr,
1082 void *newtls, int *child_tidptr) {
1085 __asm__ __volatile__(/* if (fn == NULL)
1091 /* if (child_stack == NULL)
1097 /* Set up alignment of the child stack:
1098 * child_stack = (child_stack & ~0xF) - 16;
1103 /* Push "arg" and "fn" onto the stack that will be
1104 * used by the child.
1109 /* %rax = syscall(%rax = __NR_clone,
1111 * %rsi = child_stack,
1112 * %rdx = parent_tidptr,
1114 * %r10 = child_tidptr)
1124 "testq %%rax,%%rax\n"
1127 /* In the child. Terminate frame pointer chain.
1129 "xorq %%rbp,%%rbp\n"
1137 /* Call _exit(%ebx).
1139 "movq %%rax,%%rdi\n"
1143 /* Return to parent.
1147 : "0"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit),
1148 "r"(fn), "S"(child_stack), "D"(flags), "r"(arg),
1149 "d"(parent_tidptr), "g"(newtls), "g"(child_tidptr)
1150 : "rsp", "memory", "r8", "r10", "r11", "rcx");
1152 LSS_RETURN(int, __res);
1155 LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) {
1156 /* On x86-64, the kernel does not know how to return from
1157 * a signal handler. Instead, it relies on user space to provide a
1158 * restorer function that calls the rt_sigreturn() system call.
1159 * Unfortunately, we cannot just reference the glibc version of this
1160 * function, as glibc goes out of its way to make it inaccessible.
1163 __asm__ __volatile__("call 2f\n"
1168 "addq $(1b-0b),%0\n"
1170 : "i" (__NR_rt_sigreturn));
1173 #elif defined(__arm__)
1174 /* Most definitions of _syscallX() neglect to mark "memory" as being
1175 * clobbered. This causes problems with compilers that do a better job
1176 * at optimizing across __asm__ calls.
1177 * So, we just have to redefine all of the _syscallX() macros.
1180 #define LSS_REG(r,a) register long __r##r __asm__("r"#r) = (long)a
1182 /* r0..r3 are scratch registers and not preserved across function
1183 * calls. We need to first evaluate the first 4 syscall arguments
1184 * and store them on stack. They must be loaded into r0..r3 after
1185 * all function calls to avoid r0..r3 being clobbered.
1188 #define LSS_SAVE_ARG(r,a) long __tmp##r = (long)a
1190 #define LSS_LOAD_ARG(r) register long __r##r __asm__("r"#r) = __tmp##r
1193 #define LSS_BODY(type, name, args...) \
1194 register long __res_r0 __asm__("r0"); \
1197 __asm__ __volatile__ (__syscall_safe(name) \
1199 : __SYS_REG_LIST(args) \
1200 : "lr", "memory"); \
1202 LSS_RETURN(type, __res)
1204 #define _syscall0(type, name) \
1205 type LSS_NAME(name)() { \
1206 LSS_BODY(type, name); \
1209 #define _syscall1(type, name, type1, arg1) \
1210 type LSS_NAME(name)(type1 arg1) { \
1211 /* There is no need for using a volatile temp. */ \
1213 LSS_BODY(type, name, "r"(__r0)); \
1216 #define _syscall2(type, name, type1, arg1, type2, arg2) \
1217 type LSS_NAME(name)(type1 arg1, type2 arg2) { \
1218 LSS_SAVE_ARG(0, arg1); \
1219 LSS_SAVE_ARG(1, arg2); \
1222 LSS_BODY(type, name, "r"(__r0), "r"(__r1)); \
1225 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
1226 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \
1227 LSS_SAVE_ARG(0, arg1); \
1228 LSS_SAVE_ARG(1, arg2); \
1229 LSS_SAVE_ARG(2, arg3); \
1233 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2)); \
1236 #define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
1238 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
1239 LSS_SAVE_ARG(0, arg1); \
1240 LSS_SAVE_ARG(1, arg2); \
1241 LSS_SAVE_ARG(2, arg3); \
1242 LSS_SAVE_ARG(3, arg4); \
1247 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3)); \
1250 #define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
1251 type4, arg4, type5, arg5) \
1252 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1254 LSS_SAVE_ARG(0, arg1); \
1255 LSS_SAVE_ARG(1, arg2); \
1256 LSS_SAVE_ARG(2, arg3); \
1257 LSS_SAVE_ARG(3, arg4); \
1263 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \
1267 #define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
1268 type4, arg4, type5, arg5, type6, arg6) \
1269 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1270 type5 arg5, type6 arg6) { \
1271 LSS_SAVE_ARG(0, arg1); \
1272 LSS_SAVE_ARG(1, arg2); \
1273 LSS_SAVE_ARG(2, arg3); \
1274 LSS_SAVE_ARG(3, arg4); \
1281 LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \
1282 "r"(__r4), "r"(__r5)); \
1284 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
1285 int flags, void *arg, int *parent_tidptr,
1286 void *newtls, int *child_tidptr) {
1287 register long __res __asm__("r5");
1289 if (fn == NULL || child_stack == NULL) {
1294 /* stash first 4 arguments on stack first because we can only load
1295 * them after all function calls.
1297 int tmp_flags = flags;
1298 int * tmp_stack = (int*) child_stack;
1299 void * tmp_ptid = parent_tidptr;
1300 void * tmp_tls = newtls;
1302 register int *__ctid __asm__("r4") = child_tidptr;
1304 /* Push "arg" and "fn" onto the stack that will be
1305 * used by the child.
1307 *(--tmp_stack) = (int) arg;
1308 *(--tmp_stack) = (int) fn;
1310 /* We must load r0..r3 last after all possible function calls. */
1311 register int __flags __asm__("r0") = tmp_flags;
1312 register void *__stack __asm__("r1") = tmp_stack;
1313 register void *__ptid __asm__("r2") = tmp_ptid;
1314 register void *__tls __asm__("r3") = tmp_tls;
1316 /* %r0 = syscall(%r0 = flags,
1317 * %r1 = child_stack,
1318 * %r2 = parent_tidptr,
1320 * %r4 = child_tidptr)
1323 __asm__ __volatile__(/* %r0 = syscall(%r0 = flags,
1324 * %r1 = child_stack,
1325 * %r2 = parent_tidptr,
1327 * %r4 = child_tidptr)
1331 __syscall(clone)"\n"
1339 /* In the child, now. Call "fn(arg)".
1345 /* Call _exit(%r0), which never returns. We only
1346 * need to set r7 for EABI syscall ABI but we do
1347 * this always to simplify code sharing between
1348 * old and new syscall ABIs.
1353 /* Pop r7 from the stack only in the parent.
1358 "i"(__NR_exit), "r"(__stack), "r"(__flags),
1359 "r"(__ptid), "r"(__tls), "r"(__ctid)
1360 : "cc", "lr", "memory");
1363 LSS_RETURN(int, __res);
1365 #elif defined(__mips__)
1367 #define LSS_REG(r,a) register unsigned long __r##r __asm__("$"#r) = \
1370 #if _MIPS_SIM == _MIPS_SIM_ABI32
1371 // See http://sources.redhat.com/ml/libc-alpha/2004-10/msg00050.html
1372 // or http://www.linux-mips.org/archives/linux-mips/2004-10/msg00142.html
1373 #define MIPS_SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12",\
1374 "$13", "$14", "$15", "$24", "$25", "memory"
1376 #define MIPS_SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", "$13", \
1377 "$14", "$15", "$24", "$25", "memory"
1381 #define LSS_BODY(type,name,r7,...) \
1382 register unsigned long __v0 __asm__("$2") = __NR_##name; \
1383 __asm__ __volatile__ ("syscall\n" \
1384 : "=&r"(__v0), r7 (__r7) \
1385 : "0"(__v0), ##__VA_ARGS__ \
1386 : MIPS_SYSCALL_CLOBBERS); \
1387 LSS_RETURN(type, __v0, __r7)
/* MIPS stubs for syscalls taking 0..4 arguments: each binds its C
 * arguments to $4..$7 via LSS_REG and delegates to LSS_BODY.
 * NOTE(review): closing braces of each macro are elided in this extract.
 */
1389 #define _syscall0(type, name) \
1390 type LSS_NAME(name)() { \
1391 register unsigned long __r7 __asm__("$7"); \
1392 LSS_BODY(type, name, "=r"); \
1395 #define _syscall1(type, name, type1, arg1) \
1396 type LSS_NAME(name)(type1 arg1) { \
1397 register unsigned long __r7 __asm__("$7"); \
1398 LSS_REG(4, arg1); LSS_BODY(type, name, "=r", "r"(__r4)); \
1401 #define _syscall2(type, name, type1, arg1, type2, arg2) \
1402 type LSS_NAME(name)(type1 arg1, type2 arg2) { \
1403 register unsigned long __r7 __asm__("$7"); \
1404 LSS_REG(4, arg1); LSS_REG(5, arg2); \
1405 LSS_BODY(type, name, "=r", "r"(__r4), "r"(__r5)); \
1408 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
1409 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \
1410 register unsigned long __r7 __asm__("$7"); \
1411 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
1412 LSS_BODY(type, name, "=r", "r"(__r4), "r"(__r5), "r"(__r6)); \
/* _syscall4: the fourth argument lives in $7, which doubles as the error
 * register, hence the "+r" (read-write) constraint passed to LSS_BODY.
 */
1415 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
1416 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
1417 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
1419 LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6)); \
/* _syscall5/_syscall6 need ABI-specific handling: the o32 ABI passes
 * arguments five and six on the stack (hand-written asm stores them at
 * 16($29)/20($29)); n32/n64 simply use registers $8 and $9.
 * NOTE(review): several asm template lines are elided in this extract.
 */
1422 #if _MIPS_SIM == _MIPS_SIM_ABI32
1423 /* The old 32bit MIPS system call API passes the fifth and sixth argument
1424 * on the stack, whereas the new APIs use registers "r8" and "r9".
1426 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
1428 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1430 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
1432 register unsigned long __v0 __asm__("$2"); \
1433 __asm__ __volatile__ (".set noreorder\n" \
1436 "sw $2, 16($29)\n" \
1441 : "=&r"(__v0), "+r" (__r7) \
1442 : "i" (__NR_##name), "r"(__r4), "r"(__r5), \
1443 "r"(__r6), "m" ((unsigned long)arg5) \
1444 : MIPS_SYSCALL_CLOBBERS); \
1445 LSS_RETURN(type, __v0, __r7); \
/* n32/n64 variant: fifth argument goes straight into register $8. */
1448 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
1450 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1452 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
1453 LSS_REG(7, arg4); LSS_REG(8, arg5); \
1454 LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6), \
1459 #if _MIPS_SIM == _MIPS_SIM_ABI32
1460 /* The old 32bit MIPS system call API passes the fifth and sixth argument
1461 * on the stack, whereas the new APIs use registers "r8" and "r9".
1463 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
1464 type5,arg5,type6,arg6) \
1465 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1466 type5 arg5, type6 arg6) { \
1467 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
1469 register unsigned long __v0 __asm__("$2"); \
1470 __asm__ __volatile__ (".set noreorder\n" \
1474 "sw $2, 16($29)\n" \
1475 "sw $8, 20($29)\n" \
1480 : "=&r"(__v0), "+r" (__r7) \
1481 : "i" (__NR_##name), "r"(__r4), "r"(__r5), \
1482 "r"(__r6), "r" ((unsigned long)arg5), \
1483 "r" ((unsigned long)arg6) \
1484 : MIPS_SYSCALL_CLOBBERS); \
1485 LSS_RETURN(type, __v0, __r7); \
/* n32/n64 variant: arguments five and six in registers $8 and $9. */
1488 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
1489 type5,arg5,type6,arg6) \
1490 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1491 type5 arg5,type6 arg6) { \
1492 LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
1493 LSS_REG(7, arg4); LSS_REG(8, arg5); LSS_REG(9, arg6); \
1494 LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6), \
1495 "r"(__r8), "r"(__r9)); \
/* MIPS clone(): hand-written because the child starts on a fresh stack
 * and must call fn(arg) then _exit() without ever returning into the
 * parent's frame.  Register bindings visible below: flags in $4, child
 * stack in $5, parent_tidptr in $6, newtls in $7, child_tidptr in $8;
 * the result/error come back via $2/$7 and go through LSS_RETURN.
 * The _MIPS_SIM/_MIPS_SZPTR #ifs select 32- vs 64-bit load/store
 * sequences (those asm lines are elided in this extract).
 * NOTE(review): do not modify without the full asm body in view.
 */
1498 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
1499 int flags, void *arg, int *parent_tidptr,
1500 void *newtls, int *child_tidptr) {
1501 register unsigned long __v0 __asm__("$2");
1502 register unsigned long __r7 __asm__("$7") = (unsigned long)newtls;
1504 register int __flags __asm__("$4") = flags;
1505 register void *__stack __asm__("$5") = child_stack;
1506 register void *__ptid __asm__("$6") = parent_tidptr;
1507 register int *__ctid __asm__("$8") = child_tidptr;
1508 __asm__ __volatile__(
1509 #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32
1511 #elif _MIPS_SIM == _MIPS_SIM_NABI32
1517 /* if (fn == NULL || child_stack == NULL)
1524 /* Push "arg" and "fn" onto the stack that will be
1525 * used by the child.
1527 #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32
1531 #elif _MIPS_SIM == _MIPS_SIM_NABI32
1541 /* $7 = syscall($4 = flags,
1543 * $6 = parent_tidptr,
1545 * $8 = child_tidptr)
1556 /* In the child, now. Call "fn(arg)".
1558 #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32
1561 #elif _MIPS_SIM == _MIPS_SIM_NABI32
1577 #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32
1579 #elif _MIPS_SIM == _MIPS_SIM_NABI32
1584 : "=&r" (__v0), "=r" (__r7)
1585 : "i"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit),
1586 "r"(fn), "r"(__stack), "r"(__flags), "r"(arg),
1587 "r"(__ptid), "r"(__r7), "r"(__ctid)
1588 : "$9", "$10", "$11", "$12", "$13", "$14", "$15",
1591 LSS_RETURN(int, __v0, __r7);
1593 #elif defined (__PPC__)
/* PowerPC argument marshalling.  LSS_LOADARGS_n loads the syscall number
 * into __sc_0 (r0) and up to six arguments into __sc_3..__sc_8 (r3..r8);
 * each macro builds on the previous one.  The #undefs clear any earlier
 * definitions from other architecture branches of this header.
 */
1594 #undef LSS_LOADARGS_0
1595 #define LSS_LOADARGS_0(name, dummy...) \
1596 __sc_0 = __NR_##name
1597 #undef LSS_LOADARGS_1
1598 #define LSS_LOADARGS_1(name, arg1) \
1599 LSS_LOADARGS_0(name); \
1600 __sc_3 = (unsigned long) (arg1)
1601 #undef LSS_LOADARGS_2
1602 #define LSS_LOADARGS_2(name, arg1, arg2) \
1603 LSS_LOADARGS_1(name, arg1); \
1604 __sc_4 = (unsigned long) (arg2)
1605 #undef LSS_LOADARGS_3
1606 #define LSS_LOADARGS_3(name, arg1, arg2, arg3) \
1607 LSS_LOADARGS_2(name, arg1, arg2); \
1608 __sc_5 = (unsigned long) (arg3)
1609 #undef LSS_LOADARGS_4
1610 #define LSS_LOADARGS_4(name, arg1, arg2, arg3, arg4) \
1611 LSS_LOADARGS_3(name, arg1, arg2, arg3); \
1612 __sc_6 = (unsigned long) (arg4)
1613 #undef LSS_LOADARGS_5
1614 #define LSS_LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5) \
1615 LSS_LOADARGS_4(name, arg1, arg2, arg3, arg4); \
1616 __sc_7 = (unsigned long) (arg5)
1617 #undef LSS_LOADARGS_6
1618 #define LSS_LOADARGS_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
1619 LSS_LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5); \
1620 __sc_8 = (unsigned long) (arg6)
/* LSS_ASMINPUT_n expands to the matching asm input-constraint list,
 * tying __sc_0..__sc_8 to the output operands ("0".."6") of LSS_BODY.
 */
1621 #undef LSS_ASMINPUT_0
1622 #define LSS_ASMINPUT_0 "0" (__sc_0)
1623 #undef LSS_ASMINPUT_1
1624 #define LSS_ASMINPUT_1 LSS_ASMINPUT_0, "1" (__sc_3)
1625 #undef LSS_ASMINPUT_2
1626 #define LSS_ASMINPUT_2 LSS_ASMINPUT_1, "2" (__sc_4)
1627 #undef LSS_ASMINPUT_3
1628 #define LSS_ASMINPUT_3 LSS_ASMINPUT_2, "3" (__sc_5)
1629 #undef LSS_ASMINPUT_4
1630 #define LSS_ASMINPUT_4 LSS_ASMINPUT_3, "4" (__sc_6)
1631 #undef LSS_ASMINPUT_5
1632 #define LSS_ASMINPUT_5 LSS_ASMINPUT_4, "5" (__sc_7)
1633 #undef LSS_ASMINPUT_6
1634 #define LSS_ASMINPUT_6 LSS_ASMINPUT_5, "6" (__sc_8)
/* PPC LSS_BODY: binds r0 (syscall number) and r3..r8 (arguments), issues
 * the system call (the "sc" asm template lines are elided in this
 * extract), then hands the result (r3) and error flag (r0 copy) to
 * LSS_RETURN.  r9-r12, cr0, ctr and memory are declared clobbered.
 */
1636 #define LSS_BODY(nr, type, name, args...) \
1637 long __sc_ret, __sc_err; \
1639 register unsigned long __sc_0 __asm__ ("r0"); \
1640 register unsigned long __sc_3 __asm__ ("r3"); \
1641 register unsigned long __sc_4 __asm__ ("r4"); \
1642 register unsigned long __sc_5 __asm__ ("r5"); \
1643 register unsigned long __sc_6 __asm__ ("r6"); \
1644 register unsigned long __sc_7 __asm__ ("r7"); \
1645 register unsigned long __sc_8 __asm__ ("r8"); \
1647 LSS_LOADARGS_##nr(name, args); \
1648 __asm__ __volatile__ \
1652 "=&r" (__sc_3), "=&r" (__sc_4), \
1653 "=&r" (__sc_5), "=&r" (__sc_6), \
1654 "=&r" (__sc_7), "=&r" (__sc_8) \
1655 : LSS_ASMINPUT_##nr \
1656 : "cr0", "ctr", "memory", \
1657 "r9", "r10", "r11", "r12"); \
1658 __sc_ret = __sc_3; \
1659 __sc_err = __sc_0; \
1661 LSS_RETURN(type, __sc_ret, __sc_err)
/* PPC stubs for syscalls with 0..6 arguments: thin wrappers that forward
 * the argument count and arguments to LSS_BODY.
 * NOTE(review): each macro's closing brace line is elided in this extract.
 */
1663 #define _syscall0(type, name) \
1664 type LSS_NAME(name)(void) { \
1665 LSS_BODY(0, type, name); \
1668 #define _syscall1(type, name, type1, arg1) \
1669 type LSS_NAME(name)(type1 arg1) { \
1670 LSS_BODY(1, type, name, arg1); \
1673 #define _syscall2(type, name, type1, arg1, type2, arg2) \
1674 type LSS_NAME(name)(type1 arg1, type2 arg2) { \
1675 LSS_BODY(2, type, name, arg1, arg2); \
1678 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
1679 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \
1680 LSS_BODY(3, type, name, arg1, arg2, arg3); \
1683 #define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
1685 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
1686 LSS_BODY(4, type, name, arg1, arg2, arg3, arg4); \
1689 #define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
1690 type4, arg4, type5, arg5) \
1691 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1693 LSS_BODY(5, type, name, arg1, arg2, arg3, arg4, arg5); \
1696 #define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
1697 type4, arg4, type5, arg5, type6, arg6) \
1698 type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1699 type5 arg5, type6 arg6) { \
1700 LSS_BODY(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \
1702 /* clone function adapted from glibc 2.3.6 clone.S */
1703 /* TODO(csilvers): consider wrapping some args up in a struct, like we
1704 * do for i386's _syscall6, so we can compile successfully on gcc 2.95
/* PPC clone(): validates fn/child_stack, 16-byte-aligns the child stack
 * (clrrwi) and pushes a zero back-chain word, issues __NR_clone, then in
 * the child calls fn(arg) followed by _exit.  fn/arg are kept in r8/r9
 * so they survive the syscall; syscall arguments are already in r3..r7.
 * NOTE(review): several asm lines (the sc instruction, branches, child
 * call sequence) are elided in this extract — treat as read-only.
 */
1706 LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
1707 int flags, void *arg, int *parent_tidptr,
1708 void *newtls, int *child_tidptr) {
1711 register int (*__fn)(void *) __asm__ ("r8") = fn;
1712 register void *__cstack __asm__ ("r4") = child_stack;
1713 register int __flags __asm__ ("r3") = flags;
1714 register void * __arg __asm__ ("r9") = arg;
1715 register int * __ptidptr __asm__ ("r5") = parent_tidptr;
1716 register void * __newtls __asm__ ("r6") = newtls;
1717 register int * __ctidptr __asm__ ("r7") = child_tidptr;
1718 __asm__ __volatile__(
1719 /* check for fn == NULL
1720 * and child_stack == NULL
1722 "cmpwi cr0, %6, 0\n\t"
1723 "cmpwi cr1, %7, 0\n\t"
1724 "cror cr0*4+eq, cr1*4+eq, cr0*4+eq\n\t"
1727 /* set up stack frame for child */
1728 "clrrwi %7, %7, 4\n\t"
1730 "stwu 0, -16(%7)\n\t"
1732 /* fn, arg, child_stack are saved across the syscall: r28-30 */
1739 /* flags already in r3
1740 * child_stack already in r4
1741 * ptidptr already in r5
1742 * newtls already in r6
1743 * ctidptr already in r7
1747 /* Test if syscall was successful */
1748 "cmpwi cr1, 3, 0\n\t"
1749 "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
1752 /* Do the function call */
1757 /* Call _exit(r3) */
1761 /* Return to parent */
1765 : "=r" (__ret), "=r" (__err)
1766 : "0" (-1), "1" (EINVAL),
1767 "i" (__NR_clone), "i" (__NR_exit),
1768 "r" (__fn), "r" (__cstack), "r" (__flags),
1769 "r" (__arg), "r" (__ptidptr), "r" (__newtls),
1771 : "cr0", "cr1", "memory", "ctr",
1772 "r0", "r29", "r27", "r28");
1774 LSS_RETURN(int, __ret, __err);
/* Architecture-independent syscall stub declarations.  The __NR__xxx
 * aliases let _exit/_gettid/_mremap be declared without colliding with
 * libc names; LSS_NAME() applies the configured prefix.
 * NOTE(review): some parameter lines are elided in this extract.
 */
1777 #define __NR__exit __NR_exit
1778 #define __NR__gettid __NR_gettid
1779 #define __NR__mremap __NR_mremap
1780 LSS_INLINE _syscall1(int, close, int, f)
1781 LSS_INLINE _syscall1(int, _exit, int, e)
1782 LSS_INLINE _syscall3(int, fcntl, int, f,
1784 LSS_INLINE _syscall2(int, fstat, int, f,
1785 struct kernel_stat*, b)
1786 LSS_INLINE _syscall4(int, futex, int*, a,
1788 struct kernel_timespec*, t)
1789 LSS_INLINE _syscall3(int, getdents, int, f,
1790 struct kernel_dirent*, d, int, c)
1791 #ifdef __NR_getdents64
1792 LSS_INLINE _syscall3(int, getdents64, int, f,
1793 struct kernel_dirent64*, d, int, c)
1795 LSS_INLINE _syscall0(pid_t, getpid)
1796 LSS_INLINE _syscall0(pid_t, getppid)
1797 LSS_INLINE _syscall0(pid_t, _gettid)
1798 LSS_INLINE _syscall2(int, kill, pid_t, p,
1800 LSS_INLINE _syscall3(off_t, lseek, int, f,
1802 LSS_INLINE _syscall2(int, munmap, void*, s,
1804 LSS_INLINE _syscall5(void*, _mremap, void*, o,
1805 size_t, os, size_t, ns,
1806 unsigned long, f, void *, a)
1807 LSS_INLINE _syscall3(int, open, const char*, p,
1809 LSS_INLINE _syscall2(int, prctl, int, o,
1811 LSS_INLINE _syscall4(long, ptrace, int, r,
1812 pid_t, p, void *, a, void *, d)
1813 LSS_INLINE _syscall3(ssize_t, read, int, f,
1814 void *, b, size_t, c)
1815 LSS_INLINE _syscall4(int, rt_sigaction, int, s,
1816 const struct kernel_sigaction*, a,
1817 struct kernel_sigaction*, o, size_t, c)
1818 LSS_INLINE _syscall4(int, rt_sigprocmask, int, h,
1819 const struct kernel_sigset_t*, s,
1820 struct kernel_sigset_t*, o, size_t, c);
1821 LSS_INLINE _syscall0(int, sched_yield)
1822 LSS_INLINE _syscall2(int, sigaltstack, const stack_t*, s,
1824 LSS_INLINE _syscall2(int, stat, const char*, f,
1825 struct kernel_stat*, b)
1826 LSS_INLINE _syscall3(ssize_t, write, int, f,
1827 const void *, b, size_t, c)
1828 #if defined(__NR_getcpu)
1829 LSS_INLINE _syscall3(long, getcpu, unsigned *, cpu,
1830 unsigned *, node, void *, unused);
/* Arch-specific extras: socket/mmap are direct syscalls where the ABI
 * provides them; x86_64 sigaction must install its own SA_RESTORER
 * trampoline (restore_rt) before calling rt_sigaction.
 * NOTE(review): several argument/closing lines are elided in this
 * extract; KERNEL_NSIG sizing lines for rt_sigaction are not visible.
 */
1832 #if defined(__x86_64__) || \
1833 (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32)
1834 LSS_INLINE _syscall3(int, socket, int, d,
1837 #if defined(__x86_64__)
1838 LSS_INLINE _syscall6(void*, mmap, void*, s,
1843 LSS_INLINE int LSS_NAME(sigaction)(int signum,
1844 const struct kernel_sigaction *act,
1845 struct kernel_sigaction *oldact) {
1846 /* On x86_64, the kernel requires us to always set our own
1847 * SA_RESTORER in order to be able to return from a signal handler.
1848 * This function must have a "magic" signature that the "gdb"
1849 * (and maybe the kernel?) can recognize.
1851 if (act != NULL && !(act->sa_flags & SA_RESTORER)) {
/* Copy the caller's struct so we never mutate their sigaction. */
1852 struct kernel_sigaction a = *act;
1853 a.sa_flags |= SA_RESTORER;
1854 a.sa_restorer = LSS_NAME(restore_rt)();
1855 return LSS_NAME(rt_sigaction)(signum, &a, oldact,
1858 return LSS_NAME(rt_sigaction)(signum, act, oldact,
/* sigprocmask: forward to rt_sigprocmask with the kernel sigset size. */
1863 LSS_INLINE int LSS_NAME(sigprocmask)(int how,
1864 const struct kernel_sigset_t *set,
1865 struct kernel_sigset_t *oldset) {
1866 return LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8);
1869 #if defined(__x86_64__) || \
1870 defined(__arm__) || \
1871 (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32)
1872 LSS_INLINE _syscall4(pid_t, wait4, pid_t, p,
1874 struct kernel_rusage*, r)
/* waitpid is emulated as wait4 with a NULL rusage pointer. */
1875 LSS_INLINE pid_t LSS_NAME(waitpid)(pid_t pid, int *status, int options){
1876 return LSS_NAME(wait4)(pid, status, options, 0);
1879 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
1880 LSS_INLINE _syscall4(int, openat, int, d, const char *, p, int, f, int, m)
/* Userspace sigset manipulation on struct kernel_sigset_t: the signal
 * mask is a bit array in set->sig[], with signal N mapped to bit (N-1).
 * NOTE(review): the "return 0;"/error-return lines are elided in this
 * extract; the visible bounds checks reject signum outside 1..8*sizeof.
 */
1882 LSS_INLINE int LSS_NAME(sigemptyset)(struct kernel_sigset_t *set) {
1883 memset(&set->sig, 0, sizeof(set->sig));
1887 LSS_INLINE int LSS_NAME(sigfillset)(struct kernel_sigset_t *set) {
1888 memset(&set->sig, -1, sizeof(set->sig));
1892 LSS_INLINE int LSS_NAME(sigaddset)(struct kernel_sigset_t *set,
1894 if (signum < 1 || signum > (int)(8*sizeof(set->sig))) {
1898 set->sig[(signum - 1)/(8*sizeof(set->sig[0]))]
1899 |= 1UL << ((signum - 1) % (8*sizeof(set->sig[0])));
1904 LSS_INLINE int LSS_NAME(sigdelset)(struct kernel_sigset_t *set,
1906 if (signum < 1 || signum > (int)(8*sizeof(set->sig))) {
1910 set->sig[(signum - 1)/(8*sizeof(set->sig[0]))]
1911 &= ~(1UL << ((signum - 1) % (8*sizeof(set->sig[0]))));
/* 32-bit targets (i386/ARM/o32 MIPS/PPC): prefer the rt_sig* syscalls,
 * but fall back to the legacy sigaction/sigprocmask syscalls when the
 * kernel returns ENOSYS, converting between kernel_sigaction and
 * kernel_old_sigaction.  errno is restored before the fallback so the
 * probing ENOSYS does not leak to the caller.
 * NOTE(review): several lines (closing braces, some branches) are elided
 * in this extract; treat this block as read-only reference.
 */
1916 #if defined(__i386__) || \
1917 defined(__arm__) || \
1918 (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || defined(__PPC__)
1919 #define __NR__sigaction __NR_sigaction
1920 #define __NR__sigprocmask __NR_sigprocmask
1921 LSS_INLINE _syscall2(int, fstat64, int, f,
1922 struct kernel_stat64 *, b)
1923 LSS_INLINE _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
1924 loff_t *, res, uint, wh)
1926 LSS_INLINE _syscall6(void*, mmap, void*, s,
1931 #ifndef __ARM_EABI__
1932 /* Not available on ARM EABI Linux. */
1933 LSS_INLINE _syscall1(void*, mmap, void*, a)
1935 LSS_INLINE _syscall6(void*, mmap2, void*, s,
1940 LSS_INLINE _syscall3(int, _sigaction, int, s,
1941 const struct kernel_old_sigaction*, a,
1942 struct kernel_old_sigaction*, o)
1943 LSS_INLINE _syscall3(int, _sigprocmask, int, h,
1944 const unsigned long*, s,
1946 LSS_INLINE _syscall2(int, stat64, const char *, p,
1947 struct kernel_stat64 *, b)
1949 LSS_INLINE int LSS_NAME(sigaction)(int signum,
1950 const struct kernel_sigaction *act,
1951 struct kernel_sigaction *oldact) {
/* Save errno so a probing ENOSYS from rt_sigaction can be undone. */
1952 int old_errno = LSS_ERRNO;
1954 struct kernel_sigaction a;
1958 /* On i386, the kernel requires us to always set our own
1959 * SA_RESTORER when using realtime signals. Otherwise, it does not
1960 * know how to return from a signal handler. This function must have
1961 * a "magic" signature that the "gdb" (and maybe the kernel?) can
1963 * Apparently, a SA_RESTORER is implicitly set by the kernel, when
1964 * using non-realtime signals.
1966 * TODO: Test whether ARM needs a restorer
1968 if (!(a.sa_flags & SA_RESTORER)) {
1969 a.sa_flags |= SA_RESTORER;
1970 a.sa_restorer = (a.sa_flags & SA_SIGINFO)
1971 ? LSS_NAME(restore_rt)() : LSS_NAME(restore)();
1975 rc = LSS_NAME(rt_sigaction)(signum, act ? &a : act, oldact,
1977 if (rc < 0 && LSS_ERRNO == ENOSYS) {
/* Kernel lacks rt_sigaction: translate to the old sigaction ABI. */
1978 struct kernel_old_sigaction oa, ooa, *ptr_a = &oa, *ptr_oa = &ooa;
1982 oa.sa_handler_ = act->sa_handler_;
1983 memcpy(&oa.sa_mask, &act->sa_mask, sizeof(oa.sa_mask));
1985 oa.sa_restorer = act->sa_restorer;
1987 oa.sa_flags = act->sa_flags;
1992 LSS_ERRNO = old_errno;
1993 rc = LSS_NAME(_sigaction)(signum, ptr_a, ptr_oa);
1994 if (rc == 0 && oldact) {
1996 memcpy(oldact, act, sizeof(*act));
1998 memset(oldact, 0, sizeof(*oldact));
2000 oldact->sa_handler_ = ptr_oa->sa_handler_;
2001 oldact->sa_flags = ptr_oa->sa_flags;
2002 memcpy(&oldact->sa_mask, &ptr_oa->sa_mask, sizeof(ptr_oa->sa_mask));
2004 oldact->sa_restorer = ptr_oa->sa_restorer;
/* sigprocmask: same rt-first/ENOSYS-fallback pattern as sigaction. */
2011 LSS_INLINE int LSS_NAME(sigprocmask)(int how,
2012 const struct kernel_sigset_t *set,
2013 struct kernel_sigset_t *oldset) {
2014 int olderrno = LSS_ERRNO;
2015 int rc = LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8);
2016 if (rc < 0 && LSS_ERRNO == ENOSYS) {
2017 LSS_ERRNO = olderrno;
2019 LSS_NAME(sigemptyset)(oldset);
2021 rc = LSS_NAME(_sigprocmask)(how,
2022 set ? &set->sig[0] : NULL,
2023 oldset ? &oldset->sig[0] : NULL);
2028 #if defined(__PPC__)
/* PPC socket calls go through sys_socketcall: LSS_SC_LOADARGS_n places
 * the call's arguments in r4..r8, and LSS_SC_BODY spills them into an
 * on-stack array whose address becomes socketcall's second argument
 * (first argument r3 = "opt", the SYS_* subcall number; 1 == SYS_SOCKET).
 */
2029 #undef LSS_SC_LOADARGS_0
2030 #define LSS_SC_LOADARGS_0(dummy...)
2031 #undef LSS_SC_LOADARGS_1
2032 #define LSS_SC_LOADARGS_1(arg1) \
2033 __sc_4 = (unsigned long) (arg1)
2034 #undef LSS_SC_LOADARGS_2
2035 #define LSS_SC_LOADARGS_2(arg1, arg2) \
2036 LSS_SC_LOADARGS_1(arg1); \
2037 __sc_5 = (unsigned long) (arg2)
2038 #undef LSS_SC_LOADARGS_3
2039 #define LSS_SC_LOADARGS_3(arg1, arg2, arg3) \
2040 LSS_SC_LOADARGS_2(arg1, arg2); \
2041 __sc_6 = (unsigned long) (arg3)
2042 #undef LSS_SC_LOADARGS_4
2043 #define LSS_SC_LOADARGS_4(arg1, arg2, arg3, arg4) \
2044 LSS_SC_LOADARGS_3(arg1, arg2, arg3); \
2045 __sc_7 = (unsigned long) (arg4)
2046 #undef LSS_SC_LOADARGS_5
2047 #define LSS_SC_LOADARGS_5(arg1, arg2, arg3, arg4, arg5) \
2048 LSS_SC_LOADARGS_4(arg1, arg2, arg3, arg4); \
2049 __sc_8 = (unsigned long) (arg5)
/* NOTE(review): the "sc" instruction and stack-teardown asm lines of
 * LSS_SC_BODY are elided in this extract.
 */
2051 #define LSS_SC_BODY(nr, type, opt, args...) \
2052 long __sc_ret, __sc_err; \
2054 register unsigned long __sc_0 __asm__ ("r0") = __NR_socketcall; \
2055 register unsigned long __sc_3 __asm__ ("r3") = opt; \
2056 register unsigned long __sc_4 __asm__ ("r4"); \
2057 register unsigned long __sc_5 __asm__ ("r5"); \
2058 register unsigned long __sc_6 __asm__ ("r6"); \
2059 register unsigned long __sc_7 __asm__ ("r7"); \
2060 register unsigned long __sc_8 __asm__ ("r8"); \
2061 LSS_SC_LOADARGS_##nr(args); \
2062 __asm__ __volatile__ \
2063 ("stwu 1, -48(1)\n\t" \
2064 "stw 4, 20(1)\n\t" \
2065 "stw 5, 24(1)\n\t" \
2066 "stw 6, 28(1)\n\t" \
2067 "stw 7, 32(1)\n\t" \
2068 "stw 8, 36(1)\n\t" \
2069 "addi 4, 1, 20\n\t" \
2073 "=&r" (__sc_3), "=&r" (__sc_4), \
2074 "=&r" (__sc_5), "=&r" (__sc_6), \
2075 "=&r" (__sc_7), "=&r" (__sc_8) \
2076 : LSS_ASMINPUT_##nr \
2077 : "cr0", "ctr", "memory"); \
2078 __sc_ret = __sc_3; \
2079 __sc_err = __sc_0; \
2081 LSS_RETURN(type, __sc_ret, __sc_err)
/* socket(2) via socketcall subcall 1 (SYS_SOCKET). */
2083 LSS_INLINE int LSS_NAME(socket)(int domain, int type, int protocol) {
2084 LSS_SC_BODY(3, int, 1, domain, type, protocol);
/* Targets whose kernels multiplex socket syscalls: socket(2) is built on
 * the socketcall(2) demultiplexer with subcall 1 (SYS_SOCKET) and an
 * unsigned-long argument array.  ARM EABI has a real socket syscall.
 * NOTE(review): closing braces and some lines are elided in this extract.
 */
2087 #if defined(__i386__) || \
2088 (defined(__arm__) && !defined(__ARM_EABI__)) || \
2089 (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
2091 /* See sys_socketcall in net/socket.c in kernel source.
2092 * It de-multiplexes on its first arg and unpacks the arglist
2093 * array in its second arg.
2095 LSS_INLINE _syscall2(long, socketcall, int, c, unsigned long*, a)
2097 LSS_INLINE int LSS_NAME(socket)(int domain, int type, int protocol) {
2098 unsigned long args[3] = {
2099 (unsigned long) domain,
2100 (unsigned long) type,
2101 (unsigned long) protocol
2103 return LSS_NAME(socketcall)(1, args);
2105 #elif defined(__ARM_EABI__)
2106 LSS_INLINE _syscall3(int, socket, int, d,
2109 #if defined(__i386__) || defined(__PPC__) || \
2110 (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
2111 LSS_INLINE _syscall3(pid_t, waitpid, pid_t, p,
2114 #if defined(__mips__)
2115 /* sys_pipe() on MIPS has non-standard calling conventions, as it returns
2116 * both file handles through CPU registers.
2118 LSS_INLINE int LSS_NAME(pipe)(int *p) {
2119 register unsigned long __v0 __asm__("$2") = __NR_pipe;
2120 register unsigned long __v1 __asm__("$3");
2121 register unsigned long __r7 __asm__("$7");
2122 __asm__ __volatile__ ("syscall\n"
/* fds come back in $2/$3; $7 is the error flag, hence three outputs. */
2123 : "=&r"(__v0), "=&r"(__v1), "+r" (__r7)
2125 : "$8", "$9", "$10", "$11", "$12",
2126 "$13", "$14", "$15", "$24", "memory");
/* Non-MIPS targets use the ordinary single-argument pipe syscall. */
2137 LSS_INLINE _syscall1(int, pipe, int *, p)
/* gettid(): use the _gettid syscall, falling back to getpid() — on old
 * kernels without gettid the main thread's tid equals its pid.
 * NOTE(review): the intermediate check between the two returns is elided
 * in this extract.
 */
2140 LSS_INLINE pid_t LSS_NAME(gettid)() {
2141 pid_t tid = LSS_NAME(_gettid)();
2145 return LSS_NAME(getpid)();
/* mremap(): varargs wrapper extracting the optional new_address argument
 * and forwarding to the 5-argument _mremap syscall.
 */
2148 LSS_INLINE void *LSS_NAME(mremap)(void *old_address, size_t old_size,
2149 size_t new_size, int flags, ...) {
2151 void *new_address, *rc;
2152 va_start(ap, flags);
2153 new_address = va_arg(ap, void *);
2154 rc = LSS_NAME(_mremap)(old_address, old_size, new_size,
2155 flags, new_address);
2160 LSS_INLINE int LSS_NAME(ptrace_detach)(pid_t pid) {
2161 /* PTRACE_DETACH can sometimes forget to wake up the tracee and it
2162 * then sends job control signals to the real parent, rather than to
2163 * the tracer. We reduce the risk of this happening by starting a
2164 * whole new time slice, and then quickly sending a SIGCONT signal
2165 * right after detaching from the tracee.
2168 LSS_NAME(sched_yield)();
2169 rc = LSS_NAME(ptrace)(PTRACE_DETACH, pid, (void *)0, (void *)0);
2171 LSS_NAME(kill)(pid, SIGCONT);
2177 #if defined(__cplusplus) && !defined(SYS_CPLUSPLUS)