Chromium Code Reviews
| Index: lss/linux_syscall_support.h |
| =================================================================== |
| --- lss/linux_syscall_support.h (revision 18) |
| +++ lss/linux_syscall_support.h (working copy) |
| @@ -103,6 +103,7 @@ |
| #include <signal.h> |
| #include <stdarg.h> |
| #include <stddef.h> |
| +#include <stdint.h> |
| #include <string.h> |
| #include <sys/ptrace.h> |
| #include <sys/resource.h> |
| @@ -408,24 +409,24 @@ |
| }; |
| #elif defined(__x86_64__) |
| struct kernel_stat { |
| - unsigned long st_dev; |
| - unsigned long st_ino; |
| - unsigned long st_nlink; |
| + uint64_t st_dev; |
| + uint64_t st_ino; |
| + uint64_t st_nlink; |
| unsigned st_mode; |
| unsigned st_uid; |
| unsigned st_gid; |
| unsigned __pad0; |
| - unsigned long st_rdev; |
| - long st_size; |
| - long st_blksize; |
| - long st_blocks; |
| - unsigned long st_atime_; |
| - unsigned long st_atime_nsec_; |
| - unsigned long st_mtime_; |
| - unsigned long st_mtime_nsec_; |
| - unsigned long st_ctime_; |
| - unsigned long st_ctime_nsec_; |
| - long __unused[3]; |
| + uint64_t st_rdev; |
| + int64_t st_size; |
| + int64_t st_blksize; |
| + int64_t st_blocks; |
| + uint64_t st_atime_; |
| + uint64_t st_atime_nsec_; |
| + uint64_t st_mtime_; |
| + uint64_t st_mtime_nsec_; |
| + uint64_t st_ctime_; |
| + uint64_t st_ctime_nsec_; |
| + int64_t __unused[3]; |
| }; |
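Context for the change above: under the x32 ABI `long` and `unsigned long` shrink to 32 bits while the kernel still writes the 64-bit x86-64 stat layout, so only fixed-width types keep kernel_stat in sync. A hypothetical compile-time check of the sort one could put in a test file that includes this header (assumes C11 _Static_assert; not part of the patch):

    #include <stddef.h>

    /* Hypothetical layout checks; they hold for both the LP64 x86-64 ABI
     * and the ILP32 x32 ABI once the fields are fixed-width. */
    _Static_assert(sizeof(((struct kernel_stat *)0)->st_ino) == 8,
                   "st_ino must stay 64-bit under x32");
    _Static_assert(sizeof(((struct kernel_stat *)0)->st_size) == 8,
                   "st_size must stay 64-bit under x32");
    _Static_assert(offsetof(struct kernel_stat, st_rdev) == 40,
                   "field layout must match the x86-64 kernel struct stat");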
| #elif defined(__PPC__) |
| struct kernel_stat { |
| @@ -522,11 +523,25 @@ |
| long f_namelen; |
| long f_spare[6]; |
| }; |
| -#else |
| +#elif defined(__x86_64__) |
| struct kernel_statfs { |
| /* x86_64 actually defines all these fields as signed, whereas all other */ |
| /* platforms define them as unsigned. Leaving them at unsigned should not */ |
| - /* cause any problems. */ |
| + /* cause any problems. Make sure these are 64-bit even on x32. */ |
| + uint64_t f_type; |
| + uint64_t f_bsize; |
| + uint64_t f_blocks; |
| + uint64_t f_bfree; |
| + uint64_t f_bavail; |
| + uint64_t f_files; |
| + uint64_t f_ffree; |
| + struct { int val[2]; } f_fsid; |
| + uint64_t f_namelen; |
| + uint64_t f_frsize; |
| + uint64_t f_spare[5]; |
| +}; |
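One way to exercise the widened fields is through the fstatfs wrapper declared further down in this change (sys_fstatfs under the default SYS_PREFIX); a sketch with a made-up helper name, for a file that includes this header:

    /* Sketch: with plain unsigned long fields an x32 build would misread
     * the 64-bit values the kernel fills in; with uint64_t the arithmetic
     * below stays exact. */
    static uint64_t free_bytes_on(int fd) {
      struct kernel_statfs fs;
      if (sys_fstatfs(fd, &fs) != 0)
        return 0;
      return fs.f_bsize * fs.f_bfree;   /* both uint64_t */
    }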
| +#else |
| +struct kernel_statfs { |
| unsigned long f_type; |
| unsigned long f_bsize; |
| unsigned long f_blocks; |
| @@ -1790,75 +1805,139 @@ |
| #else |
| #define LSS_ENTRYPOINT "syscall\n" |
| #endif |
| + |
| + /* The x32 ABI has 32 bit longs, but the syscall interface is 64 bit. |
| + * We need to explicitly cast to an unsigned 64 bit type to avoid implicit |
| + * sign extension. We can't cast pointers directly because those are |
| + * 32 bits, and gcc will dump ugly warnings about casting from a pointer |
| + * to an integer of a different size. |
| + */ |
| + #undef LSS_SYSCALL_ARG |
| + #define LSS_SYSCALL_ARG(a) ((uint64_t)(uintptr_t)(a)) |
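The reason for the two-step cast: on x32 a pointer is 32 bits, so a direct cast to uint64_t draws a pointer-to-integer-of-different-size warning, and widening through a signed 32-bit type would sign-extend into the upper half of the register. A standalone illustration (hypothetical; the contrast only shows up on an x32 build, where uintptr_t and intptr_t are 32-bit):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uintptr_t addr = 0x80000000u;   /* a high 32-bit address on x32 */
      void *p = (void *)addr;
      /* What LSS_SYSCALL_ARG does: widen via uintptr_t, zero-extended. */
      printf("%#llx\n", (unsigned long long)(uint64_t)(uintptr_t)p);
      /* Widening through a signed 32-bit type would sign-extend instead. */
      printf("%#llx\n", (unsigned long long)(uint64_t)(intptr_t)p);
      return 0;
    }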
| + #undef _LSS_RETURN |
| + #define _LSS_RETURN(type, res, cast) \ |
| + do { \ |
| + if ((uint64_t)(res) >= (uint64_t)(-4095)) { \ |
| + LSS_ERRNO = -(res); \ |
| + res = -1; \ |
| + } \ |
| + return (type)(cast)(res); \ |
| + } while (0) |
| + #undef LSS_RETURN |
| + #define LSS_RETURN(type, res) _LSS_RETURN(type, res, uintptr_t) |
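_LSS_RETURN relies on the kernel convention that a raw syscall reports failure by returning -errno, i.e. a value in [-4095, -1]. Stripped of the macro plumbing, the decode step amounts to the sketch below (hypothetical helper name; assumes <stdint.h>, which this patch adds above):

    /* Sketch of the error decoding _LSS_RETURN performs. */
    static long long lss_decode(long long res, int *saved_errno) {
      if ((uint64_t)res >= (uint64_t)(-4095)) {
        *saved_errno = (int)-res;   /* e.g. 9 == EBADF */
        return -1;
      }
      *saved_errno = 0;
      return res;                   /* success: the raw syscall result */
    }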
| + |
| + #undef _LSS_BODY |
| + #define _LSS_BODY(nr, type, name, cast, ...) \ |
| + long long __res; \ |
| + __asm__ __volatile__(LSS_BODY_ASM##nr LSS_ENTRYPOINT \ |
| + : "=a" (__res) \ |
| + : "0" (__NR_##name) LSS_BODY_ARG##nr(__VA_ARGS__) \ |
| + : LSS_BODY_CLOBBER##nr "r11", "rcx", "memory"); \ |
| + _LSS_RETURN(type, __res, cast) |
| #undef LSS_BODY |
| - #define LSS_BODY(type,name, ...) \ |
| - long __res; \ |
| - __asm__ __volatile__(LSS_ENTRYPOINT \ |
| - : "=a" (__res) : "0" (__NR_##name), \ |
| - ##__VA_ARGS__ : "r11", "rcx", "memory"); \ |
| - LSS_RETURN(type, __res) |
| + #define LSS_BODY(nr, type, name, args...) \ |
| + _LSS_BODY(nr, type, name, uintptr_t, ## args) |
| + |
| + #undef LSS_BODY_ASM0 |
| + #undef LSS_BODY_ASM1 |
| + #undef LSS_BODY_ASM2 |
| + #undef LSS_BODY_ASM3 |
| + #undef LSS_BODY_ASM4 |
| + #undef LSS_BODY_ASM5 |
| + #undef LSS_BODY_ASM6 |
| + #define LSS_BODY_ASM0 |
| + #define LSS_BODY_ASM1 LSS_BODY_ASM0 |
| + #define LSS_BODY_ASM2 LSS_BODY_ASM1 |
| + #define LSS_BODY_ASM3 LSS_BODY_ASM2 |
| + #define LSS_BODY_ASM4 LSS_BODY_ASM3 "movq %5,%%r10;" |
| + #define LSS_BODY_ASM5 LSS_BODY_ASM4 "movq %6,%%r8;" |
| + #define LSS_BODY_ASM6 LSS_BODY_ASM5 "movq %7,%%r9;" |
| + |
| + #undef LSS_BODY_CLOBBER0 |
| + #undef LSS_BODY_CLOBBER1 |
| + #undef LSS_BODY_CLOBBER2 |
| + #undef LSS_BODY_CLOBBER3 |
| + #undef LSS_BODY_CLOBBER4 |
| + #undef LSS_BODY_CLOBBER5 |
| + #undef LSS_BODY_CLOBBER6 |
| + #define LSS_BODY_CLOBBER0 |
| + #define LSS_BODY_CLOBBER1 LSS_BODY_CLOBBER0 |
| + #define LSS_BODY_CLOBBER2 LSS_BODY_CLOBBER1 |
| + #define LSS_BODY_CLOBBER3 LSS_BODY_CLOBBER2 |
| + #define LSS_BODY_CLOBBER4 LSS_BODY_CLOBBER3 "r10", |
| + #define LSS_BODY_CLOBBER5 LSS_BODY_CLOBBER4 "r8", |
| + #define LSS_BODY_CLOBBER6 LSS_BODY_CLOBBER5 "r9", |
| + |
| + #undef LSS_BODY_ARG0 |
| + #undef LSS_BODY_ARG1 |
| + #undef LSS_BODY_ARG2 |
| + #undef LSS_BODY_ARG3 |
| + #undef LSS_BODY_ARG4 |
| + #undef LSS_BODY_ARG5 |
| + #undef LSS_BODY_ARG6 |
| + #define LSS_BODY_ARG0() |
| + #define LSS_BODY_ARG1(arg1) \ |
| + LSS_BODY_ARG0(), "D" (arg1) |
| + #define LSS_BODY_ARG2(arg1, arg2) \ |
| + LSS_BODY_ARG1(arg1), "S" (arg2) |
| + #define LSS_BODY_ARG3(arg1, arg2, arg3) \ |
| + LSS_BODY_ARG2(arg1, arg2), "d" (arg3) |
| + #define LSS_BODY_ARG4(arg1, arg2, arg3, arg4) \ |
| + LSS_BODY_ARG3(arg1, arg2, arg3), "r" (arg4) |
| + #define LSS_BODY_ARG5(arg1, arg2, arg3, arg4, arg5) \ |
| + LSS_BODY_ARG4(arg1, arg2, arg3, arg4), "r" (arg5) |
| + #define LSS_BODY_ARG6(arg1, arg2, arg3, arg4, arg5, arg6) \ |
| + LSS_BODY_ARG5(arg1, arg2, arg3, arg4, arg5), "r" (arg6) |
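Taken together, these helpers mean a three-argument LSS_BODY expands to inline asm of roughly the shape below: arguments in rdi/rsi/rdx, the syscall number in rax, and rcx/r11 clobbered by the syscall instruction. This is a hand-written sketch for illustration, not literal macro output (the real expansion also threads LSS_ERRNO and the return cast through):

    #include <errno.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Hand-expanded sketch of the 3-argument case, using write(2). */
    static ssize_t raw_write(int fd, const void *buf, size_t count) {
      long long res;
      __asm__ __volatile__("syscall\n"
                           : "=a" (res)
                           : "0" (__NR_write),
                             "D" ((uint64_t)(uintptr_t)fd),
                             "S" ((uint64_t)(uintptr_t)buf),
                             "d" ((uint64_t)(uintptr_t)count)
                           : "r11", "rcx", "memory");
      if ((uint64_t)res >= (uint64_t)(-4095)) {
        errno = (int)-res;
        return -1;
      }
      return (ssize_t)res;
    }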
| + |
| #undef _syscall0 |
| #define _syscall0(type,name) \ |
| type LSS_NAME(name)(void) { \ |
| - LSS_BODY(type, name); \ |
| + LSS_BODY(0, type, name); \ |
| } |
| #undef _syscall1 |
| #define _syscall1(type,name,type1,arg1) \ |
| type LSS_NAME(name)(type1 arg1) { \ |
| - LSS_BODY(type, name, "D" ((long)(arg1))); \ |
| + LSS_BODY(1, type, name, LSS_SYSCALL_ARG(arg1)); \ |
| } |
| #undef _syscall2 |
| #define _syscall2(type,name,type1,arg1,type2,arg2) \ |
| type LSS_NAME(name)(type1 arg1, type2 arg2) { \ |
| - LSS_BODY(type, name, "D" ((long)(arg1)), "S" ((long)(arg2))); \ |
| + LSS_BODY(2, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2));\ |
| } |
| #undef _syscall3 |
| #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ |
| type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ |
| - LSS_BODY(type, name, "D" ((long)(arg1)), "S" ((long)(arg2)), \ |
| - "d" ((long)(arg3))); \ |
| + LSS_BODY(3, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2), \ |
| + LSS_SYSCALL_ARG(arg3)); \ |
| } |
| #undef _syscall4 |
| #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ |
| type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ |
| - long __res; \ |
| - __asm__ __volatile__("movq %5,%%r10;" LSS_ENTRYPOINT : \ |
| - "=a" (__res) : "0" (__NR_##name), \ |
| - "D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \ |
| - "r" ((long)(arg4)) : "r10", "r11", "rcx", "memory"); \ |
| - LSS_RETURN(type, __res); \ |
| + LSS_BODY(4, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2), \ |
| + LSS_SYSCALL_ARG(arg3), LSS_SYSCALL_ARG(arg4));\ |
| } |
| #undef _syscall5 |
| #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ |
| type5,arg5) \ |
| type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ |
| type5 arg5) { \ |
| - long __res; \ |
| - __asm__ __volatile__("movq %5,%%r10; movq %6,%%r8;" LSS_ENTRYPOINT :\ |
| - "=a" (__res) : "0" (__NR_##name), \ |
| - "D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \ |
| - "r" ((long)(arg4)), "r" ((long)(arg5)) : \ |
| - "r8", "r10", "r11", "rcx", "memory"); \ |
| - LSS_RETURN(type, __res); \ |
| + LSS_BODY(5, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2), \ |
| + LSS_SYSCALL_ARG(arg3), LSS_SYSCALL_ARG(arg4), \ |
| + LSS_SYSCALL_ARG(arg5)); \ |
| } |
| #undef _syscall6 |
| #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ |
| type5,arg5,type6,arg6) \ |
| type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ |
| type5 arg5, type6 arg6) { \ |
| - long __res; \ |
| - __asm__ __volatile__("movq %5,%%r10; movq %6,%%r8; movq %7,%%r9;" \ |
| - LSS_ENTRYPOINT : \ |
| - "=a" (__res) : "0" (__NR_##name), \ |
| - "D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \ |
| - "r" ((long)(arg4)), "r" ((long)(arg5)), "r" ((long)(arg6)) : \ |
| - "r8", "r9", "r10", "r11", "rcx", "memory"); \ |
| - LSS_RETURN(type, __res); \ |
| + LSS_BODY(6, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2), \ |
| + LSS_SYSCALL_ARG(arg3), LSS_SYSCALL_ARG(arg4), \ |
| + LSS_SYSCALL_ARG(arg5), LSS_SYSCALL_ARG(arg6));\ |
| } |
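With the rewritten family, a declaration such as the fstatfs one later in this change, LSS_INLINE _syscall2(int, fstatfs, int, f, struct kernel_statfs*, b), expands to roughly the following (hand-expanded sketch, default sys_ prefix), which is where every argument picks up the LSS_SYSCALL_ARG widening:

    /* Approximate expansion: the int and the pointer argument are both
     * widened through LSS_SYSCALL_ARG before reaching 64-bit registers. */
    LSS_INLINE int sys_fstatfs(int f, struct kernel_statfs *b) {
      LSS_BODY(2, int, fstatfs, LSS_SYSCALL_ARG(f), LSS_SYSCALL_ARG(b));
    }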
| LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, |
| int flags, void *arg, int *parent_tidptr, |
| void *newtls, int *child_tidptr) { |
| - long __res; |
| + long long __res; |
| { |
| __asm__ __volatile__(/* if (fn == NULL) |
| * return -EINVAL; |
| @@ -1921,17 +2000,26 @@ |
| "1:\n" |
| : "=a" (__res) |
| : "0"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit), |
| - "r"(fn), "S"(child_stack), "D"(flags), "r"(arg), |
| - "d"(parent_tidptr), "r"(newtls), |
| - "r"(child_tidptr) |
| + "r"(LSS_SYSCALL_ARG(fn)), |
| + "S"(LSS_SYSCALL_ARG(child_stack)), |
| + "D"(LSS_SYSCALL_ARG(flags)), |
| + "r"(LSS_SYSCALL_ARG(arg)), |
| + "d"(LSS_SYSCALL_ARG(parent_tidptr)), |
| + "r"(LSS_SYSCALL_ARG(newtls)), |
| + "r"(LSS_SYSCALL_ARG(child_tidptr)) |
| : "rsp", "memory", "r8", "r10", "r11", "rcx"); |
| } |
| LSS_RETURN(int, __res); |
| } |
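A usage sketch for the clone wrapper (hypothetical helper and flag choice; the child stack handling is illustrative only, and the child is reaped with libc waitpid rather than an LSS wrapper):

    #include <signal.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    static int child_main(void *arg) {
      (void)arg;
      return 0;                       /* exit status of the child */
    }

    static void spawn_child(void) {
      /* Fork-like clone onto a private stack; x86-64 stacks grow down,
       * so pass the top of the allocation. */
      enum { kStackSize = 64 * 1024 };
      char *stack = malloc(kStackSize);
      if (!stack)
        return;
      int pid = sys_clone(child_main, stack + kStackSize,
                          SIGCHLD, NULL,       /* flags, arg         */
                          NULL, NULL, NULL);   /* ptid, newtls, ctid */
      if (pid > 0)
        waitpid(pid, NULL, 0);
      free(stack);
    }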
| LSS_INLINE _syscall2(int, arch_prctl, int, c, void *, a) |
| - LSS_INLINE _syscall4(int, fadvise64, int, fd, loff_t, offset, loff_t, len, |
| - int, advice) |
| + /* Need to make sure loff_t isn't truncated to 32-bits under x32. */ |
| + LSS_INLINE int LSS_NAME(fadvise64)(int fd, loff_t offset, loff_t len, |
| + int advice) { |
| + LSS_BODY(4, int, fadvise64, LSS_SYSCALL_ARG(fd), (uint64_t)(offset), |
| + (uint64_t)(len), LSS_SYSCALL_ARG(advice)); |
| + } |
| + |
| LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) { |
| /* On x86-64, the kernel does not know how to return from |
| * a signal handler. Instead, it relies on user space to provide a |
| @@ -1939,7 +2027,7 @@ |
| * Unfortunately, we cannot just reference the glibc version of this |
| * function, as glibc goes out of its way to make it inaccessible. |
| */ |
| - void (*res)(void); |
| + long long res; |
| __asm__ __volatile__("call 2f\n" |
| "0:.align 16\n" |
| "1:movq %1,%%rax\n" |
| @@ -1948,7 +2036,7 @@ |
| "addq $(1b-0b),%0\n" |
| : "=a" (res) |
| : "i" (__NR_rt_sigreturn)); |
| - return res; |
| + return (void (*)())(uintptr_t)res; |
|
Mark Seaborn, 2013/04/02 15:47:17:
I noticed that this breaks the NaCl build, which u

| } |
| #elif defined(__ARM_ARCH_3__) |
| /* Most definitions of _syscallX() neglect to mark "memory" as being |
| @@ -2654,8 +2742,15 @@ |
| struct kernel_stat*, b) |
| LSS_INLINE _syscall2(int, fstatfs, int, f, |
| struct kernel_statfs*, b) |
| - LSS_INLINE _syscall2(int, ftruncate, int, f, |
| - off_t, l) |
| + #if defined(__x86_64__) |
| + /* Need to make sure off_t isn't truncated to 32-bits under x32. */ |
| + LSS_INLINE int LSS_NAME(ftruncate)(int f, off_t l) { |
| + LSS_BODY(2, int, ftruncate, LSS_SYSCALL_ARG(f), (uint64_t)(l)); |
| + } |
| + #else |
| + LSS_INLINE _syscall2(int, ftruncate, int, f, |
| + off_t, l) |
| + #endif |
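The case the explicit cast protects against: off_t is 64-bit here, but LSS_SYSCALL_ARG would push it through a 32-bit uintptr_t on x32 and clip the upper half. A sketch with a made-up helper (libc open/close used for brevity):

    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: extend a file to 5 GiB; the length only survives on x32
     * because the wrapper passes it as a full uint64_t. */
    static int make_sparse_5gib(const char *path) {
      int fd = open(path, O_WRONLY | O_CREAT, 0644);
      if (fd < 0)
        return -1;
      int rc = sys_ftruncate(fd, 5LL * 1024 * 1024 * 1024);
      close(fd);
      return rc;
    }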
| LSS_INLINE _syscall4(int, futex, int*, a, |
| int, o, int, v, |
| struct kernel_timespec*, t) |
| @@ -2704,8 +2799,16 @@ |
| int, who, int, ioprio) |
| LSS_INLINE _syscall2(int, kill, pid_t, p, |
| int, s) |
| - LSS_INLINE _syscall3(off_t, lseek, int, f, |
| - off_t, o, int, w) |
| + #if defined(__x86_64__) |
| + /* Need to make sure off_t isn't truncated to 32-bits under x32. */ |
| + LSS_INLINE off_t LSS_NAME(lseek)(int f, off_t o, int w) { |
| + _LSS_BODY(3, off_t, lseek, off_t, LSS_SYSCALL_ARG(f), (uint64_t)(o), |
| + LSS_SYSCALL_ARG(w)); |
| + } |
| + #else |
| + LSS_INLINE _syscall3(off_t, lseek, int, f, |
| + off_t, o, int, w) |
| + #endif |
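lseek is the one wrapper that also needs its return value kept at 64 bits, which is why it passes off_t as the cast argument of _LSS_BODY instead of the default uintptr_t. A round-trip sketch (hypothetical helper name):

    #include <unistd.h>

    /* Sketch: both the requested offset and the returned position stay
     * 64-bit; the off_t cast keeps the result from being squeezed through
     * a 32-bit uintptr_t on x32. */
    static off_t seek_past_4gib(int fd) {
      return sys_lseek(fd, 5LL * 1024 * 1024 * 1024, SEEK_SET);
    }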
| LSS_INLINE _syscall2(int, munmap, void*, s, |
| size_t, l) |
| LSS_INLINE _syscall6(long, move_pages, pid_t, p, |
| @@ -2804,8 +2907,12 @@ |
| int, t, int, p, int*, s) |
| #endif |
| #if defined(__x86_64__) |
| - LSS_INLINE _syscall4(int, fallocate, int, fd, int, mode, |
| - loff_t, offset, loff_t, len) |
| + /* Need to make sure loff_t isn't truncated to 32-bits under x32. */ |
| + LSS_INLINE int LSS_NAME(fallocate)(int f, int mode, loff_t offset, |
| + loff_t len) { |
| + LSS_BODY(4, int, fallocate, LSS_SYSCALL_ARG(f), LSS_SYSCALL_ARG(mode), |
| + (uint64_t)(offset), (uint64_t)(len)); |
| + } |
| LSS_INLINE int LSS_NAME(getresgid32)(gid_t *rgid, |
| gid_t *egid, |
| @@ -2819,10 +2926,13 @@ |
| return LSS_NAME(getresuid)(ruid, euid, suid); |
| } |
| - LSS_INLINE _syscall6(void*, mmap, void*, s, |
| - size_t, l, int, p, |
| - int, f, int, d, |
| - __off64_t, o) |
| + /* Need to make sure __off64_t isn't truncated to 32-bits under x32. */ |
| + LSS_INLINE void* LSS_NAME(mmap)(void *s, size_t l, int p, int f, int d, |
| + __off64_t o) { |
| + LSS_BODY(6, void*, mmap, LSS_SYSCALL_ARG(s), LSS_SYSCALL_ARG(l), |
| + LSS_SYSCALL_ARG(p), LSS_SYSCALL_ARG(f), |
| + LSS_SYSCALL_ARG(d), (uint64_t)(o)); |
| + } |
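And correspondingly for mmap, where the file offset is the sixth argument; a sketch mapping one page that starts beyond 4 GiB (offset must be page-aligned; helper name and libc open/close are illustrative):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Sketch: the (uint64_t)(o) cast keeps the high bits of the offset on
     * x32; errors come back as MAP_FAILED, i.e. (void *)-1. */
    static void *map_high_page(const char *path) {
      int fd = open(path, O_RDONLY);
      if (fd < 0)
        return MAP_FAILED;
      void *p = sys_mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd,
                         5LL * 1024 * 1024 * 1024);   /* page-aligned */
      close(fd);
      return p;
    }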
| LSS_INLINE _syscall4(int, newfstatat, int, d, |
| const char *, p, |
| @@ -3466,8 +3576,24 @@ |
| return -1; |
| } |
| } |
| - #if defined(__x86_64__) || \ |
| - (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64) |
| + #if defined(__x86_64__) |
| + /* Need to make sure loff_t isn't truncated to 32-bits under x32. */ |
| + LSS_INLINE ssize_t LSS_NAME(pread64)(int f, void *b, size_t c, loff_t o) { |
| + LSS_BODY(4, ssize_t, pread64, LSS_SYSCALL_ARG(f), LSS_SYSCALL_ARG(b), |
| + LSS_SYSCALL_ARG(c), (uint64_t)(o)); |
| + } |
| + |
| + LSS_INLINE ssize_t LSS_NAME(pwrite64)(int f, const void *b, size_t c, |
| + loff_t o) { |
| + LSS_BODY(4, ssize_t, pwrite64, LSS_SYSCALL_ARG(f), LSS_SYSCALL_ARG(b), |
| + LSS_SYSCALL_ARG(c), (uint64_t)(o)); |
| + } |
| + |
| + LSS_INLINE int LSS_NAME(readahead)(int f, loff_t o, unsigned c) { |
| + LSS_BODY(3, int, readahead, LSS_SYSCALL_ARG(f), (uint64_t)(o), |
| + LSS_SYSCALL_ARG(c)); |
| + } |
| + #elif defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI64 |
| LSS_INLINE _syscall4(ssize_t, pread64, int, f, |
| void *, b, size_t, c, |
| loff_t, o) |
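A matching read-side sketch for the new pread64 wrapper, pulling bytes from past the 4 GiB boundary without an intervening lseek (hypothetical helper; libc open/close for brevity):

    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: positional read from beyond 4 GiB; loff_t keeps its full
     * width through the (uint64_t)(o) cast even on x32. */
    static ssize_t read_high(const char *path, char *buf, size_t len) {
      int fd = open(path, O_RDONLY);
      if (fd < 0)
        return -1;
      ssize_t n = sys_pread64(fd, buf, len, 5LL * 1024 * 1024 * 1024);
      close(fd);
      return n;
    }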