Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* Copyright (c) 2005-2011, Google Inc. | 1 /* Copyright (c) 2005-2011, Google Inc. |
| 2 * All rights reserved. | 2 * All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 2010 matching lines...) | |
| 2021 } | 2021 } |
| 2022 | 2022 |
| 2023 LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) { | 2023 LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) { |
| 2024 /* On x86-64, the kernel does not know how to return from | 2024 /* On x86-64, the kernel does not know how to return from |
| 2025 * a signal handler. Instead, it relies on user space to provide a | 2025 * a signal handler. Instead, it relies on user space to provide a |
| 2026 * restorer function that calls the rt_sigreturn() system call. | 2026 * restorer function that calls the rt_sigreturn() system call. |
| 2027 * Unfortunately, we cannot just reference the glibc version of this | 2027 * Unfortunately, we cannot just reference the glibc version of this |
| 2028 * function, as glibc goes out of its way to make it inaccessible. | 2028 * function, as glibc goes out of its way to make it inaccessible. |
| 2029 */ | 2029 */ |
| 2030 long long res; | 2030 long long res; |
| 2031 __asm__ __volatile__("call 2f\n" | 2031 __asm__ __volatile__("jmp 2f\n" |
| 2032 "0:.align 16\n" | 2032 ".align 16\n" |
| 2033 "1:movq %1,%%rax\n" | 2033 "1:movq %1,%%rax\n" |
| 2034 LSS_ENTRYPOINT | 2034 LSS_ENTRYPOINT |
| 2035 "2:popq %0\n" | 2035 "2:leaq 1b(%%rip),%0\n" |
| 2036 "addq $(1b-0b),%0\n" | 2036 : "=r" (res) |
|
jln (very slow on Chromium)
2013/08/29 20:22:03
This should always have been =r, right?
Mark Seaborn
2013/08/31 00:01:58
Yes. Constraining this to rax was unnecessary.
| |
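The thread above concerns GCC inline-asm output constraints: `"=a"` pins the output operand to %rax, while `"=r"` lets the register allocator pick any general-purpose register, which is all `restore_rt()` needs. A minimal standalone sketch of the difference (illustrative, not part of the patch):

```c
#include <stdio.h>

int main(void) {
  void *pinned, *flexible;

  /* "=a" forces the output operand into %rax. */
  __asm__("leaq 1f(%%rip), %0\n1:" : "=a"(pinned));

  /* "=r" lets the compiler choose any general-purpose register. */
  __asm__("leaq 1f(%%rip), %0\n1:" : "=r"(flexible));

  printf("%p %p\n", pinned, flexible);
  return 0;
}
```

With `"=a"`, the compiler must also move the value out of %rax if it is needed elsewhere; `"=r"` removes that artificial restriction, which is why the review settled on it.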
| 2037 : "=a" (res) | |
| 2038 : "i" (__NR_rt_sigreturn)); | 2037 : "i" (__NR_rt_sigreturn)); |
| 2039 return (void (*)(void))(uintptr_t)res; | 2038 return (void (*)(void))(uintptr_t)res; |
| 2040 } | 2039 } |
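For context, the function above manufactures the SA_RESTORER trampoline that the x86-64 kernel expects user space to supply: when a signal handler returns, control lands in this stub, which invokes rt_sigreturn() to restore the interrupted context. The new code computes the stub's address with a single RIP-relative `leaq` instead of the old call/pop/addq arithmetic. A minimal sketch of how such a restorer is wired up, assuming x86-64 Linux (`my_restorer` and `ksigaction` are illustrative names, not part of linux_syscall_support.h):

```c
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SA_RESTORER
#define SA_RESTORER 0x04000000  /* from <asm/signal.h> */
#endif

/* The kernel's sigaction layout on x86-64 (not glibc's struct sigaction). */
struct ksigaction {
  void (*handler)(int);
  unsigned long flags;
  void (*restorer)(void);
  unsigned long mask;  /* kernel sigset_t: 64 bits on x86-64 */
};

/* The restorer's only job is to invoke rt_sigreturn(). */
__asm__(".text\n"
        ".align 16\n"
        "my_restorer:\n"
        "  movq $15, %rax\n"  /* __NR_rt_sigreturn on x86-64 */
        "  syscall\n");
extern void my_restorer(void);

static void on_sigusr1(int sig) { (void)sig; }

int main(void) {
  struct ksigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.handler = on_sigusr1;
  sa.flags = SA_RESTORER;
  sa.restorer = my_restorer;
  /* Call rt_sigaction directly: glibc's sigaction() would install
   * its own restorer behind our back. */
  if (syscall(SYS_rt_sigaction, SIGUSR1, &sa, NULL, sizeof(sa.mask)))
    return 1;
  raise(SIGUSR1);  /* the handler returns through my_restorer */
  puts("back from handler");
  return 0;
}
```

glibc ships its own restorer but, as the comment in the patch notes, deliberately makes it inaccessible, which is why LSS has to emit one inline.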
| 2041 #elif defined(__ARM_ARCH_3__) | 2040 #elif defined(__ARM_ARCH_3__) |
| 2042 /* Most definitions of _syscallX() neglect to mark "memory" as being | 2041 /* Most definitions of _syscallX() neglect to mark "memory" as being |
| 2043 * clobbered. This causes problems with compilers that do a better job | 2042 * clobbered. This causes problems with compilers that do a better job |
| 2044 * at optimizing across __asm__ calls. | 2043 * at optimizing across __asm__ calls. |
| 2045 * So, we just have to redefine all of the _syscallX() macros. | 2044 * So, we just have to redefine all of the _syscallX() macros. |
| 2046 */ | 2045 */ |
| 2047 #undef LSS_REG | 2046 #undef LSS_REG |
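The comment above describes a classic inline-asm pitfall: a syscall stub whose clobber list omits "memory" lets the optimizer cache values in registers, or reorder loads and stores, across a system call that actually reads or writes user memory. A minimal sketch of a correctly annotated wrapper (illustrative, written for x86-64 rather than ARM for brevity; `my_write` is not an LSS name):

```c
#include <sys/syscall.h>

/* Hand-rolled write(2): the "memory" clobber tells the compiler the
 * kernel reads *buf, so pending stores to buf must be flushed first. */
static long my_write(int fd, const void *buf, unsigned long count) {
  long ret;
  __asm__ __volatile__("syscall"
                       : "=a"(ret)        /* result returned in %rax */
                       : "0"(__NR_write), /* syscall number in %rax  */
                         "D"((long)fd),   /* %rdi */
                         "S"(buf),        /* %rsi */
                         "d"(count)       /* %rdx */
                       : "rcx", "r11",    /* clobbered by syscall insn */
                         "memory");
  return ret;
}
```

Without the "memory" clobber, a preceding store such as `buf[0] = 'x'` could legally be sunk past the asm statement, and the kernel would see stale data.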
| (...skipping 1615 matching lines...) | |
| 3663 # pragma pop_macro("fstat64") | 3662 # pragma pop_macro("fstat64") |
| 3664 # pragma pop_macro("lstat64") | 3663 # pragma pop_macro("lstat64") |
| 3665 #endif | 3664 #endif |
| 3666 | 3665 |
| 3667 #if defined(__cplusplus) && !defined(SYS_CPLUSPLUS) | 3666 #if defined(__cplusplus) && !defined(SYS_CPLUSPLUS) |
| 3668 } | 3667 } |
| 3669 #endif | 3668 #endif |
| 3670 | 3669 |
| 3671 #endif | 3670 #endif |
| 3672 #endif | 3671 #endif |