OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sandbox/linux/seccomp-bpf/syscall.h" | 5 #include "sandbox/linux/seccomp-bpf/syscall.h" |
6 | 6 |
7 #include <asm/unistd.h> | 7 #include <asm/unistd.h> |
8 #include <errno.h> | 8 #include <errno.h> |
9 | 9 |
10 #include "base/basictypes.h" | 10 #include "base/basictypes.h" |
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
82 "pop %edi; .cfi_restore edi; .cfi_adjust_cfa_offset -4\n" | 82 "pop %edi; .cfi_restore edi; .cfi_adjust_cfa_offset -4\n" |
83 "pop %esi; .cfi_restore esi; .cfi_adjust_cfa_offset -4\n" | 83 "pop %esi; .cfi_restore esi; .cfi_adjust_cfa_offset -4\n" |
84 "ret\n" | 84 "ret\n" |
85 ".cfi_endproc\n" | 85 ".cfi_endproc\n" |
86 "9:.size SyscallAsm, 9b-SyscallAsm\n" | 86 "9:.size SyscallAsm, 9b-SyscallAsm\n" |
87 #elif defined(__x86_64__) | 87 #elif defined(__x86_64__) |
88 ".text\n" | 88 ".text\n" |
89 ".align 16, 0x90\n" | 89 ".align 16, 0x90\n" |
90 ".type SyscallAsm, @function\n" | 90 ".type SyscallAsm, @function\n" |
91 "SyscallAsm:.cfi_startproc\n" | 91 "SyscallAsm:.cfi_startproc\n" |
92 // Check if "%rax" is negative. If so, do not attempt to make a | 92 // Check if "%rax" is negative. If so, do not attempt to make a |
rickyz (Google)
2014/10/20 20:39:53
Comment should probably be updated.
mdempsky
2014/10/20 21:41:21
Done.
| |
93 // system call. Instead, compute the return address that is visible | 93 // system call. Instead, compute the return address that is visible |
94 // to the kernel after we execute "syscall". This address can be | 94 // to the kernel after we execute "syscall". This address can be |
95 // used as a marker that BPF code inspects. | 95 // used as a marker that BPF code inspects. |
96 "test %rax, %rax\n" | 96 "test %rdi, %rdi\n" |
97 "jge 1f\n" | 97 "jge 1f\n" |
98 // Always make sure that our code is position-independent, or the | 98 // Always make sure that our code is position-independent, or the |
99 // linker will throw a hissy fit on x86-64. | 99 // linker will throw a hissy fit on x86-64. |
100 "call 0f; .cfi_adjust_cfa_offset 8\n" | 100 "lea 2f(%rip), %rax\n" |
101 "0:pop %rax; .cfi_adjust_cfa_offset -8\n" | |
102 "addq $2f-0b, %rax\n" | |
103 "ret\n" | 101 "ret\n" |
104 // We declared all clobbered registers to the compiler. On x86-64, | 102 // We declared all clobbered registers to the compiler. On x86-64, |
105 // there really isn't much of a problem with register pressure. So, | 103 // there really isn't much of a problem with register pressure. So, |
106 // we can go ahead and directly copy the entries from the arguments | 104 // we can go ahead and directly copy the entries from the arguments |
107 // array into the appropriate CPU registers. | 105 // array into the appropriate CPU registers. |
108 "1:movq 0(%r12), %rdi\n" | 106 "1:movq %rdi, %rax\n" |
109 "movq 8(%r12), %rsi\n" | 107 "movq 0(%rsi), %rdi\n" |
110 "movq 16(%r12), %rdx\n" | 108 "movq 16(%rsi), %rdx\n" |
111 "movq 24(%r12), %r10\n" | 109 "movq 24(%rsi), %r10\n" |
112 "movq 32(%r12), %r8\n" | 110 "movq 32(%rsi), %r8\n" |
113 "movq 40(%r12), %r9\n" | 111 "movq 40(%rsi), %r9\n" |
112 "movq 8(%rsi), %rsi\n" | |
114 // Enter the kernel. | 113 // Enter the kernel. |
115 "syscall\n" | 114 "syscall\n" |
116 // This is our "magic" return address that the BPF filter sees. | 115 // This is our "magic" return address that the BPF filter sees. |
117 "2:ret\n" | 116 "2:ret\n" |
118 ".cfi_endproc\n" | 117 ".cfi_endproc\n" |
119 "9:.size SyscallAsm, 9b-SyscallAsm\n" | 118 "9:.size SyscallAsm, 9b-SyscallAsm\n" |
120 #elif defined(__arm__) | 119 #elif defined(__arm__) |
121 // Throughout this file, we use the same mode (ARM vs. thumb) | 120 // Throughout this file, we use the same mode (ARM vs. thumb) |
122 // that the C++ compiler uses. This means, when transferring control | 121 // that the C++ compiler uses. This means, when transferring control |
123 // from C++ to assembly code, we do not need to switch modes (e.g. | 122 // from C++ to assembly code, we do not need to switch modes (e.g. |
(...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
243 "mov x8, x0\n" | 242 "mov x8, x0\n" |
244 "ldr x0, [x6, #0]\n" | 243 "ldr x0, [x6, #0]\n" |
245 // Enter the kernel | 244 // Enter the kernel |
246 "svc 0\n" | 245 "svc 0\n" |
247 "2:ret\n" | 246 "2:ret\n" |
248 ".cfi_endproc\n" | 247 ".cfi_endproc\n" |
249 ".size SyscallAsm, .-SyscallAsm\n" | 248 ".size SyscallAsm, .-SyscallAsm\n" |
250 #endif | 249 #endif |
251 ); // asm | 250 ); // asm |
252 | 251 |
252 #if defined(__x86_64__) | |
253 extern "C" { | |
254 intptr_t SyscallAsm(intptr_t nr, const intptr_t args[6]); | |
255 } | |
256 #endif | |
257 | |
253 } // namespace | 258 } // namespace |
254 | 259 |
255 intptr_t Syscall::InvalidCall() { | 260 intptr_t Syscall::InvalidCall() { |
256 // Explicitly pass eight zero arguments just in case. | 261 // Explicitly pass eight zero arguments just in case. |
257 return Call(kInvalidSyscallNumber, 0, 0, 0, 0, 0, 0, 0, 0); | 262 return Call(kInvalidSyscallNumber, 0, 0, 0, 0, 0, 0, 0, 0); |
258 } | 263 } |
259 | 264 |
260 intptr_t Syscall::Call(int nr, | 265 intptr_t Syscall::Call(int nr, |
261 intptr_t p0, | 266 intptr_t p0, |
262 intptr_t p1, | 267 intptr_t p1, |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
295 intptr_t ret = nr; | 300 intptr_t ret = nr; |
296 asm volatile( | 301 asm volatile( |
297 "call SyscallAsm\n" | 302 "call SyscallAsm\n" |
298 // N.B. These are not the calling conventions normally used by the ABI. | 303 // N.B. These are not the calling conventions normally used by the ABI. |
299 : "=a"(ret) | 304 : "=a"(ret) |
300 : "0"(ret), "D"(args) | 305 : "0"(ret), "D"(args) |
301 : "cc", "esp", "memory", "ecx", "edx"); | 306 : "cc", "esp", "memory", "ecx", "edx"); |
302 #elif defined(__x86_64__) | 307 #elif defined(__x86_64__) |
303 intptr_t ret = nr; | 308 intptr_t ret = nr; |
304 { | 309 { |
305 register const intptr_t* data __asm__("r12") = args; | 310 ret = SyscallAsm(ret, args); |
306 asm volatile( | |
307 "lea -128(%%rsp), %%rsp\n" // Avoid red zone. | |
308 "call SyscallAsm\n" | |
309 "lea 128(%%rsp), %%rsp\n" | |
310 // N.B. These are not the calling conventions normally used by the ABI. | |
311 : "=a"(ret) | |
312 : "0"(ret), "r"(data) | |
313 : "cc", | |
314 "rsp", | |
315 "memory", | |
316 "rcx", | |
317 "rdi", | |
318 "rsi", | |
319 "rdx", | |
320 "r8", | |
321 "r9", | |
322 "r10", | |
323 "r11"); | |
324 } | 311 } |
325 #elif defined(__arm__) | 312 #elif defined(__arm__) |
326 intptr_t ret; | 313 intptr_t ret; |
327 { | 314 { |
328 register intptr_t inout __asm__("r0") = nr; | 315 register intptr_t inout __asm__("r0") = nr; |
329 register const intptr_t* data __asm__("r6") = args; | 316 register const intptr_t* data __asm__("r6") = args; |
330 asm volatile( | 317 asm volatile( |
331 "bl SyscallAsm\n" | 318 "bl SyscallAsm\n" |
332 // N.B. These are not the calling conventions normally used by the ABI. | 319 // N.B. These are not the calling conventions normally used by the ABI. |
333 : "=r"(inout) | 320 : "=r"(inout) |
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
419 } | 406 } |
420 | 407 |
421 // Set an error status so it can be used outside of this function | 408 // Set an error status so it can be used outside of this function |
422 *err_ret = err_stat; | 409 *err_ret = err_stat; |
423 | 410 |
424 return ret; | 411 return ret; |
425 } | 412 } |
426 #endif // defined(__mips__) | 413 #endif // defined(__mips__) |
427 | 414 |
428 } // namespace sandbox | 415 } // namespace sandbox |
OLD | NEW |