OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2014 The Native Client Authors. All rights reserved. | 2 * Copyright (c) 2015 The Native Client Authors. All rights reserved. |
3 * Use of this source code is governed by a BSD-style license that can be | 3 * Use of this source code is governed by a BSD-style license that can be |
4 * found in the LICENSE file. | 4 * found in the LICENSE file. |
5 */ | 5 */ |
6 | 6 |
7 #include <errno.h> | 7 #include <errno.h> |
8 #include <pthread.h> | 8 #include <pthread.h> |
9 #include <signal.h> | 9 #include <signal.h> |
10 #include <stdint.h> | 10 #include <stdint.h> |
11 #include <string.h> | 11 #include <string.h> |
12 #include <unistd.h> | 12 #include <unistd.h> |
13 | 13 |
14 #include "native_client/src/include/elf_constants.h" | 14 #include "native_client/src/include/elf_constants.h" |
15 #include "native_client/src/include/nacl/nacl_exception.h" | 15 #include "native_client/src/include/nacl/nacl_exception.h" |
16 #include "native_client/src/include/nacl_macros.h" | 16 #include "native_client/src/include/nacl_macros.h" |
| 17 #include "native_client/src/nonsfi/linux/irt_signal_handling.h" |
17 #include "native_client/src/nonsfi/linux/linux_sys_private.h" | 18 #include "native_client/src/nonsfi/linux/linux_sys_private.h" |
18 #include "native_client/src/nonsfi/linux/linux_syscall_defines.h" | 19 #include "native_client/src/nonsfi/linux/linux_syscall_defines.h" |
19 #include "native_client/src/nonsfi/linux/linux_syscall_structs.h" | 20 #include "native_client/src/nonsfi/linux/linux_syscall_structs.h" |
20 #include "native_client/src/public/nonsfi/irt_exception_handling.h" | 21 #include "native_client/src/public/linux_syscalls/sys/syscall.h" |
| 22 #include "native_client/src/public/nonsfi/irt_signal_handling.h" |
21 #include "native_client/src/untrusted/irt/irt.h" | 23 #include "native_client/src/untrusted/irt/irt.h" |
22 | 24 |
23 typedef struct compat_sigaltstack { | 25 typedef struct compat_sigaltstack { |
24 uint32_t ss_sp; | 26 uint32_t ss_sp; |
25 int32_t ss_flags; | 27 int32_t ss_flags; |
26 uint32_t ss_size; | 28 uint32_t ss_size; |
27 } linux_stack_t; | 29 } linux_stack_t; |
28 | 30 |
29 #if defined(__i386__) | 31 #if defined(__i386__) |
30 | 32 |
31 /* From linux/arch/x86/include/uapi/asm/sigcontext32.h */ | 33 /* From linux/arch/x86/include/uapi/asm/sigcontext32.h */ |
32 struct sigcontext_ia32 { | 34 struct sigcontext_ia32 { |
33 unsigned short gs, __gsh; | 35 uint16_t gs, __gsh; |
34 unsigned short fs, __fsh; | 36 uint16_t fs, __fsh; |
35 unsigned short es, __esh; | 37 uint16_t es, __esh; |
36 unsigned short ds, __dsh; | 38 uint16_t ds, __dsh; |
37 unsigned int di; | 39 uint32_t di; |
38 unsigned int si; | 40 uint32_t si; |
39 unsigned int bp; | 41 uint32_t bp; |
40 unsigned int sp; | 42 uint32_t sp; |
41 unsigned int bx; | 43 uint32_t bx; |
42 unsigned int dx; | 44 uint32_t dx; |
43 unsigned int cx; | 45 uint32_t cx; |
44 unsigned int ax; | 46 uint32_t ax; |
45 unsigned int trapno; | 47 uint32_t trapno; |
46 unsigned int err; | 48 uint32_t err; |
47 unsigned int ip; | 49 uint32_t ip; |
48 unsigned short cs, __csh; | 50 uint16_t cs, __csh; |
49 unsigned int flags; | 51 uint32_t flags; |
50 unsigned int sp_at_signal; | 52 uint32_t sp_at_signal; |
51 unsigned short ss, __ssh; | 53 uint16_t ss, __ssh; |
52 unsigned int fpstate; | 54 uint32_t fpstate; |
53 unsigned int oldmask; | 55 uint32_t oldmask; |
54 unsigned int cr2; | 56 uint32_t cr2; |
55 }; | 57 }; |
56 | 58 |
57 typedef struct sigcontext_ia32 linux_mcontext_t; | 59 typedef struct sigcontext_ia32 linux_mcontext_t; |
58 | 60 |
59 #elif defined(__arm__) | 61 #elif defined(__arm__) |
60 | 62 |
61 /* From linux/arch/arm/include/uapi/asm/sigcontext.h */ | 63 /* From linux/arch/arm/include/uapi/asm/sigcontext.h */ |
62 struct sigcontext_arm { | 64 struct sigcontext_arm { |
63 uint32_t trap_no; | 65 uint32_t trap_no; |
64 uint32_t error_code; | 66 uint32_t error_code; |
(...skipping 41 matching lines...)
106 static const int kSignals[] = { | 108 static const int kSignals[] = { |
107 LINUX_SIGSTKFLT, | 109 LINUX_SIGSTKFLT, |
108 LINUX_SIGINT, LINUX_SIGQUIT, LINUX_SIGILL, LINUX_SIGTRAP, LINUX_SIGBUS, | 110 LINUX_SIGINT, LINUX_SIGQUIT, LINUX_SIGILL, LINUX_SIGTRAP, LINUX_SIGBUS, |
109 LINUX_SIGFPE, LINUX_SIGSEGV, | 111 LINUX_SIGFPE, LINUX_SIGSEGV, |
110 /* Handle SIGABRT in case someone sends it asynchronously using kill(). */ | 112 /* Handle SIGABRT in case someone sends it asynchronously using kill(). */ |
111 LINUX_SIGABRT, | 113 LINUX_SIGABRT, |
112 }; | 114 }; |
113 | 115 |
114 static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER; | 116 static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER; |
115 static NaClExceptionHandler g_signal_handler_function_pointer = NULL; | 117 static NaClExceptionHandler g_signal_handler_function_pointer = NULL; |
| 118 static NaClExceptionHandler g_exception_handler_function_pointer = NULL; |
116 static int g_signal_handler_initialized = 0; | 119 static int g_signal_handler_initialized = 0; |
| 120 static int g_tgid = 0; |
| 121 static int g_main_tid; |
117 | 122 |
118 struct NonSfiExceptionFrame { | 123 struct NonSfiExceptionFrame { |
119 struct NaClExceptionContext context; | 124 struct NaClExceptionContext context; |
120 struct NaClExceptionPortableContext portable; | 125 struct NaClExceptionPortableContext portable; |
121 }; | 126 }; |
122 | 127 |
123 static void machine_context_to_register(const linux_mcontext_t *mctx, | 128 static void machine_context_to_register(const linux_mcontext_t *mctx, |
124 NaClUserRegisterState *dest) { | 129 NaClUserRegisterState *dest) { |
125 #if defined(__i386__) | 130 #if defined(__i386__) |
126 #define COPY_REG(A) dest->e##A = mctx->A | 131 #define COPY_REG(A) dest->e##A = mctx->A |
(...skipping 26 matching lines...)
153 #undef COPY_REG | 158 #undef COPY_REG |
154 dest->stack_ptr = mctx->arm_sp; | 159 dest->stack_ptr = mctx->arm_sp; |
155 dest->lr = mctx->arm_lr; | 160 dest->lr = mctx->arm_lr; |
156 dest->prog_ctr = mctx->arm_pc; | 161 dest->prog_ctr = mctx->arm_pc; |
157 dest->cpsr = mctx->arm_cpsr; | 162 dest->cpsr = mctx->arm_cpsr; |
158 #else | 163 #else |
159 # error Unsupported architecture | 164 # error Unsupported architecture |
160 #endif | 165 #endif |
161 } | 166 } |
162 | 167 |
163 static void exception_frame_from_signal_context( | 168 static void nonsfi_exception_frame_from_signal_context( |
164 struct NonSfiExceptionFrame *frame, | 169 struct NonSfiExceptionFrame *frame, |
165 const void *raw_ctx) { | 170 const void *raw_ctx) { |
166 const struct linux_ucontext_t *uctx = (struct linux_ucontext_t *) raw_ctx; | 171 const struct linux_ucontext_t *uctx = (struct linux_ucontext_t *) raw_ctx; |
167 const linux_mcontext_t *mctx = &uctx->uc_mcontext; | 172 const linux_mcontext_t *mctx = &uctx->uc_mcontext; |
168 frame->context.size = (((uintptr_t) (&frame->portable + 1)) | 173 frame->context.size = (((uintptr_t) (&frame->portable + 1)) |
169 - (uintptr_t) &frame->context); | 174 - (uintptr_t) &frame->context); |
170 frame->context.portable_context_offset = ((uintptr_t) &frame->portable | 175 frame->context.portable_context_offset = ((uintptr_t) &frame->portable |
171 - (uintptr_t) &frame->context); | 176 - (uintptr_t) &frame->context); |
172 frame->context.portable_context_size = sizeof(frame->portable); | 177 frame->context.portable_context_size = sizeof(frame->portable); |
173 frame->context.regs_size = sizeof(frame->context.regs); | 178 frame->context.regs_size = sizeof(frame->context.regs); |
174 | 179 |
175 memset(frame->context.reserved, 0, sizeof(frame->context.reserved)); | 180 memset(frame->context.reserved, 0, sizeof(frame->context.reserved)); |
176 machine_context_to_register(mctx, &frame->context.regs); | 181 machine_context_to_register(mctx, &frame->context.regs); |
177 frame->portable.prog_ctr = frame->context.regs.prog_ctr; | 182 frame->portable.prog_ctr = frame->context.regs.prog_ctr; |
178 frame->portable.stack_ptr = frame->context.regs.stack_ptr; | 183 frame->portable.stack_ptr = frame->context.regs.stack_ptr; |
179 | 184 |
180 #if defined(__i386__) | 185 #if defined(__i386__) |
181 frame->context.arch = EM_386; | 186 frame->context.arch = EM_386; |
182 frame->portable.frame_ptr = frame->context.regs.ebp; | 187 frame->portable.frame_ptr = frame->context.regs.ebp; |
183 #elif defined(__arm__) | 188 #elif defined(__arm__) |
184 frame->context.arch = EM_ARM; | 189 frame->context.arch = EM_ARM; |
185 /* R11 is frame pointer in ARM mode, R8 is frame pointer in thumb mode. */ | 190 /* R11 is frame pointer in ARM mode, R8 is frame pointer in thumb mode. */ |
186 frame->portable.frame_ptr = frame->context.regs.r11; | 191 frame->portable.frame_ptr = frame->context.regs.r11; |
187 #else | 192 #else |
188 # error Unsupported architecture | 193 # error Unsupported architecture |
189 #endif | 194 #endif |
190 } | 195 } |
191 | 196 |
192 /* Signal handler, responsible for calling the registered handler. */ | 197 /* A replacement for sigreturn. It does not restore the signal mask. */ |
193 static void signal_catch(int sig, linux_siginfo_t *info, void *uc) { | 198 static void __attribute__((noreturn)) |
194 if (g_signal_handler_function_pointer) { | 199 nonsfi_restore_context(const linux_mcontext_t *mctx) { |
| 200 |
| 201 #if defined(__i386__) |
| 202 |
| 203 #define OFFSET(name) \ |
| 204 [name] "i" (offsetof(linux_mcontext_t, name)) |
| 205 #define RESTORE_SEGMENT(name) \ |
| 206 "mov %c[" #name "](%%eax), %%" #name "\n" |
| 207 #define RESTORE(name) \ |
| 208 "movl %c[" #name "](%%eax), %%e" #name "\n" |
| 209 |
| 210 __asm__ __volatile__( |
| 211 /* Restore floating-point environment */ |
| 212 "mov %c[fpstate](%%eax), %%ecx\n" |
| 213 "fldenv (%%ecx)\n" |
| 214 |
| 215 /* Restore all segment registers */ |
| 216 RESTORE_SEGMENT(gs) |
| 217 RESTORE_SEGMENT(fs) |
| 218 RESTORE_SEGMENT(es) |
| 219 RESTORE_SEGMENT(ds) |
| 220 |
| 221 /* |
| 222 * Restore most of the other registers. |
| 223 */ |
| 224 RESTORE(di) |
| 225 RESTORE(si) |
| 226 RESTORE(bp) |
| 227 RESTORE(bx) |
| 228 |
| 229 /* |
| 230 * Prepare the last registers. eip *must* be one slot above the original |
| 231 * stack, since that is the only way eip and esp can be simultaneously |
| 232 * restored. Here, we are using ecx as the pseudo stack pointer, and edx |
| 233 * as a scratch register. Once the stack is laid out the way we want it to |
| 234 * be, restore edx and eax last. |
| 235 */ |
| 236 "mov %c[sp](%%eax), %%ecx\n" |
| 237 "mov %c[ip](%%eax), %%edx\n" |
| 238 "mov %%edx, -4(%%ecx)\n" |
| 239 "mov %c[flags](%%eax), %%edx\n" |
| 240 "mov %%edx, -8(%%ecx)\n" |
| 241 "mov %c[cx](%%eax), %%edx\n" |
| 242 "mov %%edx, -12(%%ecx)\n" |
| 243 RESTORE(dx) |
| 244 RESTORE(ax) |
| 245 "lea -12(%%ecx), %%esp\n" |
| 246 |
| 247 /* |
| 248 * Finally pop ecx off the stack, restore the processor flags, and return |
| 249 * to simultaneously restore esp and eip. |
| 250 */ |
| 251 "pop %%ecx\n" |
| 252 "popf\n" |
| 253 "ret\n" |
| 254 : |
| 255 : "a" (mctx), |
| 256 OFFSET(gs), |
| 257 OFFSET(fs), |
| 258 OFFSET(es), |
| 259 OFFSET(ds), |
| 260 OFFSET(di), |
| 261 OFFSET(si), |
| 262 OFFSET(bp), |
| 263 OFFSET(sp), |
| 264 OFFSET(bx), |
| 265 OFFSET(dx), |
| 266 OFFSET(cx), |
| 267 OFFSET(ax), |
| 268 OFFSET(ip), |
| 269 OFFSET(flags), |
| 270 OFFSET(fpstate) |
| 271 ); |
| 272 |
| 273 #undef OFFSET |
| 274 #undef RESTORE |
| 275 #undef RESTORE_SEGMENT |
| 276 |
| 277 #elif defined(__arm__) |
| 278 |
| 279 #define OFFSET(name) \ |
| 280 [name] "I" (offsetof(linux_mcontext_t, arm_ ## name) - \ |
| 281 offsetof(linux_mcontext_t, arm_r0)) |
| 282 |
| 283 register uint32_t a14 __asm__("r14") = (uint32_t) &mctx->arm_r0; |
| 284 |
| 285 __asm__ __volatile__( |
| 286 /* Restore flags */ |
| 287 "ldr r0, [r14, %[cpsr]]\n" |
| 288 "msr APSR_nzcvqg, r0\n" |
| 289 |
| 290 /* |
| 291 * Restore general-purpose registers. |
| 292 * This code does not use the simpler 'ldmia r14, {r0-pc}' since using |
| 293 * ldmia with either sp or with both lr and pc is deprecated. |
| 294 */ |
| 295 "ldmia r14, {r0-r10}\n" |
| 296 |
| 297 /* |
| 298 * Copy r11, r12, lr, and pc just before the original sp. |
| 299 * r12 will work as a temporary sp. r11 will be the scratch register, and |
| 300 * will be restored just before moving sp. |
| 301 */ |
| 302 "ldr r12, [r14, %[sp]]\n" |
| 303 |
| 304 "ldr r11, [r14, %[pc]]\n" |
| 305 "stmdb r12!, {r11}\n" |
| 306 "ldr r11, [r14, %[lr]]\n" |
| 307 "stmdb r12!, {r11}\n" |
| 308 "ldr r11, [r14, %[r12]]\n" |
| 309 "stmdb r12!, {r11}\n" |
| 310 "ldr r11, [r14, %[r11]]\n" |
| 311 "mov sp, r12\n" |
| 312 |
| 313 /* |
| 314 * Restore r12, lr, and pc. sp will point to the correct location once |
| 315 * we're done. |
| 316 */ |
| 317 "pop {r12, lr}\n" |
| 318 "pop {pc}\n" |
| 319 : |
| 320 : "r" (a14), |
| 321 OFFSET(cpsr), |
| 322 OFFSET(r11), |
| 323 OFFSET(r12), |
| 324 OFFSET(sp), |
| 325 OFFSET(lr), |
| 326 OFFSET(pc) |
| 327 ); |
| 328 |
| 329 #undef OFFSET |
| 330 |
| 331 #else |
| 332 # error Unsupported architecture |
| 333 #endif |
| 334 |
| 335 /* Should never reach this. */ |
| 336 __builtin_trap(); |
| 337 } |
| 338 |
| 339 static __attribute__((noreturn)) |
| 340 void restore_context(void *raw_ctx) { |
| 341 const struct linux_ucontext_t *uctx = (struct linux_ucontext_t *) raw_ctx; |
| 342 const linux_mcontext_t *mctx = &uctx->uc_mcontext; |
| 343 nonsfi_restore_context(mctx); |
| 344 } |
| 345 |
| 346 /* Signal handlers, responsible for calling the registered handlers. */ |
| 347 static void exception_catch(int sig, linux_siginfo_t *info, void *uc) { |
| 348 if (g_exception_handler_function_pointer) { |
195 struct NonSfiExceptionFrame exception_frame; | 349 struct NonSfiExceptionFrame exception_frame; |
196 exception_frame_from_signal_context(&exception_frame, uc); | 350 nonsfi_exception_frame_from_signal_context(&exception_frame, uc); |
197 g_signal_handler_function_pointer(&exception_frame.context); | 351 g_exception_handler_function_pointer(&exception_frame.context); |
198 } | 352 } |
199 _exit(-sig); | 353 _exit(-sig); |
200 } | 354 } |
201 | 355 |
202 static void nonsfi_initialize_signal_handler_locked() { | 356 static void signal_catch(int sig, linux_siginfo_t *info, void *uc) { |
| 357 if (g_signal_handler_function_pointer) { |
| 358 struct NonSfiExceptionFrame exception_frame; |
| 359 nonsfi_exception_frame_from_signal_context(&exception_frame, uc); |
| 360 g_signal_handler_function_pointer(&exception_frame.context); |
| 361 } |
| 362 restore_context(uc); |
| 363 } |
| 364 |
| 365 static void nonsfi_install_exception_handler_locked() { |
203 struct linux_sigaction sa; | 366 struct linux_sigaction sa; |
204 unsigned int a; | 367 unsigned int a; |
205 | 368 |
206 memset(&sa, 0, sizeof(sa)); | 369 memset(&sa, 0, sizeof(sa)); |
207 sa.sa_sigaction = signal_catch; | 370 sa.sa_sigaction = exception_catch; |
208 sa.sa_flags = LINUX_SA_SIGINFO | LINUX_SA_ONSTACK; | 371 sa.sa_flags = LINUX_SA_SIGINFO | LINUX_SA_ONSTACK; |
209 | 372 |
210 /* | 373 /* |
211 * Reuse the sigemptyset/sigaddset for the first 32 bits of the | 374 * Reuse the sigemptyset/sigaddset for the first 32 bits of the |
212 * sigmask. Works on little endian systems only. | 375 * sigmask. Works on little endian systems only. |
213 */ | 376 */ |
214 sigset_t *mask = (sigset_t*)&sa.sa_mask; | 377 sigset_t *mask = (sigset_t*)&sa.sa_mask; |
215 sigemptyset(mask); | 378 sigemptyset(mask); |
216 | 379 |
217 /* Mask all signals we catch to prevent re-entry. */ | 380 /* Mask all signals we catch to prevent re-entry. */ |
218 for (a = 0; a < NACL_ARRAY_SIZE(kSignals); a++) { | 381 for (a = 0; a < NACL_ARRAY_SIZE(kSignals); a++) { |
219 sigaddset(mask, kSignals[a]); | 382 sigaddset(mask, kSignals[a]); |
220 } | 383 } |
221 | 384 |
222 /* Install all handlers. */ | 385 /* Install all handlers. */ |
223 for (a = 0; a < NACL_ARRAY_SIZE(kSignals); a++) { | 386 for (a = 0; a < NACL_ARRAY_SIZE(kSignals); a++) { |
224 if (linux_sigaction(kSignals[a], &sa, NULL) != 0) | 387 if (linux_sigaction(kSignals[a], &sa, NULL) != 0) |
225 abort(); | 388 abort(); |
226 } | 389 } |
227 } | 390 } |
228 | 391 |
| 392 static void nonsfi_install_signal_handler_locked() { |
| 393 struct linux_sigaction sa; |
| 394 |
| 395 memset(&sa, 0, sizeof(sa)); |
| 396 sa.sa_sigaction = signal_catch; |
| 397 |
| 398 /* |
| 399 * The user signal handler can be interrupted recursively, which |
| 400 * avoids having to allow sigreturn/sigprocmask. |
| 401 */ |
| 402 sa.sa_flags = LINUX_SA_SIGINFO | LINUX_SA_NODEFER | LINUX_SA_RESTART; |
| 403 sigset_t *mask = (sigset_t*)&sa.sa_mask; |
| 404 sigemptyset(mask); |
| 405 |
| 406 /* |
| 407 * Install a single handler. Multiple signals can be multiplexed in |
| 408 * userspace. |
| 409 */ |
| 410 if (linux_sigaction(LINUX_SIGUSR1, &sa, NULL) != 0) |
| 411 abort(); |
| 412 } |
| 413 |
| 414 static void nonsfi_initialize_signal_handler_locked() { |
| 415 if (g_signal_handler_initialized) |
| 416 return; |
| 417 pid_t tgid = getpid(); |
| 418 if (tgid == -1) |
| 419 abort(); |
| 420 pid_t main_tid = syscall(__NR_gettid); |
| 421 if (main_tid == -1) |
| 422 abort(); |
| 423 nonsfi_install_exception_handler_locked(); |
| 424 nonsfi_install_signal_handler_locked(); |
| 425 g_tgid = tgid; |
| 426 g_main_tid = main_tid; |
| 427 g_signal_handler_initialized = 1; |
| 428 } |
| 429 |
229 /* | 430 /* |
230 * Initialize signal handlers before entering sandbox. | 431 * Initialize signal handlers before entering sandbox. |
231 */ | 432 */ |
232 void nonsfi_initialize_signal_handler() { | 433 void nonsfi_initialize_signal_handler() { |
233 if (pthread_mutex_lock(&g_mutex) != 0) | 434 if (pthread_mutex_lock(&g_mutex) != 0) |
234 abort(); | 435 abort(); |
235 if (!g_signal_handler_initialized) { | 436 nonsfi_initialize_signal_handler_locked(); |
236 nonsfi_initialize_signal_handler_locked(); | |
237 g_signal_handler_initialized = 1; | |
238 } | |
239 if (pthread_mutex_unlock(&g_mutex) != 0) | 437 if (pthread_mutex_unlock(&g_mutex) != 0) |
240 abort(); | 438 abort(); |
241 } | 439 } |
242 | 440 |
243 int nacl_exception_get_and_set_handler(NaClExceptionHandler handler, | 441 int nacl_exception_get_and_set_handler(NaClExceptionHandler handler, |
244 NaClExceptionHandler *old_handler) { | 442 NaClExceptionHandler *old_handler) { |
245 nonsfi_initialize_signal_handler(); | |
246 if (pthread_mutex_lock(&g_mutex) != 0) | 443 if (pthread_mutex_lock(&g_mutex) != 0) |
247 abort(); | 444 abort(); |
| 445 nonsfi_initialize_signal_handler_locked(); |
248 if (old_handler) | 446 if (old_handler) |
249 *old_handler = g_signal_handler_function_pointer; | 447 *old_handler = g_exception_handler_function_pointer; |
250 g_signal_handler_function_pointer = handler; | 448 g_exception_handler_function_pointer = handler; |
251 if (pthread_mutex_unlock(&g_mutex) != 0) | 449 if (pthread_mutex_unlock(&g_mutex) != 0) |
252 abort(); | 450 abort(); |
253 return 0; | 451 return 0; |
254 } | 452 } |
255 | 453 |
256 int nacl_exception_set_handler(NaClExceptionHandler handler) { | 454 int nacl_exception_set_handler(NaClExceptionHandler handler) { |
257 return nacl_exception_get_and_set_handler(handler, NULL); | 455 return nacl_exception_get_and_set_handler(handler, NULL); |
258 } | 456 } |
259 | 457 |
260 int nacl_exception_clear_flag(void) { | 458 int nacl_exception_clear_flag(void) { |
(...skipping 25 matching lines...)
286 if (linux_sigprocmask(LINUX_SIG_UNBLOCK, &mask, NULL) != 0) | 484 if (linux_sigprocmask(LINUX_SIG_UNBLOCK, &mask, NULL) != 0) |
287 abort(); | 485 abort(); |
288 | 486 |
289 return 0; | 487 return 0; |
290 } | 488 } |
291 | 489 |
292 int nacl_exception_set_stack(void *p, size_t s) { | 490 int nacl_exception_set_stack(void *p, size_t s) { |
293 /* Not implemented yet. */ | 491 /* Not implemented yet. */ |
294 return ENOSYS; | 492 return ENOSYS; |
295 } | 493 } |
| 494 |
| 495 int nacl_async_signal_set_handler(NaClIrtAsyncSignalHandler handler) { |
| 496 if (pthread_mutex_lock(&g_mutex) != 0) |
| 497 abort(); |
| 498 nonsfi_initialize_signal_handler_locked(); |
| 499 g_signal_handler_function_pointer = handler; |
| 500 if (pthread_mutex_unlock(&g_mutex) != 0) |
| 501 abort(); |
| 502 return 0; |
| 503 } |
| 504 |
| 505 int nacl_async_signal_send_async_signal(nacl_irt_tid_t tid) { |
| 506 if (!g_signal_handler_initialized) |
| 507 return ESRCH; |
| 508 if (tid == 0) |
| 509 tid = g_main_tid; |
| 510 if (linux_tgkill(g_tgid, tid, LINUX_SIGUSR1) == -1) |
| 511 return errno; |
| 512 return 0; |
| 513 } |
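
A minimal usage sketch of the async-signal IRT calls added at the end of this file (not part of the CL). The header path and the exact NaClIrtAsyncSignalHandler typedef are assumptions: the stored handler is invoked with &exception_frame.context in signal_catch() above, so it is assumed to take a struct NaClExceptionContext *.

/*
 * Usage sketch only -- not part of this change.  Assumes that
 * irt_signal_handling.h declares the two nacl_async_signal_* entry points
 * and that NaClIrtAsyncSignalHandler takes a struct NaClExceptionContext *,
 * matching how signal_catch() calls the stored handler.
 */
#include <unistd.h>

#include "native_client/src/include/nacl/nacl_exception.h"
#include "native_client/src/public/nonsfi/irt_signal_handling.h"

static void my_async_handler(struct NaClExceptionContext *context) {
  /* Runs in signal context: keep the work async-signal-safe. */
  (void) context;
  static const char msg[] = "async signal delivered\n";
  (void) write(2, msg, sizeof(msg) - 1);
}

int main(void) {
  /* Registering the handler also lazily installs the SIGUSR1 catcher. */
  if (nacl_async_signal_set_handler(my_async_handler) != 0)
    return 1;
  /* tid == 0 targets the main thread recorded at initialization time. */
  if (nacl_async_signal_send_async_signal(0) != 0)
    return 1;
  return 0;
}

Because the SIGUSR1 handler is installed with LINUX_SA_NODEFER and returns through nonsfi_restore_context() instead of sigreturn, the registered handler may be re-entered if another async signal arrives while it is running.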