1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "sandbox/linux/bpf_dsl/bpf_dsl.h" | |
6 | |
7 #include <errno.h> | |
8 #include <fcntl.h> | |
9 #include <pthread.h> | |
10 #include <sched.h> | |
11 #include <signal.h> | |
12 #include <sys/prctl.h> | |
13 #include <sys/ptrace.h> | |
14 #include <sys/syscall.h> | |
15 #include <sys/time.h> | |
16 #include <sys/types.h> | |
17 #include <sys/utsname.h> | |
18 #include <unistd.h> | |
19 #include <sys/socket.h> | |
20 | |
21 #if defined(ANDROID) | |
22 // Work-around for buggy headers in Android's NDK | |
23 #define __user | |
24 #endif | |
25 #include <linux/futex.h> | |
26 | |
27 #include "base/bind.h" | |
28 #include "base/logging.h" | |
29 #include "base/macros.h" | |
30 #include "base/memory/scoped_ptr.h" | |
31 #include "base/posix/eintr_wrapper.h" | |
32 #include "base/synchronization/waitable_event.h" | |
33 #include "base/sys_info.h" | |
34 #include "base/threading/thread.h" | |
35 #include "build/build_config.h" | |
36 #include "sandbox/linux/bpf_dsl/policy.h" | |
37 #include "sandbox/linux/seccomp-bpf/bpf_tests.h" | |
38 #include "sandbox/linux/seccomp-bpf/die.h" | |
39 #include "sandbox/linux/seccomp-bpf/errorcode.h" | |
40 #include "sandbox/linux/seccomp-bpf/linux_seccomp.h" | |
41 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h" | |
42 #include "sandbox/linux/seccomp-bpf/syscall.h" | |
43 #include "sandbox/linux/seccomp-bpf/trap.h" | |
44 #include "sandbox/linux/services/linux_syscalls.h" | |
45 #include "sandbox/linux/services/syscall_wrappers.h" | |
46 #include "sandbox/linux/syscall_broker/broker_file_permission.h" | |
47 #include "sandbox/linux/syscall_broker/broker_process.h" | |
48 #include "sandbox/linux/tests/scoped_temporary_file.h" | |
49 #include "sandbox/linux/tests/unit_tests.h" | |
50 #include "testing/gtest/include/gtest/gtest.h" | |
51 | |
52 // Workaround for Android's prctl.h file. | |
53 #ifndef PR_GET_ENDIAN | |
54 #define PR_GET_ENDIAN 19 | |
55 #endif | |
56 #ifndef PR_CAPBSET_READ | |
57 #define PR_CAPBSET_READ 23 | |
58 #define PR_CAPBSET_DROP 24 | |
59 #endif | |
60 | |
61 namespace sandbox { | |
62 namespace bpf_dsl { | |
63 | |
64 namespace { | |
65 | |
66 const int kExpectedReturnValue = 42; | |
67 const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING"; | |
68 | |
69 // Set the global environment to allow the use of UnsafeTrap() policies. | |
70 void EnableUnsafeTraps() { | |
71 // The use of UnsafeTrap() causes us to print a warning message. This is | |
72 // generally desirable, but it results in the unittest failing, as it doesn't | |
73 // expect any messages on "stderr". So, temporarily disable messages. The | |
74 // BPF_TEST() is guaranteed to turn messages back on, after the policy | |
75 // function has completed. | |
76 setenv(kSandboxDebuggingEnv, "t", 0); | |
77 Die::SuppressInfoMessages(true); | |
78 } | |
79 | |
80 // BPF_TEST handles a lot of the boiler-plate code around setting up a | |
81 // policy and optionally passing data between the caller, the policy and | |
82 // any Trap() handlers. This is great for writing short and concise tests, | |
83 // and it keeps us from accidentally forgetting any of the crucial steps in | |
84 // setting up the sandbox. But it wouldn't hurt to have at least one test | |
85 // that explicitly walks through all these steps. | |
86 | |
87 intptr_t IncreaseCounter(const struct arch_seccomp_data& args, void* aux) { | |
88 BPF_ASSERT(aux); | |
89 int* counter = static_cast<int*>(aux); | |
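// The value returned here becomes the result of the emulated uname() call, | |
// which is how the test below observes the counter advancing. | |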
90 return (*counter)++; | |
91 } | |
92 | |
93 class VerboseAPITestingPolicy : public Policy { | |
94 public: | |
95 explicit VerboseAPITestingPolicy(int* counter_ptr) | |
96 : counter_ptr_(counter_ptr) {} | |
97 ~VerboseAPITestingPolicy() override {} | |
98 | |
99 ResultExpr EvaluateSyscall(int sysno) const override { | |
100 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
101 if (sysno == __NR_uname) { | |
102 return Trap(IncreaseCounter, counter_ptr_); | |
103 } | |
104 return Allow(); | |
105 } | |
106 | |
107 private: | |
108 int* counter_ptr_; | |
109 | |
110 DISALLOW_COPY_AND_ASSIGN(VerboseAPITestingPolicy); | |
111 }; | |
112 | |
113 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) { | |
114 if (SandboxBPF::SupportsSeccompSandbox( | |
115 SandboxBPF::SeccompLevel::SINGLE_THREADED)) { | |
116 static int counter = 0; | |
117 | |
118 SandboxBPF sandbox(new VerboseAPITestingPolicy(&counter)); | |
119 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED)); | |
120 | |
121 BPF_ASSERT_EQ(0, counter); | |
122 BPF_ASSERT_EQ(0, syscall(__NR_uname, 0)); | |
123 BPF_ASSERT_EQ(1, counter); | |
124 BPF_ASSERT_EQ(1, syscall(__NR_uname, 0)); | |
125 BPF_ASSERT_EQ(2, counter); | |
126 } | |
127 } | |
128 | |
129 // A simple blacklist test | |
130 | |
131 class BlacklistNanosleepPolicy : public Policy { | |
132 public: | |
133 BlacklistNanosleepPolicy() {} | |
134 ~BlacklistNanosleepPolicy() override {} | |
135 | |
136 ResultExpr EvaluateSyscall(int sysno) const override { | |
137 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
138 switch (sysno) { | |
139 case __NR_nanosleep: | |
140 return Error(EACCES); | |
141 default: | |
142 return Allow(); | |
143 } | |
144 } | |
145 | |
146 static void AssertNanosleepFails() { | |
147 const struct timespec ts = {0, 0}; | |
148 errno = 0; | |
149 BPF_ASSERT_EQ(-1, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL))); | |
150 BPF_ASSERT_EQ(EACCES, errno); | |
151 } | |
152 | |
153 private: | |
154 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepPolicy); | |
155 }; | |
156 | |
157 BPF_TEST_C(SandboxBPF, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) { | |
158 BlacklistNanosleepPolicy::AssertNanosleepFails(); | |
159 } | |
160 | |
161 BPF_TEST_C(SandboxBPF, UseVsyscall, BlacklistNanosleepPolicy) { | |
162 time_t current_time; | |
163 // time() is implemented as a vsyscall. With an older glibc, with | |
164 // vsyscall=emulate and some versions of the seccomp BPF patch | |
165 // we may get SIGKILL-ed. Detect this! | |
166 BPF_ASSERT_NE(static_cast<time_t>(-1), time(&current_time)); | |
167 } | |
168 | |
169 // Now do a simple whitelist test | |
170 | |
171 class WhitelistGetpidPolicy : public Policy { | |
172 public: | |
173 WhitelistGetpidPolicy() {} | |
174 ~WhitelistGetpidPolicy() override {} | |
175 | |
176 ResultExpr EvaluateSyscall(int sysno) const override { | |
177 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
178 switch (sysno) { | |
179 case __NR_getpid: | |
180 case __NR_exit_group: | |
181 return Allow(); | |
182 default: | |
183 return Error(ENOMEM); | |
184 } | |
185 } | |
186 | |
187 private: | |
188 DISALLOW_COPY_AND_ASSIGN(WhitelistGetpidPolicy); | |
189 }; | |
190 | |
191 BPF_TEST_C(SandboxBPF, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) { | |
192 // getpid() should be allowed | |
193 errno = 0; | |
194 BPF_ASSERT(sys_getpid() > 0); | |
195 BPF_ASSERT(errno == 0); | |
196 | |
197 // getpgid() should be denied | |
198 BPF_ASSERT(getpgid(0) == -1); | |
199 BPF_ASSERT(errno == ENOMEM); | |
200 } | |
201 | |
202 // A simple blacklist policy, with a SIGSYS handler | |
203 intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) { | |
204 // We also check that the auxiliary data is correct | |
205 SANDBOX_ASSERT(aux); | |
206 *(static_cast<int*>(aux)) = kExpectedReturnValue; | |
207 return -ENOMEM; | |
208 } | |
209 | |
210 class BlacklistNanosleepTrapPolicy : public Policy { | |
211 public: | |
212 explicit BlacklistNanosleepTrapPolicy(int* aux) : aux_(aux) {} | |
213 ~BlacklistNanosleepTrapPolicy() override {} | |
214 | |
215 ResultExpr EvaluateSyscall(int sysno) const override { | |
216 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
217 switch (sysno) { | |
218 case __NR_nanosleep: | |
219 return Trap(EnomemHandler, aux_); | |
220 default: | |
221 return Allow(); | |
222 } | |
223 } | |
224 | |
225 private: | |
226 int* aux_; | |
227 | |
228 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepTrapPolicy); | |
229 }; | |
230 | |
231 BPF_TEST(SandboxBPF, | |
232 BasicBlacklistWithSigsys, | |
233 BlacklistNanosleepTrapPolicy, | |
234 int /* (*BPF_AUX) */) { | |
235 // getpid() should work properly | |
236 errno = 0; | |
237 BPF_ASSERT(sys_getpid() > 0); | |
238 BPF_ASSERT(errno == 0); | |
239 | |
240 // Our auxiliary data should be overwritten by the signal handler | |
241 *BPF_AUX = -1; | |
242 const struct timespec ts = {0, 0}; | |
243 BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1); | |
244 BPF_ASSERT(errno == ENOMEM); | |
245 | |
246 // We expect the signal handler to modify AuxData | |
247 BPF_ASSERT(*BPF_AUX == kExpectedReturnValue); | |
248 } | |
249 | |
250 // A simple test that verifies we can return arbitrary errno values. | |
251 | |
252 class ErrnoTestPolicy : public Policy { | |
253 public: | |
254 ErrnoTestPolicy() {} | |
255 ~ErrnoTestPolicy() override {} | |
256 | |
257 ResultExpr EvaluateSyscall(int sysno) const override; | |
258 | |
259 private: | |
260 DISALLOW_COPY_AND_ASSIGN(ErrnoTestPolicy); | |
261 }; | |
262 | |
263 ResultExpr ErrnoTestPolicy::EvaluateSyscall(int sysno) const { | |
264 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
265 switch (sysno) { | |
266 case __NR_dup3: // dup2 is a wrapper of dup3 in android | |
267 #if defined(__NR_dup2) | |
268 case __NR_dup2: | |
269 #endif | |
270 // Pretend that dup2() worked, but don't actually do anything. | |
271 return Error(0); | |
272 case __NR_setuid: | |
273 #if defined(__NR_setuid32) | |
274 case __NR_setuid32: | |
275 #endif | |
276 // Return errno = 1. | |
277 return Error(1); | |
278 case __NR_setgid: | |
279 #if defined(__NR_setgid32) | |
280 case __NR_setgid32: | |
281 #endif | |
282 // Return maximum errno value (typically 4095). | |
283 return Error(ErrorCode::ERR_MAX_ERRNO); | |
284 case __NR_uname: | |
285 // Return errno = 42; | |
286 return Error(42); | |
287 default: | |
288 return Allow(); | |
289 } | |
290 } | |
291 | |
292 BPF_TEST_C(SandboxBPF, ErrnoTest, ErrnoTestPolicy) { | |
293 // Verify that dup2() returns success, but doesn't actually run. | |
294 int fds[4]; | |
295 BPF_ASSERT(pipe(fds) == 0); | |
296 BPF_ASSERT(pipe(fds + 2) == 0); | |
297 BPF_ASSERT(dup2(fds[2], fds[0]) == 0); | |
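// If dup2() had really run, fds[0] would now be a duplicate of fds[2], the | |
// read end of the second pipe. | |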
298 char buf[1] = {}; | |
299 BPF_ASSERT(write(fds[1], "\x55", 1) == 1); | |
300 BPF_ASSERT(write(fds[3], "\xAA", 1) == 1); | |
301 BPF_ASSERT(read(fds[0], buf, 1) == 1); | |
302 | |
303 // If dup2() executed, we will read \xAA, but if dup2() has been turned | |
304 // into a no-op by our policy, then we will read \x55. | |
305 BPF_ASSERT(buf[0] == '\x55'); | |
306 | |
307 // Verify that we can return the minimum and maximum errno values. | |
308 errno = 0; | |
309 BPF_ASSERT(setuid(0) == -1); | |
310 BPF_ASSERT(errno == 1); | |
311 | |
312 // On Android, errno is only supported up to 255, otherwise errno | |
313 // processing is skipped. | |
314 // We work around this (crbug.com/181647). | |
315 if (sandbox::IsAndroid() && setgid(0) != -1) { | |
316 errno = 0; | |
317 BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO); | |
318 BPF_ASSERT(errno == 0); | |
319 } else { | |
320 errno = 0; | |
321 BPF_ASSERT(setgid(0) == -1); | |
322 BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO); | |
323 } | |
324 | |
325 // Finally, test an errno in between the minimum and maximum. | |
326 errno = 0; | |
327 struct utsname uts_buf; | |
328 BPF_ASSERT(uname(&uts_buf) == -1); | |
329 BPF_ASSERT(errno == 42); | |
330 } | |
331 | |
332 // Testing the stacking of two sandboxes | |
333 | |
334 class StackingPolicyPartOne : public Policy { | |
335 public: | |
336 StackingPolicyPartOne() {} | |
337 ~StackingPolicyPartOne() override {} | |
338 | |
339 ResultExpr EvaluateSyscall(int sysno) const override { | |
340 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
341 switch (sysno) { | |
342 case __NR_getppid: { | |
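// getppid() normally takes no arguments; the tests below pass a value in | |
// the otherwise-unused first argument slot purely to select a filter branch. | |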
343 const Arg<int> arg(0); | |
344 return If(arg == 0, Allow()).Else(Error(EPERM)); | |
345 } | |
346 default: | |
347 return Allow(); | |
348 } | |
349 } | |
350 | |
351 private: | |
352 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartOne); | |
353 }; | |
354 | |
355 class StackingPolicyPartTwo : public Policy { | |
356 public: | |
357 StackingPolicyPartTwo() {} | |
358 ~StackingPolicyPartTwo() override {} | |
359 | |
360 ResultExpr EvaluateSyscall(int sysno) const override { | |
361 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
362 switch (sysno) { | |
363 case __NR_getppid: { | |
364 const Arg<int> arg(0); | |
365 return If(arg == 0, Error(EINVAL)).Else(Allow()); | |
366 } | |
367 default: | |
368 return Allow(); | |
369 } | |
370 } | |
371 | |
372 private: | |
373 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartTwo); | |
374 }; | |
375 | |
376 BPF_TEST_C(SandboxBPF, StackingPolicy, StackingPolicyPartOne) { | |
377 errno = 0; | |
378 BPF_ASSERT(syscall(__NR_getppid, 0) > 0); | |
379 BPF_ASSERT(errno == 0); | |
380 | |
381 BPF_ASSERT(syscall(__NR_getppid, 1) == -1); | |
382 BPF_ASSERT(errno == EPERM); | |
383 | |
384 // Stack a second sandbox with its own policy. Verify that we can further | |
385 // restrict filters, but we cannot relax existing filters. | |
386 SandboxBPF sandbox(new StackingPolicyPartTwo()); | |
387 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED)); | |
388 | |
389 errno = 0; | |
390 BPF_ASSERT(syscall(__NR_getppid, 0) == -1); | |
391 BPF_ASSERT(errno == EINVAL); | |
392 | |
393 BPF_ASSERT(syscall(__NR_getppid, 1) == -1); | |
394 BPF_ASSERT(errno == EPERM); | |
395 } | |
396 | |
397 // A more complex, but synthetic policy. This tests the correctness of the BPF | |
398 // program by iterating through all syscalls and checking for an errno that | |
399 // depends on the syscall number. Unlike the Verifier, this exercises the BPF | |
400 // interpreter in the kernel. | |
401 | |
402 // We try to make sure we exercise optimizations in the BPF compiler. We give | |
403 // the compiler an opportunity to coalesce syscalls with contiguous numbers, | |
404 // and we also make sure that disjoint sets can return the | |
405 // same errno. | |
406 int SysnoToRandomErrno(int sysno) { | |
407 // Small contiguous sets of four system calls return an errno equal to the | |
408 // index of that set + 1 (so that we never return a zero errno). | |
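// (sysno & ~3) >> 2 maps each block of four consecutive syscall numbers to | |
// its index, and % 29 + 1 keeps the returned errno in the range 1..29. | |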
409 return ((sysno & ~3) >> 2) % 29 + 1; | |
410 } | |
411 | |
412 class SyntheticPolicy : public Policy { | |
413 public: | |
414 SyntheticPolicy() {} | |
415 ~SyntheticPolicy() override {} | |
416 | |
417 ResultExpr EvaluateSyscall(int sysno) const override { | |
418 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
419 if (sysno == __NR_exit_group || sysno == __NR_write) { | |
420 // exit_group() is special, we really need it to work. | |
421 // write() is needed for BPF_ASSERT() to report a useful error message. | |
422 return Allow(); | |
423 } | |
424 return Error(SysnoToRandomErrno(sysno)); | |
425 } | |
426 | |
427 private: | |
428 DISALLOW_COPY_AND_ASSIGN(SyntheticPolicy); | |
429 }; | |
430 | |
431 BPF_TEST_C(SandboxBPF, SyntheticPolicy, SyntheticPolicy) { | |
432 // Ensure that kExpectedReturnValue + syscall number + 1 does not | |
433 // overflow an int. | |
434 BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >= | |
435 static_cast<int>(MAX_PUBLIC_SYSCALL)); | |
436 | |
437 for (int syscall_number = static_cast<int>(MIN_SYSCALL); | |
438 syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL); | |
439 ++syscall_number) { | |
440 if (syscall_number == __NR_exit_group || syscall_number == __NR_write) { | |
441 // exit_group() is special | |
442 continue; | |
443 } | |
444 errno = 0; | |
445 BPF_ASSERT(syscall(syscall_number) == -1); | |
446 BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number)); | |
447 } | |
448 } | |
449 | |
450 #if defined(__arm__) | |
451 // A simple policy that tests whether ARM private system calls are supported | |
452 // by our BPF compiler and by the BPF interpreter in the kernel. | |
453 | |
454 // For ARM private system calls, return an errno equal to their offset from | |
455 // MIN_PRIVATE_SYSCALL plus 1 (to avoid NUL errno). | |
456 int ArmPrivateSysnoToErrno(int sysno) { | |
457 if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) && | |
458 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) { | |
459 return (sysno - MIN_PRIVATE_SYSCALL) + 1; | |
460 } else { | |
461 return ENOSYS; | |
462 } | |
463 } | |
464 | |
465 class ArmPrivatePolicy : public Policy { | |
466 public: | |
467 ArmPrivatePolicy() {} | |
468 ~ArmPrivatePolicy() override {} | |
469 | |
470 ResultExpr EvaluateSyscall(int sysno) const override { | |
471 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
472 // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual | |
473 // ARM private system calls. | |
474 if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) && | |
475 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) { | |
476 return Error(ArmPrivateSysnoToErrno(sysno)); | |
477 } | |
478 return Allow(); | |
479 } | |
480 | |
481 private: | |
482 DISALLOW_COPY_AND_ASSIGN(ArmPrivatePolicy); | |
483 }; | |
484 | |
485 BPF_TEST_C(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) { | |
486 for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1); | |
487 syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL); | |
488 ++syscall_number) { | |
489 errno = 0; | |
490 BPF_ASSERT(syscall(syscall_number) == -1); | |
491 BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number)); | |
492 } | |
493 } | |
494 #endif // defined(__arm__) | |
495 | |
496 intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) { | |
497 // Count all invocations of our callback function. | |
498 ++*reinterpret_cast<int*>(aux); | |
499 | |
500 // Verify that within the callback function all filtering is temporarily | |
501 // disabled. | |
502 BPF_ASSERT(sys_getpid() > 1); | |
503 | |
504 // Verify that we can now call the underlying system call without causing | |
505 // infinite recursion. | |
506 return SandboxBPF::ForwardSyscall(args); | |
507 } | |
508 | |
509 class GreyListedPolicy : public Policy { | |
510 public: | |
511 explicit GreyListedPolicy(int* aux) : aux_(aux) { | |
512 // Set the global environment for unsafe traps once. | |
513 EnableUnsafeTraps(); | |
514 } | |
515 ~GreyListedPolicy() override {} | |
516 | |
517 ResultExpr EvaluateSyscall(int sysno) const override { | |
518 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
519 // Some system calls must always be allowed if our policy wants to make | |
520 // use of UnsafeTrap(). | |
521 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) { | |
522 return Allow(); | |
523 } else if (sysno == __NR_getpid) { | |
524 // Disallow getpid() | |
525 return Error(EPERM); | |
526 } else { | |
527 // Allow (and count) all other system calls. | |
528 return UnsafeTrap(CountSyscalls, aux_); | |
529 } | |
530 } | |
531 | |
532 private: | |
533 int* aux_; | |
534 | |
535 DISALLOW_COPY_AND_ASSIGN(GreyListedPolicy); | |
536 }; | |
537 | |
538 BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* (*BPF_AUX) */) { | |
539 BPF_ASSERT(sys_getpid() == -1); | |
540 BPF_ASSERT(errno == EPERM); | |
541 BPF_ASSERT(*BPF_AUX == 0); | |
542 BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid)); | |
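// Both geteuid() and getuid() went through the UnsafeTrap() handler above, | |
// so the call counter has advanced by two. | |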
543 BPF_ASSERT(*BPF_AUX == 2); | |
544 char name[17] = {}; | |
545 BPF_ASSERT(!syscall(__NR_prctl, | |
546 PR_GET_NAME, | |
547 name, | |
548 (void*)NULL, | |
549 (void*)NULL, | |
550 (void*)NULL)); | |
551 BPF_ASSERT(*BPF_AUX == 3); | |
552 BPF_ASSERT(*name); | |
553 } | |
554 | |
555 SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) { | |
556 // Disable warning messages that could confuse our test framework. | |
557 setenv(kSandboxDebuggingEnv, "t", 0); | |
558 Die::SuppressInfoMessages(true); | |
559 | |
560 unsetenv(kSandboxDebuggingEnv); | |
561 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false); | |
562 setenv(kSandboxDebuggingEnv, "", 1); | |
563 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false); | |
564 setenv(kSandboxDebuggingEnv, "t", 1); | |
565 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == true); | |
566 } | |
567 | |
568 intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) { | |
569 if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) { | |
570 // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always | |
571 // return an error. But our handler allows this call. | |
572 return 0; | |
573 } else { | |
574 return SandboxBPF::ForwardSyscall(args); | |
575 } | |
576 } | |
577 | |
578 class PrctlPolicy : public Policy { | |
579 public: | |
580 PrctlPolicy() {} | |
581 ~PrctlPolicy() override {} | |
582 | |
583 ResultExpr EvaluateSyscall(int sysno) const override { | |
584 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
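// Debugging must be enabled before UnsafeTrap() may be used (see | |
// EnableUnsafeTraps() above); doing it here keeps this policy self-contained. | |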
585 setenv(kSandboxDebuggingEnv, "t", 0); | |
586 Die::SuppressInfoMessages(true); | |
587 | |
588 if (sysno == __NR_prctl) { | |
589 // Handle prctl() inside an UnsafeTrap() | |
590 return UnsafeTrap(PrctlHandler, NULL); | |
591 } | |
592 | |
593 // Allow all other system calls. | |
594 return Allow(); | |
595 } | |
596 | |
597 private: | |
598 DISALLOW_COPY_AND_ASSIGN(PrctlPolicy); | |
599 }; | |
600 | |
601 BPF_TEST_C(SandboxBPF, ForwardSyscall, PrctlPolicy) { | |
602 // This call should never be allowed. But our policy will intercept it and | |
603 // let it pass successfully. | |
604 BPF_ASSERT( | |
605 !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL)); | |
606 | |
607 // Verify that the call will fail, if it makes it all the way to the kernel. | |
608 BPF_ASSERT( | |
609 prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1); | |
610 | |
611 // And verify that other uses of prctl() work just fine. | |
612 char name[17] = {}; | |
613 BPF_ASSERT(!syscall(__NR_prctl, | |
614 PR_GET_NAME, | |
615 name, | |
616 (void*)NULL, | |
617 (void*)NULL, | |
618 (void*)NULL)); | |
619 BPF_ASSERT(*name); | |
620 | |
621 // Finally, verify that system calls other than prctl() are completely | |
622 // unaffected by our policy. | |
623 struct utsname uts = {}; | |
624 BPF_ASSERT(!uname(&uts)); | |
625 BPF_ASSERT(!strcmp(uts.sysname, "Linux")); | |
626 } | |
627 | |
628 intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) { | |
629 return SandboxBPF::ForwardSyscall(args); | |
630 } | |
631 | |
632 class RedirectAllSyscallsPolicy : public Policy { | |
633 public: | |
634 RedirectAllSyscallsPolicy() {} | |
635 ~RedirectAllSyscallsPolicy() override {} | |
636 | |
637 ResultExpr EvaluateSyscall(int sysno) const override; | |
638 | |
639 private: | |
640 DISALLOW_COPY_AND_ASSIGN(RedirectAllSyscallsPolicy); | |
641 }; | |
642 | |
643 ResultExpr RedirectAllSyscallsPolicy::EvaluateSyscall(int sysno) const { | |
644 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
645 setenv(kSandboxDebuggingEnv, "t", 0); | |
646 Die::SuppressInfoMessages(true); | |
647 | |
648 // Some system calls must always be allowed if our policy wants to make | |
649 // use of UnsafeTrap(). | |
650 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) | |
651 return Allow(); | |
652 return UnsafeTrap(AllowRedirectedSyscall, NULL); | |
653 } | |
654 | |
655 int bus_handler_fd_ = -1; | |
656 | |
657 void SigBusHandler(int, siginfo_t* info, void* void_context) { | |
658 BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1); | |
659 } | |
660 | |
661 BPF_TEST_C(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) { | |
662 // We use the SIGBUS bit in the signal mask as a thread-local boolean | |
663 // value in the implementation of UnsafeTrap(). This is obviously a bit | |
664 // of a hack that could conceivably interfere with code that uses SIGBUS | |
665 // in more traditional ways. This test verifies that basic functionality | |
666 // of SIGBUS is not impacted, but it is certainly possible to construct | |
667 // more complex uses of signals where our use of the SIGBUS mask is not | |
668 // 100% transparent. This is expected behavior. | |
669 int fds[2]; | |
670 BPF_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0); | |
671 bus_handler_fd_ = fds[1]; | |
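// The SIGBUS handler writes a single byte to fds[1]; reading it back from | |
// fds[0] below proves that the handler actually ran. | |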
672 struct sigaction sa = {}; | |
673 sa.sa_sigaction = SigBusHandler; | |
674 sa.sa_flags = SA_SIGINFO; | |
675 BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0); | |
676 raise(SIGBUS); | |
677 char c = '\000'; | |
678 BPF_ASSERT(read(fds[0], &c, 1) == 1); | |
679 BPF_ASSERT(close(fds[0]) == 0); | |
680 BPF_ASSERT(close(fds[1]) == 0); | |
681 BPF_ASSERT(c == 0x55); | |
682 } | |
683 | |
684 BPF_TEST_C(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) { | |
685 // Signal masks are potentially tricky to handle. For instance, if we | |
686 // ever tried to update them from inside a Trap() or UnsafeTrap() handler, | |
687 // the call to sigreturn() at the end of the signal handler would undo | |
688 // all of our efforts. So, it makes sense to test that sigprocmask() | |
689 // works, even if we have a policy in place that makes use of UnsafeTrap(). | |
690 // In practice, this works because we force sigprocmask() to be handled | |
691 // entirely in the kernel. | |
692 sigset_t mask0, mask1, mask2; | |
693 | |
694 // Call sigprocmask() to verify that SIGUSR2 isn't blocked when we don't | |
695 // change the mask (it shouldn't be, as it isn't blocked by default | |
696 // in POSIX). | |
697 // | |
698 // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose. | |
699 sigemptyset(&mask0); | |
700 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1)); | |
701 BPF_ASSERT(!sigismember(&mask1, SIGUSR2)); | |
702 | |
703 // Try again, and this time we verify that we can block it. This | |
704 // requires a second call to sigprocmask(). | |
705 sigaddset(&mask0, SIGUSR2); | |
706 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL)); | |
707 BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2)); | |
708 BPF_ASSERT(sigismember(&mask2, SIGUSR2)); | |
709 } | |
710 | |
711 BPF_TEST_C(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) { | |
712 // An UnsafeTrap() (or for that matter, a Trap()) has to report error | |
713 // conditions by returning an exit code in the range -1..-4096. This | |
714 // should happen automatically if using ForwardSyscall(). If the TrapFnc() | |
715 // uses some other method to make system calls, then it is responsible | |
716 // for computing the correct return code. | |
717 // This test verifies that ForwardSyscall() does the correct thing. | |
718 | |
719 // The glibc system call wrapper will ultimately set errno for us. So, from | |
720 // normal userspace, all of this should be completely transparent. | |
720 // userspace, all of this should be completely transparent. | |
721 errno = 0; | |
722 BPF_ASSERT(close(-1) == -1); | |
723 BPF_ASSERT(errno == EBADF); | |
724 | |
725 // Explicitly avoid the glibc wrapper. This is not normally the way anybody | |
726 // would make system calls, but it allows us to verify that we don't | |
727 // accidentally mess with errno, when we shouldn't. | |
728 errno = 0; | |
729 struct arch_seccomp_data args = {}; | |
730 args.nr = __NR_close; | |
731 args.args[0] = -1; | |
732 BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF); | |
733 BPF_ASSERT(errno == 0); | |
734 } | |
735 | |
736 bool NoOpCallback() { | |
737 return true; | |
738 } | |
739 | |
740 // Test a trap handler that makes use of a broker process to open(). | |
741 | |
742 class InitializedOpenBroker { | |
743 public: | |
744 InitializedOpenBroker() : initialized_(false) { | |
745 std::vector<syscall_broker::BrokerFilePermission> permissions; | |
746 permissions.push_back( | |
747 syscall_broker::BrokerFilePermission::ReadOnly("/proc/allowed")); | |
748 permissions.push_back( | |
749 syscall_broker::BrokerFilePermission::ReadOnly("/proc/cpuinfo")); | |
750 | |
751 broker_process_.reset( | |
752 new syscall_broker::BrokerProcess(EPERM, permissions)); | |
753 BPF_ASSERT(broker_process() != NULL); | |
754 BPF_ASSERT(broker_process_->Init(base::Bind(&NoOpCallback))); | |
755 | |
756 initialized_ = true; | |
757 } | |
758 bool initialized() { return initialized_; } | |
759 class syscall_broker::BrokerProcess* broker_process() { | |
760 return broker_process_.get(); | |
761 } | |
762 | |
763 private: | |
764 bool initialized_; | |
765 scoped_ptr<class syscall_broker::BrokerProcess> broker_process_; | |
766 DISALLOW_COPY_AND_ASSIGN(InitializedOpenBroker); | |
767 }; | |
768 | |
769 intptr_t BrokerOpenTrapHandler(const struct arch_seccomp_data& args, | |
770 void* aux) { | |
771 BPF_ASSERT(aux); | |
772 syscall_broker::BrokerProcess* broker_process = | |
773 static_cast<syscall_broker::BrokerProcess*>(aux); | |
774 switch (args.nr) { | |
775 case __NR_faccessat: // access is a wrapper of faccessat in android | |
776 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD); | |
777 return broker_process->Access(reinterpret_cast<const char*>(args.args[1]), | |
778 static_cast<int>(args.args[2])); | |
779 #if defined(__NR_access) | |
780 case __NR_access: | |
781 return broker_process->Access(reinterpret_cast<const char*>(args.args[0]), | |
782 static_cast<int>(args.args[1])); | |
783 #endif | |
784 #if defined(__NR_open) | |
785 case __NR_open: | |
786 return broker_process->Open(reinterpret_cast<const char*>(args.args[0]), | |
787 static_cast<int>(args.args[1])); | |
788 #endif | |
789 case __NR_openat: | |
790 // We only call open() so if we arrive here, it's because glibc uses | |
791 // the openat() system call. | |
792 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD); | |
793 return broker_process->Open(reinterpret_cast<const char*>(args.args[1]), | |
794 static_cast<int>(args.args[2])); | |
795 default: | |
796 BPF_ASSERT(false); | |
797 return -ENOSYS; | |
798 } | |
799 } | |
800 | |
801 class DenyOpenPolicy : public Policy { | |
802 public: | |
803 explicit DenyOpenPolicy(InitializedOpenBroker* iob) : iob_(iob) {} | |
804 ~DenyOpenPolicy() override {} | |
805 | |
806 ResultExpr EvaluateSyscall(int sysno) const override { | |
807 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
808 | |
809 switch (sysno) { | |
810 case __NR_faccessat: | |
811 #if defined(__NR_access) | |
812 case __NR_access: | |
813 #endif | |
814 #if defined(__NR_open) | |
815 case __NR_open: | |
816 #endif | |
817 case __NR_openat: | |
818 // We get an InitializedOpenBroker class, but our trap handler wants | |
819 // the syscall_broker::BrokerProcess object. | |
820 return Trap(BrokerOpenTrapHandler, iob_->broker_process()); | |
821 default: | |
822 return Allow(); | |
823 } | |
824 } | |
825 | |
826 private: | |
827 InitializedOpenBroker* iob_; | |
828 | |
829 DISALLOW_COPY_AND_ASSIGN(DenyOpenPolicy); | |
830 }; | |
831 | |
832 // We use an InitializedOpenBroker class, so that we can run unsandboxed | |
833 // code in its constructor, which is the only way to do so in a BPF_TEST. | |
834 BPF_TEST(SandboxBPF, | |
835 UseOpenBroker, | |
836 DenyOpenPolicy, | |
837 InitializedOpenBroker /* (*BPF_AUX) */) { | |
838 BPF_ASSERT(BPF_AUX->initialized()); | |
839 syscall_broker::BrokerProcess* broker_process = BPF_AUX->broker_process(); | |
840 BPF_ASSERT(broker_process != NULL); | |
841 | |
842 // First, use the broker "manually" | |
843 BPF_ASSERT(broker_process->Open("/proc/denied", O_RDONLY) == -EPERM); | |
844 BPF_ASSERT(broker_process->Access("/proc/denied", R_OK) == -EPERM); | |
845 BPF_ASSERT(broker_process->Open("/proc/allowed", O_RDONLY) == -ENOENT); | |
846 BPF_ASSERT(broker_process->Access("/proc/allowed", R_OK) == -ENOENT); | |
847 | |
848 // Now use glibc's open() as an external library would. | |
849 BPF_ASSERT(open("/proc/denied", O_RDONLY) == -1); | |
850 BPF_ASSERT(errno == EPERM); | |
851 | |
852 BPF_ASSERT(open("/proc/allowed", O_RDONLY) == -1); | |
853 BPF_ASSERT(errno == ENOENT); | |
854 | |
855 // Also test glibc's openat(); some versions of libc use it transparently | |
856 // instead of open(). | |
857 BPF_ASSERT(openat(AT_FDCWD, "/proc/denied", O_RDONLY) == -1); | |
858 BPF_ASSERT(errno == EPERM); | |
859 | |
860 BPF_ASSERT(openat(AT_FDCWD, "/proc/allowed", O_RDONLY) == -1); | |
861 BPF_ASSERT(errno == ENOENT); | |
862 | |
863 // And test glibc's access(). | |
864 BPF_ASSERT(access("/proc/denied", R_OK) == -1); | |
865 BPF_ASSERT(errno == EPERM); | |
866 | |
867 BPF_ASSERT(access("/proc/allowed", R_OK) == -1); | |
868 BPF_ASSERT(errno == ENOENT); | |
869 | |
870 // This is also whitelisted and does exist. | |
871 int cpu_info_access = access("/proc/cpuinfo", R_OK); | |
872 BPF_ASSERT(cpu_info_access == 0); | |
873 int cpu_info_fd = open("/proc/cpuinfo", O_RDONLY); | |
874 BPF_ASSERT(cpu_info_fd >= 0); | |
875 char buf[1024]; | |
876 BPF_ASSERT(read(cpu_info_fd, buf, sizeof(buf)) > 0); | |
877 } | |
878 | |
879 // Simple test demonstrating how to use SandboxBPF::Cond() | |
880 | |
881 class SimpleCondTestPolicy : public Policy { | |
882 public: | |
883 SimpleCondTestPolicy() {} | |
884 ~SimpleCondTestPolicy() override {} | |
885 | |
886 ResultExpr EvaluateSyscall(int sysno) const override; | |
887 | |
888 private: | |
889 DISALLOW_COPY_AND_ASSIGN(SimpleCondTestPolicy); | |
890 }; | |
891 | |
892 ResultExpr SimpleCondTestPolicy::EvaluateSyscall(int sysno) const { | |
893 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
894 | |
895 // We deliberately return unusual errno values upon failure, so that we | |
896 // can uniquely test for these values. In a "real" policy, you would want | |
897 // to return more traditional values. | |
898 int flags_argument_position = -1; | |
899 switch (sysno) { | |
900 #if defined(__NR_open) | |
901 case __NR_open: | |
902 flags_argument_position = 1; | |
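// Fall through: the openat() case below handles both system calls. | |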
903 #endif | |
904 case __NR_openat: { // open can be a wrapper for openat(2). | |
905 if (sysno == __NR_openat) | |
906 flags_argument_position = 2; | |
907 | |
908 // Allow opening files for reading, but don't allow writing. | |
909 static_assert(O_RDONLY == 0, "O_RDONLY must be all zero bits"); | |
910 const Arg<int> flags(flags_argument_position); | |
911 return If((flags & O_ACCMODE) != 0, Error(EROFS)).Else(Allow()); | |
912 } | |
913 case __NR_prctl: { | |
914 // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but | |
915 // disallow everything else. | |
916 const Arg<int> option(0); | |
917 return If(option == PR_SET_DUMPABLE || option == PR_GET_DUMPABLE, Allow()) | |
918 .Else(Error(ENOMEM)); | |
919 } | |
920 default: | |
921 return Allow(); | |
922 } | |
923 } | |
924 | |
925 BPF_TEST_C(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) { | |
926 int fd; | |
927 BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1); | |
928 BPF_ASSERT(errno == EROFS); | |
929 BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0); | |
930 close(fd); | |
931 | |
932 int ret; | |
933 BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0); | |
934 BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0); | |
935 BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1); | |
936 BPF_ASSERT(errno == ENOMEM); | |
937 } | |
938 | |
939 // This test exercises the SandboxBPF::Cond() method by building a complex | |
940 // tree of conditional equality operations. It then makes system calls and | |
941 // verifies that they return the values that we expected from our BPF | |
942 // program. | |
943 class EqualityStressTest { | |
944 public: | |
945 EqualityStressTest() { | |
946 // We want a deterministic test | |
947 srand(0); | |
948 | |
949 // Iterates over system call numbers and builds a random tree of | |
950 // equality tests. | |
951 // We are actually constructing a graph of ArgValue objects. This | |
952 // graph will later be used to a) compute our sandbox policy, and | |
953 // b) drive the code that verifies the output from the BPF program. | |
954 static_assert( | |
955 kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10), | |
956 "kNumTestCases must be significantly smaller than the number " | |
957 "of system calls"); | |
958 for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) { | |
959 if (IsReservedSyscall(sysno)) { | |
960 // Skip reserved system calls. This ensures that our test framework | |
961 // isn't impacted by the fact that we are overriding | |
962 // a lot of different system calls. | |
963 ++end; | |
964 arg_values_.push_back(NULL); | |
965 } else { | |
966 arg_values_.push_back( | |
967 RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs)); | |
968 } | |
969 } | |
970 } | |
971 | |
972 ~EqualityStressTest() { | |
973 for (std::vector<ArgValue*>::iterator iter = arg_values_.begin(); | |
974 iter != arg_values_.end(); | |
975 ++iter) { | |
976 DeleteArgValue(*iter); | |
977 } | |
978 } | |
979 | |
980 ResultExpr Policy(int sysno) { | |
981 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
982 if (sysno < 0 || sysno >= (int)arg_values_.size() || | |
983 IsReservedSyscall(sysno)) { | |
984 // We only return ErrorCode values for the system calls that | |
985 // are part of our test data. Every other system call remains | |
986 // allowed. | |
987 return Allow(); | |
988 } else { | |
989 // ToErrorCode() turns an ArgValue object into an ErrorCode that is | |
990 // suitable for use by a sandbox policy. | |
991 return ToErrorCode(arg_values_[sysno]); | |
992 } | |
993 } | |
994 | |
995 void VerifyFilter() { | |
996 // Iterate over all system calls. Skip the system calls that have | |
997 // previously been determined as being reserved. | |
998 for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) { | |
999 if (!arg_values_[sysno]) { | |
1000 // Skip reserved system calls. | |
1001 continue; | |
1002 } | |
1003 // Verify that system calls return the values that we expect them to | |
1004 // return. This involves passing different combinations of system call | |
1005 // parameters in order to exercise all possible code paths through the | |
1006 // BPF filter program. | |
1007 // We arbitrarily start by setting all six system call arguments to | |
1008 // zero. And we then recursive traverse our tree of ArgValues to | |
1009 // determine the necessary combinations of parameters. | |
1010 intptr_t args[6] = {}; | |
1011 Verify(sysno, args, *arg_values_[sysno]); | |
1012 } | |
1013 } | |
1014 | |
1015 private: | |
1016 struct ArgValue { | |
1017 int argno; // Argument number to inspect. | |
1018 int size; // Number of test cases (must be > 0). | |
1019 struct Tests { | |
1020 uint32_t k_value; // Value to compare syscall arg against. | |
1021 int err; // If non-zero, errno value to return. | |
1022 struct ArgValue* arg_value; // Otherwise, more args needs inspecting. | |
1023 }* tests; | |
1024 int err; // If none of the tests passed, this is what | |
1025 struct ArgValue* arg_value; // we'll return (this is the "else" branch). | |
1026 }; | |
1027 | |
1028 bool IsReservedSyscall(int sysno) { | |
1029 // There are a handful of system calls that we should never use in our | |
1030 // test cases. These system calls are needed to allow the test framework | |
1031 // to run properly. | |
1032 // If we wanted to write fully generic code, there are more system calls | |
1033 // that could be listed here, and it is quite difficult to come up with a | |
1034 // truly comprehensive list. After all, we are deliberately making system | |
1035 // calls unavailable. In practice, we have a pretty good idea of the system | |
1036 // calls that will be made by this particular test. So, this small list is | |
1037 // sufficient. But if anybody copy'n'pasted this code for other uses, they | |
1038 // would have to review the list. | |
1039 return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit || | |
1040 sysno == __NR_exit_group || sysno == __NR_restart_syscall; | |
1041 } | |
1042 | |
1043 ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) { | |
1044 // Create a new ArgValue and fill it with random data. We use a bit mask | |
1045 // to keep track of the system call parameters that have previously been | |
1046 // set; this ensures that we won't accidentally define a contradictory | |
1047 // set of equality tests. | |
1048 struct ArgValue* arg_value = new ArgValue(); | |
1049 args_mask |= 1 << argno; | |
1050 arg_value->argno = argno; | |
1051 | |
1052 // Apply some restrictions on just how complex our tests can be. | |
1053 // Otherwise, we end up with a BPF program that is too complicated for | |
1054 // the kernel to load. | |
1055 int fan_out = kMaxFanOut; | |
1056 if (remaining_args > 3) { | |
1057 fan_out = 1; | |
1058 } else if (remaining_args > 2) { | |
1059 fan_out = 2; | |
1060 } | |
1061 | |
1062 // Create a couple of different test cases with randomized values that | |
1063 // we want to use when comparing system call parameter number "argno". | |
1064 arg_value->size = rand() % fan_out + 1; | |
1065 arg_value->tests = new ArgValue::Tests[arg_value->size]; | |
1066 | |
1067 uint32_t k_value = rand(); | |
1068 for (int n = 0; n < arg_value->size; ++n) { | |
1069 // Ensure that we have unique values | |
1070 k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1; | |
1071 | |
1072 // There are two possible types of nodes. Either this is a leaf node; | |
1073 // in that case, we have completed all the equality tests that we | |
1074 // wanted to perform, and we can now compute a random "errno" value that | |
1075 // we should return. Or this is part of a more complex boolean | |
1076 // expression; in that case, we have to recursively add tests for some | |
1077 // of the system call parameters that we have not yet included in our | |
1078 // tests. | |
1079 arg_value->tests[n].k_value = k_value; | |
1080 if (!remaining_args || (rand() & 1)) { | |
1081 arg_value->tests[n].err = (rand() % 1000) + 1; | |
1082 arg_value->tests[n].arg_value = NULL; | |
1083 } else { | |
1084 arg_value->tests[n].err = 0; | |
1085 arg_value->tests[n].arg_value = | |
1086 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1); | |
1087 } | |
1088 } | |
1089 // Finally, we have to define what we should return if none of the | |
1090 // previous equality tests pass. Again, we can either deal with a leaf | |
1091 // node, or we can randomly add another couple of tests. | |
1092 if (!remaining_args || (rand() & 1)) { | |
1093 arg_value->err = (rand() % 1000) + 1; | |
1094 arg_value->arg_value = NULL; | |
1095 } else { | |
1096 arg_value->err = 0; | |
1097 arg_value->arg_value = | |
1098 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1); | |
1099 } | |
1100 // We have now built a new (sub-)tree of ArgValues defining a set of | |
1101 // boolean expressions for testing random system call arguments against | |
1102 // random values. Return this tree to our caller. | |
1103 return arg_value; | |
1104 } | |
1105 | |
1106 int RandomArg(int args_mask) { | |
1107 // Compute a random system call parameter number. | |
1108 int argno = rand() % kMaxArgs; | |
1109 | |
1110 // Make sure that this same parameter number has not previously been | |
1111 // used. Otherwise, we could end up with a test that is impossible to | |
1112 // satisfy (e.g. args[0] == 1 && args[0] == 2). | |
1113 while (args_mask & (1 << argno)) { | |
1114 argno = (argno + 1) % kMaxArgs; | |
1115 } | |
1116 return argno; | |
1117 } | |
1118 | |
1119 void DeleteArgValue(ArgValue* arg_value) { | |
1120 // Delete an ArgValue and all of its child nodes. This requires | |
1121 // recursively descending into the tree. | |
1122 if (arg_value) { | |
1123 if (arg_value->size) { | |
1124 for (int n = 0; n < arg_value->size; ++n) { | |
1125 if (!arg_value->tests[n].err) { | |
1126 DeleteArgValue(arg_value->tests[n].arg_value); | |
1127 } | |
1128 } | |
1129 delete[] arg_value->tests; | |
1130 } | |
1131 if (!arg_value->err) { | |
1132 DeleteArgValue(arg_value->arg_value); | |
1133 } | |
1134 delete arg_value; | |
1135 } | |
1136 } | |
1137 | |
1138 ResultExpr ToErrorCode(ArgValue* arg_value) { | |
1139 // Compute the ResultExpr that should be returned, if none of our | |
1140 // tests succeed (i.e. the system call parameter doesn't match any | |
1141 // of the values in arg_value->tests[].k_value). | |
1142 ResultExpr err; | |
1143 if (arg_value->err) { | |
1144 // If this was a leaf node, return the errno value that we expect to | |
1145 // return from the BPF filter program. | |
1146 err = Error(arg_value->err); | |
1147 } else { | |
1148 // If this wasn't a leaf node yet, recursively descend into the rest | |
1149 // of the tree. This will end up adding a few more SandboxBPF::Cond() | |
1150 // tests to our ErrorCode. | |
1151 err = ToErrorCode(arg_value->arg_value); | |
1152 } | |
1153 | |
1154 // Now, iterate over all the test cases that we want to compare against. | |
1155 // This builds a chain of SandboxBPF::Cond() tests | |
1156 // (aka "if ... elif ... elif ... elif ... fi") | |
1157 for (int n = arg_value->size; n-- > 0;) { | |
1158 ResultExpr matched; | |
1159 // Again, we distinguish between leaf nodes and subtrees. | |
1160 if (arg_value->tests[n].err) { | |
1161 matched = Error(arg_value->tests[n].err); | |
1162 } else { | |
1163 matched = ToErrorCode(arg_value->tests[n].arg_value); | |
1164 } | |
1165 // For now, all of our tests are limited to 32bit. | |
1166 // We have separate tests that check the behavior of 32bit vs. 64bit | |
1167 // conditional expressions. | |
1168 const Arg<uint32_t> arg(arg_value->argno); | |
1169 err = If(arg == arg_value->tests[n].k_value, matched).Else(err); | |
1170 } | |
1171 return err; | |
1172 } | |
1173 | |
1174 void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) { | |
1175 uint32_t mismatched = 0; | |
1176 // Iterate over all the k_values in arg_value.tests[] and verify that | |
1177 // we see the expected return values from system calls, when we pass | |
1178 // the k_value as a parameter in a system call. | |
1179 for (int n = arg_value.size; n-- > 0;) { | |
1180 mismatched += arg_value.tests[n].k_value; | |
1181 args[arg_value.argno] = arg_value.tests[n].k_value; | |
1182 if (arg_value.tests[n].err) { | |
1183 VerifyErrno(sysno, args, arg_value.tests[n].err); | |
1184 } else { | |
1185 Verify(sysno, args, *arg_value.tests[n].arg_value); | |
1186 } | |
1187 } | |
1188 // Find a k_value that doesn't match any of the k_values in | |
1189 // arg_value.tests[]. In most cases, the current value of "mismatched" | |
1190 // would fit this requirement. But on the off-chance that it happens | |
1191 // to collide, we double-check. | |
1192 try_again: | |
1193 for (int n = arg_value.size; n-- > 0;) { | |
1194 if (mismatched == arg_value.tests[n].k_value) { | |
1195 ++mismatched; | |
1196 goto try_again; | |
1197 } | |
1198 } | |
1199 // Now verify that we see the expected return value from system calls, | |
1200 // if we pass a value that doesn't match any of the conditions (i.e. this | |
1201 // is testing the "else" clause of the conditions). | |
1202 args[arg_value.argno] = mismatched; | |
1203 if (arg_value.err) { | |
1204 VerifyErrno(sysno, args, arg_value.err); | |
1205 } else { | |
1206 Verify(sysno, args, *arg_value.arg_value); | |
1207 } | |
1208 // Reset args[arg_value.argno]. This is not technically needed, but it | |
1209 // makes it easier to reason about the correctness of our tests. | |
1210 args[arg_value.argno] = 0; | |
1211 } | |
1212 | |
1213 void VerifyErrno(int sysno, intptr_t* args, int err) { | |
1214 // We installed BPF filters that return different errno values | |
1215 // based on the system call number and the parameters that we decided | |
1216 // to pass in. Verify that this condition holds true. | |
1217 BPF_ASSERT( | |
1218 Syscall::Call( | |
1219 sysno, args[0], args[1], args[2], args[3], args[4], args[5]) == | |
1220 -err); | |
1221 } | |
1222 | |
1223 // Vector of ArgValue trees. These trees define all the possible boolean | |
1224 // expressions that we want to turn into a BPF filter program. | |
1225 std::vector<ArgValue*> arg_values_; | |
1226 | |
1227 // Don't increase these values. We are pushing the limits of the maximum | |
1228 // BPF program that the kernel will allow us to load. If the values are | |
1229 // increased too much, the test will start failing. | |
1230 #if defined(__aarch64__) | |
1231 static const int kNumTestCases = 30; | |
1232 #else | |
1233 static const int kNumTestCases = 40; | |
1234 #endif | |
1235 static const int kMaxFanOut = 3; | |
1236 static const int kMaxArgs = 6; | |
1237 }; | |
1238 | |
1239 class EqualityStressTestPolicy : public Policy { | |
1240 public: | |
1241 explicit EqualityStressTestPolicy(EqualityStressTest* aux) : aux_(aux) {} | |
1242 ~EqualityStressTestPolicy() override {} | |
1243 | |
1244 ResultExpr EvaluateSyscall(int sysno) const override { | |
1245 return aux_->Policy(sysno); | |
1246 } | |
1247 | |
1248 private: | |
1249 EqualityStressTest* aux_; | |
1250 | |
1251 DISALLOW_COPY_AND_ASSIGN(EqualityStressTestPolicy); | |
1252 }; | |
1253 | |
1254 BPF_TEST(SandboxBPF, | |
1255 EqualityTests, | |
1256 EqualityStressTestPolicy, | |
1257 EqualityStressTest /* (*BPF_AUX) */) { | |
1258 BPF_AUX->VerifyFilter(); | |
1259 } | |
1260 | |
1261 class EqualityArgumentWidthPolicy : public Policy { | |
1262 public: | |
1263 EqualityArgumentWidthPolicy() {} | |
1264 ~EqualityArgumentWidthPolicy() override {} | |
1265 | |
1266 ResultExpr EvaluateSyscall(int sysno) const override; | |
1267 | |
1268 private: | |
1269 DISALLOW_COPY_AND_ASSIGN(EqualityArgumentWidthPolicy); | |
1270 }; | |
1271 | |
1272 ResultExpr EqualityArgumentWidthPolicy::EvaluateSyscall(int sysno) const { | |
1273 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1274 if (sysno == __NR_uname) { | |
1275 const Arg<int> option(0); | |
1276 const Arg<uint32_t> arg32(1); | |
1277 const Arg<uint64_t> arg64(1); | |
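// Case 0 compares argument 1 as a 32-bit value; case 1 compares the same | |
// argument as a full 64-bit value (only meaningful on 64-bit platforms). | |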
1278 return Switch(option) | |
1279 .Case(0, If(arg32 == 0x55555555, Error(1)).Else(Error(2))) | |
1280 #if __SIZEOF_POINTER__ > 4 | |
1281 .Case(1, If(arg64 == 0x55555555AAAAAAAAULL, Error(1)).Else(Error(2))) | |
1282 #endif | |
1283 .Default(Error(3)); | |
1284 } | |
1285 return Allow(); | |
1286 } | |
1287 | |
1288 BPF_TEST_C(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) { | |
1289 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x55555555) == -1); | |
1290 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0xAAAAAAAA) == -2); | |
1291 #if __SIZEOF_POINTER__ > 4 | |
1292 // On 32bit machines, there is no way to pass a 64bit argument through the | |
1293 // syscall interface. So, we have to skip the part of the test that requires | |
1294 // 64bit arguments. | |
1295 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1); | |
1296 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555500000000ULL) == -2); | |
1297 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555511111111ULL) == -2); | |
1298 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2); | |
1299 #endif | |
1300 } | |
1301 | |
1302 #if __SIZEOF_POINTER__ > 4 | |
1303 // On 32bit machines, there is no way to pass a 64bit argument through the | |
1304 // syscall interface. So, we have to skip the part of the test that requires | |
1305 // 64bit arguments. | |
1306 BPF_DEATH_TEST_C(SandboxBPF, | |
1307 EqualityArgumentUnallowed64bit, | |
1308 DEATH_MESSAGE("Unexpected 64bit argument detected"), | |
1309 EqualityArgumentWidthPolicy) { | |
1310 Syscall::Call(__NR_uname, 0, 0x5555555555555555ULL); | |
1311 } | |
1312 #endif | |
1313 | |
1314 class EqualityWithNegativeArgumentsPolicy : public Policy { | |
1315 public: | |
1316 EqualityWithNegativeArgumentsPolicy() {} | |
1317 ~EqualityWithNegativeArgumentsPolicy() override {} | |
1318 | |
1319 ResultExpr EvaluateSyscall(int sysno) const override { | |
1320 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1321 if (sysno == __NR_uname) { | |
1322 // TODO(mdempsky): This currently can't be Arg<int> because then | |
1323 // 0xFFFFFFFF will be treated as a (signed) int, and then when | |
1324 // Arg::EqualTo casts it to uint64_t, it will be sign extended. | |
1325 const Arg<unsigned> arg(0); | |
1326 return If(arg == 0xFFFFFFFF, Error(1)).Else(Error(2)); | |
1327 } | |
1328 return Allow(); | |
1329 } | |
1330 | |
1331 private: | |
1332 DISALLOW_COPY_AND_ASSIGN(EqualityWithNegativeArgumentsPolicy); | |
1333 }; | |
1334 | |
1335 BPF_TEST_C(SandboxBPF, | |
1336 EqualityWithNegativeArguments, | |
1337 EqualityWithNegativeArgumentsPolicy) { | |
1338 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF) == -1); | |
1339 BPF_ASSERT(Syscall::Call(__NR_uname, -1) == -1); | |
1340 BPF_ASSERT(Syscall::Call(__NR_uname, -1LL) == -1); | |
1341 } | |
1342 | |
1343 #if __SIZEOF_POINTER__ > 4 | |
1344 BPF_DEATH_TEST_C(SandboxBPF, | |
1345 EqualityWithNegative64bitArguments, | |
1346 DEATH_MESSAGE("Unexpected 64bit argument detected"), | |
1347 EqualityWithNegativeArgumentsPolicy) { | |
1348 // When expecting a 32bit system call argument, we look at the MSB of the | |
1349 // 64bit value and allow both "0" and "-1". But the latter is allowed only | |
1350 // if the LSB was negative. So, this death test should error out. | |
1351 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF00000000LL) == -1); | |
1352 } | |
1353 #endif | |
1354 | |
1355 class AllBitTestPolicy : public Policy { | |
1356 public: | |
1357 AllBitTestPolicy() {} | |
1358 ~AllBitTestPolicy() override {} | |
1359 | |
1360 ResultExpr EvaluateSyscall(int sysno) const override; | |
1361 | |
1362 private: | |
1363 static ResultExpr HasAllBits32(uint32_t bits); | |
1364 static ResultExpr HasAllBits64(uint64_t bits); | |
1365 | |
1366 DISALLOW_COPY_AND_ASSIGN(AllBitTestPolicy); | |
1367 }; | |
1368 | |
1369 ResultExpr AllBitTestPolicy::HasAllBits32(uint32_t bits) { | |
1370 if (bits == 0) { | |
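// An empty mask is trivially satisfied by every argument value, so always | |
// report a match (errno 1). | |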
1371 return Error(1); | |
1372 } | |
1373 const Arg<uint32_t> arg(1); | |
1374 return If((arg & bits) == bits, Error(1)).Else(Error(0)); | |
1375 } | |
1376 | |
1377 ResultExpr AllBitTestPolicy::HasAllBits64(uint64_t bits) { | |
1378 if (bits == 0) { | |
1379 return Error(1); | |
1380 } | |
1381 const Arg<uint64_t> arg(1); | |
1382 return If((arg & bits) == bits, Error(1)).Else(Error(0)); | |
1383 } | |
1384 | |
1385 ResultExpr AllBitTestPolicy::EvaluateSyscall(int sysno) const { | |
1386 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1387 // Test masked-equality cases that should trigger the "has all bits" | |
1388 // peephole optimizations. We try to find bitmasks that could conceivably | |
1389 // touch corner cases. | |
1390 // For all of these tests, we override uname(). We can make do with | |
1391 // a single system call number, as we use the first system call argument to | |
1392 // select the different bit masks that we want to test against. | |
1393 if (sysno == __NR_uname) { | |
1394 const Arg<int> option(0); | |
1395 return Switch(option) | |
1396 .Case(0, HasAllBits32(0x0)) | |
1397 .Case(1, HasAllBits32(0x1)) | |
1398 .Case(2, HasAllBits32(0x3)) | |
1399 .Case(3, HasAllBits32(0x80000000)) | |
1400 #if __SIZEOF_POINTER__ > 4 | |
1401 .Case(4, HasAllBits64(0x0)) | |
1402 .Case(5, HasAllBits64(0x1)) | |
1403 .Case(6, HasAllBits64(0x3)) | |
1404 .Case(7, HasAllBits64(0x80000000)) | |
1405 .Case(8, HasAllBits64(0x100000000ULL)) | |
1406 .Case(9, HasAllBits64(0x300000000ULL)) | |
1407 .Case(10, HasAllBits64(0x100000001ULL)) | |
1408 #endif | |
1409 .Default(Kill("Invalid test case number")); | |
1410 } | |
1411 return Allow(); | |
1412 } | |
1413 | |
1414 // Define a macro that performs tests using our test policy. | |
1415 // NOTE: Not all of the arguments in this macro are actually used! | |
1416 // They are here just to serve as documentation of the conditions | |
1417 // implemented in the test policy. | |
1418 // Most notably, "op" and "mask" are unused by the macro. If you want | |
1419 // to make changes to these values, you will have to edit the | |
1420 // test policy instead. | |
1421 #define BITMASK_TEST(testcase, arg, op, mask, expected_value) \ | |
1422 BPF_ASSERT(Syscall::Call(__NR_uname, (testcase), (arg)) == (expected_value)) | |
1423 | |
1424 // Our uname() system call returns ErrorCode(1) for success and | |
1425 // ErrorCode(0) for failure. Syscall::Call() turns this into an | |
1426 // exit code of -1 or 0. | |
1427 #define EXPECT_FAILURE 0 | |
1428 #define EXPECT_SUCCESS -1 | |
1429 | |
1430 // A couple of our tests behave differently on 32bit and 64bit systems, as | |
1431 // there is no way for a 32bit system call to pass in a 64bit system call | |
1432 // argument "arg". | |
1433 // We expect these tests to succeed on 64bit systems, but to fail on 32bit | |
1434 // systems. | |
1435 #define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE) | |
1436 BPF_TEST_C(SandboxBPF, AllBitTests, AllBitTestPolicy) { | |
1437 // 32bit test: all of 0x0 (should always be true) | |
1438 BITMASK_TEST( 0, 0, ALLBITS32, 0, EXPECT_SUCCESS); | |
1439 BITMASK_TEST( 0, 1, ALLBITS32, 0, EXPECT_SUCCESS); | |
1440 BITMASK_TEST( 0, 3, ALLBITS32, 0, EXPECT_SUCCESS); | |
1441 BITMASK_TEST( 0, 0xFFFFFFFFU, ALLBITS32, 0, EXPECT_SUCCESS); | |
1442 BITMASK_TEST( 0, -1LL, ALLBITS32, 0, EXPECT_SUCCESS); | |
1443 | |
1444 // 32bit test: all of 0x1 | |
1445 BITMASK_TEST( 1, 0, ALLBITS32, 0x1, EXPECT_FAILURE); | |
1446 BITMASK_TEST( 1, 1, ALLBITS32, 0x1, EXPECT_SUCCESS); | |
1447 BITMASK_TEST( 1, 2, ALLBITS32, 0x1, EXPECT_FAILURE); | |
1448 BITMASK_TEST( 1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS); | |
1449 | |
1450 // 32bit test: all of 0x3 | |
1451 BITMASK_TEST( 2, 0, ALLBITS32, 0x3, EXPECT_FAILURE); | |
1452 BITMASK_TEST( 2, 1, ALLBITS32, 0x3, EXPECT_FAILURE); | |
1453 BITMASK_TEST( 2, 2, ALLBITS32, 0x3, EXPECT_FAILURE); | |
1454 BITMASK_TEST( 2, 3, ALLBITS32, 0x3, EXPECT_SUCCESS); | |
1455 BITMASK_TEST( 2, 7, ALLBITS32, 0x3, EXPECT_SUCCESS); | |
1456 | |
1457 // 32bit test: all of 0x80000000 | |
1458 BITMASK_TEST( 3, 0, ALLBITS32, 0x80000000, EXPECT_FAILURE); | |
1459 BITMASK_TEST( 3, 0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE); | |
1460 BITMASK_TEST( 3, 0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS); | |
1461 BITMASK_TEST( 3, 0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS); | |
1462 BITMASK_TEST( 3, -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS); | |
1463 | |
1464 #if __SIZEOF_POINTER__ > 4 | |
1465 // 64bit test: all of 0x0 (should always be true) | |
1466 BITMASK_TEST( 4, 0, ALLBITS64, 0, EXPECT_SUCCESS); | |
1467 BITMASK_TEST( 4, 1, ALLBITS64, 0, EXPECT_SUCCESS); | |
1468 BITMASK_TEST( 4, 3, ALLBITS64, 0, EXPECT_SUCCESS); | |
1469 BITMASK_TEST( 4, 0xFFFFFFFFU, ALLBITS64, 0, EXPECT_SUCCESS); | |
1470 BITMASK_TEST( 4, 0x100000000LL, ALLBITS64, 0, EXPECT_SUCCESS); | |
1471 BITMASK_TEST( 4, 0x300000000LL, ALLBITS64, 0, EXPECT_SUCCESS); | |
1472 BITMASK_TEST( 4,0x8000000000000000LL, ALLBITS64, 0, EXPECT_SUCCESS); | |
1473 BITMASK_TEST( 4, -1LL, ALLBITS64, 0, EXPECT_SUCCESS); | |
1474 | |
1475 // 64bit test: all of 0x1 | |
1476 BITMASK_TEST( 5, 0, ALLBITS64, 1, EXPECT_FAILURE); | |
1477 BITMASK_TEST( 5, 1, ALLBITS64, 1, EXPECT_SUCCESS); | |
1478 BITMASK_TEST( 5, 2, ALLBITS64, 1, EXPECT_FAILURE); | |
1479 BITMASK_TEST( 5, 3, ALLBITS64, 1, EXPECT_SUCCESS); | |
1480 BITMASK_TEST( 5, 0x100000000LL, ALLBITS64, 1, EXPECT_FAILURE); | |
1481 BITMASK_TEST( 5, 0x100000001LL, ALLBITS64, 1, EXPECT_SUCCESS); | |
1482 BITMASK_TEST( 5, 0x100000002LL, ALLBITS64, 1, EXPECT_FAILURE); | |
1483 BITMASK_TEST( 5, 0x100000003LL, ALLBITS64, 1, EXPECT_SUCCESS); | |
1484 | |
1485 // 64bit test: all of 0x3 | |
1486 BITMASK_TEST( 6, 0, ALLBITS64, 3, EXPECT_FAILURE); | |
1487 BITMASK_TEST( 6, 1, ALLBITS64, 3, EXPECT_FAILURE); | |
1488 BITMASK_TEST( 6, 2, ALLBITS64, 3, EXPECT_FAILURE); | |
1489 BITMASK_TEST( 6, 3, ALLBITS64, 3, EXPECT_SUCCESS); | |
1490 BITMASK_TEST( 6, 7, ALLBITS64, 3, EXPECT_SUCCESS); | |
1491 BITMASK_TEST( 6, 0x100000000LL, ALLBITS64, 3, EXPECT_FAILURE); | |
1492 BITMASK_TEST( 6, 0x100000001LL, ALLBITS64, 3, EXPECT_FAILURE); | |
1493 BITMASK_TEST( 6, 0x100000002LL, ALLBITS64, 3, EXPECT_FAILURE); | |
1494 BITMASK_TEST( 6, 0x100000003LL, ALLBITS64, 3, EXPECT_SUCCESS); | |
1495 BITMASK_TEST( 6, 0x100000007LL, ALLBITS64, 3, EXPECT_SUCCESS); | |
1496 | |
1497 // 64bit test: all of 0x80000000 | |
1498 BITMASK_TEST( 7, 0, ALLBITS64, 0x80000000, EXPECT_FAILURE); | |
1499 BITMASK_TEST( 7, 0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE); | |
1500 BITMASK_TEST( 7, 0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1501 BITMASK_TEST( 7, 0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1502 BITMASK_TEST( 7, -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1503 BITMASK_TEST( 7, 0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE); | |
1504 BITMASK_TEST( 7, 0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE); | |
1505 BITMASK_TEST( 7, 0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1506 BITMASK_TEST( 7, 0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1507 BITMASK_TEST( 7, -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1508 | |
1509 // 64bit test: all of 0x100000000 | |
1510 BITMASK_TEST( 8, 0x000000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE); | |
1511 BITMASK_TEST( 8, 0x100000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS); | |
1512 BITMASK_TEST( 8, 0x200000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE); | |
1513 BITMASK_TEST( 8, 0x300000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS); | |
1514 BITMASK_TEST( 8, 0x000000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE); | |
1515 BITMASK_TEST( 8, 0x100000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS); | |
1516 BITMASK_TEST( 8, 0x200000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE); | |
1517 BITMASK_TEST( 8, 0x300000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS); | |
1518 | |
1519 // 64bit test: all of 0x300000000 | |
1520 BITMASK_TEST( 9, 0x000000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1521 BITMASK_TEST( 9, 0x100000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1522 BITMASK_TEST( 9, 0x200000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1523 BITMASK_TEST( 9, 0x300000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS); | |
1524 BITMASK_TEST( 9, 0x700000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS); | |
1525 BITMASK_TEST( 9, 0x000000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1526 BITMASK_TEST( 9, 0x100000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1527 BITMASK_TEST( 9, 0x200000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1528 BITMASK_TEST( 9, 0x300000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS); | |
1529 BITMASK_TEST( 9, 0x700000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS); | |
1530 | |
1531 // 64bit test: all of 0x100000001 | |
1532 BITMASK_TEST(10, 0x000000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE); | |
1533 BITMASK_TEST(10, 0x000000001LL, ALLBITS64,0x100000001, EXPECT_FAILURE); | |
1534 BITMASK_TEST(10, 0x100000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE); | |
1535 BITMASK_TEST(10, 0x100000001LL, ALLBITS64,0x100000001, EXPT64_SUCCESS); | |
1536 BITMASK_TEST(10, 0xFFFFFFFFU, ALLBITS64,0x100000001, EXPECT_FAILURE); | |
1537 BITMASK_TEST(10, -1L, ALLBITS64,0x100000001, EXPT64_SUCCESS); | |
1538 #endif | |
1539 } | |
1540 | |
1541 class AnyBitTestPolicy : public Policy { | |
1542 public: | |
1543 AnyBitTestPolicy() {} | |
1544 ~AnyBitTestPolicy() override {} | |
1545 | |
1546 ResultExpr EvaluateSyscall(int sysno) const override; | |
1547 | |
1548 private: | |
1549 static ResultExpr HasAnyBits32(uint32_t); | |
1550 static ResultExpr HasAnyBits64(uint64_t); | |
1551 | |
1552 DISALLOW_COPY_AND_ASSIGN(AnyBitTestPolicy); | |
1553 }; | |
1554 | |
1555 ResultExpr AnyBitTestPolicy::HasAnyBits32(uint32_t bits) { | |
1556 if (bits == 0) { | |
1557 return Error(0); | |
1558 } | |
1559 const Arg<uint32_t> arg(1); | |
1560 return If((arg & bits) != 0, Error(1)).Else(Error(0)); | |
1561 } | |
1562 | |
1563 ResultExpr AnyBitTestPolicy::HasAnyBits64(uint64_t bits) { | |
1564 if (bits == 0) { | |
1565 return Error(0); | |
1566 } | |
1567 const Arg<uint64_t> arg(1); | |
1568 return If((arg & bits) != 0, Error(1)).Else(Error(0)); | |
1569 } | |
1570 | |
1571 ResultExpr AnyBitTestPolicy::EvaluateSyscall(int sysno) const { | |
1572 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1573 // Test masked-equality cases that should trigger the "has any bits" | |
1574 // peephole optimizations. We try to find bitmasks that could conceivably | |
1575 // touch corner cases. | |
1576 // For all of these tests, we override uname(). We can make do with a | |
1577 // single system call number, as we use the first system call argument to | |
1578 // select the different bit masks that we want to test against. | |
1579 if (sysno == __NR_uname) { | |
1580 const Arg<int> option(0); | |
1581 return Switch(option) | |
1582 .Case(0, HasAnyBits32(0x0)) | |
1583 .Case(1, HasAnyBits32(0x1)) | |
1584 .Case(2, HasAnyBits32(0x3)) | |
1585 .Case(3, HasAnyBits32(0x80000000)) | |
1586 #if __SIZEOF_POINTER__ > 4 | |
1587 .Case(4, HasAnyBits64(0x0)) | |
1588 .Case(5, HasAnyBits64(0x1)) | |
1589 .Case(6, HasAnyBits64(0x3)) | |
1590 .Case(7, HasAnyBits64(0x80000000)) | |
1591 .Case(8, HasAnyBits64(0x100000000ULL)) | |
1592 .Case(9, HasAnyBits64(0x300000000ULL)) | |
1593 .Case(10, HasAnyBits64(0x100000001ULL)) | |
1594 #endif | |
1595 .Default(Kill("Invalid test case number")); | |
1596 } | |
1597 return Allow(); | |
1598 } | |
1599 | |
1600 BPF_TEST_C(SandboxBPF, AnyBitTests, AnyBitTestPolicy) { | |
1601 // 32bit test: any of 0x0 (should always be false) | |
1602 BITMASK_TEST( 0, 0, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1603 BITMASK_TEST( 0, 1, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1604 BITMASK_TEST( 0, 3, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1605 BITMASK_TEST( 0, 0xFFFFFFFFU, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1606 BITMASK_TEST( 0, -1LL, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1607 | |
1608 // 32bit test: any of 0x1 | |
1609 BITMASK_TEST( 1, 0, ANYBITS32, 0x1, EXPECT_FAILURE); | |
1610 BITMASK_TEST( 1, 1, ANYBITS32, 0x1, EXPECT_SUCCESS); | |
1611 BITMASK_TEST( 1, 2, ANYBITS32, 0x1, EXPECT_FAILURE); | |
1612 BITMASK_TEST( 1, 3, ANYBITS32, 0x1, EXPECT_SUCCESS); | |
1613 | |
1614 // 32bit test: any of 0x3 | |
1615 BITMASK_TEST( 2, 0, ANYBITS32, 0x3, EXPECT_FAILURE); | |
1616 BITMASK_TEST( 2, 1, ANYBITS32, 0x3, EXPECT_SUCCESS); | |
1617 BITMASK_TEST( 2, 2, ANYBITS32, 0x3, EXPECT_SUCCESS); | |
1618 BITMASK_TEST( 2, 3, ANYBITS32, 0x3, EXPECT_SUCCESS); | |
1619 BITMASK_TEST( 2, 7, ANYBITS32, 0x3, EXPECT_SUCCESS); | |
1620 | |
1621 // 32bit test: any of 0x80000000 | |
1622 BITMASK_TEST( 3, 0, ANYBITS32, 0x80000000, EXPECT_FAILURE); | |
1623 BITMASK_TEST( 3, 0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE); | |
1624 BITMASK_TEST( 3, 0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS); | |
1625 BITMASK_TEST( 3, 0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS); | |
1626 BITMASK_TEST( 3, -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS); | |
1627 | |
1628 #if __SIZEOF_POINTER__ > 4 | |
1629 // 64bit test: any of 0x0 (should always be false) | |
1630 BITMASK_TEST( 4, 0, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1631 BITMASK_TEST( 4, 1, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1632 BITMASK_TEST( 4, 3, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1633 BITMASK_TEST( 4, 0xFFFFFFFFU, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1634 BITMASK_TEST( 4, 0x100000000LL, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1635 BITMASK_TEST( 4, 0x300000000LL, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1636 BITMASK_TEST( 4,0x8000000000000000LL, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1637 BITMASK_TEST( 4, -1LL, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1638 | |
1639 // 64bit test: any of 0x1 | |
1640 BITMASK_TEST( 5, 0, ANYBITS64, 0x1, EXPECT_FAILURE); | |
1641 BITMASK_TEST( 5, 1, ANYBITS64, 0x1, EXPECT_SUCCESS); | |
1642 BITMASK_TEST( 5, 2, ANYBITS64, 0x1, EXPECT_FAILURE); | |
1643 BITMASK_TEST( 5, 3, ANYBITS64, 0x1, EXPECT_SUCCESS); | |
1644 BITMASK_TEST( 5, 0x100000001LL, ANYBITS64, 0x1, EXPECT_SUCCESS); | |
1645 BITMASK_TEST( 5, 0x100000000LL, ANYBITS64, 0x1, EXPECT_FAILURE); | |
1646 BITMASK_TEST( 5, 0x100000002LL, ANYBITS64, 0x1, EXPECT_FAILURE); | |
1647 BITMASK_TEST( 5, 0x100000003LL, ANYBITS64, 0x1, EXPECT_SUCCESS); | |
1648 | |
1649 // 64bit test: any of 0x3 | |
1650 BITMASK_TEST( 6, 0, ANYBITS64, 0x3, EXPECT_FAILURE); | |
1651 BITMASK_TEST( 6, 1, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1652 BITMASK_TEST( 6, 2, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1653 BITMASK_TEST( 6, 3, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1654 BITMASK_TEST( 6, 7, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1655 BITMASK_TEST( 6, 0x100000000LL, ANYBITS64, 0x3, EXPECT_FAILURE); | |
1656 BITMASK_TEST( 6, 0x100000001LL, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1657 BITMASK_TEST( 6, 0x100000002LL, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1658 BITMASK_TEST( 6, 0x100000003LL, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1659 BITMASK_TEST( 6, 0x100000007LL, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1660 | |
1661 // 64bit test: any of 0x80000000 | |
1662 BITMASK_TEST( 7, 0, ANYBITS64, 0x80000000, EXPECT_FAILURE); | |
1663 BITMASK_TEST( 7, 0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE); | |
1664 BITMASK_TEST( 7, 0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1665 BITMASK_TEST( 7, 0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1666 BITMASK_TEST( 7, -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1667 BITMASK_TEST( 7, 0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE); | |
1668 BITMASK_TEST( 7, 0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE); | |
1669 BITMASK_TEST( 7, 0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1670 BITMASK_TEST( 7, 0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1671 BITMASK_TEST( 7, -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1672 | |
1673 // 64bit test: any of 0x100000000 | |
1674 BITMASK_TEST( 8, 0x000000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE); | |
1675 BITMASK_TEST( 8, 0x100000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS); | |
1676 BITMASK_TEST( 8, 0x200000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE); | |
1677 BITMASK_TEST( 8, 0x300000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS); | |
1678 BITMASK_TEST( 8, 0x000000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE); | |
1679 BITMASK_TEST( 8, 0x100000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS); | |
1680 BITMASK_TEST( 8, 0x200000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE); | |
1681 BITMASK_TEST( 8, 0x300000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS); | |
1682 | |
1683 // 64bit test: any of 0x300000000 | |
1684 BITMASK_TEST( 9, 0x000000000LL, ANYBITS64,0x300000000, EXPECT_FAILURE); | |
1685 BITMASK_TEST( 9, 0x100000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1686 BITMASK_TEST( 9, 0x200000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1687 BITMASK_TEST( 9, 0x300000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1688 BITMASK_TEST( 9, 0x700000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1689 BITMASK_TEST( 9, 0x000000001LL, ANYBITS64,0x300000000, EXPECT_FAILURE); | |
1690 BITMASK_TEST( 9, 0x100000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1691 BITMASK_TEST( 9, 0x200000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1692 BITMASK_TEST( 9, 0x300000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1693 BITMASK_TEST( 9, 0x700000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1694 | |
1695 // 64bit test: any of 0x100000001 | |
1696 BITMASK_TEST( 10, 0x000000000LL, ANYBITS64,0x100000001, EXPECT_FAILURE); | |
1697 BITMASK_TEST( 10, 0x000000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS); | |
1698 BITMASK_TEST( 10, 0x100000000LL, ANYBITS64,0x100000001, EXPT64_SUCCESS); | |
1699 BITMASK_TEST( 10, 0x100000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS); | |
1700 BITMASK_TEST( 10, 0xFFFFFFFFU, ANYBITS64,0x100000001, EXPECT_SUCCESS); | |
1701 BITMASK_TEST( 10, -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS); | |
1702 #endif | |
1703 } | |
1704 | |
1705 class MaskedEqualTestPolicy : public Policy { | |
1706 public: | |
1707 MaskedEqualTestPolicy() {} | |
1708 ~MaskedEqualTestPolicy() override {} | |
1709 | |
1710 ResultExpr EvaluateSyscall(int sysno) const override; | |
1711 | |
1712 private: | |
1713 static ResultExpr MaskedEqual32(uint32_t mask, uint32_t value); | |
1714 static ResultExpr MaskedEqual64(uint64_t mask, uint64_t value); | |
1715 | |
1716 DISALLOW_COPY_AND_ASSIGN(MaskedEqualTestPolicy); | |
1717 }; | |
1718 | |
1719 ResultExpr MaskedEqualTestPolicy::MaskedEqual32(uint32_t mask, uint32_t value) { | |
1720 const Arg<uint32_t> arg(1); | |
1721 return If((arg & mask) == value, Error(1)).Else(Error(0)); | |
1722 } | |
1723 | |
1724 ResultExpr MaskedEqualTestPolicy::MaskedEqual64(uint64_t mask, uint64_t value) { | |
1725 const Arg<uint64_t> arg(1); | |
1726 return If((arg & mask) == value, Error(1)).Else(Error(0)); | |
1727 } | |
1728 | |
1729 ResultExpr MaskedEqualTestPolicy::EvaluateSyscall(int sysno) const { | |
1730 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1731 | |
1732 if (sysno == __NR_uname) { | |
1733 const Arg<int> option(0); | |
1734 return Switch(option) | |
1735 .Case(0, MaskedEqual32(0x00ff00ff, 0x005500aa)) | |
1736 #if __SIZEOF_POINTER__ > 4 | |
1737 .Case(1, MaskedEqual64(0x00ff00ff00000000, 0x005500aa00000000)) | |
1738 .Case(2, MaskedEqual64(0x00ff00ff00ff00ff, 0x005500aa005500aa)) | |
1739 #endif | |
1740 .Default(Kill("Invalid test case number")); | |
1741 } | |
1742 | |
1743 return Allow(); | |
1744 } | |
1745 | |
1746 #define MASKEQ_TEST(rulenum, arg, expected_result) \ | |
1747 BPF_ASSERT(Syscall::Call(__NR_uname, (rulenum), (arg)) == (expected_result)) | |
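
// For reference (a sketch, not additional test logic):
//   MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS)
// expands to
//   BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x005500aa) == -1);
// because rule 0 applies MaskedEqual32(0x00ff00ff, 0x005500aa) to the second
// syscall argument, matches, and therefore returns Error(1).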
1748 | |
1749 BPF_TEST_C(SandboxBPF, MaskedEqualTests, MaskedEqualTestPolicy) { | |
1750 // Allowed: 0x__55__aa | |
1751 MASKEQ_TEST(0, 0x00000000, EXPECT_FAILURE); | |
1752 MASKEQ_TEST(0, 0x00000001, EXPECT_FAILURE); | |
1753 MASKEQ_TEST(0, 0x00000003, EXPECT_FAILURE); | |
1754 MASKEQ_TEST(0, 0x00000100, EXPECT_FAILURE); | |
1755 MASKEQ_TEST(0, 0x00000300, EXPECT_FAILURE); | |
1756 MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS); | |
1757 MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE); | |
1758 MASKEQ_TEST(0, 0x005600aa, EXPECT_FAILURE); | |
1759 MASKEQ_TEST(0, 0x005501aa, EXPECT_SUCCESS); | |
1760 MASKEQ_TEST(0, 0x005503aa, EXPECT_SUCCESS); | |
1761 MASKEQ_TEST(0, 0x555500aa, EXPECT_SUCCESS); | |
1762 MASKEQ_TEST(0, 0xaa5500aa, EXPECT_SUCCESS); | |
1763 | |
1764 #if __SIZEOF_POINTER__ > 4 | |
1765 // Allowed: 0x__55__aa________ | |
1766 MASKEQ_TEST(1, 0x0000000000000000, EXPECT_FAILURE); | |
1767 MASKEQ_TEST(1, 0x0000000000000010, EXPECT_FAILURE); | |
1768 MASKEQ_TEST(1, 0x0000000000000050, EXPECT_FAILURE); | |
1769 MASKEQ_TEST(1, 0x0000000100000000, EXPECT_FAILURE); | |
1770 MASKEQ_TEST(1, 0x0000000300000000, EXPECT_FAILURE); | |
1771 MASKEQ_TEST(1, 0x0000010000000000, EXPECT_FAILURE); | |
1772 MASKEQ_TEST(1, 0x0000030000000000, EXPECT_FAILURE); | |
1773 MASKEQ_TEST(1, 0x005500aa00000000, EXPECT_SUCCESS); | |
1774 MASKEQ_TEST(1, 0x005500ab00000000, EXPECT_FAILURE); | |
1775 MASKEQ_TEST(1, 0x005600aa00000000, EXPECT_FAILURE); | |
1776 MASKEQ_TEST(1, 0x005501aa00000000, EXPECT_SUCCESS); | |
1777 MASKEQ_TEST(1, 0x005503aa00000000, EXPECT_SUCCESS); | |
1778 MASKEQ_TEST(1, 0x555500aa00000000, EXPECT_SUCCESS); | |
1779 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS); | |
1780 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS); | |
1781 MASKEQ_TEST(1, 0xaa5500aa0000cafe, EXPECT_SUCCESS); | |
1782 | |
1783 // Allowed: 0x__55__aa__55__aa | |
1784 MASKEQ_TEST(2, 0x0000000000000000, EXPECT_FAILURE); | |
1785 MASKEQ_TEST(2, 0x0000000000000010, EXPECT_FAILURE); | |
1786 MASKEQ_TEST(2, 0x0000000000000050, EXPECT_FAILURE); | |
1787 MASKEQ_TEST(2, 0x0000000100000000, EXPECT_FAILURE); | |
1788 MASKEQ_TEST(2, 0x0000000300000000, EXPECT_FAILURE); | |
1789 MASKEQ_TEST(2, 0x0000010000000000, EXPECT_FAILURE); | |
1790 MASKEQ_TEST(2, 0x0000030000000000, EXPECT_FAILURE); | |
1791 MASKEQ_TEST(2, 0x00000000005500aa, EXPECT_FAILURE); | |
1792 MASKEQ_TEST(2, 0x005500aa00000000, EXPECT_FAILURE); | |
1793 MASKEQ_TEST(2, 0x005500aa005500aa, EXPECT_SUCCESS); | |
1794 MASKEQ_TEST(2, 0x005500aa005700aa, EXPECT_FAILURE); | |
1795 MASKEQ_TEST(2, 0x005700aa005500aa, EXPECT_FAILURE); | |
1796 MASKEQ_TEST(2, 0x005500aa004500aa, EXPECT_FAILURE); | |
1797 MASKEQ_TEST(2, 0x004500aa005500aa, EXPECT_FAILURE); | |
1798 MASKEQ_TEST(2, 0x005512aa005500aa, EXPECT_SUCCESS); | |
1799 MASKEQ_TEST(2, 0x005500aa005534aa, EXPECT_SUCCESS); | |
1800 MASKEQ_TEST(2, 0xff5500aa0055ffaa, EXPECT_SUCCESS); | |
1801 #endif | |
1802 } | |
1803 | |
1804 intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) { | |
1805 if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) { | |
1806 // We expect to get called for an attempt to fork(). No need to log that | |
1807 // call. But if we ever get called for anything else, we want to verbosely | |
1808 // print as much information as possible. | |
1809 const char* msg = (const char*)aux; | |
1810 printf( | |
1811 "Clone() was called with unexpected arguments\n" | |
1812 " nr: %d\n" | |
1813 " 1: 0x%llX\n" | |
1814 " 2: 0x%llX\n" | |
1815 " 3: 0x%llX\n" | |
1816 " 4: 0x%llX\n" | |
1817 " 5: 0x%llX\n" | |
1818 " 6: 0x%llX\n" | |
1819 "%s\n", | |
1820 args.nr, | |
1821 (long long)args.args[0], | |
1822 (long long)args.args[1], | |
1823 (long long)args.args[2], | |
1824 (long long)args.args[3], | |
1825 (long long)args.args[4], | |
1826 (long long)args.args[5], | |
1827 msg); | |
1828 } | |
1829 return -EPERM; | |
1830 } | |
1831 | |
1832 class PthreadPolicyEquality : public Policy { | |
1833 public: | |
1834 PthreadPolicyEquality() {} | |
1835 ~PthreadPolicyEquality() override {} | |
1836 | |
1837 ResultExpr EvaluateSyscall(int sysno) const override; | |
1838 | |
1839 private: | |
1840 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyEquality); | |
1841 }; | |
1842 | |
1843 ResultExpr PthreadPolicyEquality::EvaluateSyscall(int sysno) const { | |
1844 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1845 // This policy allows creating threads with pthread_create(). But it | |
1846 // doesn't allow any other uses of clone(). Most notably, it does not | |
1847 // allow callers to implement fork() or vfork() by passing suitable flags | |
1848 // to the clone() system call. | |
1849 if (sysno == __NR_clone) { | |
1850 // We have seen three different valid combinations of flags. Glibc | |
1851 // uses the more modern flags, sets the TLS from the call to clone(), and | |
1852 // uses futexes to monitor threads. Android's C run-time library doesn't | |
1853 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED. | |
1854 // More recent versions of Android don't set CLONE_DETACHED anymore, so | |
1855 // the last case accounts for that. | |
1856 // The following policy is very strict. It only allows the exact masks | |
1857 // that we have seen in known implementations. It is probably somewhat | |
1858 // stricter than what we would want to do. | |
1859 const uint64_t kGlibcCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES | | |
1860 CLONE_SIGHAND | CLONE_THREAD | | |
1861 CLONE_SYSVSEM | CLONE_SETTLS | | |
1862 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID; | |
1863 const uint64_t kBaseAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES | | |
1864 CLONE_SIGHAND | CLONE_THREAD | | |
1865 CLONE_SYSVSEM; | |
1866 const Arg<unsigned long> flags(0); | |
1867 return If(flags == kGlibcCloneMask || | |
1868 flags == (kBaseAndroidCloneMask | CLONE_DETACHED) || | |
1869 flags == kBaseAndroidCloneMask, | |
1870 Allow()).Else(Trap(PthreadTrapHandler, "Unknown mask")); | |
1871 } | |
1872 | |
1873 return Allow(); | |
1874 } | |
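
// For contrast with the masks allowed above (taken from PthreadTest() below,
// not a new test): a glibc-style fork() reaches the kernel as
//   clone(CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD, ...)
// which matches none of the allowed masks and therefore hits the
// PthreadTrapHandler() trap.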
1875 | |
1876 class PthreadPolicyBitMask : public Policy { | |
1877 public: | |
1878 PthreadPolicyBitMask() {} | |
1879 ~PthreadPolicyBitMask() override {} | |
1880 | |
1881 ResultExpr EvaluateSyscall(int sysno) const override; | |
1882 | |
1883 private: | |
1884 static BoolExpr HasAnyBits(const Arg<unsigned long>& arg, unsigned long bits); | |
1885 static BoolExpr HasAllBits(const Arg<unsigned long>& arg, unsigned long bits); | |
1886 | |
1887 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyBitMask); | |
1888 }; | |
1889 | |
1890 BoolExpr PthreadPolicyBitMask::HasAnyBits(const Arg<unsigned long>& arg, | |
1891 unsigned long bits) { | |
1892 return (arg & bits) != 0; | |
1893 } | |
1894 | |
1895 BoolExpr PthreadPolicyBitMask::HasAllBits(const Arg<unsigned long>& arg, | |
1896 unsigned long bits) { | |
1897 return (arg & bits) == bits; | |
1898 } | |
1899 | |
1900 ResultExpr PthreadPolicyBitMask::EvaluateSyscall(int sysno) const { | |
1901 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1902 // This policy allows creating threads with pthread_create(). But it | |
1903 // doesn't allow any other uses of clone(). Most notably, it does not | |
1904 // allow callers to implement fork() or vfork() by passing suitable flags | |
1905 // to the clone() system call. | |
1906 if (sysno == __NR_clone) { | |
1907 // We have seen two different valid combinations of flags. Glibc | |
1908 // uses the more modern flags, sets the TLS from the call to clone(), and | |
1909 // uses futexes to monitor threads. Android's C run-time library doesn't | |
1910 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED. | |
1911 // The following policy allows for either combination of flags, but it | |
1912 // is generally a little more conservative than strictly necessary. We | |
1913 // err on the side of being safe rather than sorry. | |
1914 // Very noticeably though, we disallow fork() (which is often just a | |
1915 // wrapper around clone()). | |
1916 const unsigned long kMandatoryFlags = CLONE_VM | CLONE_FS | CLONE_FILES | | |
1917 CLONE_SIGHAND | CLONE_THREAD | | |
1918 CLONE_SYSVSEM; | |
1919 const unsigned long kFutexFlags = | |
1920 CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID; | |
1921 const unsigned long kNoopFlags = CLONE_DETACHED; | |
1922 const unsigned long kKnownFlags = | |
1923 kMandatoryFlags | kFutexFlags | kNoopFlags; | |
1924 | |
1925 const Arg<unsigned long> flags(0); | |
1926 return If(HasAnyBits(flags, ~kKnownFlags), | |
1927 Trap(PthreadTrapHandler, "Unexpected CLONE_XXX flag found")) | |
1928 .ElseIf(!HasAllBits(flags, kMandatoryFlags), | |
1929 Trap(PthreadTrapHandler, | |
1930 "Missing mandatory CLONE_XXX flags " | |
1931 "when creating new thread")) | |
1932 .ElseIf( | |
1933 !HasAllBits(flags, kFutexFlags) && HasAnyBits(flags, kFutexFlags), | |
1934 Trap(PthreadTrapHandler, | |
1935 "Must set either all or none of the TLS and futex bits in " | |
1936 "call to clone()")) | |
1937 .Else(Allow()); | |
1938 } | |
1939 | |
1940 return Allow(); | |
1941 } | |
1942 | |
1943 static void* ThreadFnc(void* arg) { | |
1944 ++*reinterpret_cast<int*>(arg); | |
1945 Syscall::Call(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0); | |
1946 return NULL; | |
1947 } | |
1948 | |
1949 static void PthreadTest() { | |
1950 // Attempt to start a joinable thread. This should succeed. | |
1951 pthread_t thread; | |
1952 int thread_ran = 0; | |
1953 BPF_ASSERT(!pthread_create(&thread, NULL, ThreadFnc, &thread_ran)); | |
1954 BPF_ASSERT(!pthread_join(thread, NULL)); | |
1955 BPF_ASSERT(thread_ran); | |
1956 | |
1957 // Attempt to start a detached thread. This should succeed. | |
1958 thread_ran = 0; | |
1959 pthread_attr_t attr; | |
1960 BPF_ASSERT(!pthread_attr_init(&attr)); | |
1961 BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); | |
1962 BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran)); | |
1963 BPF_ASSERT(!pthread_attr_destroy(&attr)); | |
1964 while (Syscall::Call(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) == | |
1965 -EINTR) { | |
1966 } | |
1967 BPF_ASSERT(thread_ran); | |
1968 | |
1969 // Attempt to fork() a process using clone(). This should fail. We use the | |
1970 // same flags that glibc uses when calling fork(). But we don't actually | |
1971 // try calling the fork() implementation in the C run-time library, as | |
1972 // run-time libraries other than glibc might call __NR_fork instead of | |
1973 // __NR_clone, and that would introduce a bogus test failure. | |
1974 int pid; | |
1975 BPF_ASSERT(Syscall::Call(__NR_clone, | |
1976 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD, | |
1977 0, | |
1978 0, | |
1979 &pid) == -EPERM); | |
1980 } | |
1981 | |
1982 BPF_TEST_C(SandboxBPF, PthreadEquality, PthreadPolicyEquality) { | |
1983 PthreadTest(); | |
1984 } | |
1985 | |
1986 BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) { | |
1987 PthreadTest(); | |
1988 } | |
1989 | |
1990 // libc might not define these even though the kernel supports them. | |
1991 #ifndef PTRACE_O_TRACESECCOMP | |
1992 #define PTRACE_O_TRACESECCOMP 0x00000080 | |
1993 #endif | |
1994 | |
1995 #ifdef PTRACE_EVENT_SECCOMP | |
1996 #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP) | |
1997 #else | |
1998 // When Debian/Ubuntu backported seccomp-bpf support into earlier kernels, they | |
1999 // changed the value of PTRACE_EVENT_SECCOMP from 7 to 8, since 7 was taken by | |
2000 // PTRACE_EVENT_STOP (upstream chose to renumber PTRACE_EVENT_STOP to 128). If | |
2001 // PTRACE_EVENT_SECCOMP isn't defined, we have no choice but to consider both | |
2002 // values here. | |
2003 #define IS_SECCOMP_EVENT(status) ((status >> 16) == 7 || (status >> 16) == 8) | |
2004 #endif | |
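
// For reference (this describes the usual ptrace status layout and is an
// assumption, not something verified by this file): after a seccomp event
// stop, waitpid() reports a status for which
//   WIFSTOPPED(status) == true
//   WSTOPSIG(status)   == SIGTRAP
//   (status >> 16)     == PTRACE_EVENT_SECCOMP  (or 7/8 on older backports)
// which is exactly what the tracer loop below relies on.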
2005 | |
2006 #if defined(__arm__) | |
2007 #ifndef PTRACE_SET_SYSCALL | |
2008 #define PTRACE_SET_SYSCALL 23 | |
2009 #endif | |
2010 #endif | |
2011 | |
2012 #if defined(__aarch64__) | |
2013 #ifndef PTRACE_GETREGS | |
2014 #define PTRACE_GETREGS 12 | |
2015 #endif | |
2016 #endif | |
2017 | |
2018 #if defined(__aarch64__) | |
2019 #ifndef PTRACE_SETREGS | |
2020 #define PTRACE_SETREGS 13 | |
2021 #endif | |
2022 #endif | |
2023 | |
2024 // Changes the syscall to run for a child being sandboxed using seccomp-bpf with | |
2025 // PTRACE_O_TRACESECCOMP. Should only be called when the child is stopped on | |
2026 // PTRACE_EVENT_SECCOMP. | |
2027 // | |
2028 // regs should contain the current set of registers of the child, obtained using | |
2029 // PTRACE_GETREGS. | |
2030 // | |
2031 // Depending on the architecture, this may modify regs, so the caller is | |
2032 // responsible for committing these changes using PTRACE_SETREGS. | |
2033 long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) { | |
2034 #if defined(__arm__) | |
2035 // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the | |
2036 // libc ptrace call as the request parameter is an enum, and | |
2037 // PTRACE_SET_SYSCALL may not be in the enum. | |
2038 return syscall(__NR_ptrace, PTRACE_SET_SYSCALL, pid, NULL, syscall_number); | |
2039 #endif | |
2040 | |
2041 SECCOMP_PT_SYSCALL(*regs) = syscall_number; | |
2042 return 0; | |
2043 } | |
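
// Minimal usage sketch for SetSyscall(), mirroring what the tracer below does
// (illustration only; the real calls are in the SeccompRetTrace test):
//   regs_struct regs;
//   ptrace(PTRACE_GETREGS, pid, NULL, &regs);
//   SetSyscall(pid, &regs, __NR_exit);          // may modify regs
//   ptrace(PTRACE_SETREGS, pid, NULL, &regs);   // commit the change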
2044 | |
2045 const uint16_t kTraceData = 0xcc; | |
2046 | |
2047 class TraceAllPolicy : public Policy { | |
2048 public: | |
2049 TraceAllPolicy() {} | |
2050 ~TraceAllPolicy() override {} | |
2051 | |
2052 ResultExpr EvaluateSyscall(int system_call_number) const override { | |
2053 return Trace(kTraceData); | |
2054 } | |
2055 | |
2056 private: | |
2057 DISALLOW_COPY_AND_ASSIGN(TraceAllPolicy); | |
2058 }; | |
2059 | |
2060 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) { | |
2061 if (!SandboxBPF::SupportsSeccompSandbox( | |
2062 SandboxBPF::SeccompLevel::SINGLE_THREADED)) { | |
2063 return; | |
2064 } | |
2065 | |
2066 // This test is disabled on arm due to a kernel bug. | |
2067 // See https://code.google.com/p/chromium/issues/detail?id=383977 | |
2068 #if defined(__arm__) || defined(__aarch64__) | |
2069 printf("This test is currently disabled on ARM32/64 due to a kernel bug."); | |
2070 return; | |
2071 #endif | |
2072 | |
2073 #if defined(__mips__) | |
2074 // TODO: Figure out how to handle indirect syscalls with the necessary | |
2075 // specificity in this test, then enable it. | |
2076 printf("This test is currently disabled on MIPS."); | |
2077 return; | |
2078 #endif | |
2079 | |
2080 pid_t pid = fork(); | |
2081 BPF_ASSERT_NE(-1, pid); | |
2082 if (pid == 0) { | |
2083 pid_t my_pid = getpid(); | |
2084 BPF_ASSERT_NE(-1, ptrace(PTRACE_TRACEME, -1, NULL, NULL)); | |
2085 BPF_ASSERT_EQ(0, raise(SIGSTOP)); | |
2086 SandboxBPF sandbox(new TraceAllPolicy); | |
2087 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED)); | |
2088 | |
2089 // getpid is allowed. | |
2090 BPF_ASSERT_EQ(my_pid, sys_getpid()); | |
2091 | |
2092 // write to stdout is skipped and returns a fake value. | |
2093 BPF_ASSERT_EQ(kExpectedReturnValue, | |
2094 syscall(__NR_write, STDOUT_FILENO, "A", 1)); | |
2095 | |
2096 // kill is rewritten to exit(kExpectedReturnValue). | |
2097 syscall(__NR_kill, my_pid, SIGKILL); | |
2098 | |
2099 // Should not be reached. | |
2100 BPF_ASSERT(false); | |
2101 } | |
2102 | |
2103 int status; | |
2104 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, WUNTRACED)) != -1); | |
2105 BPF_ASSERT(WIFSTOPPED(status)); | |
2106 | |
2107 BPF_ASSERT_NE(-1, | |
2108 ptrace(PTRACE_SETOPTIONS, | |
2109 pid, | |
2110 NULL, | |
2111 reinterpret_cast<void*>(PTRACE_O_TRACESECCOMP))); | |
2112 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL)); | |
2113 while (true) { | |
2114 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1); | |
2115 if (WIFEXITED(status) || WIFSIGNALED(status)) { | |
2116 BPF_ASSERT(WIFEXITED(status)); | |
2117 BPF_ASSERT_EQ(kExpectedReturnValue, WEXITSTATUS(status)); | |
2118 break; | |
2119 } | |
2120 | |
2121 if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP || | |
2122 !IS_SECCOMP_EVENT(status)) { | |
2123 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL)); | |
2124 continue; | |
2125 } | |
2126 | |
2127 unsigned long data; | |
2128 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETEVENTMSG, pid, NULL, &data)); | |
2129 BPF_ASSERT_EQ(kTraceData, data); | |
2130 | |
2131 regs_struct regs; | |
2132 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, ®s)); | |
2133 switch (SECCOMP_PT_SYSCALL(regs)) { | |
2134 case __NR_write: | |
2135 // Skip writes to stdout, make it return kExpectedReturnValue. Allow | |
2136 // writes to stderr so that BPF_ASSERT messages show up. | |
2137 if (SECCOMP_PT_PARM1(regs) == STDOUT_FILENO) { | |
2138 BPF_ASSERT_NE(-1, SetSyscall(pid, ®s, -1)); | |
2139 SECCOMP_PT_RESULT(regs) = kExpectedReturnValue; | |
2140 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, ®s)); | |
2141 } | |
2142 break; | |
2143 | |
2144 case __NR_kill: | |
2145 // Rewrite to exit(kExpectedReturnValue). | |
2146 BPF_ASSERT_NE(-1, SetSyscall(pid, ®s, __NR_exit)); | |
2147 SECCOMP_PT_PARM1(regs) = kExpectedReturnValue; | |
2148 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, ®s)); | |
2149 break; | |
2150 | |
2151 default: | |
2152 // Allow all other syscalls. | |
2153 break; | |
2154 } | |
2155 | |
2156 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL)); | |
2157 } | |
2158 } | |
2159 | |
2160 // Android does not expose pread64 nor pwrite64. | |
2161 #if !defined(OS_ANDROID) | |
2162 | |
2163 bool FullPwrite64(int fd, const char* buffer, size_t count, off64_t offset) { | |
2164 while (count > 0) { | |
2165 const ssize_t transferred = | |
2166 HANDLE_EINTR(pwrite64(fd, buffer, count, offset)); | |
2167 if (transferred <= 0 || static_cast<size_t>(transferred) > count) { | |
2168 return false; | |
2169 } | |
2170 count -= transferred; | |
2171 buffer += transferred; | |
2172 offset += transferred; | |
2173 } | |
2174 return true; | |
2175 } | |
2176 | |
2177 bool FullPread64(int fd, char* buffer, size_t count, off64_t offset) { | |
2178 while (count > 0) { | |
2179 const ssize_t transferred = HANDLE_EINTR(pread64(fd, buffer, count, offset)); | |
2180 if (transferred <= 0 || static_cast<size_t>(transferred) > count) { | |
2181 return false; | |
2182 } | |
2183 count -= transferred; | |
2184 buffer += transferred; | |
2185 offset += transferred; | |
2186 } | |
2187 return true; | |
2188 } | |
2189 | |
2190 bool pread_64_was_forwarded = false; | |
2191 | |
2192 class TrapPread64Policy : public Policy { | |
2193 public: | |
2194 TrapPread64Policy() {} | |
2195 ~TrapPread64Policy() override {} | |
2196 | |
2197 ResultExpr EvaluateSyscall(int system_call_number) const override { | |
2198 // Set the global environment for unsafe traps once. | |
2199 if (system_call_number == MIN_SYSCALL) { | |
2200 EnableUnsafeTraps(); | |
2201 } | |
2202 | |
2203 if (system_call_number == __NR_pread64) { | |
2204 return UnsafeTrap(ForwardPreadHandler, NULL); | |
2205 } | |
2206 return Allow(); | |
2207 } | |
2208 | |
2209 private: | |
2210 static intptr_t ForwardPreadHandler(const struct arch_seccomp_data& args, | |
2211 void* aux) { | |
2212 BPF_ASSERT(args.nr == __NR_pread64); | |
2213 pread_64_was_forwarded = true; | |
2214 | |
2215 return SandboxBPF::ForwardSyscall(args); | |
2216 } | |
2217 | |
2218 DISALLOW_COPY_AND_ASSIGN(TrapPread64Policy); | |
2219 }; | |
2220 | |
2221 // pread(2) takes a 64-bit offset. On 32-bit systems, it will be split | |
2222 // between two arguments. In this test, we make sure that ForwardSyscall() can | |
2223 // forward it properly. | |
2224 BPF_TEST_C(SandboxBPF, Pread64, TrapPread64Policy) { | |
2225 ScopedTemporaryFile temp_file; | |
2226 const uint64_t kLargeOffset = (static_cast<uint64_t>(1) << 32) | 0xBEEF; | |
2227 const char kTestString[] = "This is a test!"; | |
2228 BPF_ASSERT(FullPwrite64( | |
2229 temp_file.fd(), kTestString, sizeof(kTestString), kLargeOffset)); | |
2230 | |
2231 char read_test_string[sizeof(kTestString)] = {0}; | |
2232 BPF_ASSERT(FullPread64(temp_file.fd(), | |
2233 read_test_string, | |
2234 sizeof(read_test_string), | |
2235 kLargeOffset)); | |
2236 BPF_ASSERT_EQ(0, memcmp(kTestString, read_test_string, sizeof(kTestString))); | |
2237 BPF_ASSERT(pread_64_was_forwarded); | |
2238 } | |
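
// Note on the 64-bit offset used above: on a 32-bit ABI the kernel receives
// the offset as two 32-bit halves, conceptually
//   uint32_t lo = static_cast<uint32_t>(kLargeOffset);
//   uint32_t hi = static_cast<uint32_t>(kLargeOffset >> 32);
// (the exact argument order and any padding register are architecture
// specific; this is only a sketch). ForwardSyscall() has to preserve both
// halves for FullPread64() to read back the string written by FullPwrite64().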
2239 | |
2240 #endif // !defined(OS_ANDROID) | |
2241 | |
2242 void* TsyncApplyToTwoThreadsFunc(void* cond_ptr) { | |
2243 base::WaitableEvent* event = static_cast<base::WaitableEvent*>(cond_ptr); | |
2244 | |
2245 // Wait for the main thread to signal that the filter has been applied. | |
2246 if (!event->IsSignaled()) { | |
2247 event->Wait(); | |
2248 } | |
2249 | |
2250 BPF_ASSERT(event->IsSignaled()); | |
2251 | |
2252 BlacklistNanosleepPolicy::AssertNanosleepFails(); | |
2253 | |
2254 return NULL; | |
2255 } | |
2256 | |
2257 SANDBOX_TEST(SandboxBPF, Tsync) { | |
2258 const bool supports_multi_threaded = SandboxBPF::SupportsSeccompSandbox( | |
2259 SandboxBPF::SeccompLevel::MULTI_THREADED); | |
2260 // On Chrome OS tsync is mandatory. | |
2261 #if defined(OS_CHROMEOS) | |
2262 if (base::SysInfo::IsRunningOnChromeOS()) { | |
2263 BPF_ASSERT_EQ(true, supports_multi_threaded); | |
2264 } | |
2265 // Otherwise, this is a Chrome OS build not running on a Chrome OS device | |
2266 // (e.g. Chrome bots); in this case, fall through. | |
2267 #endif | |
2268 if (!supports_multi_threaded) { | |
2269 return; | |
2270 } | |
2271 | |
2272 base::WaitableEvent event(true, false); | |
2273 | |
2274 // Create a thread on which to invoke the blocked syscall. | |
2275 pthread_t thread; | |
2276 BPF_ASSERT_EQ( | |
2277 0, pthread_create(&thread, NULL, &TsyncApplyToTwoThreadsFunc, &event)); | |
2278 | |
2279 // Test that nanosleep succeeds before the sandbox is engaged. | |
2280 const struct timespec ts = {0, 0}; | |
2281 BPF_ASSERT_EQ(0, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL))); | |
2282 | |
2283 // Engage the sandbox. | |
2284 SandboxBPF sandbox(new BlacklistNanosleepPolicy()); | |
2285 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::MULTI_THREADED)); | |
2286 | |
2287 // This thread should have the filter applied as well. | |
2288 BlacklistNanosleepPolicy::AssertNanosleepFails(); | |
2289 | |
2290 // Signal the condition to invoke the system call. | |
2291 event.Signal(); | |
2292 | |
2293 // Wait for the thread to finish. | |
2294 BPF_ASSERT_EQ(0, pthread_join(thread, NULL)); | |
2295 } | |
2296 | |
2297 class AllowAllPolicy : public Policy { | |
2298 public: | |
2299 AllowAllPolicy() {} | |
2300 ~AllowAllPolicy() override {} | |
2301 | |
2302 ResultExpr EvaluateSyscall(int sysno) const override { return Allow(); } | |
2303 | |
2304 private: | |
2305 DISALLOW_COPY_AND_ASSIGN(AllowAllPolicy); | |
2306 }; | |
2307 | |
2308 SANDBOX_DEATH_TEST( | |
2309 SandboxBPF, | |
2310 StartMultiThreadedAsSingleThreaded, | |
2311 DEATH_MESSAGE("Cannot start sandbox; process is already multi-threaded")) { | |
2312 base::Thread thread("sandbox.linux.StartMultiThreadedAsSingleThreaded"); | |
2313 BPF_ASSERT(thread.Start()); | |
2314 | |
2315 SandboxBPF sandbox(new AllowAllPolicy()); | |
2316 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED)); | |
2317 } | |
2318 | |
2319 // http://crbug.com/407357 | |
2320 #if !defined(THREAD_SANITIZER) | |
2321 SANDBOX_DEATH_TEST( | |
2322 SandboxBPF, | |
2323 StartSingleThreadedAsMultiThreaded, | |
2324 DEATH_MESSAGE( | |
2325 "Cannot start sandbox; process may be single-threaded when " | |
2326 "reported as not")) { | |
2327 SandboxBPF sandbox(new AllowAllPolicy()); | |
2328 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::SeccompLevel::MULTI_THREADED)); | |
2329 } | |
2330 #endif // !defined(THREAD_SANITIZER) | |
2331 | |
2332 // A stub handler for the UnsafeTrap. Never called. | |
2333 intptr_t NoOpHandler(const struct arch_seccomp_data& args, void*) { | |
2334 return -1; | |
2335 } | |
2336 | |
2337 class UnsafeTrapWithCondPolicy : public Policy { | |
2338 public: | |
2339 UnsafeTrapWithCondPolicy() {} | |
2340 ~UnsafeTrapWithCondPolicy() override {} | |
2341 | |
2342 ResultExpr EvaluateSyscall(int sysno) const override { | |
2343 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
2344 setenv(kSandboxDebuggingEnv, "t", 0); | |
2345 Die::SuppressInfoMessages(true); | |
2346 | |
2347 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) | |
2348 return Allow(); | |
2349 | |
2350 switch (sysno) { | |
2351 case __NR_uname: { | |
2352 const Arg<uint32_t> arg(0); | |
2353 return If(arg == 0, Allow()).Else(Error(EPERM)); | |
2354 } | |
2355 case __NR_setgid: { | |
2356 const Arg<uint32_t> arg(0); | |
2357 return Switch(arg) | |
2358 .Case(100, Error(ENOMEM)) | |
2359 .Case(200, Error(ENOSYS)) | |
2360 .Default(Error(EPERM)); | |
2361 } | |
2362 case __NR_close: | |
2363 case __NR_exit_group: | |
2364 case __NR_write: | |
2365 return Allow(); | |
2366 case __NR_getppid: | |
2367 return UnsafeTrap(NoOpHandler, NULL); | |
2368 default: | |
2369 return Error(EPERM); | |
2370 } | |
2371 } | |
2372 | |
2373 private: | |
2374 DISALLOW_COPY_AND_ASSIGN(UnsafeTrapWithCondPolicy); | |
2375 }; | |
2376 | |
2377 BPF_TEST_C(SandboxBPF, UnsafeTrapWithCond, UnsafeTrapWithCondPolicy) { | |
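  // Note: uname(0) is Allow()ed by the policy, so the real uname() runs with
  // a NULL buffer and fails with EFAULT; uname(1) falls through to
  // Error(EPERM). setgid() exercises the Switch() cases.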
2378 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 0)); | |
2379 BPF_ASSERT_EQ(EFAULT, errno); | |
2380 | |
2381 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 1)); | |
2382 BPF_ASSERT_EQ(EPERM, errno); | |
2383 | |
2384 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 100)); | |
2385 BPF_ASSERT_EQ(ENOMEM, errno); | |
2386 | |
2387 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 200)); | |
2388 BPF_ASSERT_EQ(ENOSYS, errno); | |
2389 | |
2390 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 300)); | |
2391 BPF_ASSERT_EQ(EPERM, errno); | |
2392 } | |
2393 | |
2394 } // namespace | |
2395 | |
2396 } // namespace bpf_dsl | |
2397 } // namespace sandbox | |