OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "sandbox/linux/bpf_dsl/bpf_dsl.h" | |
6 | |
7 #include <errno.h> | |
8 #include <fcntl.h> | |
9 #include <pthread.h> | |
10 #include <sched.h> | |
11 #include <signal.h> | |
12 #include <sys/prctl.h> | |
13 #include <sys/ptrace.h> | |
14 #include <sys/syscall.h> | |
15 #include <sys/time.h> | |
16 #include <sys/types.h> | |
17 #include <sys/utsname.h> | |
18 #include <unistd.h> | |
19 #include <sys/socket.h> | |
20 | |
21 #if defined(ANDROID) | |
22 // Work-around for buggy headers in Android's NDK | |
23 #define __user | |
24 #endif | |
25 #include <linux/futex.h> | |
26 | |
27 #include "base/bind.h" | |
28 #include "base/logging.h" | |
29 #include "base/macros.h" | |
30 #include "base/memory/scoped_ptr.h" | |
31 #include "base/posix/eintr_wrapper.h" | |
32 #include "base/synchronization/waitable_event.h" | |
33 #include "base/sys_info.h" | |
34 #include "base/threading/thread.h" | |
35 #include "build/build_config.h" | |
36 #include "sandbox/linux/bpf_dsl/policy.h" | |
37 #include "sandbox/linux/seccomp-bpf/bpf_tests.h" | |
38 #include "sandbox/linux/seccomp-bpf/die.h" | |
39 #include "sandbox/linux/seccomp-bpf/errorcode.h" | |
40 #include "sandbox/linux/seccomp-bpf/linux_seccomp.h" | |
41 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h" | |
42 #include "sandbox/linux/seccomp-bpf/syscall.h" | |
43 #include "sandbox/linux/seccomp-bpf/trap.h" | |
44 #include "sandbox/linux/services/linux_syscalls.h" | |
45 #include "sandbox/linux/services/syscall_wrappers.h" | |
46 #include "sandbox/linux/services/thread_helpers.h" | |
47 #include "sandbox/linux/syscall_broker/broker_file_permission.h" | |
48 #include "sandbox/linux/syscall_broker/broker_process.h" | |
49 #include "sandbox/linux/tests/scoped_temporary_file.h" | |
50 #include "sandbox/linux/tests/unit_tests.h" | |
51 #include "testing/gtest/include/gtest/gtest.h" | |
52 | |
53 // Workaround for Android's prctl.h file. | |
54 #ifndef PR_GET_ENDIAN | |
55 #define PR_GET_ENDIAN 19 | |
56 #endif | |
57 #ifndef PR_CAPBSET_READ | |
58 #define PR_CAPBSET_READ 23 | |
59 #define PR_CAPBSET_DROP 24 | |
60 #endif | |
61 | |
62 namespace sandbox { | |
63 namespace bpf_dsl { | |
64 | |
65 namespace { | |
66 | |
67 const int kExpectedReturnValue = 42; | |
68 const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING"; | |
69 | |
70 // Set the global environment to allow the use of UnsafeTrap() policies. | |
71 void EnableUnsafeTraps() { | |
72 // The use of UnsafeTrap() causes us to print a warning message. This is | |
73 // generally desirable, but it results in the unittest failing, as it doesn't | |
74 // expect any messages on "stderr". So, temporarily disable messages. The | |
75 // BPF_TEST() is guaranteed to turn messages back on, after the policy | |
76 // function has completed. | |
77 setenv(kSandboxDebuggingEnv, "t", 0); | |
78 Die::SuppressInfoMessages(true); | |
79 } | |
80 | |
81 // BPF_TEST does a lot of the boiler-plate code around setting up a | |
82 // policy and optionally passing data between the caller, the policy and | |
83 // any Trap() handlers. This is great for writing short and concise tests, | |
84 // and it helps us avoid accidentally forgetting any of the crucial steps in | |
85 // setting up the sandbox. But it wouldn't hurt to have at least one test | |
86 // that explicitly walks through all these steps. | |
87 | |
88 intptr_t IncreaseCounter(const struct arch_seccomp_data& args, void* aux) { | |
89 BPF_ASSERT(aux); | |
90 int* counter = static_cast<int*>(aux); | |
91 return (*counter)++; | |
92 } | |
93 | |
94 class VerboseAPITestingPolicy : public Policy { | |
95 public: | |
96 explicit VerboseAPITestingPolicy(int* counter_ptr) | |
97 : counter_ptr_(counter_ptr) {} | |
98 ~VerboseAPITestingPolicy() override {} | |
99 | |
100 ResultExpr EvaluateSyscall(int sysno) const override { | |
101 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
102 if (sysno == __NR_uname) { | |
103 return Trap(IncreaseCounter, counter_ptr_); | |
104 } | |
105 return Allow(); | |
106 } | |
107 | |
108 private: | |
109 int* counter_ptr_; | |
110 | |
111 DISALLOW_COPY_AND_ASSIGN(VerboseAPITestingPolicy); | |
112 }; | |
113 | |
114 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) { | |
115 if (SandboxBPF::SupportsSeccompSandbox( | |
116 SandboxBPF::SeccompLevel::SINGLE_THREADED)) { | |
117 static int counter = 0; | |
118 | |
119 SandboxBPF sandbox(new VerboseAPITestingPolicy(&counter)); | |
120 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED)); | |
121 | |
122 BPF_ASSERT_EQ(0, counter); | |
123 BPF_ASSERT_EQ(0, syscall(__NR_uname, 0)); | |
124 BPF_ASSERT_EQ(1, counter); | |
125 BPF_ASSERT_EQ(1, syscall(__NR_uname, 0)); | |
126 BPF_ASSERT_EQ(2, counter); | |
127 } | |
128 } | |
129 | |
130 // A simple blacklist test | |
131 | |
132 class BlacklistNanosleepPolicy : public Policy { | |
133 public: | |
134 BlacklistNanosleepPolicy() {} | |
135 ~BlacklistNanosleepPolicy() override {} | |
136 | |
137 ResultExpr EvaluateSyscall(int sysno) const override { | |
138 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
139 switch (sysno) { | |
140 case __NR_nanosleep: | |
141 return Error(EACCES); | |
142 default: | |
143 return Allow(); | |
144 } | |
145 } | |
146 | |
147 static void AssertNanosleepFails() { | |
148 const struct timespec ts = {0, 0}; | |
149 errno = 0; | |
150 BPF_ASSERT_EQ(-1, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL))); | |
151 BPF_ASSERT_EQ(EACCES, errno); | |
152 } | |
153 | |
154 private: | |
155 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepPolicy); | |
156 }; | |
157 | |
158 BPF_TEST_C(SandboxBPF, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) { | |
159 BlacklistNanosleepPolicy::AssertNanosleepFails(); | |
160 } | |
161 | |
162 BPF_TEST_C(SandboxBPF, UseVsyscall, BlacklistNanosleepPolicy) { | |
163 time_t current_time; | |
164 // time() is implemented as a vsyscall. With an older glibc, with | |
165 // vsyscall=emulate and some versions of the seccomp BPF patch | |
166 // we may get SIGKILL-ed. Detect this! | |
167 BPF_ASSERT_NE(static_cast<time_t>(-1), time(&current_time)); | |
168 } | |
169 | |
170 // Now do a simple whitelist test | |
171 | |
172 class WhitelistGetpidPolicy : public Policy { | |
173 public: | |
174 WhitelistGetpidPolicy() {} | |
175 ~WhitelistGetpidPolicy() override {} | |
176 | |
177 ResultExpr EvaluateSyscall(int sysno) const override { | |
178 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
179 switch (sysno) { | |
180 case __NR_getpid: | |
181 case __NR_exit_group: | |
182 return Allow(); | |
183 default: | |
184 return Error(ENOMEM); | |
185 } | |
186 } | |
187 | |
188 private: | |
189 DISALLOW_COPY_AND_ASSIGN(WhitelistGetpidPolicy); | |
190 }; | |
191 | |
192 BPF_TEST_C(SandboxBPF, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) { | |
193 // getpid() should be allowed | |
194 errno = 0; | |
195 BPF_ASSERT(sys_getpid() > 0); | |
196 BPF_ASSERT(errno == 0); | |
197 | |
198 // getpgid() should be denied | |
199 BPF_ASSERT(getpgid(0) == -1); | |
200 BPF_ASSERT(errno == ENOMEM); | |
201 } | |
202 | |
203 // A simple blacklist policy, with a SIGSYS handler | |
204 intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) { | |
205 // We also check that the auxiliary data is correct | |
206 SANDBOX_ASSERT(aux); | |
207 *(static_cast<int*>(aux)) = kExpectedReturnValue; | |
208 return -ENOMEM; | |
209 } | |
210 | |
211 class BlacklistNanosleepTrapPolicy : public Policy { | |
212 public: | |
213 explicit BlacklistNanosleepTrapPolicy(int* aux) : aux_(aux) {} | |
214 ~BlacklistNanosleepTrapPolicy() override {} | |
215 | |
216 ResultExpr EvaluateSyscall(int sysno) const override { | |
217 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
218 switch (sysno) { | |
219 case __NR_nanosleep: | |
220 return Trap(EnomemHandler, aux_); | |
221 default: | |
222 return Allow(); | |
223 } | |
224 } | |
225 | |
226 private: | |
227 int* aux_; | |
228 | |
229 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepTrapPolicy); | |
230 }; | |
231 | |
232 BPF_TEST(SandboxBPF, | |
233 BasicBlacklistWithSigsys, | |
234 BlacklistNanosleepTrapPolicy, | |
235 int /* (*BPF_AUX) */) { | |
236 // getpid() should work properly | |
237 errno = 0; | |
238 BPF_ASSERT(sys_getpid() > 0); | |
239 BPF_ASSERT(errno == 0); | |
240 | |
241 // Our auxiliary data should be reset by the signal handler | |
242 *BPF_AUX = -1; | |
243 const struct timespec ts = {0, 0}; | |
244 BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1); | |
245 BPF_ASSERT(errno == ENOMEM); | |
246 | |
247 // We expect the signal handler to modify AuxData | |
248 BPF_ASSERT(*BPF_AUX == kExpectedReturnValue); | |
249 } | |
250 | |
251 // A simple test that verifies we can return arbitrary errno values. | |
252 | |
253 class ErrnoTestPolicy : public Policy { | |
254 public: | |
255 ErrnoTestPolicy() {} | |
256 ~ErrnoTestPolicy() override {} | |
257 | |
258 ResultExpr EvaluateSyscall(int sysno) const override; | |
259 | |
260 private: | |
261 DISALLOW_COPY_AND_ASSIGN(ErrnoTestPolicy); | |
262 }; | |
263 | |
264 ResultExpr ErrnoTestPolicy::EvaluateSyscall(int sysno) const { | |
265 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
266 switch (sysno) { | |
267 case __NR_dup3: // dup2 is a wrapper of dup3 in android | |
268 #if defined(__NR_dup2) | |
269 case __NR_dup2: | |
270 #endif | |
271 // Pretend that dup2() worked, but don't actually do anything. | |
272 return Error(0); | |
273 case __NR_setuid: | |
274 #if defined(__NR_setuid32) | |
275 case __NR_setuid32: | |
276 #endif | |
277 // Return errno = 1. | |
278 return Error(1); | |
279 case __NR_setgid: | |
280 #if defined(__NR_setgid32) | |
281 case __NR_setgid32: | |
282 #endif | |
283 // Return maximum errno value (typically 4095). | |
284 return Error(ErrorCode::ERR_MAX_ERRNO); | |
285 case __NR_uname: | |
286 // Return errno = 42; | |
287 return Error(42); | |
288 default: | |
289 return Allow(); | |
290 } | |
291 } | |
292 | |
293 BPF_TEST_C(SandboxBPF, ErrnoTest, ErrnoTestPolicy) { | |
294 // Verify that dup2() returns success, but doesn't actually run. | |
295 int fds[4]; | |
296 BPF_ASSERT(pipe(fds) == 0); | |
297 BPF_ASSERT(pipe(fds + 2) == 0); | |
298 BPF_ASSERT(dup2(fds[2], fds[0]) == 0); | |
299 char buf[1] = {}; | |
300 BPF_ASSERT(write(fds[1], "\x55", 1) == 1); | |
301 BPF_ASSERT(write(fds[3], "\xAA", 1) == 1); | |
302 BPF_ASSERT(read(fds[0], buf, 1) == 1); | |
303 | |
304 // If dup2() executed, we will read \xAA, but if dup2() has been turned | |
305 // into a no-op by our policy, then we will read \x55. | |
306 BPF_ASSERT(buf[0] == '\x55'); | |
307 | |
308 // Verify that we can return the minimum and maximum errno values. | |
309 errno = 0; | |
310 BPF_ASSERT(setuid(0) == -1); | |
311 BPF_ASSERT(errno == 1); | |
312 | |
313 // On Android, errno is only supported up to 255, otherwise errno | |
314 // processing is skipped. | |
315 // We work around this (crbug.com/181647). | |
316 if (sandbox::IsAndroid() && setgid(0) != -1) { | |
317 errno = 0; | |
318 BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO); | |
319 BPF_ASSERT(errno == 0); | |
320 } else { | |
321 errno = 0; | |
322 BPF_ASSERT(setgid(0) == -1); | |
323 BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO); | |
324 } | |
325 | |
326 // Finally, test an errno in between the minimum and maximum. | |
327 errno = 0; | |
328 struct utsname uts_buf; | |
329 BPF_ASSERT(uname(&uts_buf) == -1); | |
330 BPF_ASSERT(errno == 42); | |
331 } | |
332 | |
333 // Testing the stacking of two sandboxes | |
334 | |
335 class StackingPolicyPartOne : public Policy { | |
336 public: | |
337 StackingPolicyPartOne() {} | |
338 ~StackingPolicyPartOne() override {} | |
339 | |
340 ResultExpr EvaluateSyscall(int sysno) const override { | |
341 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
342 switch (sysno) { | |
343 case __NR_getppid: { | |
344 const Arg<int> arg(0); | |
345 return If(arg == 0, Allow()).Else(Error(EPERM)); | |
346 } | |
347 default: | |
348 return Allow(); | |
349 } | |
350 } | |
351 | |
352 private: | |
353 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartOne); | |
354 }; | |
355 | |
356 class StackingPolicyPartTwo : public Policy { | |
357 public: | |
358 StackingPolicyPartTwo() {} | |
359 ~StackingPolicyPartTwo() override {} | |
360 | |
361 ResultExpr EvaluateSyscall(int sysno) const override { | |
362 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
363 switch (sysno) { | |
364 case __NR_getppid: { | |
365 const Arg<int> arg(0); | |
366 return If(arg == 0, Error(EINVAL)).Else(Allow()); | |
367 } | |
368 default: | |
369 return Allow(); | |
370 } | |
371 } | |
372 | |
373 private: | |
374 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartTwo); | |
375 }; | |
376 | |
377 BPF_TEST_C(SandboxBPF, StackingPolicy, StackingPolicyPartOne) { | |
378 errno = 0; | |
379 BPF_ASSERT(syscall(__NR_getppid, 0) > 0); | |
380 BPF_ASSERT(errno == 0); | |
381 | |
382 BPF_ASSERT(syscall(__NR_getppid, 1) == -1); | |
383 BPF_ASSERT(errno == EPERM); | |
384 | |
385 // Stack a second sandbox with its own policy. Verify that we can further | |
386 // restrict filters, but we cannot relax existing filters. | |
387 SandboxBPF sandbox(new StackingPolicyPartTwo()); | |
388 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED)); | |
389 | |
390 errno = 0; | |
391 BPF_ASSERT(syscall(__NR_getppid, 0) == -1); | |
392 BPF_ASSERT(errno == EINVAL); | |
393 | |
394 BPF_ASSERT(syscall(__NR_getppid, 1) == -1); | |
395 BPF_ASSERT(errno == EPERM); | |
396 } | |
397 | |
398 // A more complex, but synthetic policy. This tests the correctness of the BPF | |
399 // program by iterating through all syscalls and checking for an errno that | |
400 // depends on the syscall number. Unlike the Verifier, this exercises the BPF | |
401 // interpreter in the kernel. | |
402 | |
403 // We try to make sure we exercise optimizations in the BPF compiler. We make | |
404 // sure that the compiler can have an opportunity to coalesce syscalls with | |
405 // contiguous numbers and we also make sure that disjoint sets can return the | |
406 // same errno. | |
407 int SysnoToRandomErrno(int sysno) { | |
408 // Small contiguous sets of 4 system calls return an errno equal to the | |
409 // index of that set + 1 (so that we never return a zero errno). | |
410 return ((sysno & ~3) >> 2) % 29 + 1; | |
411 } | |
412 | |
413 class SyntheticPolicy : public Policy { | |
414 public: | |
415 SyntheticPolicy() {} | |
416 ~SyntheticPolicy() override {} | |
417 | |
418 ResultExpr EvaluateSyscall(int sysno) const override { | |
419 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
420 if (sysno == __NR_exit_group || sysno == __NR_write) { | |
421 // exit_group() is special, we really need it to work. | |
422 // write() is needed for BPF_ASSERT() to report a useful error message. | |
423 return Allow(); | |
424 } | |
425 return Error(SysnoToRandomErrno(sysno)); | |
426 } | |
427 | |
428 private: | |
429 DISALLOW_COPY_AND_ASSIGN(SyntheticPolicy); | |
430 }; | |
431 | |
432 BPF_TEST_C(SandboxBPF, SyntheticPolicy, SyntheticPolicy) { | |
433 // Ensure that kExpectedReturnValue + syscall number + 1 does not overflow | |
434 // an int. | |
435 BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >= | |
436 static_cast<int>(MAX_PUBLIC_SYSCALL)); | |
437 | |
438 for (int syscall_number = static_cast<int>(MIN_SYSCALL); | |
439 syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL); | |
440 ++syscall_number) { | |
441 if (syscall_number == __NR_exit_group || syscall_number == __NR_write) { | |
442 // exit_group() and write() are allowed by the policy; skip them | |
443 continue; | |
444 } | |
445 errno = 0; | |
446 BPF_ASSERT(syscall(syscall_number) == -1); | |
447 BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number)); | |
448 } | |
449 } | |
450 | |
451 #if defined(__arm__) | |
452 // A simple policy that tests whether ARM private system calls are supported | |
453 // by our BPF compiler and by the BPF interpreter in the kernel. | |
454 | |
455 // For ARM private system calls, return an errno equal to their offset from | |
456 // MIN_PRIVATE_SYSCALL plus 1 (to avoid NUL errno). | |
457 int ArmPrivateSysnoToErrno(int sysno) { | |
458 if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) && | |
459 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) { | |
460 return (sysno - MIN_PRIVATE_SYSCALL) + 1; | |
461 } else { | |
462 return ENOSYS; | |
463 } | |
464 } | |
465 | |
466 class ArmPrivatePolicy : public Policy { | |
467 public: | |
468 ArmPrivatePolicy() {} | |
469 ~ArmPrivatePolicy() override {} | |
470 | |
471 ResultExpr EvaluateSyscall(int sysno) const override { | |
472 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
473 // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual | |
474 // ARM private system calls. | |
475 if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) && | |
476 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) { | |
477 return Error(ArmPrivateSysnoToErrno(sysno)); | |
478 } | |
479 return Allow(); | |
480 } | |
481 | |
482 private: | |
483 DISALLOW_COPY_AND_ASSIGN(ArmPrivatePolicy); | |
484 }; | |
485 | |
486 BPF_TEST_C(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) { | |
487 for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1); | |
488 syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL); | |
489 ++syscall_number) { | |
490 errno = 0; | |
491 BPF_ASSERT(syscall(syscall_number) == -1); | |
492 BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number)); | |
493 } | |
494 } | |
495 #endif // defined(__arm__) | |
496 | |
497 intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) { | |
498 // Count all invocations of our callback function. | |
499 ++*reinterpret_cast<int*>(aux); | |
500 | |
501 // Verify that within the callback function all filtering is temporarily | |
502 // disabled. | |
503 BPF_ASSERT(sys_getpid() > 1); | |
504 | |
505 // Verify that we can now call the underlying system call without causing | |
506 // infinite recursion. | |
507 return SandboxBPF::ForwardSyscall(args); | |
508 } | |
509 | |
510 class GreyListedPolicy : public Policy { | |
511 public: | |
512 explicit GreyListedPolicy(int* aux) : aux_(aux) { | |
513 // Set the global environment for unsafe traps once. | |
514 EnableUnsafeTraps(); | |
515 } | |
516 ~GreyListedPolicy() override {} | |
517 | |
518 ResultExpr EvaluateSyscall(int sysno) const override { | |
519 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
520 // Some system calls must always be allowed, if our policy wants to make | |
521 // use of UnsafeTrap() | |
522 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) { | |
523 return Allow(); | |
524 } else if (sysno == __NR_getpid) { | |
525 // Disallow getpid() | |
526 return Error(EPERM); | |
527 } else { | |
528 // Allow (and count) all other system calls. | |
529 return UnsafeTrap(CountSyscalls, aux_); | |
530 } | |
531 } | |
532 | |
533 private: | |
534 int* aux_; | |
535 | |
536 DISALLOW_COPY_AND_ASSIGN(GreyListedPolicy); | |
537 }; | |
538 | |
539 BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* (*BPF_AUX) */) { | |
540 BPF_ASSERT(sys_getpid() == -1); | |
541 BPF_ASSERT(errno == EPERM); | |
542 BPF_ASSERT(*BPF_AUX == 0); | |
543 BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid)); | |
544 BPF_ASSERT(*BPF_AUX == 2); | |
545 char name[17] = {}; | |
546 BPF_ASSERT(!syscall(__NR_prctl, | |
547 PR_GET_NAME, | |
548 name, | |
549 (void*)NULL, | |
550 (void*)NULL, | |
551 (void*)NULL)); | |
552 BPF_ASSERT(*BPF_AUX == 3); | |
553 BPF_ASSERT(*name); | |
554 } | |
555 | |
556 SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) { | |
557 // Disable warning messages that could confuse our test framework. | |
558 setenv(kSandboxDebuggingEnv, "t", 0); | |
559 Die::SuppressInfoMessages(true); | |
560 | |
561 unsetenv(kSandboxDebuggingEnv); | |
562 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false); | |
563 setenv(kSandboxDebuggingEnv, "", 1); | |
564 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false); | |
565 setenv(kSandboxDebuggingEnv, "t", 1); | |
566 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == true); | |
567 } | |
568 | |
569 intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) { | |
570 if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) { | |
571 // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always | |
572 // return an error. But our handler allows this call. | |
573 return 0; | |
574 } else { | |
575 return SandboxBPF::ForwardSyscall(args); | |
576 } | |
577 } | |
578 | |
579 class PrctlPolicy : public Policy { | |
580 public: | |
581 PrctlPolicy() {} | |
582 ~PrctlPolicy() override {} | |
583 | |
584 ResultExpr EvaluateSyscall(int sysno) const override { | |
585 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
586 setenv(kSandboxDebuggingEnv, "t", 0); | |
587 Die::SuppressInfoMessages(true); | |
588 | |
589 if (sysno == __NR_prctl) { | |
590 // Handle prctl() inside an UnsafeTrap() | |
591 return UnsafeTrap(PrctlHandler, NULL); | |
592 } | |
593 | |
594 // Allow all other system calls. | |
595 return Allow(); | |
596 } | |
597 | |
598 private: | |
599 DISALLOW_COPY_AND_ASSIGN(PrctlPolicy); | |
600 }; | |
601 | |
602 BPF_TEST_C(SandboxBPF, ForwardSyscall, PrctlPolicy) { | |
603 // This call should never be allowed. But our policy will intercept it and | |
604 // let it pass successfully. | |
605 BPF_ASSERT( | |
606 !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL)); | |
607 | |
608 // Verify that the call will fail, if it makes it all the way to the kernel. | |
609 BPF_ASSERT( | |
610 prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1); | |
611 | |
612 // And verify that other uses of prctl() work just fine. | |
613 char name[17] = {}; | |
614 BPF_ASSERT(!syscall(__NR_prctl, | |
615 PR_GET_NAME, | |
616 name, | |
617 (void*)NULL, | |
618 (void*)NULL, | |
619 (void*)NULL)); | |
620 BPF_ASSERT(*name); | |
621 | |
622 // Finally, verify that system calls other than prctl() are completely | |
623 // unaffected by our policy. | |
624 struct utsname uts = {}; | |
625 BPF_ASSERT(!uname(&uts)); | |
626 BPF_ASSERT(!strcmp(uts.sysname, "Linux")); | |
627 } | |
628 | |
629 intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) { | |
630 return SandboxBPF::ForwardSyscall(args); | |
631 } | |
632 | |
633 class RedirectAllSyscallsPolicy : public Policy { | |
634 public: | |
635 RedirectAllSyscallsPolicy() {} | |
636 ~RedirectAllSyscallsPolicy() override {} | |
637 | |
638 ResultExpr EvaluateSyscall(int sysno) const override; | |
639 | |
640 private: | |
641 DISALLOW_COPY_AND_ASSIGN(RedirectAllSyscallsPolicy); | |
642 }; | |
643 | |
644 ResultExpr RedirectAllSyscallsPolicy::EvaluateSyscall(int sysno) const { | |
645 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
646 setenv(kSandboxDebuggingEnv, "t", 0); | |
647 Die::SuppressInfoMessages(true); | |
648 | |
649 // Some system calls must always be allowed, if our policy wants to make | |
650 // use of UnsafeTrap() | |
651 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) | |
652 return Allow(); | |
653 return UnsafeTrap(AllowRedirectedSyscall, NULL); | |
654 } | |
655 | |
656 int bus_handler_fd_ = -1; | |
657 | |
658 void SigBusHandler(int, siginfo_t* info, void* void_context) { | |
659 BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1); | |
660 } | |
661 | |
662 BPF_TEST_C(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) { | |
663 // We use the SIGBUS bit in the signal mask as a thread-local boolean | |
664 // value in the implementation of UnsafeTrap(). This is obviously a bit | |
665 // of a hack that could conceivably interfere with code that uses SIGBUS | |
666 // in more traditional ways. This test verifies that basic functionality | |
667 // of SIGBUS is not impacted, but it is certainly possible to construe | |
668 // more complex uses of signals where our use of the SIGBUS mask is not | |
669 // 100% transparent. This is expected behavior. | |
670 int fds[2]; | |
671 BPF_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0); | |
672 bus_handler_fd_ = fds[1]; | |
673 struct sigaction sa = {}; | |
674 sa.sa_sigaction = SigBusHandler; | |
675 sa.sa_flags = SA_SIGINFO; | |
676 BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0); | |
677 raise(SIGBUS); | |
678 char c = '\000'; | |
679 BPF_ASSERT(read(fds[0], &c, 1) == 1); | |
680 BPF_ASSERT(close(fds[0]) == 0); | |
681 BPF_ASSERT(close(fds[1]) == 0); | |
682 BPF_ASSERT(c == 0x55); | |
683 } | |
684 | |
685 BPF_TEST_C(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) { | |
686 // Signal masks are potentially tricky to handle. For instance, if we | |
687 // ever tried to update them from inside a Trap() or UnsafeTrap() handler, | |
688 // the call to sigreturn() at the end of the signal handler would undo | |
689 // all of our efforts. So, it makes sense to test that sigprocmask() | |
690 // works, even if we have a policy in place that makes use of UnsafeTrap(). | |
691 // In practice, this works because we force sigprocmask() to be handled | |
692 // entirely in the kernel. | |
693 sigset_t mask0, mask1, mask2; | |
694 | |
695 // Call sigprocmask() to verify that SIGUSR2 wasn't blocked, if we didn't | |
696 // change the mask (it shouldn't have been, as it isn't blocked by default | |
697 // in POSIX). | |
698 // | |
699 // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose. | |
700 sigemptyset(&mask0); | |
701 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1)); | |
702 BPF_ASSERT(!sigismember(&mask1, SIGUSR2)); | |
703 | |
704 // Try again, and this time we verify that we can block it. This | |
705 // requires a second call to sigprocmask(). | |
706 sigaddset(&mask0, SIGUSR2); | |
707 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL)); | |
708 BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2)); | |
709 BPF_ASSERT(sigismember(&mask2, SIGUSR2)); | |
710 } | |
711 | |
712 BPF_TEST_C(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) { | |
713 // An UnsafeTrap() (or for that matter, a Trap()) has to report error | |
714 // conditions by returning an exit code in the range -1..-4096. This | |
715 // should happen automatically if using ForwardSyscall(). If the TrapFnc() | |
716 // uses some other method to make system calls, then it is responsible | |
717 // for computing the correct return code. | |
718 // This test verifies that ForwardSyscall() does the correct thing. | |
719 | |
720 // The glibc system wrapper will ultimately set errno for us. So, from normal | |
721 // userspace, all of this should be completely transparent. | |
722 errno = 0; | |
723 BPF_ASSERT(close(-1) == -1); | |
724 BPF_ASSERT(errno == EBADF); | |
725 | |
726 // Explicitly avoid the glibc wrapper. This is not normally the way anybody | |
727 // would make system calls, but it allows us to verify that we don't | |
728 // accidentally mess with errno, when we shouldn't. | |
729 errno = 0; | |
730 struct arch_seccomp_data args = {}; | |
731 args.nr = __NR_close; | |
732 args.args[0] = -1; | |
733 BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF); | |
734 BPF_ASSERT(errno == 0); | |
735 } | |
736 | |
737 bool NoOpCallback() { | |
738 return true; | |
739 } | |
740 | |
741 // Test a trap handler that makes use of a broker process to open(). | |
742 | |
743 class InitializedOpenBroker { | |
744 public: | |
745 InitializedOpenBroker() : initialized_(false) { | |
746 std::vector<syscall_broker::BrokerFilePermission> permissions; | |
747 permissions.push_back( | |
748 syscall_broker::BrokerFilePermission::ReadOnly("/proc/allowed")); | |
749 permissions.push_back( | |
750 syscall_broker::BrokerFilePermission::ReadOnly("/proc/cpuinfo")); | |
751 | |
752 broker_process_.reset( | |
753 new syscall_broker::BrokerProcess(EPERM, permissions)); | |
754 BPF_ASSERT(broker_process() != NULL); | |
755 BPF_ASSERT(broker_process_->Init(base::Bind(&NoOpCallback))); | |
756 | |
757 initialized_ = true; | |
758 } | |
759 bool initialized() { return initialized_; } | |
760 class syscall_broker::BrokerProcess* broker_process() { | |
761 return broker_process_.get(); | |
762 } | |
763 | |
764 private: | |
765 bool initialized_; | |
766 scoped_ptr<class syscall_broker::BrokerProcess> broker_process_; | |
767 DISALLOW_COPY_AND_ASSIGN(InitializedOpenBroker); | |
768 }; | |
769 | |
770 intptr_t BrokerOpenTrapHandler(const struct arch_seccomp_data& args, | |
771 void* aux) { | |
772 BPF_ASSERT(aux); | |
773 syscall_broker::BrokerProcess* broker_process = | |
774 static_cast<syscall_broker::BrokerProcess*>(aux); | |
775 switch (args.nr) { | |
776 case __NR_faccessat: // access is a wrapper of faccessat in android | |
777 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD); | |
778 return broker_process->Access(reinterpret_cast<const char*>(args.args[1]), | |
779 static_cast<int>(args.args[2])); | |
780 #if defined(__NR_access) | |
781 case __NR_access: | |
782 return broker_process->Access(reinterpret_cast<const char*>(args.args[0]), | |
783 static_cast<int>(args.args[1])); | |
784 #endif | |
785 #if defined(__NR_open) | |
786 case __NR_open: | |
787 return broker_process->Open(reinterpret_cast<const char*>(args.args[0]), | |
788 static_cast<int>(args.args[1])); | |
789 #endif | |
790 case __NR_openat: | |
791 // We only call open() so if we arrive here, it's because glibc uses | |
792 // the openat() system call. | |
793 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD); | |
794 return broker_process->Open(reinterpret_cast<const char*>(args.args[1]), | |
795 static_cast<int>(args.args[2])); | |
796 default: | |
797 BPF_ASSERT(false); | |
798 return -ENOSYS; | |
799 } | |
800 } | |
801 | |
802 class DenyOpenPolicy : public Policy { | |
803 public: | |
804 explicit DenyOpenPolicy(InitializedOpenBroker* iob) : iob_(iob) {} | |
805 ~DenyOpenPolicy() override {} | |
806 | |
807 ResultExpr EvaluateSyscall(int sysno) const override { | |
808 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
809 | |
810 switch (sysno) { | |
811 case __NR_faccessat: | |
812 #if defined(__NR_access) | |
813 case __NR_access: | |
814 #endif | |
815 #if defined(__NR_open) | |
816 case __NR_open: | |
817 #endif | |
818 case __NR_openat: | |
819 // We get an InitializedOpenBroker class, but our trap handler wants | |
820 // the syscall_broker::BrokerProcess object. | |
821 return Trap(BrokerOpenTrapHandler, iob_->broker_process()); | |
822 default: | |
823 return Allow(); | |
824 } | |
825 } | |
826 | |
827 private: | |
828 InitializedOpenBroker* iob_; | |
829 | |
830 DISALLOW_COPY_AND_ASSIGN(DenyOpenPolicy); | |
831 }; | |
832 | |
833 // We use an InitializedOpenBroker class, so that we can run unsandboxed | |
834 // code in its constructor, which is the only way to do so in a BPF_TEST. | |
835 BPF_TEST(SandboxBPF, | |
836 UseOpenBroker, | |
837 DenyOpenPolicy, | |
838 InitializedOpenBroker /* (*BPF_AUX) */) { | |
839 BPF_ASSERT(BPF_AUX->initialized()); | |
840 syscall_broker::BrokerProcess* broker_process = BPF_AUX->broker_process(); | |
841 BPF_ASSERT(broker_process != NULL); | |
842 | |
843 // First, use the broker "manually" | |
844 BPF_ASSERT(broker_process->Open("/proc/denied", O_RDONLY) == -EPERM); | |
845 BPF_ASSERT(broker_process->Access("/proc/denied", R_OK) == -EPERM); | |
846 BPF_ASSERT(broker_process->Open("/proc/allowed", O_RDONLY) == -ENOENT); | |
847 BPF_ASSERT(broker_process->Access("/proc/allowed", R_OK) == -ENOENT); | |
848 | |
849 // Now use glibc's open() as an external library would. | |
850 BPF_ASSERT(open("/proc/denied", O_RDONLY) == -1); | |
851 BPF_ASSERT(errno == EPERM); | |
852 | |
853 BPF_ASSERT(open("/proc/allowed", O_RDONLY) == -1); | |
854 BPF_ASSERT(errno == ENOENT); | |
855 | |
856 // Also test glibc's openat(); some versions of libc use it transparently | |
857 // instead of open(). | |
858 BPF_ASSERT(openat(AT_FDCWD, "/proc/denied", O_RDONLY) == -1); | |
859 BPF_ASSERT(errno == EPERM); | |
860 | |
861 BPF_ASSERT(openat(AT_FDCWD, "/proc/allowed", O_RDONLY) == -1); | |
862 BPF_ASSERT(errno == ENOENT); | |
863 | |
864 // And test glibc's access(). | |
865 BPF_ASSERT(access("/proc/denied", R_OK) == -1); | |
866 BPF_ASSERT(errno == EPERM); | |
867 | |
868 BPF_ASSERT(access("/proc/allowed", R_OK) == -1); | |
869 BPF_ASSERT(errno == ENOENT); | |
870 | |
871 // This is also whitelisted and does exist. | |
872 int cpu_info_access = access("/proc/cpuinfo", R_OK); | |
873 BPF_ASSERT(cpu_info_access == 0); | |
874 int cpu_info_fd = open("/proc/cpuinfo", O_RDONLY); | |
875 BPF_ASSERT(cpu_info_fd >= 0); | |
876 char buf[1024]; | |
877 BPF_ASSERT(read(cpu_info_fd, buf, sizeof(buf)) > 0); | |
878 } | |
879 | |
880 // Simple test demonstrating how to use SandboxBPF::Cond() | |
881 | |
882 class SimpleCondTestPolicy : public Policy { | |
883 public: | |
884 SimpleCondTestPolicy() {} | |
885 ~SimpleCondTestPolicy() override {} | |
886 | |
887 ResultExpr EvaluateSyscall(int sysno) const override; | |
888 | |
889 private: | |
890 DISALLOW_COPY_AND_ASSIGN(SimpleCondTestPolicy); | |
891 }; | |
892 | |
893 ResultExpr SimpleCondTestPolicy::EvaluateSyscall(int sysno) const { | |
894 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
895 | |
896 // We deliberately return unusual errno values upon failure, so that we | |
897 // can uniquely test for these values. In a "real" policy, you would want | |
898 // to return more traditional values. | |
899 int flags_argument_position = -1; | |
900 switch (sysno) { | |
901 #if defined(__NR_open) | |
902 case __NR_open: | |
903 flags_argument_position = 1; | |
904 #endif | |
905 case __NR_openat: { // open can be a wrapper for openat(2). | |
906 if (sysno == __NR_openat) | |
907 flags_argument_position = 2; | |
908 | |
909 // Allow opening files for reading, but don't allow writing. | |
910 static_assert(O_RDONLY == 0, "O_RDONLY must be all zero bits"); | |
911 const Arg<int> flags(flags_argument_position); | |
912 return If((flags & O_ACCMODE) != 0, Error(EROFS)).Else(Allow()); | |
913 } | |
914 case __NR_prctl: { | |
915 // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but | |
916 // disallow everything else. | |
917 const Arg<int> option(0); | |
918 return If(option == PR_SET_DUMPABLE || option == PR_GET_DUMPABLE, Allow()) | |
919 .Else(Error(ENOMEM)); | |
920 } | |
921 default: | |
922 return Allow(); | |
923 } | |
924 } | |
925 | |
926 BPF_TEST_C(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) { | |
927 int fd; | |
928 BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1); | |
929 BPF_ASSERT(errno == EROFS); | |
930 BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0); | |
931 close(fd); | |
932 | |
933 int ret; | |
934 BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0); | |
935 BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0); | |
936 BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1); | |
937 BPF_ASSERT(errno == ENOMEM); | |
938 } | |
939 | |
940 // This test exercises the SandboxBPF::Cond() method by building a complex | |
941 // tree of conditional equality operations. It then makes system calls and | |
942 // verifies that they return the values that we expected from our BPF | |
943 // program. | |
944 class EqualityStressTest { | |
945 public: | |
946 EqualityStressTest() { | |
947 // We want a deterministic test | |
948 srand(0); | |
949 | |
950 // Iterates over system call numbers and builds a random tree of | |
951 // equality tests. | |
952 // We are actually constructing a graph of ArgValue objects. This | |
953 // graph will later be used to a) compute our sandbox policy, and | |
954 // b) drive the code that verifies the output from the BPF program. | |
955 static_assert( | |
956 kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10), | |
957 "kNumTestCases must be significantly smaller than the number " | |
958 "of system calls"); | |
959 for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) { | |
960 if (IsReservedSyscall(sysno)) { | |
961 // Skip reserved system calls. This ensures that our test | |
962 // framework isn't impacted by the fact that we are overriding | |
963 // a lot of different system calls. | |
964 ++end; | |
965 arg_values_.push_back(NULL); | |
966 } else { | |
967 arg_values_.push_back( | |
968 RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs)); | |
969 } | |
970 } | |
971 } | |
972 | |
973 ~EqualityStressTest() { | |
974 for (std::vector<ArgValue*>::iterator iter = arg_values_.begin(); | |
975 iter != arg_values_.end(); | |
976 ++iter) { | |
977 DeleteArgValue(*iter); | |
978 } | |
979 } | |
980 | |
981 ResultExpr Policy(int sysno) { | |
982 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
983 if (sysno < 0 || sysno >= (int)arg_values_.size() || | |
984 IsReservedSyscall(sysno)) { | |
985 // We only return ErrorCode values for the system calls that | |
986 // are part of our test data. Every other system call remains | |
987 // allowed. | |
988 return Allow(); | |
989 } else { | |
990 // ToErrorCode() turns an ArgValue object into an ErrorCode that is | |
991 // suitable for use by a sandbox policy. | |
992 return ToErrorCode(arg_values_[sysno]); | |
993 } | |
994 } | |
995 | |
996 void VerifyFilter() { | |
997 // Iterate over all system calls. Skip the system calls that have | |
998 // previously been determined as being reserved. | |
999 for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) { | |
1000 if (!arg_values_[sysno]) { | |
1001 // Skip reserved system calls. | |
1002 continue; | |
1003 } | |
1004 // Verify that system calls return the values that we expect them to | |
1005 // return. This involves passing different combinations of system call | |
1006 // parameters in order to exercise all possible code paths through the | |
1007 // BPF filter program. | |
1008 // We arbitrarily start by setting all six system call arguments to | |
1009 // zero. And we then recursively traverse our tree of ArgValues to | |
1010 // determine the necessary combinations of parameters. | |
1011 intptr_t args[6] = {}; | |
1012 Verify(sysno, args, *arg_values_[sysno]); | |
1013 } | |
1014 } | |
1015 | |
1016 private: | |
1017 struct ArgValue { | |
1018 int argno; // Argument number to inspect. | |
1019 int size; // Number of test cases (must be > 0). | |
1020 struct Tests { | |
1021 uint32_t k_value; // Value to compare syscall arg against. | |
1022 int err; // If non-zero, errno value to return. | |
1023 struct ArgValue* arg_value; // Otherwise, more args needs inspecting. | |
1024 }* tests; | |
1025 int err; // If none of the tests passed, this is what | |
1026 struct ArgValue* arg_value; // we'll return (this is the "else" branch). | |
1027 }; | |
1028 | |
1029 bool IsReservedSyscall(int sysno) { | |
1030 // There are a handful of system calls that we should never use in our | |
1031 // test cases. These system calls are needed to allow the test framework | |
1032 // to run properly. | |
1033 // If we wanted to write fully generic code, there are more system calls | |
1034 // that could be listed here, and it is quite difficult to come up with a | |
1035 // truly comprehensive list. After all, we are deliberately making system | |
1036 // calls unavailable. In practice, we have a pretty good idea of the system | |
1037 // calls that will be made by this particular test. So, this small list is | |
1038 // sufficient. But if anybody copy'n'pasted this code for other uses, they | |
1039 // would have to review the list. | |
1040 return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit || | |
1041 sysno == __NR_exit_group || sysno == __NR_restart_syscall; | |
1042 } | |
1043 | |
1044 ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) { | |
1045 // Create a new ArgValue and fill it with random data. We use a bit mask | |
1046 // to keep track of the system call parameters that have previously been | |
1047 // set; this ensures that we won't accidentally define a contradictory | |
1048 // set of equality tests. | |
1049 struct ArgValue* arg_value = new ArgValue(); | |
1050 args_mask |= 1 << argno; | |
1051 arg_value->argno = argno; | |
1052 | |
1053 // Apply some restrictions on just how complex our tests can be. | |
1054 // Otherwise, we end up with a BPF program that is too complicated for | |
1055 // the kernel to load. | |
1056 int fan_out = kMaxFanOut; | |
1057 if (remaining_args > 3) { | |
1058 fan_out = 1; | |
1059 } else if (remaining_args > 2) { | |
1060 fan_out = 2; | |
1061 } | |
1062 | |
1063 // Create a couple of different test cases with randomized values that | |
1064 // we want to use when comparing system call parameter number "argno". | |
1065 arg_value->size = rand() % fan_out + 1; | |
1066 arg_value->tests = new ArgValue::Tests[arg_value->size]; | |
1067 | |
1068 uint32_t k_value = rand(); | |
1069 for (int n = 0; n < arg_value->size; ++n) { | |
1070 // Ensure that we have unique values | |
1071 k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1; | |
1072 | |
1073 // There are two possible types of nodes. Either this is a leaf node; | |
1074 // in that case, we have completed all the equality tests that we | |
1075 // wanted to perform, and we can now compute a random "errno" value that | |
1076 // we should return. Or this is part of a more complex boolean | |
1077 // expression; in that case, we have to recursively add tests for some | |
1078 // of the system call parameters that we have not yet included in our | |
1079 // tests. | |
1080 arg_value->tests[n].k_value = k_value; | |
1081 if (!remaining_args || (rand() & 1)) { | |
1082 arg_value->tests[n].err = (rand() % 1000) + 1; | |
1083 arg_value->tests[n].arg_value = NULL; | |
1084 } else { | |
1085 arg_value->tests[n].err = 0; | |
1086 arg_value->tests[n].arg_value = | |
1087 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1); | |
1088 } | |
1089 } | |
1090 // Finally, we have to define what we should return if none of the | |
1091 // previous equality tests pass. Again, we can either deal with a leaf | |
1092 // node, or we can randomly add another couple of tests. | |
1093 if (!remaining_args || (rand() & 1)) { | |
1094 arg_value->err = (rand() % 1000) + 1; | |
1095 arg_value->arg_value = NULL; | |
1096 } else { | |
1097 arg_value->err = 0; | |
1098 arg_value->arg_value = | |
1099 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1); | |
1100 } | |
1101 // We have now built a new (sub-)tree of ArgValues defining a set of | |
1102 // boolean expressions for testing random system call arguments against | |
1103 // random values. Return this tree to our caller. | |
1104 return arg_value; | |
1105 } | |
1106 | |
1107 int RandomArg(int args_mask) { | |
1108 // Compute a random system call parameter number. | |
1109 int argno = rand() % kMaxArgs; | |
1110 | |
1111 // Make sure that this same parameter number has not previously been | |
1112 // used. Otherwise, we could end up with a test that is impossible to | |
1113 // satisfy (e.g. args[0] == 1 && args[0] == 2). | |
1114 while (args_mask & (1 << argno)) { | |
1115 argno = (argno + 1) % kMaxArgs; | |
1116 } | |
1117 return argno; | |
1118 } | |
1119 | |
1120 void DeleteArgValue(ArgValue* arg_value) { | |
1121 // Delete an ArgValue and all of its child nodes. This requires | |
1122 // recursively descending into the tree. | |
1123 if (arg_value) { | |
1124 if (arg_value->size) { | |
1125 for (int n = 0; n < arg_value->size; ++n) { | |
1126 if (!arg_value->tests[n].err) { | |
1127 DeleteArgValue(arg_value->tests[n].arg_value); | |
1128 } | |
1129 } | |
1130 delete[] arg_value->tests; | |
1131 } | |
1132 if (!arg_value->err) { | |
1133 DeleteArgValue(arg_value->arg_value); | |
1134 } | |
1135 delete arg_value; | |
1136 } | |
1137 } | |
1138 | |
1139 ResultExpr ToErrorCode(ArgValue* arg_value) { | |
1140 // Compute the ResultExpr that should be returned, if none of our | |
1141 // tests succeed (i.e. the system call parameter doesn't match any | |
1142 // of the values in arg_value->tests[].k_value). | |
1143 ResultExpr err; | |
1144 if (arg_value->err) { | |
1145 // If this was a leaf node, return the errno value that we expect to | |
1146 // return from the BPF filter program. | |
1147 err = Error(arg_value->err); | |
1148 } else { | |
1149 // If this wasn't a leaf node yet, recursively descend into the rest | |
1150 // of the tree. This will end up adding a few more SandboxBPF::Cond() | |
1151 // tests to our ErrorCode. | |
1152 err = ToErrorCode(arg_value->arg_value); | |
1153 } | |
1154 | |
1155 // Now, iterate over all the test cases that we want to compare against. | |
1156 // This builds a chain of SandboxBPF::Cond() tests | |
1157 // (aka "if ... elif ... elif ... elif ... fi") | |
1158 for (int n = arg_value->size; n-- > 0;) { | |
1159 ResultExpr matched; | |
1160 // Again, we distinguish between leaf nodes and subtrees. | |
1161 if (arg_value->tests[n].err) { | |
1162 matched = Error(arg_value->tests[n].err); | |
1163 } else { | |
1164 matched = ToErrorCode(arg_value->tests[n].arg_value); | |
1165 } | |
1166 // For now, all of our tests are limited to 32bit. | |
1167 // We have separate tests that check the behavior of 32bit vs. 64bit | |
1168 // conditional expressions. | |
1169 const Arg<uint32_t> arg(arg_value->argno); | |
1170 err = If(arg == arg_value->tests[n].k_value, matched).Else(err); | |
1171 } | |
1172 return err; | |
1173 } | |
1174 | |
1175 void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) { | |
1176 uint32_t mismatched = 0; | |
1177 // Iterate over all the k_values in arg_value.tests[] and verify that | |
1178 // we see the expected return values from system calls, when we pass | |
1179 // the k_value as a parameter in a system call. | |
1180 for (int n = arg_value.size; n-- > 0;) { | |
1181 mismatched += arg_value.tests[n].k_value; | |
1182 args[arg_value.argno] = arg_value.tests[n].k_value; | |
1183 if (arg_value.tests[n].err) { | |
1184 VerifyErrno(sysno, args, arg_value.tests[n].err); | |
1185 } else { | |
1186 Verify(sysno, args, *arg_value.tests[n].arg_value); | |
1187 } | |
1188 } | |
1189 // Find a k_value that doesn't match any of the k_values in | |
1190 // arg_value.tests[]. In most cases, the current value of "mismatched" | |
1191 // would fit this requirement. But on the off-chance that it happens | |
1192 // to collide, we double-check. | |
1193 try_again: | |
1194 for (int n = arg_value.size; n-- > 0;) { | |
1195 if (mismatched == arg_value.tests[n].k_value) { | |
1196 ++mismatched; | |
1197 goto try_again; | |
1198 } | |
1199 } | |
1200 // Now verify that we see the expected return value from system calls, | |
1201 // if we pass a value that doesn't match any of the conditions (i.e. this | |
1202 // is testing the "else" clause of the conditions). | |
1203 args[arg_value.argno] = mismatched; | |
1204 if (arg_value.err) { | |
1205 VerifyErrno(sysno, args, arg_value.err); | |
1206 } else { | |
1207 Verify(sysno, args, *arg_value.arg_value); | |
1208 } | |
1209 // Reset args[arg_value.argno]. This is not technically needed, but it | |
1210 // makes it easier to reason about the correctness of our tests. | |
1211 args[arg_value.argno] = 0; | |
1212 } | |
1213 | |
1214 void VerifyErrno(int sysno, intptr_t* args, int err) { | |
1215 // We installed BPF filters that return different errno values | |
1216 // based on the system call number and the parameters that we decided | |
1217 // to pass in. Verify that this condition holds true. | |
1218 BPF_ASSERT( | |
1219 Syscall::Call( | |
1220 sysno, args[0], args[1], args[2], args[3], args[4], args[5]) == | |
1221 -err); | |
1222 } | |
1223 | |
1224 // Vector of ArgValue trees. These trees define all the possible boolean | |
1225 // expressions that we want to turn into a BPF filter program. | |
1226 std::vector<ArgValue*> arg_values_; | |
1227 | |
1228 // Don't increase these values. We are pushing the limits of the maximum | |
1229 // BPF program that the kernel will allow us to load. If the values are | |
1230 // increased too much, the test will start failing. | |
1231 #if defined(__aarch64__) | |
1232 static const int kNumTestCases = 30; | |
1233 #else | |
1234 static const int kNumTestCases = 40; | |
1235 #endif | |
1236 static const int kMaxFanOut = 3; | |
1237 static const int kMaxArgs = 6; | |
1238 }; | |
1239 | |
1240 class EqualityStressTestPolicy : public Policy { | |
1241 public: | |
1242 explicit EqualityStressTestPolicy(EqualityStressTest* aux) : aux_(aux) {} | |
1243 ~EqualityStressTestPolicy() override {} | |
1244 | |
1245 ResultExpr EvaluateSyscall(int sysno) const override { | |
1246 return aux_->Policy(sysno); | |
1247 } | |
1248 | |
1249 private: | |
1250 EqualityStressTest* aux_; | |
1251 | |
1252 DISALLOW_COPY_AND_ASSIGN(EqualityStressTestPolicy); | |
1253 }; | |
1254 | |
1255 BPF_TEST(SandboxBPF, | |
1256 EqualityTests, | |
1257 EqualityStressTestPolicy, | |
1258 EqualityStressTest /* (*BPF_AUX) */) { | |
1259 BPF_AUX->VerifyFilter(); | |
1260 } | |
1261 | |
1262 class EqualityArgumentWidthPolicy : public Policy { | |
1263 public: | |
1264 EqualityArgumentWidthPolicy() {} | |
1265 ~EqualityArgumentWidthPolicy() override {} | |
1266 | |
1267 ResultExpr EvaluateSyscall(int sysno) const override; | |
1268 | |
1269 private: | |
1270 DISALLOW_COPY_AND_ASSIGN(EqualityArgumentWidthPolicy); | |
1271 }; | |
1272 | |
1273 ResultExpr EqualityArgumentWidthPolicy::EvaluateSyscall(int sysno) const { | |
1274 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1275 if (sysno == __NR_uname) { | |
1276 const Arg<int> option(0); | |
1277 const Arg<uint32_t> arg32(1); | |
1278 const Arg<uint64_t> arg64(1); | |
1279 return Switch(option) | |
1280 .Case(0, If(arg32 == 0x55555555, Error(1)).Else(Error(2))) | |
1281 #if __SIZEOF_POINTER__ > 4 | |
1282 .Case(1, If(arg64 == 0x55555555AAAAAAAAULL, Error(1)).Else(Error(2))) | |
1283 #endif | |
1284 .Default(Error(3)); | |
1285 } | |
1286 return Allow(); | |
1287 } | |
1288 | |
1289 BPF_TEST_C(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) { | |
1290 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x55555555) == -1); | |
1291 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0xAAAAAAAA) == -2); | |
1292 #if __SIZEOF_POINTER__ > 4 | |
1293 // On 32bit machines, there is no way to pass a 64bit argument through the | |
1294 // syscall interface. So, we have to skip the part of the test that requires | |
1295 // 64bit arguments. | |
1296 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1); | |
1297 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555500000000ULL) == -2); | |
1298 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555511111111ULL) == -2); | |
1299 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2); | |
1300 #endif | |
1301 } | |
1302 | |
1303 #if __SIZEOF_POINTER__ > 4 | |
1304 // On 32bit machines, there is no way to pass a 64bit argument through the | |
1305 // syscall interface. So, we have to skip the part of the test that requires | |
1306 // 64bit arguments. | |
1307 BPF_DEATH_TEST_C(SandboxBPF, | |
1308 EqualityArgumentUnallowed64bit, | |
1309 DEATH_MESSAGE("Unexpected 64bit argument detected"), | |
1310 EqualityArgumentWidthPolicy) { | |
1311 Syscall::Call(__NR_uname, 0, 0x5555555555555555ULL); | |
1312 } | |
1313 #endif | |
1314 | |
1315 class EqualityWithNegativeArgumentsPolicy : public Policy { | |
1316 public: | |
1317 EqualityWithNegativeArgumentsPolicy() {} | |
1318 ~EqualityWithNegativeArgumentsPolicy() override {} | |
1319 | |
1320 ResultExpr EvaluateSyscall(int sysno) const override { | |
1321 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1322 if (sysno == __NR_uname) { | |
1323 // TODO(mdempsky): This currently can't be Arg<int> because then | |
1324 // 0xFFFFFFFF will be treated as a (signed) int, and then when | |
1325 // Arg::EqualTo casts it to uint64_t, it will be sign extended. | |
1326 const Arg<unsigned> arg(0); | |
1327 return If(arg == 0xFFFFFFFF, Error(1)).Else(Error(2)); | |
1328 } | |
1329 return Allow(); | |
1330 } | |
1331 | |
1332 private: | |
1333 DISALLOW_COPY_AND_ASSIGN(EqualityWithNegativeArgumentsPolicy); | |
1334 }; | |
1335 | |
1336 BPF_TEST_C(SandboxBPF, | |
1337 EqualityWithNegativeArguments, | |
1338 EqualityWithNegativeArgumentsPolicy) { | |
1339 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF) == -1); | |
1340 BPF_ASSERT(Syscall::Call(__NR_uname, -1) == -1); | |
1341 BPF_ASSERT(Syscall::Call(__NR_uname, -1LL) == -1); | |
1342 } | |
1343 | |
1344 #if __SIZEOF_POINTER__ > 4 | |
1345 BPF_DEATH_TEST_C(SandboxBPF, | |
1346 EqualityWithNegative64bitArguments, | |
1347 DEATH_MESSAGE("Unexpected 64bit argument detected"), | |
1348 EqualityWithNegativeArgumentsPolicy) { | |
1349 // When expecting a 32bit system call argument, we look at the MSB of the | |
1350 // 64bit value and allow both "0" and "-1". But the latter is allowed only | |
1351 // if the LSB was negative. So, this death test should error out. | |
1352 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF00000000LL) == -1); | |
1353 } | |
1354 #endif | |
1355 | |
1356 class AllBitTestPolicy : public Policy { | |
1357 public: | |
1358 AllBitTestPolicy() {} | |
1359 ~AllBitTestPolicy() override {} | |
1360 | |
1361 ResultExpr EvaluateSyscall(int sysno) const override; | |
1362 | |
1363 private: | |
1364 static ResultExpr HasAllBits32(uint32_t bits); | |
1365 static ResultExpr HasAllBits64(uint64_t bits); | |
1366 | |
1367 DISALLOW_COPY_AND_ASSIGN(AllBitTestPolicy); | |
1368 }; | |
1369 | |
1370 ResultExpr AllBitTestPolicy::HasAllBits32(uint32_t bits) { | |
1371 if (bits == 0) { | |
1372 return Error(1); | |
1373 } | |
1374 const Arg<uint32_t> arg(1); | |
1375 return If((arg & bits) == bits, Error(1)).Else(Error(0)); | |
1376 } | |
1377 | |
1378 ResultExpr AllBitTestPolicy::HasAllBits64(uint64_t bits) { | |
1379 if (bits == 0) { | |
1380 return Error(1); | |
1381 } | |
1382 const Arg<uint64_t> arg(1); | |
1383 return If((arg & bits) == bits, Error(1)).Else(Error(0)); | |
1384 } | |
1385 | |
1386 ResultExpr AllBitTestPolicy::EvaluateSyscall(int sysno) const { | |
1387 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1388 // Test masked-equality cases that should trigger the "has all bits" | |
1389 // peephole optimizations. We try to find bitmasks that could conceivably | |
1390 // touch corner cases. | |
1391 // For all of these tests, we override uname(). We can make do with | |
1392 // a single system call number, as we use the first system call argument to | |
1393 // select the different bit masks that we want to test against. | |
1394 if (sysno == __NR_uname) { | |
1395 const Arg<int> option(0); | |
1396 return Switch(option) | |
1397 .Case(0, HasAllBits32(0x0)) | |
1398 .Case(1, HasAllBits32(0x1)) | |
1399 .Case(2, HasAllBits32(0x3)) | |
1400 .Case(3, HasAllBits32(0x80000000)) | |
1401 #if __SIZEOF_POINTER__ > 4 | |
1402 .Case(4, HasAllBits64(0x0)) | |
1403 .Case(5, HasAllBits64(0x1)) | |
1404 .Case(6, HasAllBits64(0x3)) | |
1405 .Case(7, HasAllBits64(0x80000000)) | |
1406 .Case(8, HasAllBits64(0x100000000ULL)) | |
1407 .Case(9, HasAllBits64(0x300000000ULL)) | |
1408 .Case(10, HasAllBits64(0x100000001ULL)) | |
1409 #endif | |
1410 .Default(Kill("Invalid test case number")); | |
1411 } | |
1412 return Allow(); | |
1413 } | |
1414 | |
1415 // Define a macro that performs tests using our test policy. | |
1416 // NOTE: Not all of the arguments in this macro are actually used! | |
1417 // They are here just to serve as documentation of the conditions | |
1418 // implemented in the test policy. | |
1419 // Most notably, "op" and "mask" are unused by the macro. If you want | |
1420 // to make changes to these values, you will have to edit the | |
1421 // test policy instead. | |
1422 #define BITMASK_TEST(testcase, arg, op, mask, expected_value) \ | |
1423 BPF_ASSERT(Syscall::Call(__NR_uname, (testcase), (arg)) == (expected_value)) | |
1424 | |
1425 // Our uname() system call returns ErrorCode(1) for success and | |
1426 // ErrorCode(0) for failure. Syscall::Call() turns this into an | |
1427 // exit code of -1 or 0. | |
1428 #define EXPECT_FAILURE 0 | |
1429 #define EXPECT_SUCCESS -1 | |
1430 | |
1431 // A couple of our tests behave differently on 32bit and 64bit systems, as | |
1432 // there is no way for a 32bit system call to pass in a 64bit system call | |
1433 // argument "arg". | |
1434 // We expect these tests to succeed on 64bit systems, but to fail on 32bit | |
1435 // systems. | |
1436 #define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE) | |
1437 BPF_TEST_C(SandboxBPF, AllBitTests, AllBitTestPolicy) { | |
1438 // 32bit test: all of 0x0 (should always be true) | |
1439 BITMASK_TEST( 0, 0, ALLBITS32, 0, EXPECT_SUCCESS); | |
1440 BITMASK_TEST( 0, 1, ALLBITS32, 0, EXPECT_SUCCESS); | |
1441 BITMASK_TEST( 0, 3, ALLBITS32, 0, EXPECT_SUCCESS); | |
1442 BITMASK_TEST( 0, 0xFFFFFFFFU, ALLBITS32, 0, EXPECT_SUCCESS); | |
1443 BITMASK_TEST( 0, -1LL, ALLBITS32, 0, EXPECT_SUCCESS); | |
1444 | |
1445 // 32bit test: all of 0x1 | |
1446 BITMASK_TEST( 1, 0, ALLBITS32, 0x1, EXPECT_FAILURE); | |
1447 BITMASK_TEST( 1, 1, ALLBITS32, 0x1, EXPECT_SUCCESS); | |
1448 BITMASK_TEST( 1, 2, ALLBITS32, 0x1, EXPECT_FAILURE); | |
1449 BITMASK_TEST( 1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS); | |
1450 | |
1451 // 32bit test: all of 0x3 | |
1452 BITMASK_TEST( 2, 0, ALLBITS32, 0x3, EXPECT_FAILURE); | |
1453 BITMASK_TEST( 2, 1, ALLBITS32, 0x3, EXPECT_FAILURE); | |
1454 BITMASK_TEST( 2, 2, ALLBITS32, 0x3, EXPECT_FAILURE); | |
1455 BITMASK_TEST( 2, 3, ALLBITS32, 0x3, EXPECT_SUCCESS); | |
1456 BITMASK_TEST( 2, 7, ALLBITS32, 0x3, EXPECT_SUCCESS); | |
1457 | |
1458 // 32bit test: all of 0x80000000 | |
1459 BITMASK_TEST( 3, 0, ALLBITS32, 0x80000000, EXPECT_FAILURE); | |
1460 BITMASK_TEST( 3, 0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE); | |
1461 BITMASK_TEST( 3, 0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS); | |
1462 BITMASK_TEST( 3, 0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS); | |
1463 BITMASK_TEST( 3, -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS); | |
1464 | |
1465 #if __SIZEOF_POINTER__ > 4 | |
1466 // 64bit test: all of 0x0 (should always be true) | |
1467 BITMASK_TEST( 4, 0, ALLBITS64, 0, EXPECT_SUCCESS); | |
1468 BITMASK_TEST( 4, 1, ALLBITS64, 0, EXPECT_SUCCESS); | |
1469 BITMASK_TEST( 4, 3, ALLBITS64, 0, EXPECT_SUCCESS); | |
1470 BITMASK_TEST( 4, 0xFFFFFFFFU, ALLBITS64, 0, EXPECT_SUCCESS); | |
1471 BITMASK_TEST( 4, 0x100000000LL, ALLBITS64, 0, EXPECT_SUCCESS); | |
1472 BITMASK_TEST( 4, 0x300000000LL, ALLBITS64, 0, EXPECT_SUCCESS); | |
1473 BITMASK_TEST( 4,0x8000000000000000LL, ALLBITS64, 0, EXPECT_SUCCESS); | |
1474 BITMASK_TEST( 4, -1LL, ALLBITS64, 0, EXPECT_SUCCESS); | |
1475 | |
1476 // 64bit test: all of 0x1 | |
1477 BITMASK_TEST( 5, 0, ALLBITS64, 1, EXPECT_FAILURE); | |
1478 BITMASK_TEST( 5, 1, ALLBITS64, 1, EXPECT_SUCCESS); | |
1479 BITMASK_TEST( 5, 2, ALLBITS64, 1, EXPECT_FAILURE); | |
1480 BITMASK_TEST( 5, 3, ALLBITS64, 1, EXPECT_SUCCESS); | |
1481 BITMASK_TEST( 5, 0x100000000LL, ALLBITS64, 1, EXPECT_FAILURE); | |
1482 BITMASK_TEST( 5, 0x100000001LL, ALLBITS64, 1, EXPECT_SUCCESS); | |
1483 BITMASK_TEST( 5, 0x100000002LL, ALLBITS64, 1, EXPECT_FAILURE); | |
1484 BITMASK_TEST( 5, 0x100000003LL, ALLBITS64, 1, EXPECT_SUCCESS); | |
1485 | |
1486 // 64bit test: all of 0x3 | |
1487 BITMASK_TEST( 6, 0, ALLBITS64, 3, EXPECT_FAILURE); | |
1488 BITMASK_TEST( 6, 1, ALLBITS64, 3, EXPECT_FAILURE); | |
1489 BITMASK_TEST( 6, 2, ALLBITS64, 3, EXPECT_FAILURE); | |
1490 BITMASK_TEST( 6, 3, ALLBITS64, 3, EXPECT_SUCCESS); | |
1491 BITMASK_TEST( 6, 7, ALLBITS64, 3, EXPECT_SUCCESS); | |
1492 BITMASK_TEST( 6, 0x100000000LL, ALLBITS64, 3, EXPECT_FAILURE); | |
1493 BITMASK_TEST( 6, 0x100000001LL, ALLBITS64, 3, EXPECT_FAILURE); | |
1494 BITMASK_TEST( 6, 0x100000002LL, ALLBITS64, 3, EXPECT_FAILURE); | |
1495 BITMASK_TEST( 6, 0x100000003LL, ALLBITS64, 3, EXPECT_SUCCESS); | |
1496 BITMASK_TEST( 6, 0x100000007LL, ALLBITS64, 3, EXPECT_SUCCESS); | |
1497 | |
1498 // 64bit test: all of 0x80000000 | |
1499 BITMASK_TEST( 7, 0, ALLBITS64, 0x80000000, EXPECT_FAILURE); | |
1500 BITMASK_TEST( 7, 0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE); | |
1501 BITMASK_TEST( 7, 0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1502 BITMASK_TEST( 7, 0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1503 BITMASK_TEST( 7, -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1504 BITMASK_TEST( 7, 0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE); | |
1505 BITMASK_TEST( 7, 0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE); | |
1506 BITMASK_TEST( 7, 0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1507 BITMASK_TEST( 7, 0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1508 BITMASK_TEST( 7, -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS); | |
1509 | |
1510 // 64bit test: all of 0x100000000 | |
1511 BITMASK_TEST( 8, 0x000000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE); | |
1512 BITMASK_TEST( 8, 0x100000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS); | |
1513 BITMASK_TEST( 8, 0x200000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE); | |
1514 BITMASK_TEST( 8, 0x300000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS); | |
1515 BITMASK_TEST( 8, 0x000000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE); | |
1516 BITMASK_TEST( 8, 0x100000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS); | |
1517 BITMASK_TEST( 8, 0x200000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE); | |
1518 BITMASK_TEST( 8, 0x300000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS); | |
1519 | |
1520 // 64bit test: all of 0x300000000 | |
1521 BITMASK_TEST( 9, 0x000000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1522 BITMASK_TEST( 9, 0x100000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1523 BITMASK_TEST( 9, 0x200000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1524 BITMASK_TEST( 9, 0x300000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS); | |
1525 BITMASK_TEST( 9, 0x700000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS); | |
1526 BITMASK_TEST( 9, 0x000000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1527 BITMASK_TEST( 9, 0x100000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1528 BITMASK_TEST( 9, 0x200000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE); | |
1529 BITMASK_TEST( 9, 0x300000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS); | |
1530 BITMASK_TEST( 9, 0x700000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS); | |
1531 | |
1532 // 64bit test: all of 0x100000001 | |
1533 BITMASK_TEST(10, 0x000000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE); | |
1534 BITMASK_TEST(10, 0x000000001LL, ALLBITS64,0x100000001, EXPECT_FAILURE); | |
1535 BITMASK_TEST(10, 0x100000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE); | |
1536 BITMASK_TEST(10, 0x100000001LL, ALLBITS64,0x100000001, EXPT64_SUCCESS); | |
1537 BITMASK_TEST(10, 0xFFFFFFFFU, ALLBITS64,0x100000001, EXPECT_FAILURE); | |
1538 BITMASK_TEST(10, -1L, ALLBITS64,0x100000001, EXPT64_SUCCESS); | |
1539 #endif | |
1540 } | |
1541 | |
1542 class AnyBitTestPolicy : public Policy { | |
1543 public: | |
1544 AnyBitTestPolicy() {} | |
1545 ~AnyBitTestPolicy() override {} | |
1546 | |
1547 ResultExpr EvaluateSyscall(int sysno) const override; | |
1548 | |
1549 private: | |
1550 static ResultExpr HasAnyBits32(uint32_t); | |
1551 static ResultExpr HasAnyBits64(uint64_t); | |
1552 | |
1553 DISALLOW_COPY_AND_ASSIGN(AnyBitTestPolicy); | |
1554 }; | |
1555 | |
1556 ResultExpr AnyBitTestPolicy::HasAnyBits32(uint32_t bits) { | |
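  // An empty mask can never match: no value has "any" of no bits.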
1557 if (bits == 0) { | |
1558 return Error(0); | |
1559 } | |
1560 const Arg<uint32_t> arg(1); | |
1561 return If((arg & bits) != 0, Error(1)).Else(Error(0)); | |
1562 } | |
1563 | |
1564 ResultExpr AnyBitTestPolicy::HasAnyBits64(uint64_t bits) { | |
1565 if (bits == 0) { | |
1566 return Error(0); | |
1567 } | |
1568 const Arg<uint64_t> arg(1); | |
1569 return If((arg & bits) != 0, Error(1)).Else(Error(0)); | |
1570 } | |
1571 | |
1572 ResultExpr AnyBitTestPolicy::EvaluateSyscall(int sysno) const { | |
1573 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1574 // Test masked-equality cases that should trigger the "has any bits" | |
1575 // peephole optimizations. We try to find bitmasks that could conceivably | |
1576 // touch corner cases. | |
1577 // For all of these tests, we override uname(). We can make do with a | |
1578 // single system call number, as we use the first system call argument to | |
1579 // select the different bit masks that we want to test against. | |
1580 if (sysno == __NR_uname) { | |
1581 const Arg<int> option(0); | |
1582 return Switch(option) | |
1583 .Case(0, HasAnyBits32(0x0)) | |
1584 .Case(1, HasAnyBits32(0x1)) | |
1585 .Case(2, HasAnyBits32(0x3)) | |
1586 .Case(3, HasAnyBits32(0x80000000)) | |
1587 #if __SIZEOF_POINTER__ > 4 | |
1588 .Case(4, HasAnyBits64(0x0)) | |
1589 .Case(5, HasAnyBits64(0x1)) | |
1590 .Case(6, HasAnyBits64(0x3)) | |
1591 .Case(7, HasAnyBits64(0x80000000)) | |
1592 .Case(8, HasAnyBits64(0x100000000ULL)) | |
1593 .Case(9, HasAnyBits64(0x300000000ULL)) | |
1594 .Case(10, HasAnyBits64(0x100000001ULL)) | |
1595 #endif | |
1596 .Default(Kill("Invalid test case number")); | |
1597 } | |
1598 return Allow(); | |
1599 } | |
1600 | |
1601 BPF_TEST_C(SandboxBPF, AnyBitTests, AnyBitTestPolicy) { | |
1602 // 32bit test: any of 0x0 (should always be false) | |
1603 BITMASK_TEST( 0, 0, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1604 BITMASK_TEST( 0, 1, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1605 BITMASK_TEST( 0, 3, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1606 BITMASK_TEST( 0, 0xFFFFFFFFU, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1607 BITMASK_TEST( 0, -1LL, ANYBITS32, 0x0, EXPECT_FAILURE); | |
1608 | |
1609 // 32bit test: any of 0x1 | |
1610 BITMASK_TEST( 1, 0, ANYBITS32, 0x1, EXPECT_FAILURE); | |
1611 BITMASK_TEST( 1, 1, ANYBITS32, 0x1, EXPECT_SUCCESS); | |
1612 BITMASK_TEST( 1, 2, ANYBITS32, 0x1, EXPECT_FAILURE); | |
1613 BITMASK_TEST( 1, 3, ANYBITS32, 0x1, EXPECT_SUCCESS); | |
1614 | |
1615 // 32bit test: any of 0x3 | |
1616 BITMASK_TEST( 2, 0, ANYBITS32, 0x3, EXPECT_FAILURE); | |
1617 BITMASK_TEST( 2, 1, ANYBITS32, 0x3, EXPECT_SUCCESS); | |
1618 BITMASK_TEST( 2, 2, ANYBITS32, 0x3, EXPECT_SUCCESS); | |
1619 BITMASK_TEST( 2, 3, ANYBITS32, 0x3, EXPECT_SUCCESS); | |
1620 BITMASK_TEST( 2, 7, ANYBITS32, 0x3, EXPECT_SUCCESS); | |
1621 | |
1622 // 32bit test: any of 0x80000000 | |
1623 BITMASK_TEST( 3, 0, ANYBITS32, 0x80000000, EXPECT_FAILURE); | |
1624 BITMASK_TEST( 3, 0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE); | |
1625 BITMASK_TEST( 3, 0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS); | |
1626 BITMASK_TEST( 3, 0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS); | |
1627 BITMASK_TEST( 3, -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS); | |
1628 | |
1629 #if __SIZEOF_POINTER__ > 4 | |
1630 // 64bit test: any of 0x0 (should always be false) | |
1631 BITMASK_TEST( 4, 0, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1632 BITMASK_TEST( 4, 1, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1633 BITMASK_TEST( 4, 3, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1634 BITMASK_TEST( 4, 0xFFFFFFFFU, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1635 BITMASK_TEST( 4, 0x100000000LL, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1636 BITMASK_TEST( 4, 0x300000000LL, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1637 BITMASK_TEST( 4,0x8000000000000000LL, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1638 BITMASK_TEST( 4, -1LL, ANYBITS64, 0x0, EXPECT_FAILURE); | |
1639 | |
1640 // 64bit test: any of 0x1 | |
1641 BITMASK_TEST( 5, 0, ANYBITS64, 0x1, EXPECT_FAILURE); | |
1642 BITMASK_TEST( 5, 1, ANYBITS64, 0x1, EXPECT_SUCCESS); | |
1643 BITMASK_TEST( 5, 2, ANYBITS64, 0x1, EXPECT_FAILURE); | |
1644 BITMASK_TEST( 5, 3, ANYBITS64, 0x1, EXPECT_SUCCESS); | |
1645 BITMASK_TEST( 5, 0x100000001LL, ANYBITS64, 0x1, EXPECT_SUCCESS); | |
1646 BITMASK_TEST( 5, 0x100000000LL, ANYBITS64, 0x1, EXPECT_FAILURE); | |
1647 BITMASK_TEST( 5, 0x100000002LL, ANYBITS64, 0x1, EXPECT_FAILURE); | |
1648 BITMASK_TEST( 5, 0x100000003LL, ANYBITS64, 0x1, EXPECT_SUCCESS); | |
1649 | |
1650 // 64bit test: any of 0x3 | |
1651 BITMASK_TEST( 6, 0, ANYBITS64, 0x3, EXPECT_FAILURE); | |
1652 BITMASK_TEST( 6, 1, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1653 BITMASK_TEST( 6, 2, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1654 BITMASK_TEST( 6, 3, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1655 BITMASK_TEST( 6, 7, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1656 BITMASK_TEST( 6, 0x100000000LL, ANYBITS64, 0x3, EXPECT_FAILURE); | |
1657 BITMASK_TEST( 6, 0x100000001LL, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1658 BITMASK_TEST( 6, 0x100000002LL, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1659 BITMASK_TEST( 6, 0x100000003LL, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1660 BITMASK_TEST( 6, 0x100000007LL, ANYBITS64, 0x3, EXPECT_SUCCESS); | |
1661 | |
1662 // 64bit test: any of 0x80000000 | |
1663 BITMASK_TEST( 7, 0, ANYBITS64, 0x80000000, EXPECT_FAILURE); | |
1664 BITMASK_TEST( 7, 0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE); | |
1665 BITMASK_TEST( 7, 0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1666 BITMASK_TEST( 7, 0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1667 BITMASK_TEST( 7, -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1668 BITMASK_TEST( 7, 0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE); | |
1669 BITMASK_TEST( 7, 0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE); | |
1670 BITMASK_TEST( 7, 0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1671 BITMASK_TEST( 7, 0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1672 BITMASK_TEST( 7, -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS); | |
1673 | |
1674 // 64bit test: any of 0x100000000 | |
1675 BITMASK_TEST( 8, 0x000000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE); | |
1676 BITMASK_TEST( 8, 0x100000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS); | |
1677 BITMASK_TEST( 8, 0x200000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE); | |
1678 BITMASK_TEST( 8, 0x300000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS); | |
1679 BITMASK_TEST( 8, 0x000000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE); | |
1680 BITMASK_TEST( 8, 0x100000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS); | |
1681 BITMASK_TEST( 8, 0x200000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE); | |
1682 BITMASK_TEST( 8, 0x300000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS); | |
1683 | |
1684 // 64bit test: any of 0x300000000 | |
1685 BITMASK_TEST( 9, 0x000000000LL, ANYBITS64,0x300000000, EXPECT_FAILURE); | |
1686 BITMASK_TEST( 9, 0x100000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1687 BITMASK_TEST( 9, 0x200000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1688 BITMASK_TEST( 9, 0x300000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1689 BITMASK_TEST( 9, 0x700000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1690 BITMASK_TEST( 9, 0x000000001LL, ANYBITS64,0x300000000, EXPECT_FAILURE); | |
1691 BITMASK_TEST( 9, 0x100000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1692 BITMASK_TEST( 9, 0x200000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1693 BITMASK_TEST( 9, 0x300000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1694 BITMASK_TEST( 9, 0x700000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS); | |
1695 | |
1696 // 64bit test: any of 0x100000001 | |
1697 BITMASK_TEST( 10, 0x000000000LL, ANYBITS64,0x100000001, EXPECT_FAILURE); | |
1698 BITMASK_TEST( 10, 0x000000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS); | |
1699 BITMASK_TEST( 10, 0x100000000LL, ANYBITS64,0x100000001, EXPT64_SUCCESS); | |
1700 BITMASK_TEST( 10, 0x100000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS); | |
1701 BITMASK_TEST( 10, 0xFFFFFFFFU, ANYBITS64,0x100000001, EXPECT_SUCCESS); | |
1702 BITMASK_TEST( 10, -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS); | |
1703 #endif | |
1704 } | |
1705 | |
1706 class MaskedEqualTestPolicy : public Policy { | |
1707 public: | |
1708 MaskedEqualTestPolicy() {} | |
1709 ~MaskedEqualTestPolicy() override {} | |
1710 | |
1711 ResultExpr EvaluateSyscall(int sysno) const override; | |
1712 | |
1713 private: | |
1714 static ResultExpr MaskedEqual32(uint32_t mask, uint32_t value); | |
1715 static ResultExpr MaskedEqual64(uint64_t mask, uint64_t value); | |
1716 | |
1717 DISALLOW_COPY_AND_ASSIGN(MaskedEqualTestPolicy); | |
1718 }; | |
1719 | |
1720 ResultExpr MaskedEqualTestPolicy::MaskedEqual32(uint32_t mask, uint32_t value) { | |
1721 const Arg<uint32_t> arg(1); | |
1722 return If((arg & mask) == value, Error(1)).Else(Error(0)); | |
1723 } | |
1724 | |
1725 ResultExpr MaskedEqualTestPolicy::MaskedEqual64(uint64_t mask, uint64_t value) { | |
1726 const Arg<uint64_t> arg(1); | |
1727 return If((arg & mask) == value, Error(1)).Else(Error(0)); | |
1728 } | |
1729 | |
1730 ResultExpr MaskedEqualTestPolicy::EvaluateSyscall(int sysno) const { | |
1731 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1732 | |
1733 if (sysno == __NR_uname) { | |
1734 const Arg<int> option(0); | |
1735 return Switch(option) | |
1736 .Case(0, MaskedEqual32(0x00ff00ff, 0x005500aa)) | |
1737 #if __SIZEOF_POINTER__ > 4 | |
1738 .Case(1, MaskedEqual64(0x00ff00ff00000000, 0x005500aa00000000)) | |
1739 .Case(2, MaskedEqual64(0x00ff00ff00ff00ff, 0x005500aa005500aa)) | |
1740 #endif | |
1741 .Default(Kill("Invalid test case number")); | |
1742 } | |
1743 | |
1744 return Allow(); | |
1745 } | |
1746 | |
1747 #define MASKEQ_TEST(rulenum, arg, expected_result) \ | |
1748 BPF_ASSERT(Syscall::Call(__NR_uname, (rulenum), (arg)) == (expected_result)) | |
1749 | |
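// Illustrative sketch of the masked-equality semantics exercised below
// (mirrors rule 0 of MaskedEqualTestPolicy; the extra values are examples,
// not additional test cases): MaskedEqual32(0x00ff00ff, 0x005500aa) accepts
// exactly the arguments whose least-significant byte is 0xaa and whose
// byte 2 is 0x55, while the other two bytes are ignored.
//
//   MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS);  // exact match
//   MASKEQ_TEST(0, 0xFF5501aa, EXPECT_SUCCESS);  // ignored bytes may differ
//   MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE);  // low byte mismatch
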
1750 BPF_TEST_C(SandboxBPF, MaskedEqualTests, MaskedEqualTestPolicy) { | |
1751 // Allowed: 0x__55__aa | |
1752 MASKEQ_TEST(0, 0x00000000, EXPECT_FAILURE); | |
1753 MASKEQ_TEST(0, 0x00000001, EXPECT_FAILURE); | |
1754 MASKEQ_TEST(0, 0x00000003, EXPECT_FAILURE); | |
1755 MASKEQ_TEST(0, 0x00000100, EXPECT_FAILURE); | |
1756 MASKEQ_TEST(0, 0x00000300, EXPECT_FAILURE); | |
1757 MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS); | |
1758 MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE); | |
1759 MASKEQ_TEST(0, 0x005600aa, EXPECT_FAILURE); | |
1760 MASKEQ_TEST(0, 0x005501aa, EXPECT_SUCCESS); | |
1761 MASKEQ_TEST(0, 0x005503aa, EXPECT_SUCCESS); | |
1762 MASKEQ_TEST(0, 0x555500aa, EXPECT_SUCCESS); | |
1763 MASKEQ_TEST(0, 0xaa5500aa, EXPECT_SUCCESS); | |
1764 | |
1765 #if __SIZEOF_POINTER__ > 4 | |
1766 // Allowed: 0x__55__aa________ | |
1767 MASKEQ_TEST(1, 0x0000000000000000, EXPECT_FAILURE); | |
1768 MASKEQ_TEST(1, 0x0000000000000010, EXPECT_FAILURE); | |
1769 MASKEQ_TEST(1, 0x0000000000000050, EXPECT_FAILURE); | |
1770 MASKEQ_TEST(1, 0x0000000100000000, EXPECT_FAILURE); | |
1771 MASKEQ_TEST(1, 0x0000000300000000, EXPECT_FAILURE); | |
1772 MASKEQ_TEST(1, 0x0000010000000000, EXPECT_FAILURE); | |
1773 MASKEQ_TEST(1, 0x0000030000000000, EXPECT_FAILURE); | |
1774 MASKEQ_TEST(1, 0x005500aa00000000, EXPECT_SUCCESS); | |
1775 MASKEQ_TEST(1, 0x005500ab00000000, EXPECT_FAILURE); | |
1776 MASKEQ_TEST(1, 0x005600aa00000000, EXPECT_FAILURE); | |
1777 MASKEQ_TEST(1, 0x005501aa00000000, EXPECT_SUCCESS); | |
1778 MASKEQ_TEST(1, 0x005503aa00000000, EXPECT_SUCCESS); | |
1779 MASKEQ_TEST(1, 0x555500aa00000000, EXPECT_SUCCESS); | |
1780 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS); | |
1781 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS); | |
1782 MASKEQ_TEST(1, 0xaa5500aa0000cafe, EXPECT_SUCCESS); | |
1783 | |
1784 // Allowed: 0x__55__aa__55__aa | |
1785 MASKEQ_TEST(2, 0x0000000000000000, EXPECT_FAILURE); | |
1786 MASKEQ_TEST(2, 0x0000000000000010, EXPECT_FAILURE); | |
1787 MASKEQ_TEST(2, 0x0000000000000050, EXPECT_FAILURE); | |
1788 MASKEQ_TEST(2, 0x0000000100000000, EXPECT_FAILURE); | |
1789 MASKEQ_TEST(2, 0x0000000300000000, EXPECT_FAILURE); | |
1790 MASKEQ_TEST(2, 0x0000010000000000, EXPECT_FAILURE); | |
1791 MASKEQ_TEST(2, 0x0000030000000000, EXPECT_FAILURE); | |
1792 MASKEQ_TEST(2, 0x00000000005500aa, EXPECT_FAILURE); | |
1793 MASKEQ_TEST(2, 0x005500aa00000000, EXPECT_FAILURE); | |
1794 MASKEQ_TEST(2, 0x005500aa005500aa, EXPECT_SUCCESS); | |
1795 MASKEQ_TEST(2, 0x005500aa005700aa, EXPECT_FAILURE); | |
1796 MASKEQ_TEST(2, 0x005700aa005500aa, EXPECT_FAILURE); | |
1797 MASKEQ_TEST(2, 0x005500aa004500aa, EXPECT_FAILURE); | |
1798 MASKEQ_TEST(2, 0x004500aa005500aa, EXPECT_FAILURE); | |
1799 MASKEQ_TEST(2, 0x005512aa005500aa, EXPECT_SUCCESS); | |
1800 MASKEQ_TEST(2, 0x005500aa005534aa, EXPECT_SUCCESS); | |
1801 MASKEQ_TEST(2, 0xff5500aa0055ffaa, EXPECT_SUCCESS); | |
1802 #endif | |
1803 } | |
1804 | |
1805 intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) { | |
1806 if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) { | |
1807 // We expect to get called for an attempt to fork(). No need to log that | |
1808 // call. But if we ever get called for anything else, we want to verbosely | |
1809 // print as much information as possible. | |
1810 const char* msg = (const char*)aux; | |
1811 printf( | |
1812 "Clone() was called with unexpected arguments\n" | |
1813 " nr: %d\n" | |
1814 " 1: 0x%llX\n" | |
1815 " 2: 0x%llX\n" | |
1816 " 3: 0x%llX\n" | |
1817 " 4: 0x%llX\n" | |
1818 " 5: 0x%llX\n" | |
1819 " 6: 0x%llX\n" | |
1820 "%s\n", | |
1821 args.nr, | |
1822 (long long)args.args[0], | |
1823 (long long)args.args[1], | |
1824 (long long)args.args[2], | |
1825 (long long)args.args[3], | |
1826 (long long)args.args[4], | |
1827 (long long)args.args[5], | |
1828 msg); | |
1829 } | |
1830 return -EPERM; | |
1831 } | |
1832 | |
1833 class PthreadPolicyEquality : public Policy { | |
1834 public: | |
1835 PthreadPolicyEquality() {} | |
1836 ~PthreadPolicyEquality() override {} | |
1837 | |
1838 ResultExpr EvaluateSyscall(int sysno) const override; | |
1839 | |
1840 private: | |
1841 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyEquality); | |
1842 }; | |
1843 | |
1844 ResultExpr PthreadPolicyEquality::EvaluateSyscall(int sysno) const { | |
1845 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1846 // This policy allows creating threads with pthread_create(). But it | |
1847 // doesn't allow any other uses of clone(). Most notably, it does not | |
1848 // allow callers to implement fork() or vfork() by passing suitable flags | |
1849 // to the clone() system call. | |
1850 if (sysno == __NR_clone) { | |
1851 // We have seen two different valid combinations of flags. Glibc | |
1852 // uses the more modern flags, sets the TLS from the call to clone(), and | |
1853 // uses futexes to monitor threads. Android's C run-time library doesn't | |
1854 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED. | |
1855 // More recent versions of Android don't set CLONE_DETACHED anymore, so | |
1856 // the last case accounts for that. | |
1857 // The following policy is very strict. It only allows the exact masks | |
1858 // that we have seen in known implementations. It is probably somewhat | |
1859 // stricter than what we would want to do. | |
1860 const uint64_t kGlibcCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES | | |
1861 CLONE_SIGHAND | CLONE_THREAD | | |
1862 CLONE_SYSVSEM | CLONE_SETTLS | | |
1863 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID; | |
1864 const uint64_t kBaseAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES | | |
1865 CLONE_SIGHAND | CLONE_THREAD | | |
1866 CLONE_SYSVSEM; | |
1867 const Arg<unsigned long> flags(0); | |
1868 return If(flags == kGlibcCloneMask || | |
1869 flags == (kBaseAndroidCloneMask | CLONE_DETACHED) || | |
1870 flags == kBaseAndroidCloneMask, | |
1871 Allow()).Else(Trap(PthreadTrapHandler, "Unknown mask")); | |
1872 } | |
1873 | |
1874 return Allow(); | |
1875 } | |
1876 | |
1877 class PthreadPolicyBitMask : public Policy { | |
1878 public: | |
1879 PthreadPolicyBitMask() {} | |
1880 ~PthreadPolicyBitMask() override {} | |
1881 | |
1882 ResultExpr EvaluateSyscall(int sysno) const override; | |
1883 | |
1884 private: | |
1885 static BoolExpr HasAnyBits(const Arg<unsigned long>& arg, unsigned long bits); | |
1886 static BoolExpr HasAllBits(const Arg<unsigned long>& arg, unsigned long bits); | |
1887 | |
1888 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyBitMask); | |
1889 }; | |
1890 | |
1891 BoolExpr PthreadPolicyBitMask::HasAnyBits(const Arg<unsigned long>& arg, | |
1892 unsigned long bits) { | |
1893 return (arg & bits) != 0; | |
1894 } | |
1895 | |
1896 BoolExpr PthreadPolicyBitMask::HasAllBits(const Arg<unsigned long>& arg, | |
1897 unsigned long bits) { | |
1898 return (arg & bits) == bits; | |
1899 } | |
1900 | |
1901 ResultExpr PthreadPolicyBitMask::EvaluateSyscall(int sysno) const { | |
1902 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
1903 // This policy allows creating threads with pthread_create(). But it | |
1904 // doesn't allow any other uses of clone(). Most notably, it does not | |
1905 // allow callers to implement fork() or vfork() by passing suitable flags | |
1906 // to the clone() system call. | |
1907 if (sysno == __NR_clone) { | |
1908 // We have seen two different valid combinations of flags. Glibc | |
1909 // uses the more modern flags, sets the TLS from the call to clone(), and | |
1910 // uses futexes to monitor threads. Android's C run-time library doesn't | |
1911 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED. | |
1912 // The following policy allows for either combination of flags, but it | |
1913 // is generally a little more conservative than strictly necessary. We | |
1914 // err on the side of being safe rather than sorry. | |
1915 // Very noticeably though, we disallow fork() (which is often just a | |
1916 // wrapper around clone()). | |
1917 const unsigned long kMandatoryFlags = CLONE_VM | CLONE_FS | CLONE_FILES | | |
1918 CLONE_SIGHAND | CLONE_THREAD | | |
1919 CLONE_SYSVSEM; | |
1920 const unsigned long kFutexFlags = | |
1921 CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID; | |
1922 const unsigned long kNoopFlags = CLONE_DETACHED; | |
1923 const unsigned long kKnownFlags = | |
1924 kMandatoryFlags | kFutexFlags | kNoopFlags; | |
1925 | |
1926 const Arg<unsigned long> flags(0); | |
1927 return If(HasAnyBits(flags, ~kKnownFlags), | |
1928 Trap(PthreadTrapHandler, "Unexpected CLONE_XXX flag found")) | |
1929 .ElseIf(!HasAllBits(flags, kMandatoryFlags), | |
1930 Trap(PthreadTrapHandler, | |
1931 "Missing mandatory CLONE_XXX flags " | |
1932 "when creating new thread")) | |
1933 .ElseIf( | |
1934 !HasAllBits(flags, kFutexFlags) && HasAnyBits(flags, kFutexFlags), | |
1935 Trap(PthreadTrapHandler, | |
1936 "Must set either all or none of the TLS and futex bits in " | |
1937 "call to clone()")) | |
1938 .Else(Allow()); | |
1939 } | |
1940 | |
1941 return Allow(); | |
1942 } | |
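
// Illustrative walk through the decision chain above (the values mirror the
// masks defined in EvaluateSyscall(); this is a reading aid, not extra
// policy):
//
//   flags == kMandatoryFlags                  -> Allow()  (Android-style)
//   flags == kMandatoryFlags | kFutexFlags    -> Allow()  (glibc-style)
//   flags == kMandatoryFlags | CLONE_SETTLS   -> Trap()   (partial futex bits)
//   flags == CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD
//                                             -> Trap()   (fork()-style clone)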
1943 | |
1944 static void* ThreadFnc(void* arg) { | |
1945 ++*reinterpret_cast<int*>(arg); | |
1946 Syscall::Call(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0); | |
1947 return NULL; | |
1948 } | |
1949 | |
1950 static void PthreadTest() { | |
1951 // Attempt to start a joinable thread. This should succeed. | |
1952 pthread_t thread; | |
1953 int thread_ran = 0; | |
1954 BPF_ASSERT(!pthread_create(&thread, NULL, ThreadFnc, &thread_ran)); | |
1955 BPF_ASSERT(!pthread_join(thread, NULL)); | |
1956 BPF_ASSERT(thread_ran); | |
1957 | |
1958 // Attempt to start a detached thread. This should succeed. | |
1959 thread_ran = 0; | |
1960 pthread_attr_t attr; | |
1961 BPF_ASSERT(!pthread_attr_init(&attr)); | |
1962 BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); | |
1963 BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran)); | |
1964 BPF_ASSERT(!pthread_attr_destroy(&attr)); | |
1965 while (Syscall::Call(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) == | |
1966 -EINTR) { | |
1967 } | |
1968 BPF_ASSERT(thread_ran); | |
1969 | |
1970 // Attempt to fork() a process using clone(). This should fail. We use the | |
1971 // same flags that glibc uses when calling fork(). But we don't actually | |
1972 // try calling the fork() implementation in the C run-time library, as | |
1973 // run-time libraries other than glibc might call __NR_fork instead of | |
1974 // __NR_clone, and that would introduce a bogus test failure. | |
1975 int pid; | |
1976 BPF_ASSERT(Syscall::Call(__NR_clone, | |
1977 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD, | |
1978 0, | |
1979 0, | |
1980 &pid) == -EPERM); | |
1981 } | |
1982 | |
1983 BPF_TEST_C(SandboxBPF, PthreadEquality, PthreadPolicyEquality) { | |
1984 PthreadTest(); | |
1985 } | |
1986 | |
1987 BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) { | |
1988 PthreadTest(); | |
1989 } | |
1990 | |
1991 // libc might not define these even though the kernel supports them. | |
1992 #ifndef PTRACE_O_TRACESECCOMP | |
1993 #define PTRACE_O_TRACESECCOMP 0x00000080 | |
1994 #endif | |
1995 | |
1996 #ifdef PTRACE_EVENT_SECCOMP | |
1997 #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP) | |
1998 #else | |
1999 // When Debian/Ubuntu backported seccomp-bpf support into earlier kernels, they | |
2000 // changed the value of PTRACE_EVENT_SECCOMP from 7 to 8, since 7 was taken by | |
2001 // PTRACE_EVENT_STOP (upstream chose to renumber PTRACE_EVENT_STOP to 128). If | |
2002 // PTRACE_EVENT_SECCOMP isn't defined, we have no choice but to consider both | |
2003 // values here. | |
2004 #define IS_SECCOMP_EVENT(status) ((status >> 16) == 7 || (status >> 16) == 8) | |
2005 #endif | |
2006 | |
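// Illustrative use of IS_SECCOMP_EVENT() (this mirrors the tracer loop in
// the SeccompRetTrace test below; it is a sketch, not additional test code):
//
//   int status;
//   BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1);
//   if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP &&
//       IS_SECCOMP_EVENT(status)) {
//     // The tracee is stopped on a seccomp event; the event payload can be
//     // read with PTRACE_GETEVENTMSG and registers with PTRACE_GETREGS.
//   }
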
2007 #if defined(__arm__) | |
2008 #ifndef PTRACE_SET_SYSCALL | |
2009 #define PTRACE_SET_SYSCALL 23 | |
2010 #endif | |
2011 #endif | |
2012 | |
2013 #if defined(__aarch64__) | |
2014 #ifndef PTRACE_GETREGS | |
2015 #define PTRACE_GETREGS 12 | |
2016 #endif | |
2017 #endif | |
2018 | |
2019 #if defined(__aarch64__) | |
2020 #ifndef PTRACE_SETREGS | |
2021 #define PTRACE_SETREGS 13 | |
2022 #endif | |
2023 #endif | |
2024 | |
2025 // Changes the syscall to run for a child being sandboxed using seccomp-bpf with | |
2026 // PTRACE_O_TRACESECCOMP. Should only be called when the child is stopped on | |
2027 // PTRACE_EVENT_SECCOMP. | |
2028 // | |
2029 // regs should contain the current set of registers of the child, obtained using | |
2030 // PTRACE_GETREGS. | |
2031 // | |
2032 // Depending on the architecture, this may modify regs, so the caller is | |
2033 // responsible for committing these changes using PTRACE_SETREGS. | |
2034 long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) { | |
2035 #if defined(__arm__) | |
2036 // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the | |
2037 // libc ptrace call as the request parameter is an enum, and | |
2038 // PTRACE_SET_SYSCALL may not be in the enum. | |
2039 return syscall(__NR_ptrace, PTRACE_SET_SYSCALL, pid, NULL, syscall_number); | |
2040 #endif | |
2041 | |
2042 SECCOMP_PT_SYSCALL(*regs) = syscall_number; | |
2043 return 0; | |
2044 } | |
2045 | |
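// Illustrative usage of SetSyscall() (assumes the tracee is already stopped
// on a PTRACE_EVENT_SECCOMP stop, as in the tracer loop below):
//
//   regs_struct regs;
//   BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, &regs));
//   BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, -1));  // -1 skips the syscall.
//   SECCOMP_PT_RESULT(regs) = -EPERM;               // Fake the return value.
//   BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
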
2046 const uint16_t kTraceData = 0xcc; | |
2047 | |
2048 class TraceAllPolicy : public Policy { | |
2049 public: | |
2050 TraceAllPolicy() {} | |
2051 ~TraceAllPolicy() override {} | |
2052 | |
2053 ResultExpr EvaluateSyscall(int system_call_number) const override { | |
2054 return Trace(kTraceData); | |
2055 } | |
2056 | |
2057 private: | |
2058 DISALLOW_COPY_AND_ASSIGN(TraceAllPolicy); | |
2059 }; | |
2060 | |
2061 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) { | |
2062 if (!SandboxBPF::SupportsSeccompSandbox( | |
2063 SandboxBPF::SeccompLevel::SINGLE_THREADED)) { | |
2064 return; | |
2065 } | |
2066 | |
2067 // This test is disabled on arm due to a kernel bug. | |
2068 // See https://code.google.com/p/chromium/issues/detail?id=383977 | |
2069 #if defined(__arm__) || defined(__aarch64__) | |
2070 printf("This test is currently disabled on ARM32/64 due to a kernel bug."); | |
2071 return; | |
2072 #endif | |
2073 | |
2074 #if defined(__mips__) | |
2075 // TODO: Figure out how to support specificity of handling indirect syscalls | |
2076 // in this test and enable it. | |
2077 printf("This test is currently disabled on MIPS."); | |
2078 return; | |
2079 #endif | |
2080 | |
2081 pid_t pid = fork(); | |
2082 BPF_ASSERT_NE(-1, pid); | |
2083 if (pid == 0) { | |
2084 pid_t my_pid = getpid(); | |
2085 BPF_ASSERT_NE(-1, ptrace(PTRACE_TRACEME, -1, NULL, NULL)); | |
2086 BPF_ASSERT_EQ(0, raise(SIGSTOP)); | |
2087 SandboxBPF sandbox(new TraceAllPolicy); | |
2088 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED)); | |
2089 | |
2090 // getpid is allowed. | |
2091 BPF_ASSERT_EQ(my_pid, sys_getpid()); | |
2092 | |
2093 // write to stdout is skipped and returns a fake value. | |
2094 BPF_ASSERT_EQ(kExpectedReturnValue, | |
2095 syscall(__NR_write, STDOUT_FILENO, "A", 1)); | |
2096 | |
2097 // kill is rewritten to exit(kExpectedReturnValue). | |
2098 syscall(__NR_kill, my_pid, SIGKILL); | |
2099 | |
2100 // Should not be reached. | |
2101 BPF_ASSERT(false); | |
2102 } | |
2103 | |
2104 int status; | |
2105 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, WUNTRACED)) != -1); | |
2106 BPF_ASSERT(WIFSTOPPED(status)); | |
2107 | |
2108 BPF_ASSERT_NE(-1, | |
2109 ptrace(PTRACE_SETOPTIONS, | |
2110 pid, | |
2111 NULL, | |
2112 reinterpret_cast<void*>(PTRACE_O_TRACESECCOMP))); | |
2113 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL)); | |
2114 while (true) { | |
2115 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1); | |
2116 if (WIFEXITED(status) || WIFSIGNALED(status)) { | |
2117 BPF_ASSERT(WIFEXITED(status)); | |
2118 BPF_ASSERT_EQ(kExpectedReturnValue, WEXITSTATUS(status)); | |
2119 break; | |
2120 } | |
2121 | |
2122 if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP || | |
2123 !IS_SECCOMP_EVENT(status)) { | |
2124 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL)); | |
2125 continue; | |
2126 } | |
2127 | |
2128 unsigned long data; | |
2129 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETEVENTMSG, pid, NULL, &data)); | |
2130 BPF_ASSERT_EQ(kTraceData, data); | |
2131 | |
2132 regs_struct regs; | |
2133 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, ®s)); | |
2134 switch (SECCOMP_PT_SYSCALL(regs)) { | |
2135 case __NR_write: | |
2136 // Skip writes to stdout, make it return kExpectedReturnValue. Allow | |
2137 // writes to stderr so that BPF_ASSERT messages show up. | |
2138 if (SECCOMP_PT_PARM1(regs) == STDOUT_FILENO) { | |
2139 BPF_ASSERT_NE(-1, SetSyscall(pid, ®s, -1)); | |
2140 SECCOMP_PT_RESULT(regs) = kExpectedReturnValue; | |
2141 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, ®s)); | |
2142 } | |
2143 break; | |
2144 | |
2145 case __NR_kill: | |
2146 // Rewrite to exit(kExpectedReturnValue). | |
2147 BPF_ASSERT_NE(-1, SetSyscall(pid, ®s, __NR_exit)); | |
2148 SECCOMP_PT_PARM1(regs) = kExpectedReturnValue; | |
2149 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, ®s)); | |
2150 break; | |
2151 | |
2152 default: | |
2153 // Allow all other syscalls. | |
2154 break; | |
2155 } | |
2156 | |
2157 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL)); | |
2158 } | |
2159 } | |
2160 | |
2161 // Android does not expose pread64 or pwrite64. | |
2162 #if !defined(OS_ANDROID) | |
2163 | |
2164 bool FullPwrite64(int fd, const char* buffer, size_t count, off64_t offset) { | |
2165 while (count > 0) { | |
2166 const ssize_t transfered = | |
2167 HANDLE_EINTR(pwrite64(fd, buffer, count, offset)); | |
2168 if (transfered <= 0 || static_cast<size_t>(transfered) > count) { | |
2169 return false; | |
2170 } | |
2171 count -= transfered; | |
2172 buffer += transfered; | |
2173 offset += transfered; | |
2174 } | |
2175 return true; | |
2176 } | |
2177 | |
2178 bool FullPread64(int fd, char* buffer, size_t count, off64_t offset) { | |
2179 while (count > 0) { | |
2180 const ssize_t transfered = HANDLE_EINTR(pread64(fd, buffer, count, offset)); | |
2181 if (transfered <= 0 || static_cast<size_t>(transfered) > count) { | |
2182 return false; | |
2183 } | |
2184 count -= transfered; | |
2185 buffer += transfered; | |
2186 offset += transfered; | |
2187 } | |
2188 return true; | |
2189 } | |
2190 | |
2191 bool pread_64_was_forwarded = false; | |
2192 | |
2193 class TrapPread64Policy : public Policy { | |
2194 public: | |
2195 TrapPread64Policy() {} | |
2196 ~TrapPread64Policy() override {} | |
2197 | |
2198 ResultExpr EvaluateSyscall(int system_call_number) const override { | |
2199 // Set the global environment for unsafe traps once. | |
2200 if (system_call_number == MIN_SYSCALL) { | |
2201 EnableUnsafeTraps(); | |
2202 } | |
2203 | |
2204 if (system_call_number == __NR_pread64) { | |
2205 return UnsafeTrap(ForwardPreadHandler, NULL); | |
2206 } | |
2207 return Allow(); | |
2208 } | |
2209 | |
2210 private: | |
2211 static intptr_t ForwardPreadHandler(const struct arch_seccomp_data& args, | |
2212 void* aux) { | |
2213 BPF_ASSERT(args.nr == __NR_pread64); | |
2214 pread_64_was_forwarded = true; | |
2215 | |
2216 return SandboxBPF::ForwardSyscall(args); | |
2217 } | |
2218 | |
2219 DISALLOW_COPY_AND_ASSIGN(TrapPread64Policy); | |
2220 }; | |
2221 | |
2222 // pread(2) takes a 64-bit offset. On 32-bit systems, it will be split | |
2223 // between two arguments. In this test, we make sure that ForwardSyscall() can | |
2224 // forward it properly. | |
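
// Rough sketch of the splitting described above (the exact argument layout
// is architecture-specific; this illustrates the idea, not the precise ABI):
//
//   const uint64_t offset = (static_cast<uint64_t>(1) << 32) | 0xBEEF;
//   const uint32_t offset_low = static_cast<uint32_t>(offset);         // 0xBEEF
//   const uint32_t offset_high = static_cast<uint32_t>(offset >> 32);  // 0x1
//   // A 32-bit caller passes offset_low and offset_high as two separate
//   // syscall arguments; ForwardSyscall() must preserve both of them.
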
2225 BPF_TEST_C(SandboxBPF, Pread64, TrapPread64Policy) { | |
2226 ScopedTemporaryFile temp_file; | |
2227 const uint64_t kLargeOffset = (static_cast<uint64_t>(1) << 32) | 0xBEEF; | |
2228 const char kTestString[] = "This is a test!"; | |
2229 BPF_ASSERT(FullPwrite64( | |
2230 temp_file.fd(), kTestString, sizeof(kTestString), kLargeOffset)); | |
2231 | |
2232 char read_test_string[sizeof(kTestString)] = {0}; | |
2233 BPF_ASSERT(FullPread64(temp_file.fd(), | |
2234 read_test_string, | |
2235 sizeof(read_test_string), | |
2236 kLargeOffset)); | |
2237 BPF_ASSERT_EQ(0, memcmp(kTestString, read_test_string, sizeof(kTestString))); | |
2238 BPF_ASSERT(pread_64_was_forwarded); | |
2239 } | |
2240 | |
2241 #endif // !defined(OS_ANDROID) | |
2242 | |
2243 void* TsyncApplyToTwoThreadsFunc(void* cond_ptr) { | |
2244 base::WaitableEvent* event = static_cast<base::WaitableEvent*>(cond_ptr); | |
2245 | |
2246 // Wait for the main thread to signal that the filter has been applied. | |
2247 if (!event->IsSignaled()) { | |
2248 event->Wait(); | |
2249 } | |
2250 | |
2251 BPF_ASSERT(event->IsSignaled()); | |
2252 | |
2253 BlacklistNanosleepPolicy::AssertNanosleepFails(); | |
2254 | |
2255 return NULL; | |
2256 } | |
2257 | |
2258 SANDBOX_TEST(SandboxBPF, Tsync) { | |
2259 const bool supports_multi_threaded = SandboxBPF::SupportsSeccompSandbox( | |
2260 SandboxBPF::SeccompLevel::MULTI_THREADED); | |
2261 // On Chrome OS tsync is mandatory. | |
2262 #if defined(OS_CHROMEOS) | |
2263 if (base::SysInfo::IsRunningOnChromeOS()) { | |
2264 BPF_ASSERT_EQ(true, supports_multi_threaded); | |
2265 } | |
2266 // Otherwise, this is a Chrome OS build not running on a Chrome OS device | |
2267 // (e.g. Chrome bots); in that case, fall through. | |
2268 #endif | |
2269 if (!supports_multi_threaded) { | |
2270 return; | |
2271 } | |
2272 | |
2273 base::WaitableEvent event(true, false); | |
2274 | |
2275 // Create a thread on which to invoke the blocked syscall. | |
2276 pthread_t thread; | |
2277 BPF_ASSERT_EQ( | |
2278 0, pthread_create(&thread, NULL, &TsyncApplyToTwoThreadsFunc, &event)); | |
2279 | |
2280 // Test that nanosleep succeeds before the sandbox is engaged. | |
2281 const struct timespec ts = {0, 0}; | |
2282 BPF_ASSERT_EQ(0, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL))); | |
2283 | |
2284 // Engage the sandbox. | |
2285 SandboxBPF sandbox(new BlacklistNanosleepPolicy()); | |
2286 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::SeccompLevel::MULTI_THREADED)); | |
2287 | |
2288 // This thread should have the filter applied as well. | |
2289 BlacklistNanosleepPolicy::AssertNanosleepFails(); | |
2290 | |
2291 // Signal the condition to invoke the system call. | |
2292 event.Signal(); | |
2293 | |
2294 // Wait for the thread to finish. | |
2295 BPF_ASSERT_EQ(0, pthread_join(thread, NULL)); | |
2296 } | |
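
// Sketch of the kernel feature exercised by the Tsync test (an assumption
// about what SeccompLevel::MULTI_THREADED relies on, not a statement about
// SandboxBPF internals): with SECCOMP_FILTER_FLAG_TSYNC the filter is
// atomically applied to all threads of the calling process, roughly:
//
//   // syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
//   //         SECCOMP_FILTER_FLAG_TSYNC, &prog);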
2297 | |
2298 class AllowAllPolicy : public Policy { | |
2299 public: | |
2300 AllowAllPolicy() {} | |
2301 ~AllowAllPolicy() override {} | |
2302 | |
2303 ResultExpr EvaluateSyscall(int sysno) const override { return Allow(); } | |
2304 | |
2305 private: | |
2306 DISALLOW_COPY_AND_ASSIGN(AllowAllPolicy); | |
2307 }; | |
2308 | |
2309 SANDBOX_DEATH_TEST( | |
2310 SandboxBPF, | |
2311 StartMultiThreadedAsSingleThreaded, | |
2312 DEATH_MESSAGE( | |
2313 ThreadHelpers::GetAssertSingleThreadedErrorMessageForTests())) { | |
2314 base::Thread thread("sandbox.linux.StartMultiThreadedAsSingleThreaded"); | |
2315 BPF_ASSERT(thread.Start()); | |
2316 | |
2317 SandboxBPF sandbox(new AllowAllPolicy()); | |
2318 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::SeccompLevel::SINGLE_THREADED)); | |
2319 } | |
2320 | |
2321 // http://crbug.com/407357 | |
2322 #if !defined(THREAD_SANITIZER) | |
2323 SANDBOX_DEATH_TEST( | |
2324 SandboxBPF, | |
2325 StartSingleThreadedAsMultiThreaded, | |
2326 DEATH_MESSAGE( | |
2327 "Cannot start sandbox; process may be single-threaded when " | |
2328 "reported as not")) { | |
2329 SandboxBPF sandbox(new AllowAllPolicy()); | |
2330 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::SeccompLevel::MULTI_THREADED)); | |
2331 } | |
2332 #endif // !defined(THREAD_SANITIZER) | |
2333 | |
2334 // A stub handler for the UnsafeTrap. Never called. | |
2335 intptr_t NoOpHandler(const struct arch_seccomp_data& args, void*) { | |
2336 return -1; | |
2337 } | |
2338 | |
2339 class UnsafeTrapWithCondPolicy : public Policy { | |
2340 public: | |
2341 UnsafeTrapWithCondPolicy() {} | |
2342 ~UnsafeTrapWithCondPolicy() override {} | |
2343 | |
2344 ResultExpr EvaluateSyscall(int sysno) const override { | |
2345 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); | |
2346 setenv(kSandboxDebuggingEnv, "t", 0); | |
2347 Die::SuppressInfoMessages(true); | |
2348 | |
2349 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) | |
2350 return Allow(); | |
2351 | |
2352 switch (sysno) { | |
2353 case __NR_uname: { | |
2354 const Arg<uint32_t> arg(0); | |
2355 return If(arg == 0, Allow()).Else(Error(EPERM)); | |
2356 } | |
2357 case __NR_setgid: { | |
2358 const Arg<uint32_t> arg(0); | |
2359 return Switch(arg) | |
2360 .Case(100, Error(ENOMEM)) | |
2361 .Case(200, Error(ENOSYS)) | |
2362 .Default(Error(EPERM)); | |
2363 } | |
2364 case __NR_close: | |
2365 case __NR_exit_group: | |
2366 case __NR_write: | |
2367 return Allow(); | |
2368 case __NR_getppid: | |
2369 return UnsafeTrap(NoOpHandler, NULL); | |
2370 default: | |
2371 return Error(EPERM); | |
2372 } | |
2373 } | |
2374 | |
2375 private: | |
2376 DISALLOW_COPY_AND_ASSIGN(UnsafeTrapWithCondPolicy); | |
2377 }; | |
2378 | |
2379 BPF_TEST_C(SandboxBPF, UnsafeTrapWithCond, UnsafeTrapWithCondPolicy) { | |
2380 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 0)); | |
2381 BPF_ASSERT_EQ(EFAULT, errno); | |
2382 | |
2383 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 1)); | |
2384 BPF_ASSERT_EQ(EPERM, errno); | |
2385 | |
2386 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 100)); | |
2387 BPF_ASSERT_EQ(ENOMEM, errno); | |
2388 | |
2389 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 200)); | |
2390 BPF_ASSERT_EQ(ENOSYS, errno); | |
2391 | |
2392 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 300)); | |
2393 BPF_ASSERT_EQ(EPERM, errno); | |
2394 } | |
2395 | |
2396 } // namespace | |
2397 | |
2398 } // namespace bpf_dsl | |
2399 } // namespace sandbox | |