
Side by Side Diff: sandbox/linux/seccomp-bpf/sandbox_bpf_unittest.cc

Issue 559653004: Convert sandbox_bpf_unittest.cc to use bpf_dsl (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Add unit test for != (created 6 years, 3 months ago)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <errno.h>
6 #include <pthread.h>
7 #include <sched.h>
8 #include <signal.h>
9 #include <sys/prctl.h>
10 #include <sys/ptrace.h>
11 #include <sys/syscall.h>
12 #include <sys/time.h>
13 #include <sys/types.h>
14 #include <sys/utsname.h>
15 #include <unistd.h>
16 #include <sys/socket.h>
17
18 #if defined(ANDROID)
19 // Work-around for buggy headers in Android's NDK
20 #define __user
21 #endif
22 #include <linux/futex.h>
23
24 #include <ostream>
25
26 #include "base/bind.h"
27 #include "base/logging.h"
28 #include "base/macros.h"
29 #include "base/memory/scoped_ptr.h"
30 #include "base/posix/eintr_wrapper.h"
31 #include "base/synchronization/waitable_event.h"
32 #include "base/threading/thread.h"
33 #include "build/build_config.h"
34 #include "sandbox/linux/seccomp-bpf/bpf_tests.h"
35 #include "sandbox/linux/seccomp-bpf/syscall.h"
36 #include "sandbox/linux/seccomp-bpf/trap.h"
37 #include "sandbox/linux/seccomp-bpf/verifier.h"
38 #include "sandbox/linux/services/broker_process.h"
39 #include "sandbox/linux/services/linux_syscalls.h"
40 #include "sandbox/linux/tests/scoped_temporary_file.h"
41 #include "sandbox/linux/tests/unit_tests.h"
42 #include "testing/gtest/include/gtest/gtest.h"
43
44 // Workaround for Android's prctl.h file.
45 #ifndef PR_GET_ENDIAN
46 #define PR_GET_ENDIAN 19
47 #endif
48 #ifndef PR_CAPBSET_READ
49 #define PR_CAPBSET_READ 23
50 #define PR_CAPBSET_DROP 24
51 #endif
52
53 namespace sandbox {
54
55 namespace {
56
57 const int kExpectedReturnValue = 42;
58 const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
59
60 // Set the global environment to allow the use of UnsafeTrap() policies.
61 void EnableUnsafeTraps() {
62 // The use of UnsafeTrap() causes us to print a warning message. This is
63 // generally desirable, but it results in the unittest failing, as it doesn't
64 // expect any messages on "stderr". So, temporarily disable messages. The
65 // BPF_TEST() is guaranteed to turn messages back on, after the policy
66 // function has completed.
67 setenv(kSandboxDebuggingEnv, "t", 0);
68 Die::SuppressInfoMessages(true);
69 }
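// Note that the third argument to setenv() above is zero, so an existing
// CHROME_SANDBOX_DEBUGGING value is left untouched. The
// EnableUnsafeTrapsInSigSysHandler test further down exercises the unset,
// empty and "t" cases of this variable explicitly.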
70
71 // This test should execute regardless of whether we have kernel support. So,
72 // we make it a TEST() instead of a BPF_TEST().
73 TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupports)) {
74 // We check that we don't crash, but it's ok if the kernel doesn't
75 // support it.
76 bool seccomp_bpf_supported =
77 SandboxBPF::SupportsSeccompSandbox(-1) == SandboxBPF::STATUS_AVAILABLE;
78 // We want to log whether or not seccomp BPF is actually supported
79 // since actual test coverage depends on it.
80 RecordProperty("SeccompBPFSupported",
81 seccomp_bpf_supported ? "true." : "false.");
82 std::cout << "Seccomp BPF supported: "
83 << (seccomp_bpf_supported ? "true." : "false.") << "\n";
84 RecordProperty("PointerSize", sizeof(void*));
85 std::cout << "Pointer size: " << sizeof(void*) << "\n";
86 }
87
88 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupportsTwice)) {
89 SandboxBPF::SupportsSeccompSandbox(-1);
90 SandboxBPF::SupportsSeccompSandbox(-1);
91 }
92
93 // BPF_TEST handles a lot of the boiler-plate code around setting up a
94 // policy and optionally passing data between the caller, the policy and
95 // any Trap() handlers. This is great for writing short and concise tests,
96 // and it helps us avoid accidentally forgetting any of the crucial steps in
97 // setting up the sandbox. But it wouldn't hurt to have at least one test
98 // that explicitly walks through all these steps.
99
100 intptr_t IncreaseCounter(const struct arch_seccomp_data& args, void* aux) {
101 BPF_ASSERT(aux);
102 int* counter = static_cast<int*>(aux);
103 return (*counter)++;
104 }
105
106 class VerboseAPITestingPolicy : public SandboxBPFPolicy {
107 public:
108 VerboseAPITestingPolicy(int* counter_ptr) : counter_ptr_(counter_ptr) {}
109
110 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
111 int sysno) const OVERRIDE {
112 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
113 if (sysno == __NR_uname) {
114 return sandbox->Trap(IncreaseCounter, counter_ptr_);
115 }
116 return ErrorCode(ErrorCode::ERR_ALLOWED);
117 }
118
119 private:
120 int* counter_ptr_;
121 DISALLOW_COPY_AND_ASSIGN(VerboseAPITestingPolicy);
122 };
123
124 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) {
125 if (SandboxBPF::SupportsSeccompSandbox(-1) ==
126 sandbox::SandboxBPF::STATUS_AVAILABLE) {
127 static int counter = 0;
128
129 SandboxBPF sandbox;
130 sandbox.SetSandboxPolicy(new VerboseAPITestingPolicy(&counter));
131 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
132
133 BPF_ASSERT_EQ(0, counter);
134 BPF_ASSERT_EQ(0, syscall(__NR_uname, 0));
135 BPF_ASSERT_EQ(1, counter);
136 BPF_ASSERT_EQ(1, syscall(__NR_uname, 0));
137 BPF_ASSERT_EQ(2, counter);
138 }
139 }
140
141 // A simple blacklist test
142
143 class BlacklistNanosleepPolicy : public SandboxBPFPolicy {
144 public:
145 BlacklistNanosleepPolicy() {}
146 virtual ErrorCode EvaluateSyscall(SandboxBPF*, int sysno) const OVERRIDE {
147 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
148 switch (sysno) {
149 case __NR_nanosleep:
150 return ErrorCode(EACCES);
151 default:
152 return ErrorCode(ErrorCode::ERR_ALLOWED);
153 }
154 }
155
156 static void AssertNanosleepFails() {
157 const struct timespec ts = {0, 0};
158 errno = 0;
159 BPF_ASSERT_EQ(-1, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
160 BPF_ASSERT_EQ(EACCES, errno);
161 }
162
163 private:
164 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepPolicy);
165 };
166
167 BPF_TEST_C(SandboxBPF, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) {
168 BlacklistNanosleepPolicy::AssertNanosleepFails();
169 }
170
171 // Now do a simple whitelist test
172
173 class WhitelistGetpidPolicy : public SandboxBPFPolicy {
174 public:
175 WhitelistGetpidPolicy() {}
176 virtual ErrorCode EvaluateSyscall(SandboxBPF*, int sysno) const OVERRIDE {
177 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
178 switch (sysno) {
179 case __NR_getpid:
180 case __NR_exit_group:
181 return ErrorCode(ErrorCode::ERR_ALLOWED);
182 default:
183 return ErrorCode(ENOMEM);
184 }
185 }
186
187 private:
188 DISALLOW_COPY_AND_ASSIGN(WhitelistGetpidPolicy);
189 };
190
191 BPF_TEST_C(SandboxBPF, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) {
192 // getpid() should be allowed
193 errno = 0;
194 BPF_ASSERT(syscall(__NR_getpid) > 0);
195 BPF_ASSERT(errno == 0);
196
197 // getpgid() should be denied
198 BPF_ASSERT(getpgid(0) == -1);
199 BPF_ASSERT(errno == ENOMEM);
200 }
201
202 // A simple blacklist policy, with a SIGSYS handler
203 intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) {
204 // We also check that the auxiliary data is correct
205 SANDBOX_ASSERT(aux);
206 *(static_cast<int*>(aux)) = kExpectedReturnValue;
207 return -ENOMEM;
208 }
209
210 ErrorCode BlacklistNanosleepPolicySigsys(SandboxBPF* sandbox,
211 int sysno,
212 int* aux) {
213 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
214 switch (sysno) {
215 case __NR_nanosleep:
216 return sandbox->Trap(EnomemHandler, aux);
217 default:
218 return ErrorCode(ErrorCode::ERR_ALLOWED);
219 }
220 }
221
222 BPF_TEST(SandboxBPF,
223 BasicBlacklistWithSigsys,
224 BlacklistNanosleepPolicySigsys,
225 int /* (*BPF_AUX) */) {
226 // getpid() should work properly
227 errno = 0;
228 BPF_ASSERT(syscall(__NR_getpid) > 0);
229 BPF_ASSERT(errno == 0);
230
231 // Our auxiliary data should be modified by the signal handler
232 *BPF_AUX = -1;
233 const struct timespec ts = {0, 0};
234 BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1);
235 BPF_ASSERT(errno == ENOMEM);
236
237 // We expect the signal handler to modify AuxData
238 BPF_ASSERT(*BPF_AUX == kExpectedReturnValue);
239 }
240
241 // A simple test that verifies we can return arbitrary errno values.
242
243 class ErrnoTestPolicy : public SandboxBPFPolicy {
244 public:
245 ErrnoTestPolicy() {}
246 virtual ErrorCode EvaluateSyscall(SandboxBPF*, int sysno) const OVERRIDE;
247
248 private:
249 DISALLOW_COPY_AND_ASSIGN(ErrnoTestPolicy);
250 };
251
252 ErrorCode ErrnoTestPolicy::EvaluateSyscall(SandboxBPF*, int sysno) const {
253 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
254 switch (sysno) {
255 case __NR_dup3: // dup2 is a wrapper of dup3 in android
256 #if defined(__NR_dup2)
257 case __NR_dup2:
258 #endif
259 // Pretend that dup2() worked, but don't actually do anything.
260 return ErrorCode(0);
261 case __NR_setuid:
262 #if defined(__NR_setuid32)
263 case __NR_setuid32:
264 #endif
265 // Return errno = 1.
266 return ErrorCode(1);
267 case __NR_setgid:
268 #if defined(__NR_setgid32)
269 case __NR_setgid32:
270 #endif
271 // Return maximum errno value (typically 4095).
272 return ErrorCode(ErrorCode::ERR_MAX_ERRNO);
273 case __NR_uname:
274 // Return errno = 42;
275 return ErrorCode(42);
276 default:
277 return ErrorCode(ErrorCode::ERR_ALLOWED);
278 }
279 }
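// To summarize the convention exercised here: ErrorCode(0) makes the system
// call return 0 without ever executing, while ErrorCode(n) for n between 1
// and ErrorCode::ERR_MAX_ERRNO makes it fail with errno set to n. The test
// below checks the 0, 1, 42 and ERR_MAX_ERRNO cases.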
280
281 BPF_TEST_C(SandboxBPF, ErrnoTest, ErrnoTestPolicy) {
282 // Verify that dup2() returns success, but doesn't actually run.
283 int fds[4];
284 BPF_ASSERT(pipe(fds) == 0);
285 BPF_ASSERT(pipe(fds + 2) == 0);
286 BPF_ASSERT(dup2(fds[2], fds[0]) == 0);
287 char buf[1] = {};
288 BPF_ASSERT(write(fds[1], "\x55", 1) == 1);
289 BPF_ASSERT(write(fds[3], "\xAA", 1) == 1);
290 BPF_ASSERT(read(fds[0], buf, 1) == 1);
291
292 // If dup2() executed, we will read \xAA, but if dup2() has been turned
293 // into a no-op by our policy, then we will read \x55.
294 BPF_ASSERT(buf[0] == '\x55');
295
296 // Verify that we can return the minimum and maximum errno values.
297 errno = 0;
298 BPF_ASSERT(setuid(0) == -1);
299 BPF_ASSERT(errno == 1);
300
301 // On Android, errno is only supported up to 255; otherwise errno
302 // processing is skipped.
303 // We work around this (crbug.com/181647).
304 if (sandbox::IsAndroid() && setgid(0) != -1) {
305 errno = 0;
306 BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO);
307 BPF_ASSERT(errno == 0);
308 } else {
309 errno = 0;
310 BPF_ASSERT(setgid(0) == -1);
311 BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO);
312 }
313
314 // Finally, test an errno in between the minimum and maximum.
315 errno = 0;
316 struct utsname uts_buf;
317 BPF_ASSERT(uname(&uts_buf) == -1);
318 BPF_ASSERT(errno == 42);
319 }
320
321 // Testing the stacking of two sandboxes
322
323 class StackingPolicyPartOne : public SandboxBPFPolicy {
324 public:
325 StackingPolicyPartOne() {}
326 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
327 int sysno) const OVERRIDE {
328 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
329 switch (sysno) {
330 case __NR_getppid:
331 return sandbox->Cond(0,
332 ErrorCode::TP_32BIT,
333 ErrorCode::OP_EQUAL,
334 0,
335 ErrorCode(ErrorCode::ERR_ALLOWED),
336 ErrorCode(EPERM));
337 default:
338 return ErrorCode(ErrorCode::ERR_ALLOWED);
339 }
340 }
341
342 private:
343 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartOne);
344 };
345
346 class StackingPolicyPartTwo : public SandboxBPFPolicy {
347 public:
348 StackingPolicyPartTwo() {}
349 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
350 int sysno) const OVERRIDE {
351 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
352 switch (sysno) {
353 case __NR_getppid:
354 return sandbox->Cond(0,
355 ErrorCode::TP_32BIT,
356 ErrorCode::OP_EQUAL,
357 0,
358 ErrorCode(EINVAL),
359 ErrorCode(ErrorCode::ERR_ALLOWED));
360 default:
361 return ErrorCode(ErrorCode::ERR_ALLOWED);
362 }
363 }
364
365 private:
366 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartTwo);
367 };
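// When the second filter is stacked on top of the first, the kernel runs all
// attached filters and a non-allow result takes precedence over an allow. So
// in the test below, getppid(0) now fails with EINVAL (from part two), while
// getppid(1) still fails with EPERM (from part one).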
368
369 BPF_TEST_C(SandboxBPF, StackingPolicy, StackingPolicyPartOne) {
370 errno = 0;
371 BPF_ASSERT(syscall(__NR_getppid, 0) > 0);
372 BPF_ASSERT(errno == 0);
373
374 BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
375 BPF_ASSERT(errno == EPERM);
376
377 // Stack a second sandbox with its own policy. Verify that we can further
378 // restrict filters, but we cannot relax existing filters.
379 SandboxBPF sandbox;
380 sandbox.SetSandboxPolicy(new StackingPolicyPartTwo());
381 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
382
383 errno = 0;
384 BPF_ASSERT(syscall(__NR_getppid, 0) == -1);
385 BPF_ASSERT(errno == EINVAL);
386
387 BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
388 BPF_ASSERT(errno == EPERM);
389 }
390
391 // A more complex, but synthetic policy. This tests the correctness of the BPF
392 // program by iterating through all syscalls and checking for an errno that
393 // depends on the syscall number. Unlike the Verifier, this exercises the BPF
394 // interpreter in the kernel.
395
396 // We try to exercise optimizations in the BPF compiler: we make sure that
397 // the compiler gets an opportunity to coalesce syscalls with contiguous
398 // numbers, and that disjoint sets of syscalls can return the
399 // same errno.
400 int SysnoToRandomErrno(int sysno) {
401 // Small contiguous sets of four system calls return an errno equal to the
402 // index of that set + 1 (so that we never return a zero errno).
403 return ((sysno & ~3) >> 2) % 29 + 1;
404 }
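// For illustration: syscalls 0..3 all map to errno 1, syscalls 4..7 map to
// errno 2, and syscalls 116..119 wrap around to errno 1 again
// ((116 >> 2) % 29 + 1 == 1), giving both contiguous ranges that share an
// errno and disjoint ranges that return the same errno.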
405
406 class SyntheticPolicy : public SandboxBPFPolicy {
407 public:
408 SyntheticPolicy() {}
409 virtual ErrorCode EvaluateSyscall(SandboxBPF*, int sysno) const OVERRIDE {
410 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
411 if (sysno == __NR_exit_group || sysno == __NR_write) {
412 // exit_group() is special, we really need it to work.
413 // write() is needed for BPF_ASSERT() to report a useful error message.
414 return ErrorCode(ErrorCode::ERR_ALLOWED);
415 }
416 return ErrorCode(SysnoToRandomErrno(sysno));
417 }
418
419 private:
420 DISALLOW_COPY_AND_ASSIGN(SyntheticPolicy);
421 };
422
423 BPF_TEST_C(SandboxBPF, SyntheticPolicy, SyntheticPolicy) {
424 // Ensure that kExpectedReturnValue + syscall number + 1 does not overflow
425 // an int.
426 BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
427 static_cast<int>(MAX_PUBLIC_SYSCALL));
428
429 for (int syscall_number = static_cast<int>(MIN_SYSCALL);
430 syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
431 ++syscall_number) {
432 if (syscall_number == __NR_exit_group || syscall_number == __NR_write) {
433 // exit_group() is special
434 continue;
435 }
436 errno = 0;
437 BPF_ASSERT(syscall(syscall_number) == -1);
438 BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number));
439 }
440 }
441
442 #if defined(__arm__)
443 // A simple policy that tests whether ARM private system calls are supported
444 // by our BPF compiler and by the BPF interpreter in the kernel.
445
446 // For ARM private system calls, return an errno equal to their offset from
447 // MIN_PRIVATE_SYSCALL plus 1 (to avoid NUL errno).
448 int ArmPrivateSysnoToErrno(int sysno) {
449 if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) &&
450 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
451 return (sysno - MIN_PRIVATE_SYSCALL) + 1;
452 } else {
453 return ENOSYS;
454 }
455 }
456
457 class ArmPrivatePolicy : public SandboxBPFPolicy {
458 public:
459 ArmPrivatePolicy() {}
460 virtual ErrorCode EvaluateSyscall(SandboxBPF*, int sysno) const OVERRIDE {
461 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
462 // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual
463 // ARM private system calls.
464 if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) &&
465 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) {
466 return ErrorCode(ArmPrivateSysnoToErrno(sysno));
467 }
468 return ErrorCode(ErrorCode::ERR_ALLOWED);
469 }
470
471 private:
472 DISALLOW_COPY_AND_ASSIGN(ArmPrivatePolicy);
473 };
474
475 BPF_TEST_C(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) {
476 for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
477 syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
478 ++syscall_number) {
479 errno = 0;
480 BPF_ASSERT(syscall(syscall_number) == -1);
481 BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number));
482 }
483 }
484 #endif // defined(__arm__)
485
486 intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) {
487 // Count all invocations of our callback function.
488 ++*reinterpret_cast<int*>(aux);
489
490 // Verify that within the callback function all filtering is temporarily
491 // disabled.
492 BPF_ASSERT(syscall(__NR_getpid) > 1);
493
494 // Verify that we can now call the underlying system call without causing
495 // infinite recursion.
496 return SandboxBPF::ForwardSyscall(args);
497 }
498
499 ErrorCode GreyListedPolicy(SandboxBPF* sandbox, int sysno, int* aux) {
500 // Set the global environment for unsafe traps once.
501 if (sysno == MIN_SYSCALL) {
502 EnableUnsafeTraps();
503 }
504
505 // Some system calls must always be allowed, if our policy wants to make
506 // use of UnsafeTrap()
507 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) {
508 return ErrorCode(ErrorCode::ERR_ALLOWED);
509 } else if (sysno == __NR_getpid) {
510 // Disallow getpid()
511 return ErrorCode(EPERM);
512 } else if (SandboxBPF::IsValidSyscallNumber(sysno)) {
513 // Allow (and count) all other system calls.
514 return sandbox->UnsafeTrap(CountSyscalls, aux);
515 } else {
516 return ErrorCode(ENOSYS);
517 }
518 }
519
520 BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* (*BPF_AUX) */) {
521 BPF_ASSERT(syscall(__NR_getpid) == -1);
522 BPF_ASSERT(errno == EPERM);
523 BPF_ASSERT(*BPF_AUX == 0);
524 BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid));
525 BPF_ASSERT(*BPF_AUX == 2);
526 char name[17] = {};
527 BPF_ASSERT(!syscall(__NR_prctl,
528 PR_GET_NAME,
529 name,
530 (void*)NULL,
531 (void*)NULL,
532 (void*)NULL));
533 BPF_ASSERT(*BPF_AUX == 3);
534 BPF_ASSERT(*name);
535 }
536
537 SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) {
538 // Disabling warning messages that could confuse our test framework.
539 setenv(kSandboxDebuggingEnv, "t", 0);
540 Die::SuppressInfoMessages(true);
541
542 unsetenv(kSandboxDebuggingEnv);
543 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
544 setenv(kSandboxDebuggingEnv, "", 1);
545 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
546 setenv(kSandboxDebuggingEnv, "t", 1);
547 SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == true);
548 }
549
550 intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) {
551 if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) {
552 // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always
553 // return an error. But our handler allows this call.
554 return 0;
555 } else {
556 return SandboxBPF::ForwardSyscall(args);
557 }
558 }
559
560 class PrctlPolicy : public SandboxBPFPolicy {
561 public:
562 PrctlPolicy() {}
563 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
564 int sysno) const OVERRIDE {
565 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
566 setenv(kSandboxDebuggingEnv, "t", 0);
567 Die::SuppressInfoMessages(true);
568
569 if (sysno == __NR_prctl) {
570 // Handle prctl() inside an UnsafeTrap()
571 return sandbox->UnsafeTrap(PrctlHandler, NULL);
572 }
573
574 // Allow all other system calls.
575 return ErrorCode(ErrorCode::ERR_ALLOWED);
576 }
577
578 private:
579 DISALLOW_COPY_AND_ASSIGN(PrctlPolicy);
580 };
581
582 BPF_TEST_C(SandboxBPF, ForwardSyscall, PrctlPolicy) {
583 // This call should never be allowed. But our policy will intercept it and
584 // let it pass successfully.
585 BPF_ASSERT(
586 !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL));
587
588 // Verify that the call will fail, if it makes it all the way to the kernel.
589 BPF_ASSERT(
590 prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1);
591
592 // And verify that other uses of prctl() work just fine.
593 char name[17] = {};
594 BPF_ASSERT(!syscall(__NR_prctl,
595 PR_GET_NAME,
596 name,
597 (void*)NULL,
598 (void*)NULL,
599 (void*)NULL));
600 BPF_ASSERT(*name);
601
602 // Finally, verify that system calls other than prctl() are completely
603 // unaffected by our policy.
604 struct utsname uts = {};
605 BPF_ASSERT(!uname(&uts));
606 BPF_ASSERT(!strcmp(uts.sysname, "Linux"));
607 }
608
609 intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) {
610 return SandboxBPF::ForwardSyscall(args);
611 }
612
613 class RedirectAllSyscallsPolicy : public SandboxBPFPolicy {
614 public:
615 RedirectAllSyscallsPolicy() {}
616 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
617 int sysno) const OVERRIDE;
618
619 private:
620 DISALLOW_COPY_AND_ASSIGN(RedirectAllSyscallsPolicy);
621 };
622
623 ErrorCode RedirectAllSyscallsPolicy::EvaluateSyscall(SandboxBPF* sandbox,
624 int sysno) const {
625 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
626 setenv(kSandboxDebuggingEnv, "t", 0);
627 Die::SuppressInfoMessages(true);
628
629 // Some system calls must always be allowed, if our policy wants to make
630 // use of UnsafeTrap()
631 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
632 return ErrorCode(ErrorCode::ERR_ALLOWED);
633 return sandbox->UnsafeTrap(AllowRedirectedSyscall, NULL);
634 }
635
636 int bus_handler_fd_ = -1;
637
638 void SigBusHandler(int, siginfo_t* info, void* void_context) {
639 BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1);
640 }
641
642 BPF_TEST_C(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) {
643 // We use the SIGBUS bit in the signal mask as a thread-local boolean
644 // value in the implementation of UnsafeTrap(). This is obviously a bit
645 // of a hack that could conceivably interfere with code that uses SIGBUS
646 // in more traditional ways. This test verifies that basic functionality
647 // of SIGBUS is not impacted, but it is certainly possible to construct
648 // more complex uses of signals where our use of the SIGBUS mask is not
649 // 100% transparent. This is expected behavior.
650 int fds[2];
651 BPF_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
652 bus_handler_fd_ = fds[1];
653 struct sigaction sa = {};
654 sa.sa_sigaction = SigBusHandler;
655 sa.sa_flags = SA_SIGINFO;
656 BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0);
657 raise(SIGBUS);
658 char c = '\000';
659 BPF_ASSERT(read(fds[0], &c, 1) == 1);
660 BPF_ASSERT(close(fds[0]) == 0);
661 BPF_ASSERT(close(fds[1]) == 0);
662 BPF_ASSERT(c == 0x55);
663 }
664
665 BPF_TEST_C(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) {
666 // Signal masks are potentially tricky to handle. For instance, if we
667 // ever tried to update them from inside a Trap() or UnsafeTrap() handler,
668 // the call to sigreturn() at the end of the signal handler would undo
669 // all of our efforts. So, it makes sense to test that sigprocmask()
670 // works, even if we have a policy in place that makes use of UnsafeTrap().
671 // In practice, this works because we force sigprocmask() to be handled
672 // entirely in the kernel.
673 sigset_t mask0, mask1, mask2;
674
675 // Call sigprocmask() to verify that SIGUSR2 wasn't blocked, if we didn't
676 // change the mask (it shouldn't have been, as it isn't blocked by default
677 // in POSIX).
678 //
679 // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose.
680 sigemptyset(&mask0);
681 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1));
682 BPF_ASSERT(!sigismember(&mask1, SIGUSR2));
683
684 // Try again, and this time we verify that we can block it. This
685 // requires a second call to sigprocmask().
686 sigaddset(&mask0, SIGUSR2);
687 BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL));
688 BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2));
689 BPF_ASSERT(sigismember(&mask2, SIGUSR2));
690 }
691
692 BPF_TEST_C(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
693 // An UnsafeTrap() (or for that matter, a Trap()) has to report error
694 // conditions by returning a value in the range -1..-4095. This
695 // should happen automatically if using ForwardSyscall(). If the TrapFnc()
696 // uses some other method to make system calls, then it is responsible
697 // for computing the correct return code.
698 // This test verifies that ForwardSyscall() does the correct thing.
699
700 // The glibc system wrapper will ultimately set errno for us. So, from normal
701 // userspace, all of this should be completely transparent.
702 errno = 0;
703 BPF_ASSERT(close(-1) == -1);
704 BPF_ASSERT(errno == EBADF);
705
706 // Explicitly avoid the glibc wrapper. This is not normally the way anybody
707 // would make system calls, but it allows us to verify that we don't
708 // accidentally mess with errno, when we shouldn't.
709 errno = 0;
710 struct arch_seccomp_data args = {};
711 args.nr = __NR_close;
712 args.args[0] = -1;
713 BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF);
714 BPF_ASSERT(errno == 0);
715 }
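// As a rough sketch (hypothetical handler, not used by this test): a TrapFnc
// that makes its own system calls through a libc-style wrapper would have to
// translate the -1/errno convention back into a raw kernel-style result:
//   intptr_t MyCloseHandler(const struct arch_seccomp_data& args, void*) {
//     int rc = close(static_cast<int>(args.args[0]));
//     return rc < 0 ? -errno : rc;
//   }
// ForwardSyscall() already returns results in that form, which is what the
// assertions above rely on.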
716
717 bool NoOpCallback() { return true; }
718
719 // Test a trap handler that uses a broker process to handle open().
720
721 class InitializedOpenBroker {
722 public:
723 InitializedOpenBroker() : initialized_(false) {
724 std::vector<std::string> allowed_files;
725 allowed_files.push_back("/proc/allowed");
726 allowed_files.push_back("/proc/cpuinfo");
727
728 broker_process_.reset(
729 new BrokerProcess(EPERM, allowed_files, std::vector<std::string>()));
730 BPF_ASSERT(broker_process() != NULL);
731 BPF_ASSERT(broker_process_->Init(base::Bind(&NoOpCallback)));
732
733 initialized_ = true;
734 }
735 bool initialized() { return initialized_; }
736 class BrokerProcess* broker_process() { return broker_process_.get(); }
737
738 private:
739 bool initialized_;
740 scoped_ptr<class BrokerProcess> broker_process_;
741 DISALLOW_COPY_AND_ASSIGN(InitializedOpenBroker);
742 };
743
744 intptr_t BrokerOpenTrapHandler(const struct arch_seccomp_data& args,
745 void* aux) {
746 BPF_ASSERT(aux);
747 BrokerProcess* broker_process = static_cast<BrokerProcess*>(aux);
748 switch (args.nr) {
749 case __NR_faccessat: // access is a wrapper of faccessat in android
750 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
751 return broker_process->Access(reinterpret_cast<const char*>(args.args[1]),
752 static_cast<int>(args.args[2]));
753 #if defined(__NR_access)
754 case __NR_access:
755 return broker_process->Access(reinterpret_cast<const char*>(args.args[0]),
756 static_cast<int>(args.args[1]));
757 #endif
758 #if defined(__NR_open)
759 case __NR_open:
760 return broker_process->Open(reinterpret_cast<const char*>(args.args[0]),
761 static_cast<int>(args.args[1]));
762 #endif
763 case __NR_openat:
764 // We only call open() so if we arrive here, it's because glibc uses
765 // the openat() system call.
766 BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
767 return broker_process->Open(reinterpret_cast<const char*>(args.args[1]),
768 static_cast<int>(args.args[2]));
769 default:
770 BPF_ASSERT(false);
771 return -ENOSYS;
772 }
773 }
774
775 ErrorCode DenyOpenPolicy(SandboxBPF* sandbox,
776 int sysno,
777 InitializedOpenBroker* iob) {
778 if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
779 return ErrorCode(ENOSYS);
780 }
781
782 switch (sysno) {
783 case __NR_faccessat:
784 #if defined(__NR_access)
785 case __NR_access:
786 #endif
787 #if defined(__NR_open)
788 case __NR_open:
789 #endif
790 case __NR_openat:
791 // We get an InitializedOpenBroker class, but our trap handler wants
792 // the BrokerProcess object.
793 return ErrorCode(
794 sandbox->Trap(BrokerOpenTrapHandler, iob->broker_process()));
795 default:
796 return ErrorCode(ErrorCode::ERR_ALLOWED);
797 }
798 }
799
800 // We use an InitializedOpenBroker class, so that we can run unsandboxed
801 // code in its constructor, which is the only way to do so in a BPF_TEST.
802 BPF_TEST(SandboxBPF,
803 UseOpenBroker,
804 DenyOpenPolicy,
805 InitializedOpenBroker /* (*BPF_AUX) */) {
806 BPF_ASSERT(BPF_AUX->initialized());
807 BrokerProcess* broker_process = BPF_AUX->broker_process();
808 BPF_ASSERT(broker_process != NULL);
809
810 // First, use the broker "manually"
811 BPF_ASSERT(broker_process->Open("/proc/denied", O_RDONLY) == -EPERM);
812 BPF_ASSERT(broker_process->Access("/proc/denied", R_OK) == -EPERM);
813 BPF_ASSERT(broker_process->Open("/proc/allowed", O_RDONLY) == -ENOENT);
814 BPF_ASSERT(broker_process->Access("/proc/allowed", R_OK) == -ENOENT);
815
816 // Now use glibc's open() as an external library would.
817 BPF_ASSERT(open("/proc/denied", O_RDONLY) == -1);
818 BPF_ASSERT(errno == EPERM);
819
820 BPF_ASSERT(open("/proc/allowed", O_RDONLY) == -1);
821 BPF_ASSERT(errno == ENOENT);
822
823 // Also test glibc's openat(); some versions of libc use it transparently
824 // instead of open().
825 BPF_ASSERT(openat(AT_FDCWD, "/proc/denied", O_RDONLY) == -1);
826 BPF_ASSERT(errno == EPERM);
827
828 BPF_ASSERT(openat(AT_FDCWD, "/proc/allowed", O_RDONLY) == -1);
829 BPF_ASSERT(errno == ENOENT);
830
831 // And test glibc's access().
832 BPF_ASSERT(access("/proc/denied", R_OK) == -1);
833 BPF_ASSERT(errno == EPERM);
834
835 BPF_ASSERT(access("/proc/allowed", R_OK) == -1);
836 BPF_ASSERT(errno == ENOENT);
837
838 // This is also whitelisted and does exist.
839 int cpu_info_access = access("/proc/cpuinfo", R_OK);
840 BPF_ASSERT(cpu_info_access == 0);
841 int cpu_info_fd = open("/proc/cpuinfo", O_RDONLY);
842 BPF_ASSERT(cpu_info_fd >= 0);
843 char buf[1024];
844 BPF_ASSERT(read(cpu_info_fd, buf, sizeof(buf)) > 0);
845 }
846
847 // Simple test demonstrating how to use SandboxBPF::Cond()
848
849 class SimpleCondTestPolicy : public SandboxBPFPolicy {
850 public:
851 SimpleCondTestPolicy() {}
852 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
853 int sysno) const OVERRIDE;
854
855 private:
856 DISALLOW_COPY_AND_ASSIGN(SimpleCondTestPolicy);
857 };
858
859 ErrorCode SimpleCondTestPolicy::EvaluateSyscall(SandboxBPF* sandbox,
860 int sysno) const {
861 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
862
863 // We deliberately return unusual errno values upon failure, so that we
864 // can uniquely test for these values. In a "real" policy, you would want
865 // to return more traditional values.
866 int flags_argument_position = -1;
867 switch (sysno) {
868 #if defined(__NR_open)
869 case __NR_open:
870 flags_argument_position = 1;
871 #endif
872 case __NR_openat: // open can be a wrapper for openat(2).
873 if (sysno == __NR_openat)
874 flags_argument_position = 2;
875
876 // Allow opening files for reading, but don't allow writing.
877 COMPILE_ASSERT(O_RDONLY == 0, O_RDONLY_must_be_all_zero_bits);
878 return sandbox->Cond(flags_argument_position,
879 ErrorCode::TP_32BIT,
880 ErrorCode::OP_HAS_ANY_BITS,
881 O_ACCMODE /* 0x3 */,
882 ErrorCode(EROFS),
883 ErrorCode(ErrorCode::ERR_ALLOWED));
884 case __NR_prctl:
885 // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but
886 // disallow everything else.
887 return sandbox->Cond(0,
888 ErrorCode::TP_32BIT,
889 ErrorCode::OP_EQUAL,
890 PR_SET_DUMPABLE,
891 ErrorCode(ErrorCode::ERR_ALLOWED),
892 sandbox->Cond(0,
893 ErrorCode::TP_32BIT,
894 ErrorCode::OP_EQUAL,
895 PR_GET_DUMPABLE,
896 ErrorCode(ErrorCode::ERR_ALLOWED),
897 ErrorCode(ENOMEM)));
898 default:
899 return ErrorCode(ErrorCode::ERR_ALLOWED);
900 }
901 }
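// Spelling out the open()/openat() condition above: the OP_HAS_ANY_BITS test
// against O_ACCMODE (0x3) matches nothing for O_RDONLY (0), so read-only
// opens fall through to ERR_ALLOWED, while O_WRONLY (1) and O_RDWR (2) both
// have bits set in O_ACCMODE and are rejected with EROFS.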
902
903 BPF_TEST_C(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) {
904 int fd;
905 BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1);
906 BPF_ASSERT(errno == EROFS);
907 BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0);
908 close(fd);
909
910 int ret;
911 BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0);
912 BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0);
913 BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1);
914 BPF_ASSERT(errno == ENOMEM);
915 }
916
917 // This test exercises the SandboxBPF::Cond() method by building a complex
918 // tree of conditional equality operations. It then makes system calls and
919 // verifies that they return the values that we expected from our BPF
920 // program.
921 class EqualityStressTest {
922 public:
923 EqualityStressTest() {
924 // We want a deterministic test
925 srand(0);
926
927 // Iterates over system call numbers and builds a random tree of
928 // equality tests.
929 // We are actually constructing a graph of ArgValue objects. This
930 // graph will later be used to a) compute our sandbox policy, and
931 // b) drive the code that verifies the output from the BPF program.
932 COMPILE_ASSERT(
933 kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10),
934 num_test_cases_must_be_significantly_smaller_than_num_system_calls);
935 for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) {
936 if (IsReservedSyscall(sysno)) {
937 // Skip reserved system calls. This ensures that our test
938 // framework isn't impacted by the fact that we are overriding
939 // a lot of different system calls.
940 ++end;
941 arg_values_.push_back(NULL);
942 } else {
943 arg_values_.push_back(
944 RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs));
945 }
946 }
947 }
948
949 ~EqualityStressTest() {
950 for (std::vector<ArgValue*>::iterator iter = arg_values_.begin();
951 iter != arg_values_.end();
952 ++iter) {
953 DeleteArgValue(*iter);
954 }
955 }
956
957 ErrorCode Policy(SandboxBPF* sandbox, int sysno) {
958 if (!SandboxBPF::IsValidSyscallNumber(sysno)) {
959 // FIXME: we should really not have to do that in a trivial policy
960 return ErrorCode(ENOSYS);
961 } else if (sysno < 0 || sysno >= (int)arg_values_.size() ||
962 IsReservedSyscall(sysno)) {
963 // We only return ErrorCode values for the system calls that
964 // are part of our test data. Every other system call remains
965 // allowed.
966 return ErrorCode(ErrorCode::ERR_ALLOWED);
967 } else {
968 // ToErrorCode() turns an ArgValue object into an ErrorCode that is
969 // suitable for use by a sandbox policy.
970 return ToErrorCode(sandbox, arg_values_[sysno]);
971 }
972 }
973
974 void VerifyFilter() {
975 // Iterate over all system calls. Skip the system calls that have
976 // previously been determined as being reserved.
977 for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) {
978 if (!arg_values_[sysno]) {
979 // Skip reserved system calls.
980 continue;
981 }
982 // Verify that system calls return the values that we expect them to
983 // return. This involves passing different combinations of system call
984 // parameters in order to exercise all possible code paths through the
985 // BPF filter program.
986 // We arbitrarily start by setting all six system call arguments to
987 // zero. And we then recursively traverse our tree of ArgValues to
988 // determine the necessary combinations of parameters.
989 intptr_t args[6] = {};
990 Verify(sysno, args, *arg_values_[sysno]);
991 }
992 }
993
994 private:
995 struct ArgValue {
996 int argno; // Argument number to inspect.
997 int size; // Number of test cases (must be > 0).
998 struct Tests {
999 uint32_t k_value; // Value to compare syscall arg against.
1000 int err; // If non-zero, errno value to return.
1001 struct ArgValue* arg_value; // Otherwise, more args need inspecting.
1002 }* tests;
1003 int err; // If none of the tests passed, this is what
1004 struct ArgValue* arg_value; // we'll return (this is the "else" branch).
1005 };
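// As an illustration, a single ArgValue node might encode: "if (args[2] ==
// 0x1234) return errno 17; else if (args[2] == 0x99) inspect args[5]
// further; otherwise return errno 3", with the nested inspection of args[5]
// hanging off tests[1].arg_value.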
1006
1007 bool IsReservedSyscall(int sysno) {
1008 // There are a handful of system calls that we should never use in our
1009 // test cases. These system calls are needed to allow the test framework
1010 // to run properly.
1011 // If we wanted to write fully generic code, there are more system calls
1012 // that could be listed here, and it is quite difficult to come up with a
1013 // truly comprehensive list. After all, we are deliberately making system
1014 // calls unavailable. In practice, we have a pretty good idea of the system
1015 // calls that will be made by this particular test. So, this small list is
1016 // sufficient. But if anybody copy'n'pasted this code for other uses, they
1017 // would have to review the list.
1018 return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit ||
1019 sysno == __NR_exit_group || sysno == __NR_restart_syscall;
1020 }
1021
1022 ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) {
1023 // Create a new ArgValue and fill it with random data. We use a bit mask
1024 // to keep track of the system call parameters that have previously been
1025 // set; this ensures that we won't accidentally define a contradictory
1026 // set of equality tests.
1027 struct ArgValue* arg_value = new ArgValue();
1028 args_mask |= 1 << argno;
1029 arg_value->argno = argno;
1030
1031 // Apply some restrictions on just how complex our tests can be.
1032 // Otherwise, we end up with a BPF program that is too complicated for
1033 // the kernel to load.
1034 int fan_out = kMaxFanOut;
1035 if (remaining_args > 3) {
1036 fan_out = 1;
1037 } else if (remaining_args > 2) {
1038 fan_out = 2;
1039 }
1040
1041 // Create a couple of different test cases with randomized values that
1042 // we want to use when comparing system call parameter number "argno".
1043 arg_value->size = rand() % fan_out + 1;
1044 arg_value->tests = new ArgValue::Tests[arg_value->size];
1045
1046 uint32_t k_value = rand();
1047 for (int n = 0; n < arg_value->size; ++n) {
1048 // Ensure that we have unique values
1049 k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1;
1050
1051 // There are two possible types of nodes. Either this is a leaf node;
1052 // in that case, we have completed all the equality tests that we
1053 // wanted to perform, and we can now compute a random "errno" value that
1054 // we should return. Or this is part of a more complex boolean
1055 // expression; in that case, we have to recursively add tests for some
1056 // of the system call parameters that we have not yet included in our
1057 // tests.
1058 arg_value->tests[n].k_value = k_value;
1059 if (!remaining_args || (rand() & 1)) {
1060 arg_value->tests[n].err = (rand() % 1000) + 1;
1061 arg_value->tests[n].arg_value = NULL;
1062 } else {
1063 arg_value->tests[n].err = 0;
1064 arg_value->tests[n].arg_value =
1065 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
1066 }
1067 }
1068 // Finally, we have to define what we should return if none of the
1069 // previous equality tests pass. Again, we can either deal with a leaf
1070 // node, or we can randomly add another couple of tests.
1071 if (!remaining_args || (rand() & 1)) {
1072 arg_value->err = (rand() % 1000) + 1;
1073 arg_value->arg_value = NULL;
1074 } else {
1075 arg_value->err = 0;
1076 arg_value->arg_value =
1077 RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
1078 }
1079 // We have now built a new (sub-)tree of ArgValues defining a set of
1080 // boolean expressions for testing random system call arguments against
1081 // random values. Return this tree to our caller.
1082 return arg_value;
1083 }
1084
1085 int RandomArg(int args_mask) {
1086 // Compute a random system call parameter number.
1087 int argno = rand() % kMaxArgs;
1088
1089 // Make sure that this same parameter number has not previously been
1090 // used. Otherwise, we could end up with a test that is impossible to
1091 // satisfy (e.g. args[0] == 1 && args[0] == 2).
1092 while (args_mask & (1 << argno)) {
1093 argno = (argno + 1) % kMaxArgs;
1094 }
1095 return argno;
1096 }
1097
1098 void DeleteArgValue(ArgValue* arg_value) {
1099 // Delete an ArgValue and all of its child nodes. This requires
1100 // recursively descending into the tree.
1101 if (arg_value) {
1102 if (arg_value->size) {
1103 for (int n = 0; n < arg_value->size; ++n) {
1104 if (!arg_value->tests[n].err) {
1105 DeleteArgValue(arg_value->tests[n].arg_value);
1106 }
1107 }
1108 delete[] arg_value->tests;
1109 }
1110 if (!arg_value->err) {
1111 DeleteArgValue(arg_value->arg_value);
1112 }
1113 delete arg_value;
1114 }
1115 }
1116
1117 ErrorCode ToErrorCode(SandboxBPF* sandbox, ArgValue* arg_value) {
1118 // Compute the ErrorCode that should be returned, if none of our
1119 // tests succeed (i.e. the system call parameter doesn't match any
1120 // of the values in arg_value->tests[].k_value).
1121 ErrorCode err;
1122 if (arg_value->err) {
1123 // If this was a leaf node, return the errno value that we expect to
1124 // return from the BPF filter program.
1125 err = ErrorCode(arg_value->err);
1126 } else {
1127 // If this wasn't a leaf node yet, recursively descend into the rest
1128 // of the tree. This will end up adding a few more SandboxBPF::Cond()
1129 // tests to our ErrorCode.
1130 err = ToErrorCode(sandbox, arg_value->arg_value);
1131 }
1132
1133 // Now, iterate over all the test cases that we want to compare against.
1134 // This builds a chain of SandboxBPF::Cond() tests
1135 // (aka "if ... elif ... elif ... elif ... fi")
1136 for (int n = arg_value->size; n-- > 0;) {
1137 ErrorCode matched;
1138 // Again, we distinguish between leaf nodes and subtrees.
1139 if (arg_value->tests[n].err) {
1140 matched = ErrorCode(arg_value->tests[n].err);
1141 } else {
1142 matched = ToErrorCode(sandbox, arg_value->tests[n].arg_value);
1143 }
1144 // For now, all of our tests are limited to 32bit.
1145 // We have separate tests that check the behavior of 32bit vs. 64bit
1146 // conditional expressions.
1147 err = sandbox->Cond(arg_value->argno,
1148 ErrorCode::TP_32BIT,
1149 ErrorCode::OP_EQUAL,
1150 arg_value->tests[n].k_value,
1151 matched,
1152 err);
1153 }
1154 return err;
1155 }
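// Conceptually, for a node with two tests this produces something like
//   Cond(argno, OP_EQUAL, k_value[0], matched[0],
//        Cond(argno, OP_EQUAL, k_value[1], matched[1], else_branch))
// i.e. tests[0] is checked first, and the node's own err / arg_value serves
// as the final "else" branch.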
1156
1157 void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) {
1158 uint32_t mismatched = 0;
1159 // Iterate over all the k_values in arg_value.tests[] and verify that
1160 // we see the expected return values from system calls, when we pass
1161 // the k_value as a parameter in a system call.
1162 for (int n = arg_value.size; n-- > 0;) {
1163 mismatched += arg_value.tests[n].k_value;
1164 args[arg_value.argno] = arg_value.tests[n].k_value;
1165 if (arg_value.tests[n].err) {
1166 VerifyErrno(sysno, args, arg_value.tests[n].err);
1167 } else {
1168 Verify(sysno, args, *arg_value.tests[n].arg_value);
1169 }
1170 }
1171 // Find a k_value that doesn't match any of the k_values in
1172 // arg_value.tests[]. In most cases, the current value of "mismatched"
1173 // would fit this requirement. But on the off-chance that it happens
1174 // to collide, we double-check.
1175 try_again:
1176 for (int n = arg_value.size; n-- > 0;) {
1177 if (mismatched == arg_value.tests[n].k_value) {
1178 ++mismatched;
1179 goto try_again;
1180 }
1181 }
1182 // Now verify that we see the expected return value from system calls,
1183 // if we pass a value that doesn't match any of the conditions (i.e. this
1184 // is testing the "else" clause of the conditions).
1185 args[arg_value.argno] = mismatched;
1186 if (arg_value.err) {
1187 VerifyErrno(sysno, args, arg_value.err);
1188 } else {
1189 Verify(sysno, args, *arg_value.arg_value);
1190 }
1191 // Reset args[arg_value.argno]. This is not technically needed, but it
1192 // makes it easier to reason about the correctness of our tests.
1193 args[arg_value.argno] = 0;
1194 }
1195
1196 void VerifyErrno(int sysno, intptr_t* args, int err) {
1197 // We installed BPF filters that return different errno values
1198 // based on the system call number and the parameters that we decided
1199 // to pass in. Verify that this condition holds true.
1200 BPF_ASSERT(
1201 Syscall::Call(
1202 sysno, args[0], args[1], args[2], args[3], args[4], args[5]) ==
1203 -err);
1204 }
1205
1206 // Vector of ArgValue trees. These trees define all the possible boolean
1207 // expressions that we want to turn into a BPF filter program.
1208 std::vector<ArgValue*> arg_values_;
1209
1210 // Don't increase these values. We are pushing the limits of the maximum
1211 // BPF program size that the kernel will allow us to load. If the values are
1212 // increased too much, the test will start failing.
1213 #if defined(__aarch64__)
1214 static const int kNumTestCases = 30;
1215 #else
1216 static const int kNumTestCases = 40;
1217 #endif
1218 static const int kMaxFanOut = 3;
1219 static const int kMaxArgs = 6;
1220 };
1221
1222 ErrorCode EqualityStressTestPolicy(SandboxBPF* sandbox,
1223 int sysno,
1224 EqualityStressTest* aux) {
1225 DCHECK(aux);
1226 return aux->Policy(sandbox, sysno);
1227 }
1228
1229 BPF_TEST(SandboxBPF,
1230 EqualityTests,
1231 EqualityStressTestPolicy,
1232 EqualityStressTest /* (*BPF_AUX) */) {
1233 BPF_AUX->VerifyFilter();
1234 }
1235
1236 class EqualityArgumentWidthPolicy : public SandboxBPFPolicy {
1237 public:
1238 EqualityArgumentWidthPolicy() {}
1239 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
1240 int sysno) const OVERRIDE;
1241
1242 private:
1243 DISALLOW_COPY_AND_ASSIGN(EqualityArgumentWidthPolicy);
1244 };
1245
1246 ErrorCode EqualityArgumentWidthPolicy::EvaluateSyscall(SandboxBPF* sandbox,
1247 int sysno) const {
1248 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1249 if (sysno == __NR_uname) {
1250 return sandbox->Cond(
1251 0,
1252 ErrorCode::TP_32BIT,
1253 ErrorCode::OP_EQUAL,
1254 0,
1255 sandbox->Cond(1,
1256 ErrorCode::TP_32BIT,
1257 ErrorCode::OP_EQUAL,
1258 0x55555555,
1259 ErrorCode(1),
1260 ErrorCode(2)),
1261 // The BPF compiler and the BPF interpreter in the kernel are
1262 // (mostly) agnostic of the host platform's word size. The compiler
1263 // will happily generate code that tests a 64bit value, and the
1264 // interpreter will happily perform this test.
1265 // But unless there is a kernel bug, there is no way for us to pass
1266 // in a 64bit quantity on a 32bit platform. The upper 32bits should
1267 // always be zero. So, this test should always evaluate as false on
1268 // 32bit systems.
1269 sandbox->Cond(1,
1270 ErrorCode::TP_64BIT,
1271 ErrorCode::OP_EQUAL,
1272 0x55555555AAAAAAAAULL,
1273 ErrorCode(1),
1274 ErrorCode(2)));
1275 }
1276 return ErrorCode(ErrorCode::ERR_ALLOWED);
1277 }
1278
1279 BPF_TEST_C(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) {
1280 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x55555555) == -1);
1281 BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0xAAAAAAAA) == -2);
1282 #if __SIZEOF_POINTER__ > 4
1283 // On 32bit machines, there is no way to pass a 64bit argument through the
1284 // syscall interface. So, we have to skip the part of the test that requires
1285 // 64bit arguments.
1286 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1);
1287 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555500000000ULL) == -2);
1288 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555511111111ULL) == -2);
1289 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2);
1290 #else
1291 BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555) == -2);
1292 #endif
1293 }
1294
1295 #if __SIZEOF_POINTER__ > 4
1296 // On 32bit machines, there is no way to pass a 64bit argument through the
1297 // syscall interface. So, we have to skip the part of the test that requires
1298 // 64bit arguments.
1299 BPF_DEATH_TEST_C(SandboxBPF,
1300 EqualityArgumentUnallowed64bit,
1301 DEATH_MESSAGE("Unexpected 64bit argument detected"),
1302 EqualityArgumentWidthPolicy) {
1303 Syscall::Call(__NR_uname, 0, 0x5555555555555555ULL);
1304 }
1305 #endif
1306
1307 class EqualityWithNegativeArgumentsPolicy : public SandboxBPFPolicy {
1308 public:
1309 EqualityWithNegativeArgumentsPolicy() {}
1310 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
1311 int sysno) const OVERRIDE {
1312 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1313 if (sysno == __NR_uname) {
1314 return sandbox->Cond(0,
1315 ErrorCode::TP_32BIT,
1316 ErrorCode::OP_EQUAL,
1317 0xFFFFFFFF,
1318 ErrorCode(1),
1319 ErrorCode(2));
1320 }
1321 return ErrorCode(ErrorCode::ERR_ALLOWED);
1322 }
1323
1324 private:
1325 DISALLOW_COPY_AND_ASSIGN(EqualityWithNegativeArgumentsPolicy);
1326 };
1327
1328 BPF_TEST_C(SandboxBPF,
1329 EqualityWithNegativeArguments,
1330 EqualityWithNegativeArgumentsPolicy) {
1331 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF) == -1);
1332 BPF_ASSERT(Syscall::Call(__NR_uname, -1) == -1);
1333 BPF_ASSERT(Syscall::Call(__NR_uname, -1LL) == -1);
1334 }
1335
1336 #if __SIZEOF_POINTER__ > 4
1337 BPF_DEATH_TEST_C(SandboxBPF,
1338 EqualityWithNegative64bitArguments,
1339 DEATH_MESSAGE("Unexpected 64bit argument detected"),
1340 EqualityWithNegativeArgumentsPolicy) {
1341 // When expecting a 32bit system call argument, we look at the MSB of the
1342 // 64bit value and allow both "0" and "-1". But the latter is allowed only
1343 // if the LSB was negative. So, this death test should error out.
1344 BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF00000000LL) == -1);
1345 }
1346 #endif
1347 class AllBitTestPolicy : public SandboxBPFPolicy {
1348 public:
1349 AllBitTestPolicy() {}
1350 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
1351 int sysno) const OVERRIDE;
1352
1353 private:
1354 DISALLOW_COPY_AND_ASSIGN(AllBitTestPolicy);
1355 };
1356
1357 ErrorCode AllBitTestPolicy::EvaluateSyscall(SandboxBPF* sandbox,
1358 int sysno) const {
1359 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1360 // Test the OP_HAS_ALL_BITS conditional test operator with a couple of
1361 // different bitmasks. We try to find bitmasks that could conceivably
1362 // touch corner cases.
1363 // For all of these tests, we override uname(). We can make do with
1364 // a single system call number, as we use the first system call argument to
1365 // select the different bit masks that we want to test against.
1366 if (sysno == __NR_uname) {
1367 return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 0,
1368 sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
1369 0x0,
1370 ErrorCode(1), ErrorCode(0)),
1371
1372 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 1,
1373 sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
1374 0x1,
1375 ErrorCode(1), ErrorCode(0)),
1376
1377 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 2,
1378 sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
1379 0x3,
1380 ErrorCode(1), ErrorCode(0)),
1381
1382 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 3,
1383 sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
1384 0x80000000,
1385 ErrorCode(1), ErrorCode(0)),
1386 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 4,
1387 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
1388 0x0,
1389 ErrorCode(1), ErrorCode(0)),
1390
1391 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 5,
1392 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
1393 0x1,
1394 ErrorCode(1), ErrorCode(0)),
1395
1396 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 6,
1397 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
1398 0x3,
1399 ErrorCode(1), ErrorCode(0)),
1400
1401 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 7,
1402 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
1403 0x80000000,
1404 ErrorCode(1), ErrorCode(0)),
1405
1406 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 8,
1407 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
1408 0x100000000ULL,
1409 ErrorCode(1), ErrorCode(0)),
1410
1411 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 9,
1412 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
1413 0x300000000ULL,
1414 ErrorCode(1), ErrorCode(0)),
1415
1416 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 10,
1417 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ALL_BITS,
1418 0x100000001ULL,
1419 ErrorCode(1), ErrorCode(0)),
1420
1421 sandbox->Kill("Invalid test case number"))))))))))));
1422 }
1423 return ErrorCode(ErrorCode::ERR_ALLOWED);
1424 }
1425
1426 // Define a macro that performs tests using our test policy.
1427 // NOTE: Not all of the arguments in this macro are actually used!
1428 // They are here just to serve as documentation of the conditions
1429 // implemented in the test policy.
1430 // Most notably, "op" and "mask" are unused by the macro. If you want
1431 // to make changes to these values, you will have to edit the
1432 // test policy instead.
1433 #define BITMASK_TEST(testcase, arg, op, mask, expected_value) \
1434 BPF_ASSERT(Syscall::Call(__NR_uname, (testcase), (arg)) == (expected_value))
1435
1436 // Our uname() system call returns ErrorCode(1) for success and
1437 // ErrorCode(0) for failure. Syscall::Call() turns this into a
1438 // return value of -1 or 0.
1439 #define EXPECT_FAILURE 0
1440 #define EXPECT_SUCCESS -1
1441
1442 // A couple of our tests behave differently on 32bit and 64bit systems, as
1443 // there is no way for a 32bit system call to pass in a 64bit system call
1444 // argument "arg".
1445 // We expect these tests to succeed on 64bit systems, but to fail on 32bit
1446 // systems.
1447 #define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE)
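// Worked example: BITMASK_TEST(1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS) expands
// to Syscall::Call(__NR_uname, 1, 3). Test case 1 in AllBitTestPolicy checks
// OP_HAS_ALL_BITS against 0x1; (3 & 0x1) == 0x1, so the filter returns
// ErrorCode(1), which Syscall::Call() reports as -1, matching EXPECT_SUCCESS.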
1448 BPF_TEST_C(SandboxBPF, AllBitTests, AllBitTestPolicy) {
1449 // 32bit test: all of 0x0 (should always be true)
1450 BITMASK_TEST( 0, 0, ALLBITS32, 0, EXPECT_SUCCESS);
1451 BITMASK_TEST( 0, 1, ALLBITS32, 0, EXPECT_SUCCESS);
1452 BITMASK_TEST( 0, 3, ALLBITS32, 0, EXPECT_SUCCESS);
1453 BITMASK_TEST( 0, 0xFFFFFFFFU, ALLBITS32, 0, EXPECT_SUCCESS);
1454 BITMASK_TEST( 0, -1LL, ALLBITS32, 0, EXPECT_SUCCESS);
1455
1456 // 32bit test: all of 0x1
1457 BITMASK_TEST( 1, 0, ALLBITS32, 0x1, EXPECT_FAILURE);
1458 BITMASK_TEST( 1, 1, ALLBITS32, 0x1, EXPECT_SUCCESS);
1459 BITMASK_TEST( 1, 2, ALLBITS32, 0x1, EXPECT_FAILURE);
1460 BITMASK_TEST( 1, 3, ALLBITS32, 0x1, EXPECT_SUCCESS);
1461
1462 // 32bit test: all of 0x3
1463 BITMASK_TEST( 2, 0, ALLBITS32, 0x3, EXPECT_FAILURE);
1464 BITMASK_TEST( 2, 1, ALLBITS32, 0x3, EXPECT_FAILURE);
1465 BITMASK_TEST( 2, 2, ALLBITS32, 0x3, EXPECT_FAILURE);
1466 BITMASK_TEST( 2, 3, ALLBITS32, 0x3, EXPECT_SUCCESS);
1467 BITMASK_TEST( 2, 7, ALLBITS32, 0x3, EXPECT_SUCCESS);
1468
1469 // 32bit test: all of 0x80000000
1470 BITMASK_TEST( 3, 0, ALLBITS32, 0x80000000, EXPECT_FAILURE);
1471 BITMASK_TEST( 3, 0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE);
1472 BITMASK_TEST( 3, 0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
1473 BITMASK_TEST( 3, 0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
1474 BITMASK_TEST( 3, -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
1475
1476 // 64bit test: all of 0x0 (should always be true)
1477 BITMASK_TEST( 4, 0, ALLBITS64, 0, EXPECT_SUCCESS);
1478 BITMASK_TEST( 4, 1, ALLBITS64, 0, EXPECT_SUCCESS);
1479 BITMASK_TEST( 4, 3, ALLBITS64, 0, EXPECT_SUCCESS);
1480 BITMASK_TEST( 4, 0xFFFFFFFFU, ALLBITS64, 0, EXPECT_SUCCESS);
1481 BITMASK_TEST( 4, 0x100000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
1482 BITMASK_TEST( 4, 0x300000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
1483 BITMASK_TEST( 4,0x8000000000000000LL, ALLBITS64, 0, EXPECT_SUCCESS);
1484 BITMASK_TEST( 4, -1LL, ALLBITS64, 0, EXPECT_SUCCESS);
1485
1486 // 64bit test: all of 0x1
1487 BITMASK_TEST( 5, 0, ALLBITS64, 1, EXPECT_FAILURE);
1488 BITMASK_TEST( 5, 1, ALLBITS64, 1, EXPECT_SUCCESS);
1489 BITMASK_TEST( 5, 2, ALLBITS64, 1, EXPECT_FAILURE);
1490 BITMASK_TEST( 5, 3, ALLBITS64, 1, EXPECT_SUCCESS);
1491 BITMASK_TEST( 5, 0x100000000LL, ALLBITS64, 1, EXPECT_FAILURE);
1492 BITMASK_TEST( 5, 0x100000001LL, ALLBITS64, 1, EXPECT_SUCCESS);
1493 BITMASK_TEST( 5, 0x100000002LL, ALLBITS64, 1, EXPECT_FAILURE);
1494 BITMASK_TEST( 5, 0x100000003LL, ALLBITS64, 1, EXPECT_SUCCESS);
1495
1496 // 64bit test: all of 0x3
1497 BITMASK_TEST( 6, 0, ALLBITS64, 3, EXPECT_FAILURE);
1498 BITMASK_TEST( 6, 1, ALLBITS64, 3, EXPECT_FAILURE);
1499 BITMASK_TEST( 6, 2, ALLBITS64, 3, EXPECT_FAILURE);
1500 BITMASK_TEST( 6, 3, ALLBITS64, 3, EXPECT_SUCCESS);
1501 BITMASK_TEST( 6, 7, ALLBITS64, 3, EXPECT_SUCCESS);
1502 BITMASK_TEST( 6, 0x100000000LL, ALLBITS64, 3, EXPECT_FAILURE);
1503 BITMASK_TEST( 6, 0x100000001LL, ALLBITS64, 3, EXPECT_FAILURE);
1504 BITMASK_TEST( 6, 0x100000002LL, ALLBITS64, 3, EXPECT_FAILURE);
1505 BITMASK_TEST( 6, 0x100000003LL, ALLBITS64, 3, EXPECT_SUCCESS);
1506 BITMASK_TEST( 6, 0x100000007LL, ALLBITS64, 3, EXPECT_SUCCESS);
1507
1508 // 64bit test: all of 0x80000000
1509 BITMASK_TEST( 7, 0, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1510 BITMASK_TEST( 7, 0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1511 BITMASK_TEST( 7, 0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1512 BITMASK_TEST( 7, 0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1513 BITMASK_TEST( 7, -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1514 BITMASK_TEST( 7, 0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1515 BITMASK_TEST( 7, 0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
1516 BITMASK_TEST( 7, 0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1517 BITMASK_TEST( 7, 0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1518 BITMASK_TEST( 7, -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
1519
1520 // 64bit test: all of 0x100000000
1521 BITMASK_TEST( 8, 0x000000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1522 BITMASK_TEST( 8, 0x100000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1523 BITMASK_TEST( 8, 0x200000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1524 BITMASK_TEST( 8, 0x300000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1525 BITMASK_TEST( 8, 0x000000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1526 BITMASK_TEST( 8, 0x100000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1527 BITMASK_TEST( 8, 0x200000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
1528 BITMASK_TEST( 8, 0x300000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
1529
1530 // 64bit test: all of 0x300000000
1531 BITMASK_TEST( 9, 0x000000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1532 BITMASK_TEST( 9, 0x100000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1533 BITMASK_TEST( 9, 0x200000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1534 BITMASK_TEST( 9, 0x300000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1535 BITMASK_TEST( 9, 0x700000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1536 BITMASK_TEST( 9, 0x000000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1537 BITMASK_TEST( 9, 0x100000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1538 BITMASK_TEST( 9, 0x200000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
1539 BITMASK_TEST( 9, 0x300000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1540 BITMASK_TEST( 9, 0x700000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
1541
1542 // 64bit test: all of 0x100000001
1543 BITMASK_TEST(10, 0x000000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
1544 BITMASK_TEST(10, 0x000000001LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
1545 BITMASK_TEST(10, 0x100000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
1546 BITMASK_TEST(10, 0x100000001LL, ALLBITS64,0x100000001, EXPT64_SUCCESS);
1547 BITMASK_TEST(10, 0xFFFFFFFFU, ALLBITS64,0x100000001, EXPECT_FAILURE);
1548 BITMASK_TEST(10, -1L, ALLBITS64,0x100000001, EXPT64_SUCCESS);
1549 }
1550
1551 class AnyBitTestPolicy : public SandboxBPFPolicy {
1552 public:
1553 AnyBitTestPolicy() {}
1554 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
1555 int sysno) const OVERRIDE;
1556
1557 private:
1558 DISALLOW_COPY_AND_ASSIGN(AnyBitTestPolicy);
1559 };
1560
1561 ErrorCode AnyBitTestPolicy::EvaluateSyscall(SandboxBPF* sandbox,
1562 int sysno) const {
1563 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1564 // Test the OP_HAS_ANY_BITS conditional test operator with a couple of
1565 // different bitmasks. We try to find bitmasks that could conceivably
1566 // touch corner cases.
1567 // For all of these tests, we override the uname() system call. A single
1568 // system call number is enough, as we use the first system call argument to
1569 // select the different bit masks that we want to test against.
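  // Roughly speaking: each BITMASK_TEST in the test body presumably expands to
  // Syscall::Call(__NR_uname, <test case>, <value>) (by analogy with the
  // MASKEQ_TEST macro defined further down), so the first argument selects one
  // of the Cond() branches below and the second is the value checked against
  // the bitmask.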
1570 if (sysno == __NR_uname) {
1571 return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 0,
1572 sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
1573 0x0,
1574 ErrorCode(1), ErrorCode(0)),
1575
1576 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 1,
1577 sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
1578 0x1,
1579 ErrorCode(1), ErrorCode(0)),
1580
1581 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 2,
1582 sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
1583 0x3,
1584 ErrorCode(1), ErrorCode(0)),
1585
1586 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 3,
1587 sandbox->Cond(1, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
1588 0x80000000,
1589 ErrorCode(1), ErrorCode(0)),
1590
1591 // All the following tests don't really make much sense on 32bit
1592 // systems. They will always evaluate as false.
1593 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 4,
1594 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
1595 0x0,
1596 ErrorCode(1), ErrorCode(0)),
1597
1598 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 5,
1599 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
1600 0x1,
1601 ErrorCode(1), ErrorCode(0)),
1602
1603 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 6,
1604 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
1605 0x3,
1606 ErrorCode(1), ErrorCode(0)),
1607
1608 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 7,
1609 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
1610 0x80000000,
1611 ErrorCode(1), ErrorCode(0)),
1612
1613 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 8,
1614 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
1615 0x100000000ULL,
1616 ErrorCode(1), ErrorCode(0)),
1617
1618 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 9,
1619 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
1620 0x300000000ULL,
1621 ErrorCode(1), ErrorCode(0)),
1622
1623 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL, 10,
1624 sandbox->Cond(1, ErrorCode::TP_64BIT, ErrorCode::OP_HAS_ANY_BITS,
1625 0x100000001ULL,
1626 ErrorCode(1), ErrorCode(0)),
1627
1628 sandbox->Kill("Invalid test case number"))))))))))));
1629 }
1630 return ErrorCode(ErrorCode::ERR_ALLOWED);
1631 }
1632
1633 BPF_TEST_C(SandboxBPF, AnyBitTests, AnyBitTestPolicy) {
1634 // 32bit test: any of 0x0 (should always be false)
1635 BITMASK_TEST( 0, 0, ANYBITS32, 0x0, EXPECT_FAILURE);
1636 BITMASK_TEST( 0, 1, ANYBITS32, 0x0, EXPECT_FAILURE);
1637 BITMASK_TEST( 0, 3, ANYBITS32, 0x0, EXPECT_FAILURE);
1638 BITMASK_TEST( 0, 0xFFFFFFFFU, ANYBITS32, 0x0, EXPECT_FAILURE);
1639 BITMASK_TEST( 0, -1LL, ANYBITS32, 0x0, EXPECT_FAILURE);
1640
1641 // 32bit test: any of 0x1
1642 BITMASK_TEST( 1, 0, ANYBITS32, 0x1, EXPECT_FAILURE);
1643 BITMASK_TEST( 1, 1, ANYBITS32, 0x1, EXPECT_SUCCESS);
1644 BITMASK_TEST( 1, 2, ANYBITS32, 0x1, EXPECT_FAILURE);
1645 BITMASK_TEST( 1, 3, ANYBITS32, 0x1, EXPECT_SUCCESS);
1646
1647 // 32bit test: any of 0x3
1648 BITMASK_TEST( 2, 0, ANYBITS32, 0x3, EXPECT_FAILURE);
1649 BITMASK_TEST( 2, 1, ANYBITS32, 0x3, EXPECT_SUCCESS);
1650 BITMASK_TEST( 2, 2, ANYBITS32, 0x3, EXPECT_SUCCESS);
1651 BITMASK_TEST( 2, 3, ANYBITS32, 0x3, EXPECT_SUCCESS);
1652 BITMASK_TEST( 2, 7, ANYBITS32, 0x3, EXPECT_SUCCESS);
1653
1654 // 32bit test: any of 0x80000000
1655 BITMASK_TEST( 3, 0, ANYBITS32, 0x80000000, EXPECT_FAILURE);
1656 BITMASK_TEST( 3, 0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE);
1657 BITMASK_TEST( 3, 0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1658 BITMASK_TEST( 3, 0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1659 BITMASK_TEST( 3, -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
1660
1661 // 64bit test: any of 0x0 (should always be false)
1662 BITMASK_TEST( 4, 0, ANYBITS64, 0x0, EXPECT_FAILURE);
1663 BITMASK_TEST( 4, 1, ANYBITS64, 0x0, EXPECT_FAILURE);
1664 BITMASK_TEST( 4, 3, ANYBITS64, 0x0, EXPECT_FAILURE);
1665 BITMASK_TEST( 4, 0xFFFFFFFFU, ANYBITS64, 0x0, EXPECT_FAILURE);
1666 BITMASK_TEST( 4, 0x100000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1667 BITMASK_TEST( 4, 0x300000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1668 BITMASK_TEST( 4,0x8000000000000000LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1669 BITMASK_TEST( 4, -1LL, ANYBITS64, 0x0, EXPECT_FAILURE);
1670
1671 // 64bit test: any of 0x1
1672 BITMASK_TEST( 5, 0, ANYBITS64, 0x1, EXPECT_FAILURE);
1673 BITMASK_TEST( 5, 1, ANYBITS64, 0x1, EXPECT_SUCCESS);
1674 BITMASK_TEST( 5, 2, ANYBITS64, 0x1, EXPECT_FAILURE);
1675 BITMASK_TEST( 5, 3, ANYBITS64, 0x1, EXPECT_SUCCESS);
1676 BITMASK_TEST( 5, 0x100000001LL, ANYBITS64, 0x1, EXPECT_SUCCESS);
1677 BITMASK_TEST( 5, 0x100000000LL, ANYBITS64, 0x1, EXPECT_FAILURE);
1678 BITMASK_TEST( 5, 0x100000002LL, ANYBITS64, 0x1, EXPECT_FAILURE);
1679 BITMASK_TEST( 5, 0x100000003LL, ANYBITS64, 0x1, EXPECT_SUCCESS);
1680
1681 // 64bit test: any of 0x3
1682 BITMASK_TEST( 6, 0, ANYBITS64, 0x3, EXPECT_FAILURE);
1683 BITMASK_TEST( 6, 1, ANYBITS64, 0x3, EXPECT_SUCCESS);
1684 BITMASK_TEST( 6, 2, ANYBITS64, 0x3, EXPECT_SUCCESS);
1685 BITMASK_TEST( 6, 3, ANYBITS64, 0x3, EXPECT_SUCCESS);
1686 BITMASK_TEST( 6, 7, ANYBITS64, 0x3, EXPECT_SUCCESS);
1687 BITMASK_TEST( 6, 0x100000000LL, ANYBITS64, 0x3, EXPECT_FAILURE);
1688 BITMASK_TEST( 6, 0x100000001LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1689 BITMASK_TEST( 6, 0x100000002LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1690 BITMASK_TEST( 6, 0x100000003LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1691 BITMASK_TEST( 6, 0x100000007LL, ANYBITS64, 0x3, EXPECT_SUCCESS);
1692
1693 // 64bit test: any of 0x80000000
1694 BITMASK_TEST( 7, 0, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1695 BITMASK_TEST( 7, 0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1696 BITMASK_TEST( 7, 0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1697 BITMASK_TEST( 7, 0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1698 BITMASK_TEST( 7, -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1699 BITMASK_TEST( 7, 0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1700 BITMASK_TEST( 7, 0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
1701 BITMASK_TEST( 7, 0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1702 BITMASK_TEST( 7, 0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1703 BITMASK_TEST( 7, -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
1704
1705 // 64bit test: any of 0x100000000
1706 BITMASK_TEST( 8, 0x000000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1707 BITMASK_TEST( 8, 0x100000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1708 BITMASK_TEST( 8, 0x200000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1709 BITMASK_TEST( 8, 0x300000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1710 BITMASK_TEST( 8, 0x000000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1711 BITMASK_TEST( 8, 0x100000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1712 BITMASK_TEST( 8, 0x200000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
1713 BITMASK_TEST( 8, 0x300000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
1714
1715 // 64bit test: any of 0x300000000
1716 BITMASK_TEST( 9, 0x000000000LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
1717 BITMASK_TEST( 9, 0x100000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1718 BITMASK_TEST( 9, 0x200000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1719 BITMASK_TEST( 9, 0x300000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1720 BITMASK_TEST( 9, 0x700000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1721 BITMASK_TEST( 9, 0x000000001LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
1722 BITMASK_TEST( 9, 0x100000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1723 BITMASK_TEST( 9, 0x200000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1724 BITMASK_TEST( 9, 0x300000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1725 BITMASK_TEST( 9, 0x700000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
1726
1727 // 64bit test: any of 0x100000001
1728 BITMASK_TEST( 10, 0x000000000LL, ANYBITS64,0x100000001, EXPECT_FAILURE);
1729 BITMASK_TEST( 10, 0x000000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1730 BITMASK_TEST( 10, 0x100000000LL, ANYBITS64,0x100000001, EXPT64_SUCCESS);
1731 BITMASK_TEST( 10, 0x100000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1732 BITMASK_TEST( 10, 0xFFFFFFFFU, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1733 BITMASK_TEST( 10, -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS);
1734 }
1735
1736 class MaskedEqualTestPolicy : public SandboxBPFPolicy {
1737 public:
1738 MaskedEqualTestPolicy() {}
1739 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
1740 int sysno) const OVERRIDE;
1741
1742 private:
1743 struct Rule {
1744 ErrorCode::ArgType arg_type;
1745 uint64_t mask;
1746 uint64_t value;
1747 };
1748
1749 static Rule rules[];
1750
1751 DISALLOW_COPY_AND_ASSIGN(MaskedEqualTestPolicy);
1752 };
1753
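// Each rule below allows uname()'s second argument iff (arg & mask) == value.
// For example, under rule 0 the value 0x005501aa is accepted
// (0x005501aa & 0x00ff00ff == 0x005500aa), while 0x005500ab is rejected.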
1754 MaskedEqualTestPolicy::Rule MaskedEqualTestPolicy::rules[] = {
1755 /* 0 = */ {ErrorCode::TP_32BIT, 0x0000000000ff00ff, 0x00000000005500aa},
1756
1757 #if __SIZEOF_POINTER__ > 4
1758 /* 1 = */ {ErrorCode::TP_64BIT, 0x00ff00ff00000000, 0x005500aa00000000},
1759 /* 2 = */ {ErrorCode::TP_64BIT, 0x00ff00ff00ff00ff, 0x005500aa005500aa},
1760 #endif
1761 };
1762
1763 ErrorCode MaskedEqualTestPolicy::EvaluateSyscall(SandboxBPF* sandbox,
1764 int sysno) const {
1765 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1766
1767 if (sysno == __NR_uname) {
1768 ErrorCode err = sandbox->Kill("Invalid test case number");
1769 for (size_t i = 0; i < arraysize(rules); i++) {
1770 err = sandbox->Cond(0,
1771 ErrorCode::TP_32BIT,
1772 ErrorCode::OP_EQUAL,
1773 i,
1774 sandbox->CondMaskedEqual(1,
1775 rules[i].arg_type,
1776 rules[i].mask,
1777 rules[i].value,
1778 ErrorCode(1),
1779 ErrorCode(0)),
1780 err);
1781 }
1782 return err;
1783 }
1784 return ErrorCode(ErrorCode::ERR_ALLOWED);
1785 }
1786
1787 #define MASKEQ_TEST(rulenum, arg, expected_result) \
1788 BPF_ASSERT(Syscall::Call(__NR_uname, (rulenum), (arg)) == (expected_result))
1789
1790 BPF_TEST_C(SandboxBPF, MaskedEqualTests, MaskedEqualTestPolicy) {
1791 // Allowed: 0x__55__aa
1792 MASKEQ_TEST(0, 0x00000000, EXPECT_FAILURE);
1793 MASKEQ_TEST(0, 0x00000001, EXPECT_FAILURE);
1794 MASKEQ_TEST(0, 0x00000003, EXPECT_FAILURE);
1795 MASKEQ_TEST(0, 0x00000100, EXPECT_FAILURE);
1796 MASKEQ_TEST(0, 0x00000300, EXPECT_FAILURE);
1797 MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS);
1798 MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE);
1799 MASKEQ_TEST(0, 0x005600aa, EXPECT_FAILURE);
1800 MASKEQ_TEST(0, 0x005501aa, EXPECT_SUCCESS);
1801 MASKEQ_TEST(0, 0x005503aa, EXPECT_SUCCESS);
1802 MASKEQ_TEST(0, 0x555500aa, EXPECT_SUCCESS);
1803 MASKEQ_TEST(0, 0xaa5500aa, EXPECT_SUCCESS);
1804
1805 #if __SIZEOF_POINTER__ > 4
1806 // Allowed: 0x__55__aa________
1807 MASKEQ_TEST(1, 0x0000000000000000, EXPECT_FAILURE);
1808 MASKEQ_TEST(1, 0x0000000000000010, EXPECT_FAILURE);
1809 MASKEQ_TEST(1, 0x0000000000000050, EXPECT_FAILURE);
1810 MASKEQ_TEST(1, 0x0000000100000000, EXPECT_FAILURE);
1811 MASKEQ_TEST(1, 0x0000000300000000, EXPECT_FAILURE);
1812 MASKEQ_TEST(1, 0x0000010000000000, EXPECT_FAILURE);
1813 MASKEQ_TEST(1, 0x0000030000000000, EXPECT_FAILURE);
1814 MASKEQ_TEST(1, 0x005500aa00000000, EXPECT_SUCCESS);
1815 MASKEQ_TEST(1, 0x005500ab00000000, EXPECT_FAILURE);
1816 MASKEQ_TEST(1, 0x005600aa00000000, EXPECT_FAILURE);
1817 MASKEQ_TEST(1, 0x005501aa00000000, EXPECT_SUCCESS);
1818 MASKEQ_TEST(1, 0x005503aa00000000, EXPECT_SUCCESS);
1819 MASKEQ_TEST(1, 0x555500aa00000000, EXPECT_SUCCESS);
1820 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
1821 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS);
1822 MASKEQ_TEST(1, 0xaa5500aa0000cafe, EXPECT_SUCCESS);
1823
1824 // Allowed: 0x__55__aa__55__aa
1825 MASKEQ_TEST(2, 0x0000000000000000, EXPECT_FAILURE);
1826 MASKEQ_TEST(2, 0x0000000000000010, EXPECT_FAILURE);
1827 MASKEQ_TEST(2, 0x0000000000000050, EXPECT_FAILURE);
1828 MASKEQ_TEST(2, 0x0000000100000000, EXPECT_FAILURE);
1829 MASKEQ_TEST(2, 0x0000000300000000, EXPECT_FAILURE);
1830 MASKEQ_TEST(2, 0x0000010000000000, EXPECT_FAILURE);
1831 MASKEQ_TEST(2, 0x0000030000000000, EXPECT_FAILURE);
1832 MASKEQ_TEST(2, 0x00000000005500aa, EXPECT_FAILURE);
1833 MASKEQ_TEST(2, 0x005500aa00000000, EXPECT_FAILURE);
1834 MASKEQ_TEST(2, 0x005500aa005500aa, EXPECT_SUCCESS);
1835 MASKEQ_TEST(2, 0x005500aa005700aa, EXPECT_FAILURE);
1836 MASKEQ_TEST(2, 0x005700aa005500aa, EXPECT_FAILURE);
1837 MASKEQ_TEST(2, 0x005500aa004500aa, EXPECT_FAILURE);
1838 MASKEQ_TEST(2, 0x004500aa005500aa, EXPECT_FAILURE);
1839 MASKEQ_TEST(2, 0x005512aa005500aa, EXPECT_SUCCESS);
1840 MASKEQ_TEST(2, 0x005500aa005534aa, EXPECT_SUCCESS);
1841 MASKEQ_TEST(2, 0xff5500aa0055ffaa, EXPECT_SUCCESS);
1842 #endif
1843 }
1844
1845 intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) {
1846 if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) {
1847 // We expect to get called for an attempt to fork(). No need to log that
1848 // call. But if we ever get called for anything else, we want to verbosely
1849 // print as much information as possible.
1850 const char* msg = (const char*)aux;
1851 printf(
1852 "Clone() was called with unexpected arguments\n"
1853 " nr: %d\n"
1854 " 1: 0x%llX\n"
1855 " 2: 0x%llX\n"
1856 " 3: 0x%llX\n"
1857 " 4: 0x%llX\n"
1858 " 5: 0x%llX\n"
1859 " 6: 0x%llX\n"
1860 "%s\n",
1861 args.nr,
1862 (long long)args.args[0],
1863 (long long)args.args[1],
1864 (long long)args.args[2],
1865 (long long)args.args[3],
1866 (long long)args.args[4],
1867 (long long)args.args[5],
1868 msg);
1869 }
1870 return -EPERM;
1871 }
1872
1873 class PthreadPolicyEquality : public SandboxBPFPolicy {
1874 public:
1875 PthreadPolicyEquality() {}
1876 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
1877 int sysno) const OVERRIDE;
1878
1879 private:
1880 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyEquality);
1881 };
1882
1883 ErrorCode PthreadPolicyEquality::EvaluateSyscall(SandboxBPF* sandbox,
1884 int sysno) const {
1885 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1886 // This policy allows creating threads with pthread_create(). But it
1887 // doesn't allow any other uses of clone(). Most notably, it does not
1888 // allow callers to implement fork() or vfork() by passing suitable flags
1889 // to the clone() system call.
1890 if (sysno == __NR_clone) {
1891 // We have seen two different valid combinations of flags. Glibc
1892 // uses the more modern flags, sets the TLS from the call to clone(), and
1893 // uses futexes to monitor threads. Android's C run-time library doesn't
1894 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
1895 // More recent versions of Android don't set CLONE_DETACHED anymore, so
1896 // the last case accounts for that.
1897 // The following policy is very strict. It only allows the exact masks
1898 // that we have seen in known implementations. It is probably somewhat
1899 // stricter than what we would want to do.
1900 const uint64_t kGlibcCloneMask =
1901 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
1902 CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
1903 CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
1904 const uint64_t kBaseAndroidCloneMask =
1905 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
1906 CLONE_THREAD | CLONE_SYSVSEM;
1907 return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
1908 kGlibcCloneMask,
1909 ErrorCode(ErrorCode::ERR_ALLOWED),
1910 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
1911 kBaseAndroidCloneMask | CLONE_DETACHED,
1912 ErrorCode(ErrorCode::ERR_ALLOWED),
1913 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
1914 kBaseAndroidCloneMask,
1915 ErrorCode(ErrorCode::ERR_ALLOWED),
1916 sandbox->Trap(PthreadTrapHandler, "Unknown mask"))));
1917 }
1918 return ErrorCode(ErrorCode::ERR_ALLOWED);
1919 }
1920
1921 class PthreadPolicyBitMask : public SandboxBPFPolicy {
1922 public:
1923 PthreadPolicyBitMask() {}
1924 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
1925 int sysno) const OVERRIDE;
1926
1927 private:
1928 DISALLOW_COPY_AND_ASSIGN(PthreadPolicyBitMask);
1929 };
1930
1931 ErrorCode PthreadPolicyBitMask::EvaluateSyscall(SandboxBPF* sandbox,
1932 int sysno) const {
1933 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
1934 // This policy allows creating threads with pthread_create(). But it
1935 // doesn't allow any other uses of clone(). Most notably, it does not
1936 // allow callers to implement fork() or vfork() by passing suitable flags
1937 // to the clone() system call.
1938 if (sysno == __NR_clone) {
1939 // We have seen two different valid combinations of flags. Glibc
1940 // uses the more modern flags, sets the TLS from the call to clone(), and
1941 // uses futexes to monitor threads. Android's C run-time library doesn't
1942 // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
1943 // The following policy allows for either combination of flags, but it
1944 // is generally a little more conservative than strictly necessary. We
1945 // err on the side of rather safe than sorry.
1946 // Very noticeably though, we disallow fork() (which is often just a
1947 // wrapper around clone()).
1948 return sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
1949 ~uint32(CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|
1950 CLONE_THREAD|CLONE_SYSVSEM|CLONE_SETTLS|
1951 CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID|
1952 CLONE_DETACHED),
1953 sandbox->Trap(PthreadTrapHandler,
1954 "Unexpected CLONE_XXX flag found"),
1955 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
1956 CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|
1957 CLONE_THREAD|CLONE_SYSVSEM,
1958 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ALL_BITS,
1959 CLONE_SETTLS|CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID,
1960 ErrorCode(ErrorCode::ERR_ALLOWED),
1961 sandbox->Cond(0, ErrorCode::TP_32BIT, ErrorCode::OP_HAS_ANY_BITS,
1962 CLONE_SETTLS|CLONE_PARENT_SETTID|CLONE_CHILD_CLEARTID,
1963 sandbox->Trap(PthreadTrapHandler,
1964 "Must set either all or none of the TLS"
1965 " and futex bits in call to clone()"),
1966 ErrorCode(ErrorCode::ERR_ALLOWED))),
1967 sandbox->Trap(PthreadTrapHandler,
1968 "Missing mandatory CLONE_XXX flags "
1969 "when creating new thread")));
1970 }
1971 return ErrorCode(ErrorCode::ERR_ALLOWED);
1972 }
1973
1974 static void* ThreadFnc(void* arg) {
1975 ++*reinterpret_cast<int*>(arg);
1976 Syscall::Call(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0);
1977 return NULL;
1978 }
1979
1980 static void PthreadTest() {
1981 // Attempt to start a joinable thread. This should succeed.
1982 pthread_t thread;
1983 int thread_ran = 0;
1984 BPF_ASSERT(!pthread_create(&thread, NULL, ThreadFnc, &thread_ran));
1985 BPF_ASSERT(!pthread_join(thread, NULL));
1986 BPF_ASSERT(thread_ran);
1987
1988 // Attempt to start a detached thread. This should succeed.
1989 thread_ran = 0;
1990 pthread_attr_t attr;
1991 BPF_ASSERT(!pthread_attr_init(&attr));
1992 BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
1993 BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran));
1994 BPF_ASSERT(!pthread_attr_destroy(&attr));
1995 while (Syscall::Call(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) ==
1996 -EINTR) {
1997 }
1998 BPF_ASSERT(thread_ran);
1999
2000 // Attempt to fork() a process using clone(). This should fail. We use the
2001 // same flags that glibc uses when calling fork(). But we don't actually
2002 // try calling the fork() implementation in the C run-time library, as
2003 // run-time libraries other than glibc might call __NR_fork instead of
2004 // __NR_clone, and that would introduce a bogus test failure.
2005 int pid;
2006 BPF_ASSERT(Syscall::Call(__NR_clone,
2007 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD,
2008 0,
2009 0,
2010 &pid) == -EPERM);
2011 }
2012
2013 BPF_TEST_C(SandboxBPF, PthreadEquality, PthreadPolicyEquality) {
2014 PthreadTest();
2015 }
2016
2017 BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) {
2018 PthreadTest();
2019 }
2020
2021 // libc might not define these even though the kernel supports them.
2022 #ifndef PTRACE_O_TRACESECCOMP
2023 #define PTRACE_O_TRACESECCOMP 0x00000080
2024 #endif
2025
2026 #ifdef PTRACE_EVENT_SECCOMP
2027 #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
2028 #else
2029 // When Debian/Ubuntu backported seccomp-bpf support into earlier kernels, they
2030 // changed the value of PTRACE_EVENT_SECCOMP from 7 to 8, since 7 was taken by
2031 // PTRACE_EVENT_STOP (upstream chose to renumber PTRACE_EVENT_STOP to 128). If
2032 // PTRACE_EVENT_SECCOMP isn't defined, we have no choice but to consider both
2033 // values here.
2034 #define IS_SECCOMP_EVENT(status) ((status >> 16) == 7 || (status >> 16) == 8)
2035 #endif
2036
2037 #if defined(__arm__)
2038 #ifndef PTRACE_SET_SYSCALL
2039 #define PTRACE_SET_SYSCALL 23
2040 #endif
2041 #endif
2042
2043 #if defined(__aarch64__)
2044 #ifndef PTRACE_GETREGS
2045 #define PTRACE_GETREGS 12
2046 #endif
2047 #endif
2048
2049 #if defined(__aarch64__)
2050 #ifndef PTRACE_SETREGS
2051 #define PTRACE_SETREGS 13
2052 #endif
2053 #endif
2054
2055 // Changes the syscall to run for a child being sandboxed using seccomp-bpf with
2056 // PTRACE_O_TRACESECCOMP. Should only be called when the child is stopped on
2057 // PTRACE_EVENT_SECCOMP.
2058 //
2059 // regs should contain the current set of registers of the child, obtained using
2060 // PTRACE_GETREGS.
2061 //
2062 // Depending on the architecture, this may modify regs, so the caller is
2063 // responsible for committing these changes using PTRACE_SETREGS.
2064 long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) {
2065 #if defined(__arm__)
2066 // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the
2067 // libc ptrace call as the request parameter is an enum, and
2068 // PTRACE_SET_SYSCALL may not be in the enum.
2069 return syscall(__NR_ptrace, PTRACE_SET_SYSCALL, pid, NULL, syscall_number);
2070 #endif
2071
2072 SECCOMP_PT_SYSCALL(*regs) = syscall_number;
2073 return 0;
2074 }
2075
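// The low 16 bits of an ERR_TRACE return are the SECCOMP_RET_DATA value that
// the tracer can read back with PTRACE_GETEVENTMSG; kTraceData is what the
// tracer loop below expects to find there.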
2076 const uint16_t kTraceData = 0xcc;
2077
2078 class TraceAllPolicy : public SandboxBPFPolicy {
2079 public:
2080 TraceAllPolicy() {}
2081 virtual ~TraceAllPolicy() {}
2082
2083 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox_compiler,
2084 int system_call_number) const OVERRIDE {
2085 return ErrorCode(ErrorCode::ERR_TRACE + kTraceData);
2086 }
2087
2088 private:
2089 DISALLOW_COPY_AND_ASSIGN(TraceAllPolicy);
2090 };
2091
2092 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) {
2093 if (SandboxBPF::SupportsSeccompSandbox(-1) !=
2094 sandbox::SandboxBPF::STATUS_AVAILABLE) {
2095 return;
2096 }
2097
2098 // This test is disabled on arm due to a kernel bug.
2099 // See https://code.google.com/p/chromium/issues/detail?id=383977
2100 #if defined(__arm__) || defined(__aarch64__)
2101 printf("This test is currently disabled on ARM32/64 due to a kernel bug.");
2102 return;
2103 #endif
2104
2105 #if defined(__mips__)
2106 // TODO: Figure out how to handle indirect syscalls with enough specificity
2107 // in this test, then enable it.
2108 printf("This test is currently disabled on MIPS.");
2109 return;
2110 #endif
2111
2112 pid_t pid = fork();
2113 BPF_ASSERT_NE(-1, pid);
2114 if (pid == 0) {
2115 pid_t my_pid = getpid();
2116 BPF_ASSERT_NE(-1, ptrace(PTRACE_TRACEME, -1, NULL, NULL));
2117 BPF_ASSERT_EQ(0, raise(SIGSTOP));
2118 SandboxBPF sandbox;
2119 sandbox.SetSandboxPolicy(new TraceAllPolicy);
2120 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
2121
2122 // getpid is allowed.
2123 BPF_ASSERT_EQ(my_pid, syscall(__NR_getpid));
2124
2125 // write to stdout is skipped and returns a fake value.
2126 BPF_ASSERT_EQ(kExpectedReturnValue,
2127 syscall(__NR_write, STDOUT_FILENO, "A", 1));
2128
2129 // kill is rewritten to exit(kExpectedReturnValue).
2130 syscall(__NR_kill, my_pid, SIGKILL);
2131
2132 // Should not be reached.
2133 BPF_ASSERT(false);
2134 }
2135
2136 int status;
2137 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, WUNTRACED)) != -1);
2138 BPF_ASSERT(WIFSTOPPED(status));
2139
2140 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETOPTIONS, pid, NULL,
2141 reinterpret_cast<void*>(PTRACE_O_TRACESECCOMP)));
2142 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2143 while (true) {
2144 BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1);
2145 if (WIFEXITED(status) || WIFSIGNALED(status)) {
2146 BPF_ASSERT(WIFEXITED(status));
2147 BPF_ASSERT_EQ(kExpectedReturnValue, WEXITSTATUS(status));
2148 break;
2149 }
2150
2151 if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP ||
2152 !IS_SECCOMP_EVENT(status)) {
2153 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2154 continue;
2155 }
2156
2157 unsigned long data;
2158 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETEVENTMSG, pid, NULL, &data));
2159 BPF_ASSERT_EQ(kTraceData, data);
2160
2161 regs_struct regs;
2162 BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, &regs));
2163 switch (SECCOMP_PT_SYSCALL(regs)) {
2164 case __NR_write:
2165 // Skip writes to stdout, make it return kExpectedReturnValue. Allow
2166 // writes to stderr so that BPF_ASSERT messages show up.
2167 if (SECCOMP_PT_PARM1(regs) == STDOUT_FILENO) {
2168 BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, -1));
2169 SECCOMP_PT_RESULT(regs) = kExpectedReturnValue;
2170 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
2171 }
2172 break;
2173
2174 case __NR_kill:
2175 // Rewrite to exit(kExpectedReturnValue).
2176 BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, __NR_exit));
2177 SECCOMP_PT_PARM1(regs) = kExpectedReturnValue;
2178 BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
2179 break;
2180
2181 default:
2182 // Allow all other syscalls.
2183 break;
2184 }
2185
2186 BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
2187 }
2188 }
2189
2190 // Android does not expose pread64 or pwrite64.
2191 #if !defined(OS_ANDROID)
2192
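// Helpers that keep calling pwrite64()/pread64() until the whole buffer has
// been transferred at the given offset, returning false on any error or on an
// implausibly large transfer.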
2193 bool FullPwrite64(int fd, const char* buffer, size_t count, off64_t offset) {
2194 while (count > 0) {
2195 const ssize_t transfered =
2196 HANDLE_EINTR(pwrite64(fd, buffer, count, offset));
2197 if (transfered <= 0 || static_cast<size_t>(transfered) > count) {
2198 return false;
2199 }
2200 count -= transfered;
2201 buffer += transfered;
2202 offset += transfered;
2203 }
2204 return true;
2205 }
2206
2207 bool FullPread64(int fd, char* buffer, size_t count, off64_t offset) {
2208 while (count > 0) {
2209 const ssize_t transfered = HANDLE_EINTR(pread64(fd, buffer, count, offset));
2210 if (transfered <= 0 || static_cast<size_t>(transfered) > count) {
2211 return false;
2212 }
2213 count -= transfered;
2214 buffer += transfered;
2215 offset += transfered;
2216 }
2217 return true;
2218 }
2219
2220 bool pread_64_was_forwarded = false;
2221
2222 class TrapPread64Policy : public SandboxBPFPolicy {
2223 public:
2224 TrapPread64Policy() {}
2225 virtual ~TrapPread64Policy() {}
2226
2227 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox_compiler,
2228 int system_call_number) const OVERRIDE {
2229 // Set the global environment for unsafe traps once.
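    // (EvaluateSyscall() is invoked for every valid syscall number while the
    // policy is compiled, so keying this off MIN_SYSCALL should make it run
    // exactly once.)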
2230 if (system_call_number == MIN_SYSCALL) {
2231 EnableUnsafeTraps();
2232 }
2233
2234 if (system_call_number == __NR_pread64) {
2235 return sandbox_compiler->UnsafeTrap(ForwardPreadHandler, NULL);
2236 }
2237 return ErrorCode(ErrorCode::ERR_ALLOWED);
2238 }
2239
2240 private:
2241 static intptr_t ForwardPreadHandler(const struct arch_seccomp_data& args,
2242 void* aux) {
2243 BPF_ASSERT(args.nr == __NR_pread64);
2244 pread_64_was_forwarded = true;
2245
2246 return SandboxBPF::ForwardSyscall(args);
2247 }
2248 DISALLOW_COPY_AND_ASSIGN(TrapPread64Policy);
2249 };
2250
2251 // pread(2) takes a 64-bit offset. On 32-bit systems, it will be split
2252 // between two arguments. In this test, we make sure that ForwardSyscall() can
2253 // forward it properly.
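// For example, the kLargeOffset used below, (1 << 32) | 0xBEEF, would be
// passed as two 32-bit halves on such systems: 0x0000BEEF in the low word and
// 0x00000001 in the high word (the exact register placement is ABI-dependent).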
2254 BPF_TEST_C(SandboxBPF, Pread64, TrapPread64Policy) {
2255 ScopedTemporaryFile temp_file;
2256 const uint64_t kLargeOffset = (static_cast<uint64_t>(1) << 32) | 0xBEEF;
2257 const char kTestString[] = "This is a test!";
2258 BPF_ASSERT(FullPwrite64(
2259 temp_file.fd(), kTestString, sizeof(kTestString), kLargeOffset));
2260
2261 char read_test_string[sizeof(kTestString)] = {0};
2262 BPF_ASSERT(FullPread64(temp_file.fd(),
2263 read_test_string,
2264 sizeof(read_test_string),
2265 kLargeOffset));
2266 BPF_ASSERT_EQ(0, memcmp(kTestString, read_test_string, sizeof(kTestString)));
2267 BPF_ASSERT(pread_64_was_forwarded);
2268 }
2269
2270 #endif // !defined(OS_ANDROID)
2271
2272 void* TsyncApplyToTwoThreadsFunc(void* cond_ptr) {
2273 base::WaitableEvent* event = static_cast<base::WaitableEvent*>(cond_ptr);
2274
2275 // Wait for the main thread to signal that the filter has been applied.
2276 if (!event->IsSignaled()) {
2277 event->Wait();
2278 }
2279
2280 BPF_ASSERT(event->IsSignaled());
2281
2282 BlacklistNanosleepPolicy::AssertNanosleepFails();
2283
2284 return NULL;
2285 }
2286
2287 SANDBOX_TEST(SandboxBPF, Tsync) {
2288 if (SandboxBPF::SupportsSeccompThreadFilterSynchronization() !=
2289 SandboxBPF::STATUS_AVAILABLE) {
2290 return;
2291 }
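  // Thread-filter synchronization (presumably the kernel's
  // SECCOMP_FILTER_FLAG_TSYNC) atomically applies the filter to every thread
  // of the calling process, which is what lets the filter installed below also
  // cover the already-running helper thread.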
2292
2293 base::WaitableEvent event(true, false);
2294
2295 // Create a thread on which to invoke the blocked syscall.
2296 pthread_t thread;
2297 BPF_ASSERT_EQ(0,
2298 pthread_create(&thread, NULL, &TsyncApplyToTwoThreadsFunc, &event));
2299
2300 // Test that nanosleep succeeds before the sandbox is engaged.
2301 const struct timespec ts = {0, 0};
2302 BPF_ASSERT_EQ(0, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));
2303
2304 // Engage the sandbox.
2305 SandboxBPF sandbox;
2306 sandbox.SetSandboxPolicy(new BlacklistNanosleepPolicy());
2307 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));
2308
2309 // This thread should have the filter applied as well.
2310 BlacklistNanosleepPolicy::AssertNanosleepFails();
2311
2312 // Signal the condition to invoke the system call.
2313 event.Signal();
2314
2315 // Wait for the thread to finish.
2316 BPF_ASSERT_EQ(0, pthread_join(thread, NULL));
2317 }
2318
2319 class AllowAllPolicy : public SandboxBPFPolicy {
2320 public:
2321 AllowAllPolicy() : SandboxBPFPolicy() {}
2322 virtual ~AllowAllPolicy() {}
2323
2324 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
2325 int sysno) const OVERRIDE {
2326 return ErrorCode(ErrorCode::ERR_ALLOWED);
2327 }
2328
2329 private:
2330 DISALLOW_COPY_AND_ASSIGN(AllowAllPolicy);
2331 };
2332
2333 SANDBOX_DEATH_TEST(SandboxBPF, StartMultiThreadedAsSingleThreaded,
2334 DEATH_MESSAGE("Cannot start sandbox; process is already multi-threaded")) {
2335 base::Thread thread("sandbox.linux.StartMultiThreadedAsSingleThreaded");
2336 BPF_ASSERT(thread.Start());
2337
2338 SandboxBPF sandbox;
2339 sandbox.SetSandboxPolicy(new AllowAllPolicy());
2340 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
2341 }
2342
2343 // http://crbug.com/407357
2344 #if !defined(THREAD_SANITIZER)
2345 SANDBOX_DEATH_TEST(SandboxBPF, StartSingleThreadedAsMultiThreaded,
2346 DEATH_MESSAGE("Cannot start sandbox; process may be single-threaded when "
2347 "reported as not")) {
2348 SandboxBPF sandbox;
2349 sandbox.SetSandboxPolicy(new AllowAllPolicy());
2350 BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));
2351 }
2352 #endif // !defined(THREAD_SANITIZER)
2353
2354 // A stub handler for the UnsafeTrap. Never called.
2355 intptr_t NoOpHandler(const struct arch_seccomp_data& args, void*) {
2356 return -1;
2357 }
2358
2359 class UnsafeTrapWithCondPolicy : public SandboxBPFPolicy {
2360 public:
2361 UnsafeTrapWithCondPolicy() {}
2362 virtual ErrorCode EvaluateSyscall(SandboxBPF* sandbox,
2363 int sysno) const OVERRIDE {
2364 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
2365 setenv(kSandboxDebuggingEnv, "t", 0);
2366 Die::SuppressInfoMessages(true);
2367
2368 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
2369 return ErrorCode(ErrorCode::ERR_ALLOWED);
2370
2371 switch (sysno) {
2372 case __NR_uname:
2373 return sandbox->Cond(0,
2374 ErrorCode::TP_32BIT,
2375 ErrorCode::OP_EQUAL,
2376 0,
2377 ErrorCode(ErrorCode::ERR_ALLOWED),
2378 ErrorCode(EPERM));
2379 case __NR_setgid:
2380 return sandbox->Cond(0,
2381 ErrorCode::TP_32BIT,
2382 ErrorCode::OP_EQUAL,
2383 100,
2384 ErrorCode(ENOMEM),
2385 sandbox->Cond(0,
2386 ErrorCode::TP_32BIT,
2387 ErrorCode::OP_EQUAL,
2388 200,
2389 ErrorCode(ENOSYS),
2390 ErrorCode(EPERM)));
2391 case __NR_close:
2392 case __NR_exit_group:
2393 case __NR_write:
2394 return ErrorCode(ErrorCode::ERR_ALLOWED);
2395 case __NR_getppid:
2396 return sandbox->UnsafeTrap(NoOpHandler, NULL);
2397 default:
2398 return ErrorCode(EPERM);
2399 }
2400 }
2401
2402 private:
2403 DISALLOW_COPY_AND_ASSIGN(UnsafeTrapWithCondPolicy);
2404 };
2405
2406 BPF_TEST_C(SandboxBPF, UnsafeTrapWithCond, UnsafeTrapWithCondPolicy) {
2407 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 0));
2408 BPF_ASSERT_EQ(EFAULT, errno);
2409
2410 BPF_ASSERT_EQ(-1, syscall(__NR_uname, 1));
2411 BPF_ASSERT_EQ(EPERM, errno);
2412
2413 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 100));
2414 BPF_ASSERT_EQ(ENOMEM, errno);
2415
2416 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 200));
2417 BPF_ASSERT_EQ(ENOSYS, errno);
2418
2419 BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 300));
2420 BPF_ASSERT_EQ(EPERM, errno);
2421 }
2422
2423 } // namespace
2424
2425 } // namespace sandbox