OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "sandbox/linux/bpf_dsl/bpf_dsl.h" |
| 6 |
| 7 #include <errno.h> |
| 8 #include <fcntl.h> |
| 9 #include <pthread.h> |
| 10 #include <sched.h> |
| 11 #include <signal.h> |
| 12 #include <sys/prctl.h> |
| 13 #include <sys/ptrace.h> |
| 14 #include <sys/syscall.h> |
| 15 #include <sys/time.h> |
| 16 #include <sys/types.h> |
| 17 #include <sys/utsname.h> |
| 18 #include <unistd.h> |
| 19 #include <sys/socket.h> |
| 20 |
| 21 #if defined(ANDROID) |
| 22 // Work-around for buggy headers in Android's NDK |
| 23 #define __user |
| 24 #endif |
| 25 #include <linux/futex.h> |
| 26 |
| 27 #include "base/bind.h" |
| 28 #include "base/logging.h" |
| 29 #include "base/macros.h" |
| 30 #include "base/memory/scoped_ptr.h" |
| 31 #include "base/posix/eintr_wrapper.h" |
| 32 #include "base/synchronization/waitable_event.h" |
| 33 #include "base/threading/thread.h" |
| 34 #include "build/build_config.h" |
| 35 #include "sandbox/linux/seccomp-bpf/bpf_tests.h" |
| 36 #include "sandbox/linux/seccomp-bpf/die.h" |
| 37 #include "sandbox/linux/seccomp-bpf/errorcode.h" |
| 38 #include "sandbox/linux/seccomp-bpf/linux_seccomp.h" |
| 39 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h" |
| 40 #include "sandbox/linux/seccomp-bpf/syscall.h" |
| 41 #include "sandbox/linux/seccomp-bpf/trap.h" |
| 42 #include "sandbox/linux/services/broker_process.h" |
| 43 #include "sandbox/linux/services/linux_syscalls.h" |
| 44 #include "sandbox/linux/tests/scoped_temporary_file.h" |
| 45 #include "sandbox/linux/tests/unit_tests.h" |
| 46 #include "testing/gtest/include/gtest/gtest.h" |
| 47 |
| 48 // Workaround for Android's prctl.h file. |
| 49 #ifndef PR_GET_ENDIAN |
| 50 #define PR_GET_ENDIAN 19 |
| 51 #endif |
| 52 #ifndef PR_CAPBSET_READ |
| 53 #define PR_CAPBSET_READ 23 |
| 54 #define PR_CAPBSET_DROP 24 |
| 55 #endif |
| 56 |
| 57 namespace sandbox { |
| 58 namespace bpf_dsl { |
| 59 |
| 60 namespace { |
| 61 |
// Arbitrary sentinel value that trap handlers write into their auxiliary
// data, so tests can verify that a handler actually ran.
const int kExpectedReturnValue = 42;
// Environment variable that gates sandbox debugging features, including the
// ability to install UnsafeTrap() handlers.
const char kSandboxDebuggingEnv[] = "CHROME_SANDBOX_DEBUGGING";
| 64 |
| 65 // Set the global environment to allow the use of UnsafeTrap() policies. |
void EnableUnsafeTraps() {
  // The use of UnsafeTrap() causes us to print a warning message. This is
  // generally desirable, but it results in the unittest failing, as it doesn't
  // expect any messages on "stderr". So, temporarily disable messages. The
  // BPF_TEST() is guaranteed to turn messages back on, after the policy
  // function has completed.
  // The third argument of 0 means an existing value is NOT overwritten.
  setenv(kSandboxDebuggingEnv, "t", 0);
  Die::SuppressInfoMessages(true);
}
| 75 |
| 76 // This test should execute no matter whether we have kernel support. So, |
| 77 // we make it a TEST() instead of a BPF_TEST(). |
| 78 TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupports)) { |
| 79 // We check that we don't crash, but it's ok if the kernel doesn't |
| 80 // support it. |
| 81 bool seccomp_bpf_supported = |
| 82 SandboxBPF::SupportsSeccompSandbox(-1) == SandboxBPF::STATUS_AVAILABLE; |
| 83 // We want to log whether or not seccomp BPF is actually supported |
| 84 // since actual test coverage depends on it. |
| 85 RecordProperty("SeccompBPFSupported", |
| 86 seccomp_bpf_supported ? "true." : "false."); |
| 87 std::cout << "Seccomp BPF supported: " |
| 88 << (seccomp_bpf_supported ? "true." : "false.") << "\n"; |
| 89 RecordProperty("PointerSize", sizeof(void*)); |
| 90 std::cout << "Pointer size: " << sizeof(void*) << "\n"; |
| 91 } |
| 92 |
| 93 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(CallSupportsTwice)) { |
| 94 SandboxBPF::SupportsSeccompSandbox(-1); |
| 95 SandboxBPF::SupportsSeccompSandbox(-1); |
| 96 } |
| 97 |
// BPF_TEST does a lot of the boiler-plate code around setting up a
// policy and optionally passing data between the caller, the policy and
// any Trap() handlers. This is great for writing short and concise tests,
// and it helps us avoid accidentally forgetting any of the crucial steps
// in setting up the sandbox. But it wouldn't hurt to have at least one test
// that explicitly walks through all these steps.
| 104 |
| 105 intptr_t IncreaseCounter(const struct arch_seccomp_data& args, void* aux) { |
| 106 BPF_ASSERT(aux); |
| 107 int* counter = static_cast<int*>(aux); |
| 108 return (*counter)++; |
| 109 } |
| 110 |
| 111 class VerboseAPITestingPolicy : public SandboxBPFDSLPolicy { |
| 112 public: |
| 113 explicit VerboseAPITestingPolicy(int* counter_ptr) |
| 114 : counter_ptr_(counter_ptr) {} |
| 115 virtual ~VerboseAPITestingPolicy() {} |
| 116 |
| 117 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 118 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 119 if (sysno == __NR_uname) { |
| 120 return Trap(IncreaseCounter, counter_ptr_); |
| 121 } |
| 122 return Allow(); |
| 123 } |
| 124 |
| 125 private: |
| 126 int* counter_ptr_; |
| 127 |
| 128 DISALLOW_COPY_AND_ASSIGN(VerboseAPITestingPolicy); |
| 129 }; |
| 130 |
| 131 SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(VerboseAPITesting)) { |
| 132 if (SandboxBPF::SupportsSeccompSandbox(-1) == |
| 133 sandbox::SandboxBPF::STATUS_AVAILABLE) { |
| 134 static int counter = 0; |
| 135 |
| 136 SandboxBPF sandbox; |
| 137 sandbox.SetSandboxPolicy(new VerboseAPITestingPolicy(&counter)); |
| 138 BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED)); |
| 139 |
| 140 BPF_ASSERT_EQ(0, counter); |
| 141 BPF_ASSERT_EQ(0, syscall(__NR_uname, 0)); |
| 142 BPF_ASSERT_EQ(1, counter); |
| 143 BPF_ASSERT_EQ(1, syscall(__NR_uname, 0)); |
| 144 BPF_ASSERT_EQ(2, counter); |
| 145 } |
| 146 } |
| 147 |
| 148 // A simple blacklist test |
| 149 |
| 150 class BlacklistNanosleepPolicy : public SandboxBPFDSLPolicy { |
| 151 public: |
| 152 BlacklistNanosleepPolicy() {} |
| 153 virtual ~BlacklistNanosleepPolicy() {} |
| 154 |
| 155 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 156 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 157 switch (sysno) { |
| 158 case __NR_nanosleep: |
| 159 return Error(EACCES); |
| 160 default: |
| 161 return Allow(); |
| 162 } |
| 163 } |
| 164 |
| 165 static void AssertNanosleepFails() { |
| 166 const struct timespec ts = {0, 0}; |
| 167 errno = 0; |
| 168 BPF_ASSERT_EQ(-1, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL))); |
| 169 BPF_ASSERT_EQ(EACCES, errno); |
| 170 } |
| 171 |
| 172 private: |
| 173 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepPolicy); |
| 174 }; |
| 175 |
| 176 BPF_TEST_C(SandboxBPF, ApplyBasicBlacklistPolicy, BlacklistNanosleepPolicy) { |
| 177 BlacklistNanosleepPolicy::AssertNanosleepFails(); |
| 178 } |
| 179 |
| 180 // Now do a simple whitelist test |
| 181 |
| 182 class WhitelistGetpidPolicy : public SandboxBPFDSLPolicy { |
| 183 public: |
| 184 WhitelistGetpidPolicy() {} |
| 185 virtual ~WhitelistGetpidPolicy() {} |
| 186 |
| 187 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 188 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 189 switch (sysno) { |
| 190 case __NR_getpid: |
| 191 case __NR_exit_group: |
| 192 return Allow(); |
| 193 default: |
| 194 return Error(ENOMEM); |
| 195 } |
| 196 } |
| 197 |
| 198 private: |
| 199 DISALLOW_COPY_AND_ASSIGN(WhitelistGetpidPolicy); |
| 200 }; |
| 201 |
BPF_TEST_C(SandboxBPF, ApplyBasicWhitelistPolicy, WhitelistGetpidPolicy) {
  // getpid() is whitelisted and should succeed without touching errno.
  errno = 0;
  BPF_ASSERT(syscall(__NR_getpid) > 0);
  BPF_ASSERT(errno == 0);

  // getpgid() is not whitelisted; the policy maps it to ENOMEM.
  BPF_ASSERT(getpgid(0) == -1);
  BPF_ASSERT(errno == ENOMEM);
}
| 212 |
| 213 // A simple blacklist policy, with a SIGSYS handler |
| 214 intptr_t EnomemHandler(const struct arch_seccomp_data& args, void* aux) { |
| 215 // We also check that the auxiliary data is correct |
| 216 SANDBOX_ASSERT(aux); |
| 217 *(static_cast<int*>(aux)) = kExpectedReturnValue; |
| 218 return -ENOMEM; |
| 219 } |
| 220 |
| 221 class BlacklistNanosleepTrapPolicy : public SandboxBPFDSLPolicy { |
| 222 public: |
| 223 explicit BlacklistNanosleepTrapPolicy(int* aux) : aux_(aux) {} |
| 224 virtual ~BlacklistNanosleepTrapPolicy() {} |
| 225 |
| 226 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 227 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 228 switch (sysno) { |
| 229 case __NR_nanosleep: |
| 230 return Trap(EnomemHandler, aux_); |
| 231 default: |
| 232 return Allow(); |
| 233 } |
| 234 } |
| 235 |
| 236 private: |
| 237 int* aux_; |
| 238 |
| 239 DISALLOW_COPY_AND_ASSIGN(BlacklistNanosleepTrapPolicy); |
| 240 }; |
| 241 |
BPF_TEST(SandboxBPF,
         BasicBlacklistWithSigsys,
         BlacklistNanosleepTrapPolicy,
         int /* (*BPF_AUX) */) {
  // getpid() is not blacklisted and should work properly.
  errno = 0;
  BPF_ASSERT(syscall(__NR_getpid) > 0);
  BPF_ASSERT(errno == 0);

  // Seed the auxiliary data; it should be overwritten by the signal handler.
  *BPF_AUX = -1;
  const struct timespec ts = {0, 0};
  BPF_ASSERT(syscall(__NR_nanosleep, &ts, NULL) == -1);
  BPF_ASSERT(errno == ENOMEM);

  // We expect the signal handler to have modified the auxiliary data.
  BPF_ASSERT(*BPF_AUX == kExpectedReturnValue);
}
| 260 |
| 261 // A simple test that verifies we can return arbitrary errno values. |
| 262 |
// Policy that maps a handful of system calls to fixed errno values
// (including the 0 and ERR_MAX_ERRNO extremes); defined out of line below.
class ErrnoTestPolicy : public SandboxBPFDSLPolicy {
 public:
  ErrnoTestPolicy() {}
  virtual ~ErrnoTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  DISALLOW_COPY_AND_ASSIGN(ErrnoTestPolicy);
};
| 273 |
ResultExpr ErrnoTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  switch (sysno) {
    case __NR_dup3:  // dup2 is a wrapper of dup3 in android
#if defined(__NR_dup2)
    case __NR_dup2:
#endif
      // Pretend that dup2() worked, but don't actually do anything.
      // Error(0) means "return success without running the syscall".
      return Error(0);
    case __NR_setuid:
#if defined(__NR_setuid32)
    case __NR_setuid32:
#endif
      // Return the minimum non-zero errno value, 1.
      return Error(1);
    case __NR_setgid:
#if defined(__NR_setgid32)
    case __NR_setgid32:
#endif
      // Return maximum errno value (typically 4095).
      return Error(ErrorCode::ERR_MAX_ERRNO);
    case __NR_uname:
      // Return an errno in between the two extremes.
      return Error(42);
    default:
      return Allow();
  }
}
| 302 |
BPF_TEST_C(SandboxBPF, ErrnoTest, ErrnoTestPolicy) {
  // Verify that dup2() returns success, but doesn't actually run: set up two
  // pipes, "dup" one read end over the other, and check which pipe the read
  // actually drains.
  int fds[4];
  BPF_ASSERT(pipe(fds) == 0);
  BPF_ASSERT(pipe(fds + 2) == 0);
  BPF_ASSERT(dup2(fds[2], fds[0]) == 0);
  char buf[1] = {};
  BPF_ASSERT(write(fds[1], "\x55", 1) == 1);
  BPF_ASSERT(write(fds[3], "\xAA", 1) == 1);
  BPF_ASSERT(read(fds[0], buf, 1) == 1);

  // If dup2() executed, we will read \xAA, but if dup2() has been turned
  // into a no-op by our policy, then we will read \x55.
  BPF_ASSERT(buf[0] == '\x55');

  // Verify that we can return the minimum and maximum errno values.
  errno = 0;
  BPF_ASSERT(setuid(0) == -1);
  BPF_ASSERT(errno == 1);

  // On Android, errno is only supported up to 255, otherwise errno
  // processing is skipped.
  // We work around this (crbug.com/181647).
  if (sandbox::IsAndroid() && setgid(0) != -1) {
    errno = 0;
    BPF_ASSERT(setgid(0) == -ErrorCode::ERR_MAX_ERRNO);
    BPF_ASSERT(errno == 0);
  } else {
    errno = 0;
    BPF_ASSERT(setgid(0) == -1);
    BPF_ASSERT(errno == ErrorCode::ERR_MAX_ERRNO);
  }

  // Finally, test an errno in between the minimum and maximum.
  errno = 0;
  struct utsname uts_buf;
  BPF_ASSERT(uname(&uts_buf) == -1);
  BPF_ASSERT(errno == 42);
}
| 342 |
| 343 // Testing the stacking of two sandboxes |
| 344 |
| 345 class StackingPolicyPartOne : public SandboxBPFDSLPolicy { |
| 346 public: |
| 347 StackingPolicyPartOne() {} |
| 348 virtual ~StackingPolicyPartOne() {} |
| 349 |
| 350 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 351 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 352 switch (sysno) { |
| 353 case __NR_getppid: { |
| 354 const Arg<int> arg(0); |
| 355 return If(arg == 0, Allow()).Else(Error(EPERM)); |
| 356 } |
| 357 default: |
| 358 return Allow(); |
| 359 } |
| 360 } |
| 361 |
| 362 private: |
| 363 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartOne); |
| 364 }; |
| 365 |
| 366 class StackingPolicyPartTwo : public SandboxBPFDSLPolicy { |
| 367 public: |
| 368 StackingPolicyPartTwo() {} |
| 369 virtual ~StackingPolicyPartTwo() {} |
| 370 |
| 371 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 372 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 373 switch (sysno) { |
| 374 case __NR_getppid: { |
| 375 const Arg<int> arg(0); |
| 376 return If(arg == 0, Error(EINVAL)).Else(Allow()); |
| 377 } |
| 378 default: |
| 379 return Allow(); |
| 380 } |
| 381 } |
| 382 |
| 383 private: |
| 384 DISALLOW_COPY_AND_ASSIGN(StackingPolicyPartTwo); |
| 385 }; |
| 386 |
BPF_TEST_C(SandboxBPF, StackingPolicy, StackingPolicyPartOne) {
  // Under part one alone: getppid(0) is allowed, getppid(1) fails EPERM.
  errno = 0;
  BPF_ASSERT(syscall(__NR_getppid, 0) > 0);
  BPF_ASSERT(errno == 0);

  BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
  BPF_ASSERT(errno == EPERM);

  // Stack a second sandbox with its own policy. Verify that we can further
  // restrict filters, but we cannot relax existing filters.
  SandboxBPF sandbox;
  sandbox.SetSandboxPolicy(new StackingPolicyPartTwo());
  BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));

  // getppid(0) is now denied by the new layer (EINVAL)...
  errno = 0;
  BPF_ASSERT(syscall(__NR_getppid, 0) == -1);
  BPF_ASSERT(errno == EINVAL);

  // ...while getppid(1) is still denied by the first layer (EPERM).
  BPF_ASSERT(syscall(__NR_getppid, 1) == -1);
  BPF_ASSERT(errno == EPERM);
}
| 408 |
| 409 // A more complex, but synthetic policy. This tests the correctness of the BPF |
| 410 // program by iterating through all syscalls and checking for an errno that |
| 411 // depends on the syscall number. Unlike the Verifier, this exercises the BPF |
| 412 // interpreter in the kernel. |
| 413 |
| 414 // We try to make sure we exercise optimizations in the BPF compiler. We make |
| 415 // sure that the compiler can have an opportunity to coalesce syscalls with |
| 416 // contiguous numbers and we also make sure that disjoint sets can return the |
| 417 // same errno. |
// Maps a system call number to a deterministic, "random looking" errno.
// Contiguous clusters of FOUR system calls (sysno with the two low bits
// masked off) share one errno: the cluster index modulo 29, plus 1 so that
// we never produce errno 0. (The original comment claimed clusters of
// three, but `sysno & ~3` groups numbers four at a time.)
int SysnoToRandomErrno(int sysno) {
  const int cluster = (sysno & ~3) >> 2;
  return cluster % 29 + 1;
}
| 423 |
| 424 class SyntheticPolicy : public SandboxBPFDSLPolicy { |
| 425 public: |
| 426 SyntheticPolicy() {} |
| 427 virtual ~SyntheticPolicy() {} |
| 428 |
| 429 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 430 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 431 if (sysno == __NR_exit_group || sysno == __NR_write) { |
| 432 // exit_group() is special, we really need it to work. |
| 433 // write() is needed for BPF_ASSERT() to report a useful error message. |
| 434 return Allow(); |
| 435 } |
| 436 return Error(SysnoToRandomErrno(sysno)); |
| 437 } |
| 438 |
| 439 private: |
| 440 DISALLOW_COPY_AND_ASSIGN(SyntheticPolicy); |
| 441 }; |
| 442 |
BPF_TEST_C(SandboxBPF, SyntheticPolicy, SyntheticPolicy) {
  // Ensure that kExpectedReturnValue + syscallnumber + 1 does not overflow
  // an int.
  BPF_ASSERT(std::numeric_limits<int>::max() - kExpectedReturnValue - 1 >=
             static_cast<int>(MAX_PUBLIC_SYSCALL));

  // Walk the whole public syscall range and check that the kernel-side BPF
  // program returns exactly the errno our policy computed for each number.
  for (int syscall_number = static_cast<int>(MIN_SYSCALL);
       syscall_number <= static_cast<int>(MAX_PUBLIC_SYSCALL);
       ++syscall_number) {
    if (syscall_number == __NR_exit_group || syscall_number == __NR_write) {
      // These two are allowed by the policy (see SyntheticPolicy), so skip.
      continue;
    }
    errno = 0;
    BPF_ASSERT(syscall(syscall_number) == -1);
    BPF_ASSERT(errno == SysnoToRandomErrno(syscall_number));
  }
}
| 461 |
| 462 #if defined(__arm__) |
| 463 // A simple policy that tests whether ARM private system calls are supported |
| 464 // by our BPF compiler and by the BPF interpreter in the kernel. |
| 465 |
| 466 // For ARM private system calls, return an errno equal to their offset from |
| 467 // MIN_PRIVATE_SYSCALL plus 1 (to avoid NUL errno). |
| 468 int ArmPrivateSysnoToErrno(int sysno) { |
| 469 if (sysno >= static_cast<int>(MIN_PRIVATE_SYSCALL) && |
| 470 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) { |
| 471 return (sysno - MIN_PRIVATE_SYSCALL) + 1; |
| 472 } else { |
| 473 return ENOSYS; |
| 474 } |
| 475 } |
| 476 |
| 477 class ArmPrivatePolicy : public SandboxBPFDSLPolicy { |
| 478 public: |
| 479 ArmPrivatePolicy() {} |
| 480 virtual ~ArmPrivatePolicy() {} |
| 481 |
| 482 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 483 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 484 // Start from |__ARM_NR_set_tls + 1| so as not to mess with actual |
| 485 // ARM private system calls. |
| 486 if (sysno >= static_cast<int>(__ARM_NR_set_tls + 1) && |
| 487 sysno <= static_cast<int>(MAX_PRIVATE_SYSCALL)) { |
| 488 return Error(ArmPrivateSysnoToErrno(sysno)); |
| 489 } |
| 490 return Allow(); |
| 491 } |
| 492 |
| 493 private: |
| 494 DISALLOW_COPY_AND_ASSIGN(ArmPrivatePolicy); |
| 495 }; |
| 496 |
BPF_TEST_C(SandboxBPF, ArmPrivatePolicy, ArmPrivatePolicy) {
  // Every syscall in the covered range must fail with exactly the errno
  // computed by ArmPrivateSysnoToErrno().
  for (int syscall_number = static_cast<int>(__ARM_NR_set_tls + 1);
       syscall_number <= static_cast<int>(MAX_PRIVATE_SYSCALL);
       ++syscall_number) {
    errno = 0;
    BPF_ASSERT(syscall(syscall_number) == -1);
    BPF_ASSERT(errno == ArmPrivateSysnoToErrno(syscall_number));
  }
}
#endif  // defined(__arm__)
| 507 |
// Unsafe trap handler used by GreyListedPolicy below: counts invocations,
// then forwards the original system call to the kernel.
intptr_t CountSyscalls(const struct arch_seccomp_data& args, void* aux) {
  // Count all invocations of our callback function.
  ++*reinterpret_cast<int*>(aux);

  // Verify that within the callback function all filtering is temporarily
  // disabled (getpid() is denied by the policy, yet succeeds here).
  BPF_ASSERT(syscall(__NR_getpid) > 1);

  // Verify that we can now call the underlying system call without causing
  // infinite recursion.
  return SandboxBPF::ForwardSyscall(args);
}
| 520 |
| 521 class GreyListedPolicy : public SandboxBPFDSLPolicy { |
| 522 public: |
| 523 explicit GreyListedPolicy(int* aux) : aux_(aux) { |
| 524 // Set the global environment for unsafe traps once. |
| 525 EnableUnsafeTraps(); |
| 526 } |
| 527 virtual ~GreyListedPolicy() {} |
| 528 |
| 529 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 530 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 531 // Some system calls must always be allowed, if our policy wants to make |
| 532 // use of UnsafeTrap() |
| 533 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) { |
| 534 return Allow(); |
| 535 } else if (sysno == __NR_getpid) { |
| 536 // Disallow getpid() |
| 537 return Error(EPERM); |
| 538 } else { |
| 539 // Allow (and count) all other system calls. |
| 540 return UnsafeTrap(CountSyscalls, aux_); |
| 541 } |
| 542 } |
| 543 |
| 544 private: |
| 545 int* aux_; |
| 546 |
| 547 DISALLOW_COPY_AND_ASSIGN(GreyListedPolicy); |
| 548 }; |
| 549 |
BPF_TEST(SandboxBPF, GreyListedPolicy, GreyListedPolicy, int /* (*BPF_AUX) */) {
  // getpid() is denied outright by the policy; the counting handler never
  // runs for it.
  BPF_ASSERT(syscall(__NR_getpid) == -1);
  BPF_ASSERT(errno == EPERM);
  BPF_ASSERT(*BPF_AUX == 0);
  // geteuid() and getuid() are both forwarded through the counting handler.
  BPF_ASSERT(syscall(__NR_geteuid) == syscall(__NR_getuid));
  BPF_ASSERT(*BPF_AUX == 2);
  // prctl(PR_GET_NAME) is forwarded too, and must fill in a non-empty name.
  char name[17] = {};
  BPF_ASSERT(!syscall(__NR_prctl,
                      PR_GET_NAME,
                      name,
                      (void*)NULL,
                      (void*)NULL,
                      (void*)NULL));
  BPF_ASSERT(*BPF_AUX == 3);
  BPF_ASSERT(*name);
}
| 566 |
SANDBOX_TEST(SandboxBPF, EnableUnsafeTrapsInSigSysHandler) {
  // Disabling warning messages that could confuse our test framework.
  setenv(kSandboxDebuggingEnv, "t", 0);
  Die::SuppressInfoMessages(true);

  // Unsafe traps must only be enabled when the debugging environment
  // variable is set to a non-empty value.
  unsetenv(kSandboxDebuggingEnv);
  SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
  setenv(kSandboxDebuggingEnv, "", 1);
  SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == false);
  setenv(kSandboxDebuggingEnv, "t", 1);
  SANDBOX_ASSERT(Trap::EnableUnsafeTrapsInSigSysHandler() == true);
}
| 579 |
| 580 intptr_t PrctlHandler(const struct arch_seccomp_data& args, void*) { |
| 581 if (args.args[0] == PR_CAPBSET_DROP && static_cast<int>(args.args[1]) == -1) { |
| 582 // prctl(PR_CAPBSET_DROP, -1) is never valid. The kernel will always |
| 583 // return an error. But our handler allows this call. |
| 584 return 0; |
| 585 } else { |
| 586 return SandboxBPF::ForwardSyscall(args); |
| 587 } |
| 588 } |
| 589 |
| 590 class PrctlPolicy : public SandboxBPFDSLPolicy { |
| 591 public: |
| 592 PrctlPolicy() {} |
| 593 virtual ~PrctlPolicy() {} |
| 594 |
| 595 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 596 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 597 setenv(kSandboxDebuggingEnv, "t", 0); |
| 598 Die::SuppressInfoMessages(true); |
| 599 |
| 600 if (sysno == __NR_prctl) { |
| 601 // Handle prctl() inside an UnsafeTrap() |
| 602 return UnsafeTrap(PrctlHandler, NULL); |
| 603 } |
| 604 |
| 605 // Allow all other system calls. |
| 606 return Allow(); |
| 607 } |
| 608 |
| 609 private: |
| 610 DISALLOW_COPY_AND_ASSIGN(PrctlPolicy); |
| 611 }; |
| 612 |
BPF_TEST_C(SandboxBPF, ForwardSyscall, PrctlPolicy) {
  // This call should never be allowed. But our policy will intercept it and
  // let it pass successfully.
  BPF_ASSERT(
      !prctl(PR_CAPBSET_DROP, -1, (void*)NULL, (void*)NULL, (void*)NULL));

  // Verify that the call will fail, if it makes it all the way to the kernel
  // (the handler only special-cases the argument value -1).
  BPF_ASSERT(
      prctl(PR_CAPBSET_DROP, -2, (void*)NULL, (void*)NULL, (void*)NULL) == -1);

  // And verify that other uses of prctl() work just fine.
  char name[17] = {};
  BPF_ASSERT(!syscall(__NR_prctl,
                      PR_GET_NAME,
                      name,
                      (void*)NULL,
                      (void*)NULL,
                      (void*)NULL));
  BPF_ASSERT(*name);

  // Finally, verify that system calls other than prctl() are completely
  // unaffected by our policy.
  struct utsname uts = {};
  BPF_ASSERT(!uname(&uts));
  BPF_ASSERT(!strcmp(uts.sysname, "Linux"));
}
| 639 |
// Unsafe trap handler that simply re-issues the intercepted system call.
intptr_t AllowRedirectedSyscall(const struct arch_seccomp_data& args, void*) {
  return SandboxBPF::ForwardSyscall(args);
}
| 643 |
// Policy that forwards (almost) every system call through an UnsafeTrap()
// handler; the evaluator is defined out of line below.
class RedirectAllSyscallsPolicy : public SandboxBPFDSLPolicy {
 public:
  RedirectAllSyscallsPolicy() {}
  virtual ~RedirectAllSyscallsPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  DISALLOW_COPY_AND_ASSIGN(RedirectAllSyscallsPolicy);
};
| 654 |
| 655 ResultExpr RedirectAllSyscallsPolicy::EvaluateSyscall(int sysno) const { |
| 656 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 657 setenv(kSandboxDebuggingEnv, "t", 0); |
| 658 Die::SuppressInfoMessages(true); |
| 659 |
| 660 // Some system calls must always be allowed, if our policy wants to make |
| 661 // use of UnsafeTrap() |
| 662 if (SandboxBPF::IsRequiredForUnsafeTrap(sysno)) |
| 663 return Allow(); |
| 664 return UnsafeTrap(AllowRedirectedSyscall, NULL); |
| 665 } |
| 666 |
// File descriptor the SIGBUS handler below writes to; the SigBus test sets
// this to one end of a socketpair before raising the signal.
int bus_handler_fd_ = -1;

// Signals its own execution by writing a single byte to |bus_handler_fd_|.
void SigBusHandler(int, siginfo_t* info, void* void_context) {
  BPF_ASSERT(write(bus_handler_fd_, "\x55", 1) == 1);
}
| 672 |
BPF_TEST_C(SandboxBPF, SigBus, RedirectAllSyscallsPolicy) {
  // We use the SIGBUS bit in the signal mask as a thread-local boolean
  // value in the implementation of UnsafeTrap(). This is obviously a bit
  // of a hack that could conceivably interfere with code that uses SIGBUS
  // in more traditional ways. This test verifies that basic functionality
  // of SIGBUS is not impacted, but it is certainly possibly to construe
  // more complex uses of signals where our use of the SIGBUS mask is not
  // 100% transparent. This is expected behavior.
  int fds[2];
  BPF_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
  bus_handler_fd_ = fds[1];
  struct sigaction sa = {};
  sa.sa_sigaction = SigBusHandler;
  sa.sa_flags = SA_SIGINFO;
  BPF_ASSERT(sigaction(SIGBUS, &sa, NULL) == 0);
  raise(SIGBUS);
  // The handler wrote 0x55 to the other end of the socketpair; read it back
  // to prove the handler actually ran.
  char c = '\000';
  BPF_ASSERT(read(fds[0], &c, 1) == 1);
  BPF_ASSERT(close(fds[0]) == 0);
  BPF_ASSERT(close(fds[1]) == 0);
  BPF_ASSERT(c == 0x55);
}
| 695 |
BPF_TEST_C(SandboxBPF, SigMask, RedirectAllSyscallsPolicy) {
  // Signal masks are potentially tricky to handle. For instance, if we
  // ever tried to update them from inside a Trap() or UnsafeTrap() handler,
  // the call to sigreturn() at the end of the signal handler would undo
  // all of our efforts. So, it makes sense to test that sigprocmask()
  // works, even if we have a policy in place that makes use of UnsafeTrap().
  // In practice, this works because we force sigprocmask() to be handled
  // entirely in the kernel.
  sigset_t mask0, mask1, mask2;

  // Call sigprocmask() to verify that SIGUSR2 wasn't blocked, if we didn't
  // change the mask (it shouldn't have been, as it isn't blocked by default
  // in POSIX).
  //
  // Use SIGUSR2 because Android seems to use SIGUSR1 for some purpose.
  sigemptyset(&mask0);
  BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, &mask1));
  BPF_ASSERT(!sigismember(&mask1, SIGUSR2));

  // Try again, and this time we verify that we can block it. This
  // requires a second call to sigprocmask().
  sigaddset(&mask0, SIGUSR2);
  BPF_ASSERT(!sigprocmask(SIG_BLOCK, &mask0, NULL));
  BPF_ASSERT(!sigprocmask(SIG_BLOCK, NULL, &mask2));
  BPF_ASSERT(sigismember(&mask2, SIGUSR2));
}
| 722 |
BPF_TEST_C(SandboxBPF, UnsafeTrapWithErrno, RedirectAllSyscallsPolicy) {
  // An UnsafeTrap() (or for that matter, a Trap()) has to report error
  // conditions by returning an exit code in the range -1..-4096. This
  // should happen automatically if using ForwardSyscall(). If the TrapFnc()
  // uses some other method to make system calls, then it is responsible
  // for computing the correct return code.
  // This test verifies that ForwardSyscall() does the correct thing.

  // The glibc system wrapper will ultimately set errno for us. So, from normal
  // userspace, all of this should be completely transparent.
  errno = 0;
  BPF_ASSERT(close(-1) == -1);
  BPF_ASSERT(errno == EBADF);

  // Explicitly avoid the glibc wrapper. This is not normally the way anybody
  // would make system calls, but it allows us to verify that we don't
  // accidentally mess with errno, when we shouldn't.
  errno = 0;
  struct arch_seccomp_data args = {};
  args.nr = __NR_close;
  args.args[0] = -1;
  // ForwardSyscall() reports the failure as a negative errno return value
  // and must leave the caller's errno untouched.
  BPF_ASSERT(SandboxBPF::ForwardSyscall(args) == -EBADF);
  BPF_ASSERT(errno == 0);
}
| 747 |
// Trivial broker-initialization callback; unconditionally reports success.
bool NoOpCallback() {
  return true;
}
| 751 |
| 752 // Test a trap handler that makes use of a broker process to open(). |
| 753 |
// Creates and initializes a BrokerProcess that may open/access only
// "/proc/allowed" and "/proc/cpuinfo". Construction happens before the
// sandbox starts, which is the only way to do unsandboxed setup in BPF_TEST.
class InitializedOpenBroker {
 public:
  InitializedOpenBroker() : initialized_(false) {
    std::vector<std::string> allowed_files;
    allowed_files.push_back("/proc/allowed");
    allowed_files.push_back("/proc/cpuinfo");

    // Denied paths fail with EPERM; no writable files are configured.
    broker_process_.reset(
        new BrokerProcess(EPERM, allowed_files, std::vector<std::string>()));
    BPF_ASSERT(broker_process() != NULL);
    BPF_ASSERT(broker_process_->Init(base::Bind(&NoOpCallback)));

    initialized_ = true;
  }
  bool initialized() { return initialized_; }
  class BrokerProcess* broker_process() { return broker_process_.get(); }

 private:
  bool initialized_;
  scoped_ptr<class BrokerProcess> broker_process_;
  DISALLOW_COPY_AND_ASSIGN(InitializedOpenBroker);
};
| 776 |
// Trap handler that services open()/access() style system calls through the
// BrokerProcess passed in via |aux|.
intptr_t BrokerOpenTrapHandler(const struct arch_seccomp_data& args,
                               void* aux) {
  BPF_ASSERT(aux);
  BrokerProcess* broker_process = static_cast<BrokerProcess*>(aux);
  switch (args.nr) {
    case __NR_faccessat:  // access is a wrapper of faccessat in android
      // Only the AT_FDCWD form is expected from the libc wrappers we test.
      BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
      return broker_process->Access(reinterpret_cast<const char*>(args.args[1]),
                                    static_cast<int>(args.args[2]));
#if defined(__NR_access)
    case __NR_access:
      return broker_process->Access(reinterpret_cast<const char*>(args.args[0]),
                                    static_cast<int>(args.args[1]));
#endif
#if defined(__NR_open)
    case __NR_open:
      return broker_process->Open(reinterpret_cast<const char*>(args.args[0]),
                                  static_cast<int>(args.args[1]));
#endif
    case __NR_openat:
      // We only call open() so if we arrive here, it's because glibc uses
      // the openat() system call.
      BPF_ASSERT(static_cast<int>(args.args[0]) == AT_FDCWD);
      return broker_process->Open(reinterpret_cast<const char*>(args.args[1]),
                                  static_cast<int>(args.args[2]));
    default:
      // No other syscall should ever be routed to this handler.
      BPF_ASSERT(false);
      return -ENOSYS;
  }
}
| 807 |
// Policy that diverts all open()/access() style system calls to
// BrokerOpenTrapHandler and allows everything else.
class DenyOpenPolicy : public SandboxBPFDSLPolicy {
 public:
  explicit DenyOpenPolicy(InitializedOpenBroker* iob) : iob_(iob) {}
  virtual ~DenyOpenPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));

    switch (sysno) {
      case __NR_faccessat:
#if defined(__NR_access)
      case __NR_access:
#endif
#if defined(__NR_open)
      case __NR_open:
#endif
      case __NR_openat:
        // We get a InitializedOpenBroker class, but our trap handler wants
        // the BrokerProcess object.
        return Trap(BrokerOpenTrapHandler, iob_->broker_process());
      default:
        return Allow();
    }
  }

 private:
  InitializedOpenBroker* iob_;  // Not owned.

  DISALLOW_COPY_AND_ASSIGN(DenyOpenPolicy);
};
| 838 |
| 839 // We use a InitializedOpenBroker class, so that we can run unsandboxed |
| 840 // code in its constructor, which is the only way to do so in a BPF_TEST. |
BPF_TEST(SandboxBPF,
         UseOpenBroker,
         DenyOpenPolicy,
         InitializedOpenBroker /* (*BPF_AUX) */) {
  BPF_ASSERT(BPF_AUX->initialized());
  BrokerProcess* broker_process = BPF_AUX->broker_process();
  BPF_ASSERT(broker_process != NULL);

  // First, use the broker "manually".
  BPF_ASSERT(broker_process->Open("/proc/denied", O_RDONLY) == -EPERM);
  BPF_ASSERT(broker_process->Access("/proc/denied", R_OK) == -EPERM);
  BPF_ASSERT(broker_process->Open("/proc/allowed", O_RDONLY) == -ENOENT);
  BPF_ASSERT(broker_process->Access("/proc/allowed", R_OK) == -ENOENT);

  // Now use glibc's open() as an external library would; the policy routes
  // it to the broker through the trap handler.
  BPF_ASSERT(open("/proc/denied", O_RDONLY) == -1);
  BPF_ASSERT(errno == EPERM);

  BPF_ASSERT(open("/proc/allowed", O_RDONLY) == -1);
  BPF_ASSERT(errno == ENOENT);

  // Also test glibc's openat(), some versions of libc use it transparently
  // instead of open().
  BPF_ASSERT(openat(AT_FDCWD, "/proc/denied", O_RDONLY) == -1);
  BPF_ASSERT(errno == EPERM);

  BPF_ASSERT(openat(AT_FDCWD, "/proc/allowed", O_RDONLY) == -1);
  BPF_ASSERT(errno == ENOENT);

  // And test glibc's access().
  BPF_ASSERT(access("/proc/denied", R_OK) == -1);
  BPF_ASSERT(errno == EPERM);

  BPF_ASSERT(access("/proc/allowed", R_OK) == -1);
  BPF_ASSERT(errno == ENOENT);

  // This path is also whitelisted and does exist, so broker-mediated access
  // and reads must succeed.
  int cpu_info_access = access("/proc/cpuinfo", R_OK);
  BPF_ASSERT(cpu_info_access == 0);
  int cpu_info_fd = open("/proc/cpuinfo", O_RDONLY);
  BPF_ASSERT(cpu_info_fd >= 0);
  char buf[1024];
  BPF_ASSERT(read(cpu_info_fd, buf, sizeof(buf)) > 0);
}
| 885 |
| 886 // Simple test demonstrating how to use SandboxBPF::Cond() |
| 887 |
// Policy used by the SimpleCondTest below: restricts open()/openat() to
// read-only access and limits prctl() to the dumpable options. The
// decision logic lives in EvaluateSyscall(), defined out-of-line.
class SimpleCondTestPolicy : public SandboxBPFDSLPolicy {
 public:
  SimpleCondTestPolicy() {}
  virtual ~SimpleCondTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  DISALLOW_COPY_AND_ASSIGN(SimpleCondTestPolicy);
};
| 898 |
| 899 ResultExpr SimpleCondTestPolicy::EvaluateSyscall(int sysno) const { |
| 900 DCHECK(SandboxBPF::IsValidSyscallNumber(sysno)); |
| 901 |
| 902 // We deliberately return unusual errno values upon failure, so that we |
| 903 // can uniquely test for these values. In a "real" policy, you would want |
| 904 // to return more traditional values. |
| 905 int flags_argument_position = -1; |
| 906 switch (sysno) { |
| 907 #if defined(__NR_open) |
| 908 case __NR_open: |
| 909 flags_argument_position = 1; |
| 910 #endif |
| 911 case __NR_openat: { // open can be a wrapper for openat(2). |
| 912 if (sysno == __NR_openat) |
| 913 flags_argument_position = 2; |
| 914 |
| 915 // Allow opening files for reading, but don't allow writing. |
| 916 COMPILE_ASSERT(O_RDONLY == 0, O_RDONLY_must_be_all_zero_bits); |
| 917 const Arg<int> flags(flags_argument_position); |
| 918 return If((flags & O_ACCMODE) != 0, Error(EROFS)).Else(Allow()); |
| 919 } |
| 920 case __NR_prctl: { |
| 921 // Allow prctl(PR_SET_DUMPABLE) and prctl(PR_GET_DUMPABLE), but |
| 922 // disallow everything else. |
| 923 const Arg<int> option(0); |
| 924 return If(option == PR_SET_DUMPABLE || option == PR_GET_DUMPABLE, Allow()) |
| 925 .Else(Error(ENOMEM)); |
| 926 } |
| 927 default: |
| 928 return Allow(); |
| 929 } |
| 930 } |
| 931 |
BPF_TEST_C(SandboxBPF, SimpleCondTest, SimpleCondTestPolicy) {
  int fd;
  // The policy forbids opening for writing (EROFS) but permits reading.
  BPF_ASSERT((fd = open("/proc/self/comm", O_RDWR)) == -1);
  BPF_ASSERT(errno == EROFS);
  BPF_ASSERT((fd = open("/proc/self/comm", O_RDONLY)) >= 0);
  close(fd);

  int ret;
  // PR_GET_DUMPABLE/PR_SET_DUMPABLE are allowed; any other prctl option
  // fails with the policy's distinctive ENOMEM error.
  BPF_ASSERT((ret = prctl(PR_GET_DUMPABLE)) >= 0);
  BPF_ASSERT(prctl(PR_SET_DUMPABLE, 1 - ret) == 0);
  BPF_ASSERT(prctl(PR_GET_ENDIAN, &ret) == -1);
  BPF_ASSERT(errno == ENOMEM);
}
| 945 |
| 946 // This test exercises the SandboxBPF::Cond() method by building a complex |
| 947 // tree of conditional equality operations. It then makes system calls and |
| 948 // verifies that they return the values that we expected from our BPF |
| 949 // program. |
class EqualityStressTest {
 public:
  EqualityStressTest() {
    // We want a deterministic test
    srand(0);

    // Iterates over system call numbers and builds a random tree of
    // equality tests.
    // We are actually constructing a graph of ArgValue objects. This
    // graph will later be used to a) compute our sandbox policy, and
    // b) drive the code that verifies the output from the BPF program.
    COMPILE_ASSERT(
        kNumTestCases < (int)(MAX_PUBLIC_SYSCALL - MIN_SYSCALL - 10),
        num_test_cases_must_be_significantly_smaller_than_num_system_calls);
    for (int sysno = MIN_SYSCALL, end = kNumTestCases; sysno < end; ++sysno) {
      if (IsReservedSyscall(sysno)) {
        // Skip reserved system calls. This ensures that our test frame
        // work isn't impacted by the fact that we are overriding
        // a lot of different system calls.
        // A NULL entry keeps the vector indexable by syscall number.
        ++end;
        arg_values_.push_back(NULL);
      } else {
        arg_values_.push_back(
            RandomArgValue(rand() % kMaxArgs, 0, rand() % kMaxArgs));
      }
    }
  }

  ~EqualityStressTest() {
    // arg_values_ owns its trees; tear them down recursively.
    for (std::vector<ArgValue*>::iterator iter = arg_values_.begin();
         iter != arg_values_.end();
         ++iter) {
      DeleteArgValue(*iter);
    }
  }

  // Policy callback: maps "sysno" to either Allow() or the conditional
  // expression encoded by the corresponding ArgValue tree.
  ResultExpr Policy(int sysno) {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno < 0 || sysno >= (int)arg_values_.size() ||
        IsReservedSyscall(sysno)) {
      // We only return ErrorCode values for the system calls that
      // are part of our test data. Every other system call remains
      // allowed.
      return Allow();
    } else {
      // ToErrorCode() turns an ArgValue object into an ErrorCode that is
      // suitable for use by a sandbox policy.
      return ToErrorCode(arg_values_[sysno]);
    }
  }

  void VerifyFilter() {
    // Iterate over all system calls. Skip the system calls that have
    // previously been determined as being reserved.
    for (int sysno = 0; sysno < (int)arg_values_.size(); ++sysno) {
      if (!arg_values_[sysno]) {
        // Skip reserved system calls.
        continue;
      }
      // Verify that system calls return the values that we expect them to
      // return. This involves passing different combinations of system call
      // parameters in order to exercise all possible code paths through the
      // BPF filter program.
      // We arbitrarily start by setting all six system call arguments to
      // zero. And we then recursive traverse our tree of ArgValues to
      // determine the necessary combinations of parameters.
      intptr_t args[6] = {};
      Verify(sysno, args, *arg_values_[sysno]);
    }
  }

 private:
  // One node of the random decision tree. An entry is either a leaf
  // (non-zero "err", NULL "arg_value") or an interior node (zero "err",
  // owning a child ArgValue tree).
  struct ArgValue {
    int argno;  // Argument number to inspect.
    int size;   // Number of test cases (must be > 0).
    struct Tests {
      uint32_t k_value;            // Value to compare syscall arg against.
      int err;                     // If non-zero, errno value to return.
      struct ArgValue* arg_value;  // Otherwise, more args needs inspecting.
    }* tests;
    int err;                     // If none of the tests passed, this is what
    struct ArgValue* arg_value;  // we'll return (this is the "else" branch).
  };

  bool IsReservedSyscall(int sysno) {
    // There are a handful of system calls that we should never use in our
    // test cases. These system calls are needed to allow the test framework
    // to run properly.
    // If we wanted to write fully generic code, there are more system calls
    // that could be listed here, and it is quite difficult to come up with a
    // truly comprehensive list. After all, we are deliberately making system
    // calls unavailable. In practice, we have a pretty good idea of the system
    // calls that will be made by this particular test. So, this small list is
    // sufficient. But if anybody copy'n'pasted this code for other uses, they
    // would have to review the list.
    return sysno == __NR_read || sysno == __NR_write || sysno == __NR_exit ||
           sysno == __NR_exit_group || sysno == __NR_restart_syscall;
  }

  ArgValue* RandomArgValue(int argno, int args_mask, int remaining_args) {
    // Create a new ArgValue and fill it with random data. We use as bit mask
    // to keep track of the system call parameters that have previously been
    // set; this ensures that we won't accidentally define a contradictory
    // set of equality tests.
    struct ArgValue* arg_value = new ArgValue();
    args_mask |= 1 << argno;
    arg_value->argno = argno;

    // Apply some restrictions on just how complex our tests can be.
    // Otherwise, we end up with a BPF program that is too complicated for
    // the kernel to load.
    int fan_out = kMaxFanOut;
    if (remaining_args > 3) {
      fan_out = 1;
    } else if (remaining_args > 2) {
      fan_out = 2;
    }

    // Create a couple of different test cases with randomized values that
    // we want to use when comparing system call parameter number "argno".
    arg_value->size = rand() % fan_out + 1;
    arg_value->tests = new ArgValue::Tests[arg_value->size];

    uint32_t k_value = rand();
    for (int n = 0; n < arg_value->size; ++n) {
      // Ensure that we have unique values
      k_value += rand() % (RAND_MAX / (kMaxFanOut + 1)) + 1;

      // There are two possible types of nodes. Either this is a leaf node;
      // in that case, we have completed all the equality tests that we
      // wanted to perform, and we can now compute a random "errno" value that
      // we should return. Or this is part of a more complex boolean
      // expression; in that case, we have to recursively add tests for some
      // of system call parameters that we have not yet included in our
      // tests.
      arg_value->tests[n].k_value = k_value;
      if (!remaining_args || (rand() & 1)) {
        arg_value->tests[n].err = (rand() % 1000) + 1;
        arg_value->tests[n].arg_value = NULL;
      } else {
        arg_value->tests[n].err = 0;
        arg_value->tests[n].arg_value =
            RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
      }
    }
    // Finally, we have to define what we should return if none of the
    // previous equality tests pass. Again, we can either deal with a leaf
    // node, or we can randomly add another couple of tests.
    if (!remaining_args || (rand() & 1)) {
      arg_value->err = (rand() % 1000) + 1;
      arg_value->arg_value = NULL;
    } else {
      arg_value->err = 0;
      arg_value->arg_value =
          RandomArgValue(RandomArg(args_mask), args_mask, remaining_args - 1);
    }
    // We have now built a new (sub-)tree of ArgValues defining a set of
    // boolean expressions for testing random system call arguments against
    // random values. Return this tree to our caller.
    return arg_value;
  }

  int RandomArg(int args_mask) {
    // Compute a random system call parameter number.
    int argno = rand() % kMaxArgs;

    // Make sure that this same parameter number has not previously been
    // used. Otherwise, we could end up with a test that is impossible to
    // satisfy (e.g. args[0] == 1 && args[0] == 2).
    while (args_mask & (1 << argno)) {
      argno = (argno + 1) % kMaxArgs;
    }
    return argno;
  }

  void DeleteArgValue(ArgValue* arg_value) {
    // Delete an ArgValue and all of its child nodes. This requires
    // recursively descending into the tree.
    if (arg_value) {
      if (arg_value->size) {
        for (int n = 0; n < arg_value->size; ++n) {
          if (!arg_value->tests[n].err) {
            DeleteArgValue(arg_value->tests[n].arg_value);
          }
        }
        delete[] arg_value->tests;
      }
      if (!arg_value->err) {
        DeleteArgValue(arg_value->arg_value);
      }
      delete arg_value;
    }
  }

  ResultExpr ToErrorCode(ArgValue* arg_value) {
    // Compute the ResultExpr that should be returned, if none of our
    // tests succeed (i.e. the system call parameter doesn't match any
    // of the values in arg_value->tests[].k_value).
    ResultExpr err;
    if (arg_value->err) {
      // If this was a leaf node, return the errno value that we expect to
      // return from the BPF filter program.
      err = Error(arg_value->err);
    } else {
      // If this wasn't a leaf node yet, recursively descend into the rest
      // of the tree. This will end up adding a few more SandboxBPF::Cond()
      // tests to our ErrorCode.
      err = ToErrorCode(arg_value->arg_value);
    }

    // Now, iterate over all the test cases that we want to compare against.
    // This builds a chain of SandboxBPF::Cond() tests
    // (aka "if ... elif ... elif ... elif ... fi")
    // Iterating backwards keeps the generated chain in the same order as
    // the tests[] array.
    for (int n = arg_value->size; n-- > 0;) {
      ResultExpr matched;
      // Again, we distinguish between leaf nodes and subtrees.
      if (arg_value->tests[n].err) {
        matched = Error(arg_value->tests[n].err);
      } else {
        matched = ToErrorCode(arg_value->tests[n].arg_value);
      }
      // For now, all of our tests are limited to 32bit.
      // We have separate tests that check the behavior of 32bit vs. 64bit
      // conditional expressions.
      const Arg<uint32_t> arg(arg_value->argno);
      err = If(arg == arg_value->tests[n].k_value, matched).Else(err);
    }
    return err;
  }

  void Verify(int sysno, intptr_t* args, const ArgValue& arg_value) {
    uint32_t mismatched = 0;
    // Iterate over all the k_values in arg_value.tests[] and verify that
    // we see the expected return values from system calls, when we pass
    // the k_value as a parameter in a system call.
    for (int n = arg_value.size; n-- > 0;) {
      mismatched += arg_value.tests[n].k_value;
      args[arg_value.argno] = arg_value.tests[n].k_value;
      if (arg_value.tests[n].err) {
        VerifyErrno(sysno, args, arg_value.tests[n].err);
      } else {
        Verify(sysno, args, *arg_value.tests[n].arg_value);
      }
    }
    // Find a k_value that doesn't match any of the k_values in
    // arg_value.tests[]. In most cases, the current value of "mismatched"
    // would fit this requirement. But on the off-chance that it happens
    // to collide, we double-check.
    // The goto restarts the scan because incrementing "mismatched" may
    // collide with an entry we already passed.
  try_again:
    for (int n = arg_value.size; n-- > 0;) {
      if (mismatched == arg_value.tests[n].k_value) {
        ++mismatched;
        goto try_again;
      }
    }
    // Now verify that we see the expected return value from system calls,
    // if we pass a value that doesn't match any of the conditions (i.e. this
    // is testing the "else" clause of the conditions).
    args[arg_value.argno] = mismatched;
    if (arg_value.err) {
      VerifyErrno(sysno, args, arg_value.err);
    } else {
      Verify(sysno, args, *arg_value.arg_value);
    }
    // Reset args[arg_value.argno]. This is not technically needed, but it
    // makes it easier to reason about the correctness of our tests.
    args[arg_value.argno] = 0;
  }

  void VerifyErrno(int sysno, intptr_t* args, int err) {
    // We installed BPF filters that return different errno values
    // based on the system call number and the parameters that we decided
    // to pass in. Verify that this condition holds true.
    BPF_ASSERT(
        Syscall::Call(
            sysno, args[0], args[1], args[2], args[3], args[4], args[5]) ==
        -err);
  }

  // Vector of ArgValue trees. These trees define all the possible boolean
  // expressions that we want to turn into a BPF filter program.
  std::vector<ArgValue*> arg_values_;

  // Don't increase these values. We are pushing the limits of the maximum
  // BPF program that the kernel will allow us to load. If the values are
  // increased too much, the test will start failing.
#if defined(__aarch64__)
  static const int kNumTestCases = 30;
#else
  static const int kNumTestCases = 40;
#endif
  static const int kMaxFanOut = 3;
  static const int kMaxArgs = 6;
};
| 1244 |
// Thin adapter policy: forwards every syscall decision to the
// EqualityStressTest instance handed in via BPF_AUX.
class EqualityStressTestPolicy : public SandboxBPFDSLPolicy {
 public:
  explicit EqualityStressTestPolicy(EqualityStressTest* aux) : aux_(aux) {}
  virtual ~EqualityStressTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override {
    return aux_->Policy(sysno);
  }

 private:
  EqualityStressTest* aux_;  // Not owned.

  DISALLOW_COPY_AND_ASSIGN(EqualityStressTestPolicy);
};
| 1259 |
// Builds the random tree in EqualityStressTest's constructor (unsandboxed),
// then verifies every encoded condition from inside the sandbox.
BPF_TEST(SandboxBPF,
         EqualityTests,
         EqualityStressTestPolicy,
         EqualityStressTest /* (*BPF_AUX) */) {
  BPF_AUX->VerifyFilter();
}
| 1266 |
// Policy exercising 32bit vs. 64bit argument comparisons; the decision
// logic lives in EvaluateSyscall(), defined out-of-line.
class EqualityArgumentWidthPolicy : public SandboxBPFDSLPolicy {
 public:
  EqualityArgumentWidthPolicy() {}
  virtual ~EqualityArgumentWidthPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  DISALLOW_COPY_AND_ASSIGN(EqualityArgumentWidthPolicy);
};
| 1277 |
ResultExpr EqualityArgumentWidthPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  if (sysno == __NR_uname) {
    // Argument 0 selects whether a 32bit or a 64bit comparison is
    // performed; arg32 and arg64 both inspect argument slot 1.
    const Arg<int> option(0);
    const Arg<uint32_t> arg32(1);
    const Arg<uint64_t> arg64(1);
    return Switch(option)
        .Case(0, If(arg32 == 0x55555555, Error(1)).Else(Error(2)))
#if __SIZEOF_POINTER__ > 4
        .Case(1, If(arg64 == 0x55555555AAAAAAAAULL, Error(1)).Else(Error(2)))
#endif
        .Default(Error(3));
  }
  return Allow();
}
| 1293 |
BPF_TEST_C(SandboxBPF, EqualityArgumentWidth, EqualityArgumentWidthPolicy) {
  // 32bit comparison: only the exact value matches (Error(1) == -1).
  BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0x55555555) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, 0, 0xAAAAAAAA) == -2);
#if __SIZEOF_POINTER__ > 4
  // On 32bit machines, there is no way to pass a 64bit argument through the
  // syscall interface. So, we have to skip the part of the test that requires
  // 64bit arguments.
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x55555555AAAAAAAAULL) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555500000000ULL) == -2);
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x5555555511111111ULL) == -2);
  BPF_ASSERT(Syscall::Call(__NR_uname, 1, 0x11111111AAAAAAAAULL) == -2);
#endif
}
| 1307 |
#if __SIZEOF_POINTER__ > 4
// On 32bit machines, there is no way to pass a 64bit argument through the
// syscall interface. So, we have to skip the part of the test that requires
// 64bit arguments.
// Passing a value with bits set above 32 to a 32bit Arg<> comparison must
// kill the process with the message below.
BPF_DEATH_TEST_C(SandboxBPF,
                 EqualityArgumentUnallowed64bit,
                 DEATH_MESSAGE("Unexpected 64bit argument detected"),
                 EqualityArgumentWidthPolicy) {
  Syscall::Call(__NR_uname, 0, 0x5555555555555555ULL);
}
#endif
| 1319 |
// Policy checking that negative 32bit values compare as expected when
// sign-extended through the syscall interface.
class EqualityWithNegativeArgumentsPolicy : public SandboxBPFDSLPolicy {
 public:
  EqualityWithNegativeArgumentsPolicy() {}
  virtual ~EqualityWithNegativeArgumentsPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    if (sysno == __NR_uname) {
      // TODO(mdempsky): This currently can't be Arg<int> because then
      // 0xFFFFFFFF will be treated as a (signed) int, and then when
      // Arg::EqualTo casts it to uint64_t, it will be sign extended.
      const Arg<unsigned> arg(0);
      return If(arg == 0xFFFFFFFF, Error(1)).Else(Error(2));
    }
    return Allow();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(EqualityWithNegativeArgumentsPolicy);
};
| 1340 |
BPF_TEST_C(SandboxBPF,
           EqualityWithNegativeArguments,
           EqualityWithNegativeArgumentsPolicy) {
  // All three spellings of "all 32 low bits set" must match (Error(1)).
  BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, -1) == -1);
  BPF_ASSERT(Syscall::Call(__NR_uname, -1LL) == -1);
}
| 1348 |
#if __SIZEOF_POINTER__ > 4
BPF_DEATH_TEST_C(SandboxBPF,
                 EqualityWithNegative64bitArguments,
                 DEATH_MESSAGE("Unexpected 64bit argument detected"),
                 EqualityWithNegativeArgumentsPolicy) {
  // When expecting a 32bit system call argument, we look at the MSB of the
  // 64bit value and allow both "0" and "-1". But the latter is allowed only
  // iff the LSB was negative. So, this death test should error out.
  BPF_ASSERT(Syscall::Call(__NR_uname, 0xFFFFFFFF00000000LL) == -1);
}
#endif
| 1360 |
// Policy exercising the "has all bits" peephole optimization of masked
// equality tests; the decision logic lives in EvaluateSyscall().
class AllBitTestPolicy : public SandboxBPFDSLPolicy {
 public:
  AllBitTestPolicy() {}
  virtual ~AllBitTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  // Build a condition that succeeds (Error(1)) iff all bits of the mask
  // are set in syscall argument 1, at 32bit resp. 64bit width.
  static ResultExpr HasAllBits32(uint32_t bits);
  static ResultExpr HasAllBits64(uint64_t bits);

  DISALLOW_COPY_AND_ASSIGN(AllBitTestPolicy);
};
| 1374 |
| 1375 ResultExpr AllBitTestPolicy::HasAllBits32(uint32_t bits) { |
| 1376 if (bits == 0) { |
| 1377 return Error(1); |
| 1378 } |
| 1379 const Arg<uint32_t> arg(1); |
| 1380 return If((arg & bits) == bits, Error(1)).Else(Error(0)); |
| 1381 } |
| 1382 |
| 1383 ResultExpr AllBitTestPolicy::HasAllBits64(uint64_t bits) { |
| 1384 if (bits == 0) { |
| 1385 return Error(1); |
| 1386 } |
| 1387 const Arg<uint64_t> arg(1); |
| 1388 return If((arg & bits) == bits, Error(1)).Else(Error(0)); |
| 1389 } |
| 1390 |
ResultExpr AllBitTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // Test masked-equality cases that should trigger the "has all bits"
  // peephole optimizations. We try to find bitmasks that could conceivably
  // touch corner cases.
  // For all of these tests, we override the uname(). We can make do with
  // a single system call number, as we use the first system call argument to
  // select the different bit masks that we want to test against.
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, HasAllBits32(0x0))
        .Case(1, HasAllBits32(0x1))
        .Case(2, HasAllBits32(0x3))
        .Case(3, HasAllBits32(0x80000000))
#if __SIZEOF_POINTER__ > 4
        .Case(4, HasAllBits64(0x0))
        .Case(5, HasAllBits64(0x1))
        .Case(6, HasAllBits64(0x3))
        .Case(7, HasAllBits64(0x80000000))
        .Case(8, HasAllBits64(0x100000000ULL))
        .Case(9, HasAllBits64(0x300000000ULL))
        .Case(10, HasAllBits64(0x100000001ULL))
#endif
        .Default(Kill("Invalid test case number"));
  }
  return Allow();
}
| 1419 |
| 1420 // Define a macro that performs tests using our test policy. |
| 1421 // NOTE: Not all of the arguments in this macro are actually used! |
| 1422 // They are here just to serve as documentation of the conditions |
| 1423 // implemented in the test policy. |
| 1424 // Most notably, "op" and "mask" are unused by the macro. If you want |
| 1425 // to make changes to these values, you will have to edit the |
| 1426 // test policy instead. |
| 1427 #define BITMASK_TEST(testcase, arg, op, mask, expected_value) \ |
| 1428 BPF_ASSERT(Syscall::Call(__NR_uname, (testcase), (arg)) == (expected_value)) |
| 1429 |
| 1430 // Our uname() system call returns ErrorCode(1) for success and |
| 1431 // ErrorCode(0) for failure. Syscall::Call() turns this into an |
| 1432 // exit code of -1 or 0. |
| 1433 #define EXPECT_FAILURE 0 |
| 1434 #define EXPECT_SUCCESS -1 |
| 1435 |
| 1436 // A couple of our tests behave differently on 32bit and 64bit systems, as |
| 1437 // there is no way for a 32bit system call to pass in a 64bit system call |
| 1438 // argument "arg". |
// We expect these tests to succeed on 64bit systems, but to fail on 32bit
| 1440 // systems. |
| 1441 #define EXPT64_SUCCESS (sizeof(void*) > 4 ? EXPECT_SUCCESS : EXPECT_FAILURE) |
// Each BITMASK_TEST row encodes: test-case selector, argument value,
// (documentation-only) operator and mask, and the expected outcome.
BPF_TEST_C(SandboxBPF, AllBitTests, AllBitTestPolicy) {
  // 32bit test: all of 0x0 (should always be true)
  BITMASK_TEST( 0,                   0, ALLBITS32,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 0,                   1, ALLBITS32,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 0,                   3, ALLBITS32,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 0,         0xFFFFFFFFU, ALLBITS32,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 0,                -1LL, ALLBITS32,          0, EXPECT_SUCCESS);

  // 32bit test: all of 0x1
  BITMASK_TEST( 1,                   0, ALLBITS32,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1,                   1, ALLBITS32,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 1,                   2, ALLBITS32,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1,                   3, ALLBITS32,        0x1, EXPECT_SUCCESS);

  // 32bit test: all of 0x3
  BITMASK_TEST( 2,                   0, ALLBITS32,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2,                   1, ALLBITS32,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2,                   2, ALLBITS32,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2,                   3, ALLBITS32,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2,                   7, ALLBITS32,        0x3, EXPECT_SUCCESS);

  // 32bit test: all of 0x80000000
  BITMASK_TEST( 3,                   0, ALLBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3,         0x40000000U, ALLBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3,         0x80000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3,         0xC0000000U, ALLBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3,      -0x80000000LL, ALLBITS32, 0x80000000, EXPECT_SUCCESS);

#if __SIZEOF_POINTER__ > 4
  // 64bit test: all of 0x0 (should always be true)
  BITMASK_TEST( 4,                   0, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,                   1, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,                   3, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,         0xFFFFFFFFU, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,       0x100000000LL, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,       0x300000000LL, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,0x8000000000000000LL, ALLBITS64,          0, EXPECT_SUCCESS);
  BITMASK_TEST( 4,                -1LL, ALLBITS64,          0, EXPECT_SUCCESS);

  // 64bit test: all of 0x1
  BITMASK_TEST( 5,                   0, ALLBITS64,          1, EXPECT_FAILURE);
  BITMASK_TEST( 5,                   1, ALLBITS64,          1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,                   2, ALLBITS64,          1, EXPECT_FAILURE);
  BITMASK_TEST( 5,                   3, ALLBITS64,          1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,       0x100000000LL, ALLBITS64,          1, EXPECT_FAILURE);
  BITMASK_TEST( 5,       0x100000001LL, ALLBITS64,          1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,       0x100000002LL, ALLBITS64,          1, EXPECT_FAILURE);
  BITMASK_TEST( 5,       0x100000003LL, ALLBITS64,          1, EXPECT_SUCCESS);

  // 64bit test: all of 0x3
  BITMASK_TEST( 6,                   0, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,                   1, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,                   2, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,                   3, ALLBITS64,          3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,                   7, ALLBITS64,          3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000000LL, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,       0x100000001LL, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,       0x100000002LL, ALLBITS64,          3, EXPECT_FAILURE);
  BITMASK_TEST( 6,       0x100000003LL, ALLBITS64,          3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000007LL, ALLBITS64,          3, EXPECT_SUCCESS);

  // 64bit test: all of 0x80000000
  BITMASK_TEST( 7,                   0, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,         0x40000000U, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,         0x80000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,         0xC0000000U, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,      -0x80000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       0x100000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,       0x140000000LL, ALLBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,       0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       0x1C0000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,      -0x180000000LL, ALLBITS64, 0x80000000, EXPECT_SUCCESS);

  // 64bit test: all of 0x100000000
  // Masks above the low 32 bits can only be satisfied on 64bit systems
  // (EXPT64_SUCCESS), since a 32bit syscall cannot carry those bits.
  BITMASK_TEST( 8,       0x000000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x100000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x200000000LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x300000000LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x000000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x100000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x200000001LL, ALLBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x300000001LL, ALLBITS64,0x100000000, EXPT64_SUCCESS);

  // 64bit test: all of 0x300000000
  BITMASK_TEST( 9,       0x000000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x100000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x200000000LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x300000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x700000000LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x000000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x100000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x200000001LL, ALLBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x300000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x700000001LL, ALLBITS64,0x300000000, EXPT64_SUCCESS);

  // 64bit test: all of 0x100000001
  BITMASK_TEST(10,       0x000000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10,       0x000000001LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10,       0x100000000LL, ALLBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10,       0x100000001LL, ALLBITS64,0x100000001, EXPT64_SUCCESS);
  BITMASK_TEST(10,         0xFFFFFFFFU, ALLBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST(10,                 -1L, ALLBITS64,0x100000001, EXPT64_SUCCESS);
#endif
}
| 1546 |
// Policy exercising the "has any bits" peephole optimization of masked
// inequality tests; the decision logic lives in EvaluateSyscall().
class AnyBitTestPolicy : public SandboxBPFDSLPolicy {
 public:
  AnyBitTestPolicy() {}
  virtual ~AnyBitTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  // Build a condition that succeeds (Error(1)) iff at least one bit of
  // the mask is set in syscall argument 1, at 32bit resp. 64bit width.
  static ResultExpr HasAnyBits32(uint32_t);
  static ResultExpr HasAnyBits64(uint64_t);

  DISALLOW_COPY_AND_ASSIGN(AnyBitTestPolicy);
};
| 1560 |
// Returns a DSL fragment failing the syscall with errno 1 when the second
// argument has any of |bits| set, and errno 0 otherwise.  The exact
// expression shape, (arg & bits) != 0, is deliberate: it is the pattern the
// surrounding test expects to trigger the "any bits" peephole optimization,
// so it must not be rewritten into an equivalent form.
ResultExpr AnyBitTestPolicy::HasAnyBits32(uint32_t bits) {
  if (bits == 0) {
    // (arg & 0) != 0 can never hold, so return the failure value directly.
    return Error(0);
  }
  const Arg<uint32_t> arg(1);
  return If((arg & bits) != 0, Error(1)).Else(Error(0));
}
| 1568 |
// 64-bit variant of HasAnyBits32(); same contract and the same deliberate
// (arg & bits) != 0 expression shape (do not restructure).
ResultExpr AnyBitTestPolicy::HasAnyBits64(uint64_t bits) {
  if (bits == 0) {
    // An empty mask can never match; fail unconditionally.
    return Error(0);
  }
  const Arg<uint64_t> arg(1);
  return If((arg & bits) != 0, Error(1)).Else(Error(0));
}
| 1576 |
// Maps a uname() "rule number" (argument 0) to one of the bitmask policies
// above; everything except uname() is allowed outright.
ResultExpr AnyBitTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // Test masked-equality cases that should trigger the "has any bits"
  // peephole optimizations. We try to find bitmasks that could conceivably
  // touch corner cases.
  // For all of these tests, we override the uname(). We can make do with
  // a single system call number, as we use the first system call argument to
  // select the different bit masks that we want to test against.
  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, HasAnyBits32(0x0))
        .Case(1, HasAnyBits32(0x1))
        .Case(2, HasAnyBits32(0x3))
        .Case(3, HasAnyBits32(0x80000000))
#if __SIZEOF_POINTER__ > 4
        // The 64-bit cases are only meaningful when arguments are not
        // truncated to 32 bits.
        .Case(4, HasAnyBits64(0x0))
        .Case(5, HasAnyBits64(0x1))
        .Case(6, HasAnyBits64(0x3))
        .Case(7, HasAnyBits64(0x80000000))
        .Case(8, HasAnyBits64(0x100000000ULL))
        .Case(9, HasAnyBits64(0x300000000ULL))
        .Case(10, HasAnyBits64(0x100000001ULL))
#endif
        .Default(Kill("Invalid test case number"));
  }
  return Allow();
}
| 1605 |
// End-to-end check of AnyBitTestPolicy.  Each BITMASK_TEST invokes uname()
// with the rule number in argument 0 and the value under test in argument 1.
// The policy returns Error(1) when the value has any of the rule's mask bits
// set and Error(0) otherwise; EXPECT_SUCCESS/EXPECT_FAILURE check for those
// outcomes (see the BITMASK_TEST definition earlier in this file).
// EXPT64_SUCCESS appears to mark cases that can only succeed on 64-bit
// platforms, where the upper half of the argument is not truncated.
BPF_TEST_C(SandboxBPF, AnyBitTests, AnyBitTestPolicy) {
  // 32bit test: any of 0x0 (should always be false)
  BITMASK_TEST( 0,                   0, ANYBITS32,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0,                   1, ANYBITS32,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0,                   3, ANYBITS32,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0,         0xFFFFFFFFU, ANYBITS32,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 0,                -1LL, ANYBITS32,        0x0, EXPECT_FAILURE);

  // 32bit test: any of 0x1
  BITMASK_TEST( 1,                   0, ANYBITS32,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1,                   1, ANYBITS32,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 1,                   2, ANYBITS32,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 1,                   3, ANYBITS32,        0x1, EXPECT_SUCCESS);

  // 32bit test: any of 0x3
  BITMASK_TEST( 2,                   0, ANYBITS32,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 2,                   1, ANYBITS32,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2,                   2, ANYBITS32,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2,                   3, ANYBITS32,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 2,                   7, ANYBITS32,        0x3, EXPECT_SUCCESS);

  // 32bit test: any of 0x80000000
  BITMASK_TEST( 3,                   0, ANYBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3,         0x40000000U, ANYBITS32, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 3,         0x80000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3,         0xC0000000U, ANYBITS32, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 3,       -0x80000000LL, ANYBITS32, 0x80000000, EXPECT_SUCCESS);

#if __SIZEOF_POINTER__ > 4
  // 64bit test: any of 0x0 (should always be false)
  BITMASK_TEST( 4,                   0, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,                   1, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,                   3, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,         0xFFFFFFFFU, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,       0x100000000LL, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,       0x300000000LL, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,0x8000000000000000LL, ANYBITS64,        0x0, EXPECT_FAILURE);
  BITMASK_TEST( 4,                -1LL, ANYBITS64,        0x0, EXPECT_FAILURE);

  // 64bit test: any of 0x1
  BITMASK_TEST( 5,                   0, ANYBITS64,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5,                   1, ANYBITS64,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,                   2, ANYBITS64,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5,                   3, ANYBITS64,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,       0x100000001LL, ANYBITS64,        0x1, EXPECT_SUCCESS);
  BITMASK_TEST( 5,       0x100000000LL, ANYBITS64,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5,       0x100000002LL, ANYBITS64,        0x1, EXPECT_FAILURE);
  BITMASK_TEST( 5,       0x100000003LL, ANYBITS64,        0x1, EXPECT_SUCCESS);

  // 64bit test: any of 0x3
  BITMASK_TEST( 6,                   0, ANYBITS64,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 6,                   1, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,                   2, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,                   3, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,                   7, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000000LL, ANYBITS64,        0x3, EXPECT_FAILURE);
  BITMASK_TEST( 6,       0x100000001LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000002LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000003LL, ANYBITS64,        0x3, EXPECT_SUCCESS);
  BITMASK_TEST( 6,       0x100000007LL, ANYBITS64,        0x3, EXPECT_SUCCESS);

  // 64bit test: any of 0x80000000
  BITMASK_TEST( 7,                   0, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,         0x40000000U, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,         0x80000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,         0xC0000000U, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       -0x80000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       0x100000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,       0x140000000LL, ANYBITS64, 0x80000000, EXPECT_FAILURE);
  BITMASK_TEST( 7,       0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,       0x1C0000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);
  BITMASK_TEST( 7,      -0x180000000LL, ANYBITS64, 0x80000000, EXPECT_SUCCESS);

  // 64bit test: any of 0x100000000
  BITMASK_TEST( 8,       0x000000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x100000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x200000000LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x300000000LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x000000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x100000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);
  BITMASK_TEST( 8,       0x200000001LL, ANYBITS64,0x100000000, EXPECT_FAILURE);
  BITMASK_TEST( 8,       0x300000001LL, ANYBITS64,0x100000000, EXPT64_SUCCESS);

  // 64bit test: any of 0x300000000
  BITMASK_TEST( 9,       0x000000000LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x100000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x200000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x300000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x700000000LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x000000001LL, ANYBITS64,0x300000000, EXPECT_FAILURE);
  BITMASK_TEST( 9,       0x100000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x200000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x300000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);
  BITMASK_TEST( 9,       0x700000001LL, ANYBITS64,0x300000000, EXPT64_SUCCESS);

  // 64bit test: any of 0x100000001
  BITMASK_TEST( 10,      0x000000000LL, ANYBITS64,0x100000001, EXPECT_FAILURE);
  BITMASK_TEST( 10,      0x000000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
  BITMASK_TEST( 10,      0x100000000LL, ANYBITS64,0x100000001, EXPT64_SUCCESS);
  BITMASK_TEST( 10,      0x100000001LL, ANYBITS64,0x100000001, EXPECT_SUCCESS);
  BITMASK_TEST( 10,        0xFFFFFFFFU, ANYBITS64,0x100000001, EXPECT_SUCCESS);
  BITMASK_TEST( 10,                -1L, ANYBITS64,0x100000001, EXPECT_SUCCESS);
#endif
}
| 1710 |
// Policy used to exercise masked-equality ((arg & mask) == value) matching
// in the BPF DSL.  Rule selection happens through the first uname()
// argument; see EvaluateSyscall() below.
class MaskedEqualTestPolicy : public SandboxBPFDSLPolicy {
 public:
  MaskedEqualTestPolicy() {}
  virtual ~MaskedEqualTestPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  // Build a policy fragment that returns Error(1) when the second system
  // call argument, masked with |mask|, equals |value|, Error(0) otherwise.
  static ResultExpr MaskedEqual32(uint32_t mask, uint32_t value);
  static ResultExpr MaskedEqual64(uint64_t mask, uint64_t value);

  DISALLOW_COPY_AND_ASSIGN(MaskedEqualTestPolicy);
};
| 1724 |
// Returns a DSL fragment failing with errno 1 when (arg1 & mask) == value,
// and errno 0 otherwise.  The expression shape is what the test exercises;
// do not restructure it.
ResultExpr MaskedEqualTestPolicy::MaskedEqual32(uint32_t mask, uint32_t value) {
  const Arg<uint32_t> arg(1);
  return If((arg & mask) == value, Error(1)).Else(Error(0));
}
| 1729 |
// 64-bit variant of MaskedEqual32(); same contract, same deliberate
// expression shape.
ResultExpr MaskedEqualTestPolicy::MaskedEqual64(uint64_t mask, uint64_t value) {
  const Arg<uint64_t> arg(1);
  return If((arg & mask) == value, Error(1)).Else(Error(0));
}
| 1734 |
// Maps a uname() "rule number" (argument 0) to one of the masked-equality
// policies above; everything except uname() is allowed outright.
ResultExpr MaskedEqualTestPolicy::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));

  if (sysno == __NR_uname) {
    const Arg<int> option(0);
    return Switch(option)
        .Case(0, MaskedEqual32(0x00ff00ff, 0x005500aa))
#if __SIZEOF_POINTER__ > 4
        // 64-bit rules: mask touching only the upper half, and a mask
        // spanning both halves of the argument.
        .Case(1, MaskedEqual64(0x00ff00ff00000000, 0x005500aa00000000))
        .Case(2, MaskedEqual64(0x00ff00ff00ff00ff, 0x005500aa005500aa))
#endif
        .Default(Kill("Invalid test case number"));
  }

  return Allow();
}
| 1751 |
// Invokes uname() with |rulenum| selecting a rule in MaskedEqualTestPolicy
// and |arg| as the value under test, then asserts the policy's verdict.
#define MASKEQ_TEST(rulenum, arg, expected_result) \
  BPF_ASSERT(Syscall::Call(__NR_uname, (rulenum), (arg)) == (expected_result))
| 1754 |
| 1755 BPF_TEST_C(SandboxBPF, MaskedEqualTests, MaskedEqualTestPolicy) { |
| 1756 // Allowed: 0x__55__aa |
| 1757 MASKEQ_TEST(0, 0x00000000, EXPECT_FAILURE); |
| 1758 MASKEQ_TEST(0, 0x00000001, EXPECT_FAILURE); |
| 1759 MASKEQ_TEST(0, 0x00000003, EXPECT_FAILURE); |
| 1760 MASKEQ_TEST(0, 0x00000100, EXPECT_FAILURE); |
| 1761 MASKEQ_TEST(0, 0x00000300, EXPECT_FAILURE); |
| 1762 MASKEQ_TEST(0, 0x005500aa, EXPECT_SUCCESS); |
| 1763 MASKEQ_TEST(0, 0x005500ab, EXPECT_FAILURE); |
| 1764 MASKEQ_TEST(0, 0x005600aa, EXPECT_FAILURE); |
| 1765 MASKEQ_TEST(0, 0x005501aa, EXPECT_SUCCESS); |
| 1766 MASKEQ_TEST(0, 0x005503aa, EXPECT_SUCCESS); |
| 1767 MASKEQ_TEST(0, 0x555500aa, EXPECT_SUCCESS); |
| 1768 MASKEQ_TEST(0, 0xaa5500aa, EXPECT_SUCCESS); |
| 1769 |
| 1770 #if __SIZEOF_POINTER__ > 4 |
| 1771 // Allowed: 0x__55__aa________ |
| 1772 MASKEQ_TEST(1, 0x0000000000000000, EXPECT_FAILURE); |
| 1773 MASKEQ_TEST(1, 0x0000000000000010, EXPECT_FAILURE); |
| 1774 MASKEQ_TEST(1, 0x0000000000000050, EXPECT_FAILURE); |
| 1775 MASKEQ_TEST(1, 0x0000000100000000, EXPECT_FAILURE); |
| 1776 MASKEQ_TEST(1, 0x0000000300000000, EXPECT_FAILURE); |
| 1777 MASKEQ_TEST(1, 0x0000010000000000, EXPECT_FAILURE); |
| 1778 MASKEQ_TEST(1, 0x0000030000000000, EXPECT_FAILURE); |
| 1779 MASKEQ_TEST(1, 0x005500aa00000000, EXPECT_SUCCESS); |
| 1780 MASKEQ_TEST(1, 0x005500ab00000000, EXPECT_FAILURE); |
| 1781 MASKEQ_TEST(1, 0x005600aa00000000, EXPECT_FAILURE); |
| 1782 MASKEQ_TEST(1, 0x005501aa00000000, EXPECT_SUCCESS); |
| 1783 MASKEQ_TEST(1, 0x005503aa00000000, EXPECT_SUCCESS); |
| 1784 MASKEQ_TEST(1, 0x555500aa00000000, EXPECT_SUCCESS); |
| 1785 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS); |
| 1786 MASKEQ_TEST(1, 0xaa5500aa00000000, EXPECT_SUCCESS); |
| 1787 MASKEQ_TEST(1, 0xaa5500aa0000cafe, EXPECT_SUCCESS); |
| 1788 |
| 1789 // Allowed: 0x__55__aa__55__aa |
| 1790 MASKEQ_TEST(2, 0x0000000000000000, EXPECT_FAILURE); |
| 1791 MASKEQ_TEST(2, 0x0000000000000010, EXPECT_FAILURE); |
| 1792 MASKEQ_TEST(2, 0x0000000000000050, EXPECT_FAILURE); |
| 1793 MASKEQ_TEST(2, 0x0000000100000000, EXPECT_FAILURE); |
| 1794 MASKEQ_TEST(2, 0x0000000300000000, EXPECT_FAILURE); |
| 1795 MASKEQ_TEST(2, 0x0000010000000000, EXPECT_FAILURE); |
| 1796 MASKEQ_TEST(2, 0x0000030000000000, EXPECT_FAILURE); |
| 1797 MASKEQ_TEST(2, 0x00000000005500aa, EXPECT_FAILURE); |
| 1798 MASKEQ_TEST(2, 0x005500aa00000000, EXPECT_FAILURE); |
| 1799 MASKEQ_TEST(2, 0x005500aa005500aa, EXPECT_SUCCESS); |
| 1800 MASKEQ_TEST(2, 0x005500aa005700aa, EXPECT_FAILURE); |
| 1801 MASKEQ_TEST(2, 0x005700aa005500aa, EXPECT_FAILURE); |
| 1802 MASKEQ_TEST(2, 0x005500aa004500aa, EXPECT_FAILURE); |
| 1803 MASKEQ_TEST(2, 0x004500aa005500aa, EXPECT_FAILURE); |
| 1804 MASKEQ_TEST(2, 0x005512aa005500aa, EXPECT_SUCCESS); |
| 1805 MASKEQ_TEST(2, 0x005500aa005534aa, EXPECT_SUCCESS); |
| 1806 MASKEQ_TEST(2, 0xff5500aa0055ffaa, EXPECT_SUCCESS); |
| 1807 #endif |
| 1808 } |
| 1809 |
| 1810 intptr_t PthreadTrapHandler(const struct arch_seccomp_data& args, void* aux) { |
| 1811 if (args.args[0] != (CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD)) { |
| 1812 // We expect to get called for an attempt to fork(). No need to log that |
| 1813 // call. But if we ever get called for anything else, we want to verbosely |
| 1814 // print as much information as possible. |
| 1815 const char* msg = (const char*)aux; |
| 1816 printf( |
| 1817 "Clone() was called with unexpected arguments\n" |
| 1818 " nr: %d\n" |
| 1819 " 1: 0x%llX\n" |
| 1820 " 2: 0x%llX\n" |
| 1821 " 3: 0x%llX\n" |
| 1822 " 4: 0x%llX\n" |
| 1823 " 5: 0x%llX\n" |
| 1824 " 6: 0x%llX\n" |
| 1825 "%s\n", |
| 1826 args.nr, |
| 1827 (long long)args.args[0], |
| 1828 (long long)args.args[1], |
| 1829 (long long)args.args[2], |
| 1830 (long long)args.args[3], |
| 1831 (long long)args.args[4], |
| 1832 (long long)args.args[5], |
| 1833 msg); |
| 1834 } |
| 1835 return -EPERM; |
| 1836 } |
| 1837 |
// Policy that permits thread creation via clone() only when the flags match
// one of a small set of exact, known-good masks (equality comparison); see
// EvaluateSyscall() below.
class PthreadPolicyEquality : public SandboxBPFDSLPolicy {
 public:
  PthreadPolicyEquality() {}
  virtual ~PthreadPolicyEquality() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  DISALLOW_COPY_AND_ASSIGN(PthreadPolicyEquality);
};
| 1848 |
ResultExpr PthreadPolicyEquality::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // This policy allows creating threads with pthread_create(). But it
  // doesn't allow any other uses of clone(). Most notably, it does not
  // allow callers to implement fork() or vfork() by passing suitable flags
  // to the clone() system call.
  if (sysno == __NR_clone) {
    // We have seen two different valid combinations of flags. Glibc
    // uses the more modern flags, sets the TLS from the call to clone(), and
    // uses futexes to monitor threads. Android's C run-time library, doesn't
    // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
    // More recent versions of Android don't set CLONE_DETACHED anymore, so
    // the last case accounts for that.
    // The following policy is very strict. It only allows the exact masks
    // that we have seen in known implementations. It is probably somewhat
    // stricter than what we would want to do.
    const uint64_t kGlibcCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
                                     CLONE_SIGHAND | CLONE_THREAD |
                                     CLONE_SYSVSEM | CLONE_SETTLS |
                                     CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
    const uint64_t kBaseAndroidCloneMask = CLONE_VM | CLONE_FS | CLONE_FILES |
                                           CLONE_SIGHAND | CLONE_THREAD |
                                           CLONE_SYSVSEM;
    // Anything other than these three exact masks traps into
    // PthreadTrapHandler, which logs and returns -EPERM.
    const Arg<unsigned long> flags(0);
    return If(flags == kGlibcCloneMask ||
                  flags == (kBaseAndroidCloneMask | CLONE_DETACHED) ||
                  flags == kBaseAndroidCloneMask,
              Allow()).Else(Trap(PthreadTrapHandler, "Unknown mask"));
  }

  return Allow();
}
| 1881 |
// Policy that permits thread creation via clone() based on bitmask tests of
// the flags argument rather than exact equality; more lenient than
// PthreadPolicyEquality.  See EvaluateSyscall() below.
class PthreadPolicyBitMask : public SandboxBPFDSLPolicy {
 public:
  PthreadPolicyBitMask() {}
  virtual ~PthreadPolicyBitMask() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override;

 private:
  // DSL predicates over the clone() flags argument; see definitions below.
  static BoolExpr HasAnyBits(const Arg<unsigned long>& arg, unsigned long bits);
  static BoolExpr HasAllBits(const Arg<unsigned long>& arg, unsigned long bits);

  DISALLOW_COPY_AND_ASSIGN(PthreadPolicyBitMask);
};
| 1895 |
// DSL predicate: true when |arg| has at least one of |bits| set.
BoolExpr PthreadPolicyBitMask::HasAnyBits(const Arg<unsigned long>& arg,
                                          unsigned long bits) {
  return (arg & bits) != 0;
}
| 1900 |
// DSL predicate: true when |arg| has every bit of |bits| set.
BoolExpr PthreadPolicyBitMask::HasAllBits(const Arg<unsigned long>& arg,
                                          unsigned long bits) {
  return (arg & bits) == bits;
}
| 1905 |
ResultExpr PthreadPolicyBitMask::EvaluateSyscall(int sysno) const {
  DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
  // This policy allows creating threads with pthread_create(). But it
  // doesn't allow any other uses of clone(). Most notably, it does not
  // allow callers to implement fork() or vfork() by passing suitable flags
  // to the clone() system call.
  if (sysno == __NR_clone) {
    // We have seen two different valid combinations of flags. Glibc
    // uses the more modern flags, sets the TLS from the call to clone(), and
    // uses futexes to monitor threads. Android's C run-time library, doesn't
    // do any of this, but it sets the obsolete (and no-op) CLONE_DETACHED.
    // The following policy allows for either combination of flags, but it
    // is generally a little more conservative than strictly necessary. We
    // err on the side of rather safe than sorry.
    // Very noticeably though, we disallow fork() (which is often just a
    // wrapper around clone()).
    const unsigned long kMandatoryFlags = CLONE_VM | CLONE_FS | CLONE_FILES |
                                          CLONE_SIGHAND | CLONE_THREAD |
                                          CLONE_SYSVSEM;
    // TLS/futex-related flags must be set all together or not at all.
    const unsigned long kFutexFlags =
        CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
    const unsigned long kNoopFlags = CLONE_DETACHED;
    const unsigned long kKnownFlags =
        kMandatoryFlags | kFutexFlags | kNoopFlags;

    // Checks, in order: no unknown flags; all mandatory flags present;
    // futex flags all-or-nothing.  Violations trap into PthreadTrapHandler
    // with a descriptive message; conforming calls are allowed.
    const Arg<unsigned long> flags(0);
    return If(HasAnyBits(flags, ~kKnownFlags),
              Trap(PthreadTrapHandler, "Unexpected CLONE_XXX flag found"))
        .ElseIf(!HasAllBits(flags, kMandatoryFlags),
                Trap(PthreadTrapHandler,
                     "Missing mandatory CLONE_XXX flags "
                     "when creating new thread"))
        .ElseIf(
             !HasAllBits(flags, kFutexFlags) && HasAnyBits(flags, kFutexFlags),
             Trap(PthreadTrapHandler,
                  "Must set either all or none of the TLS and futex bits in "
                  "call to clone()"))
        .Else(Allow());
  }

  return Allow();
}
| 1948 |
// Thread entry point for PthreadTest(): increments the int pointed to by
// |arg| and wakes one waiter on that address via futex(FUTEX_WAKE), so the
// spawning thread can observe that the thread actually ran.
static void* ThreadFnc(void* arg) {
  ++*reinterpret_cast<int*>(arg);
  Syscall::Call(__NR_futex, arg, FUTEX_WAKE, 1, 0, 0, 0);
  return NULL;
}
| 1954 |
// Shared body for the PthreadEquality and PthreadBitMask tests: verifies
// that pthread_create() works (joinable and detached) under the active
// policy, while a direct fork()-style clone() is denied with EPERM.
static void PthreadTest() {
  // Attempt to start a joinable thread. This should succeed.
  pthread_t thread;
  int thread_ran = 0;
  BPF_ASSERT(!pthread_create(&thread, NULL, ThreadFnc, &thread_ran));
  BPF_ASSERT(!pthread_join(thread, NULL));
  BPF_ASSERT(thread_ran);

  // Attempt to start a detached thread. This should succeed.
  thread_ran = 0;
  pthread_attr_t attr;
  BPF_ASSERT(!pthread_attr_init(&attr));
  BPF_ASSERT(!pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  BPF_ASSERT(!pthread_create(&thread, &attr, ThreadFnc, &thread_ran));
  BPF_ASSERT(!pthread_attr_destroy(&attr));
  // The thread cannot be joined, so wait for ThreadFnc's futex wake-up
  // instead; retry if the wait is interrupted by a signal.
  while (Syscall::Call(__NR_futex, &thread_ran, FUTEX_WAIT, 0, 0, 0, 0) ==
         -EINTR) {
  }
  BPF_ASSERT(thread_ran);

  // Attempt to fork() a process using clone(). This should fail. We use the
  // same flags that glibc uses when calling fork(). But we don't actually
  // try calling the fork() implementation in the C run-time library, as
  // run-time libraries other than glibc might call __NR_fork instead of
  // __NR_clone, and that would introduce a bogus test failure.
  int pid;
  BPF_ASSERT(Syscall::Call(__NR_clone,
                           CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD,
                           0,
                           0,
                           &pid) == -EPERM);
}
| 1987 |
// Runs the shared pthread checks under the exact-mask clone() policy.
BPF_TEST_C(SandboxBPF, PthreadEquality, PthreadPolicyEquality) {
  PthreadTest();
}
| 1991 |
// Runs the shared pthread checks under the bitmask-based clone() policy.
BPF_TEST_C(SandboxBPF, PthreadBitMask, PthreadPolicyBitMask) {
  PthreadTest();
}
| 1995 |
// libc might not define these even though the kernel supports it.
#ifndef PTRACE_O_TRACESECCOMP
#define PTRACE_O_TRACESECCOMP 0x00000080
#endif

#ifdef PTRACE_EVENT_SECCOMP
#define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
#else
// When Debian/Ubuntu backported seccomp-bpf support into earlier kernels, they
// changed the value of PTRACE_EVENT_SECCOMP from 7 to 8, since 7 was taken by
// PTRACE_EVENT_STOP (upstream chose to renumber PTRACE_EVENT_STOP to 128). If
// PTRACE_EVENT_SECCOMP isn't defined, we have no choice but to consider both
// values here.
#define IS_SECCOMP_EVENT(status) ((status >> 16) == 7 || (status >> 16) == 8)
#endif

// ARM-only ptrace request used by SetSyscall() below; libc headers may not
// expose it.
#if defined(__arm__)
#ifndef PTRACE_SET_SYSCALL
#define PTRACE_SET_SYSCALL 23
#endif
#endif

// aarch64 libc headers may lack the legacy GETREGS/SETREGS request numbers
// used by the SeccompRetTrace test below.
#if defined(__aarch64__)
#ifndef PTRACE_GETREGS
#define PTRACE_GETREGS 12
#endif
#endif

#if defined(__aarch64__)
#ifndef PTRACE_SETREGS
#define PTRACE_SETREGS 13
#endif
#endif
| 2029 |
// Changes the syscall to run for a child being sandboxed using seccomp-bpf with
// PTRACE_O_TRACESECCOMP. Should only be called when the child is stopped on
// PTRACE_EVENT_SECCOMP.
//
// regs should contain the current set of registers of the child, obtained using
// PTRACE_GETREGS.
//
// Depending on the architecture, this may modify regs, so the caller is
// responsible for committing these changes using PTRACE_SETREGS.
//
// Returns the result of the raw ptrace() syscall on ARM, 0 otherwise.
long SetSyscall(pid_t pid, regs_struct* regs, int syscall_number) {
#if defined(__arm__)
  // On ARM, the syscall is changed using PTRACE_SET_SYSCALL. We cannot use the
  // libc ptrace call as the request parameter is an enum, and
  // PTRACE_SET_SYSCALL may not be in the enum.
  return syscall(__NR_ptrace, PTRACE_SET_SYSCALL, pid, NULL, syscall_number);
#endif

  // On all other architectures, patch the syscall-number register in the
  // in-memory register set; the caller commits it with PTRACE_SETREGS.
  // (On ARM this code is unreachable due to the return above.)
  SECCOMP_PT_SYSCALL(*regs) = syscall_number;
  return 0;
}
| 2050 |
// Payload attached to every Trace() result below; the tracer reads it back
// via PTRACE_GETEVENTMSG and asserts it matches.
const uint16_t kTraceData = 0xcc;

// Policy that returns SECCOMP_RET_TRACE for every system call, so that an
// attached ptrace() tracer observes each syscall the sandboxed child makes.
class TraceAllPolicy : public SandboxBPFDSLPolicy {
 public:
  TraceAllPolicy() {}
  virtual ~TraceAllPolicy() {}

  virtual ResultExpr EvaluateSyscall(int system_call_number) const override {
    return Trace(kTraceData);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(TraceAllPolicy);
};
| 2065 |
// Verifies SECCOMP_RET_TRACE end to end: a forked child sandboxes itself
// under TraceAllPolicy while the parent attaches as a ptrace() tracer with
// PTRACE_O_TRACESECCOMP, rewrites selected syscalls at each seccomp stop
// (skips writes to stdout, turns kill() into exit()), and finally checks the
// child's exit status.
SANDBOX_TEST(SandboxBPF, DISABLE_ON_TSAN(SeccompRetTrace)) {
  if (SandboxBPF::SupportsSeccompSandbox(-1) !=
      sandbox::SandboxBPF::STATUS_AVAILABLE) {
    return;
  }

// This test is disabled on arm due to a kernel bug.
// See https://code.google.com/p/chromium/issues/detail?id=383977
#if defined(__arm__) || defined(__aarch64__)
  printf("This test is currently disabled on ARM32/64 due to a kernel bug.");
  return;
#endif

#if defined(__mips__)
  // TODO: Figure out how to support specificity of handling indirect syscalls
  // in this test and enable it.
  printf("This test is currently disabled on MIPS.");
  return;
#endif

  pid_t pid = fork();
  BPF_ASSERT_NE(-1, pid);
  if (pid == 0) {
    // Child: request tracing, stop so the parent can set ptrace options,
    // then engage the trace-everything sandbox.
    pid_t my_pid = getpid();
    BPF_ASSERT_NE(-1, ptrace(PTRACE_TRACEME, -1, NULL, NULL));
    BPF_ASSERT_EQ(0, raise(SIGSTOP));
    SandboxBPF sandbox;
    sandbox.SetSandboxPolicy(new TraceAllPolicy);
    BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));

    // getpid is allowed.
    BPF_ASSERT_EQ(my_pid, syscall(__NR_getpid));

    // write to stdout is skipped and returns a fake value.
    BPF_ASSERT_EQ(kExpectedReturnValue,
                  syscall(__NR_write, STDOUT_FILENO, "A", 1));

    // kill is rewritten to exit(kExpectedReturnValue).
    syscall(__NR_kill, my_pid, SIGKILL);

    // Should not be reached.
    BPF_ASSERT(false);
  }

  // Parent: wait for the child's SIGSTOP, then enable seccomp event tracing.
  int status;
  BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, WUNTRACED)) != -1);
  BPF_ASSERT(WIFSTOPPED(status));

  BPF_ASSERT_NE(-1,
                ptrace(PTRACE_SETOPTIONS,
                       pid,
                       NULL,
                       reinterpret_cast<void*>(PTRACE_O_TRACESECCOMP)));
  BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
  while (true) {
    BPF_ASSERT(HANDLE_EINTR(waitpid(pid, &status, 0)) != -1);
    if (WIFEXITED(status) || WIFSIGNALED(status)) {
      // The child must have exited cleanly with the expected status (set by
      // the rewritten kill() below), not died from SIGKILL.
      BPF_ASSERT(WIFEXITED(status));
      BPF_ASSERT_EQ(kExpectedReturnValue, WEXITSTATUS(status));
      break;
    }

    // Pass through any stop that is not a seccomp event (e.g. signals).
    if (!WIFSTOPPED(status) || WSTOPSIG(status) != SIGTRAP ||
        !IS_SECCOMP_EVENT(status)) {
      BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
      continue;
    }

    // The event message must be the payload TraceAllPolicy attached.
    unsigned long data;
    BPF_ASSERT_NE(-1, ptrace(PTRACE_GETEVENTMSG, pid, NULL, &data));
    BPF_ASSERT_EQ(kTraceData, data);

    regs_struct regs;
    BPF_ASSERT_NE(-1, ptrace(PTRACE_GETREGS, pid, NULL, &regs));
    switch (SECCOMP_PT_SYSCALL(regs)) {
      case __NR_write:
        // Skip writes to stdout, make it return kExpectedReturnValue. Allow
        // writes to stderr so that BPF_ASSERT messages show up.
        if (SECCOMP_PT_PARM1(regs) == STDOUT_FILENO) {
          BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, -1));
          SECCOMP_PT_RESULT(regs) = kExpectedReturnValue;
          BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
        }
        break;

      case __NR_kill:
        // Rewrite to exit(kExpectedReturnValue).
        BPF_ASSERT_NE(-1, SetSyscall(pid, &regs, __NR_exit));
        SECCOMP_PT_PARM1(regs) = kExpectedReturnValue;
        BPF_ASSERT_NE(-1, ptrace(PTRACE_SETREGS, pid, NULL, &regs));
        break;

      default:
        // Allow all other syscalls.
        break;
    }

    BPF_ASSERT_NE(-1, ptrace(PTRACE_CONT, pid, NULL, NULL));
  }
}
| 2166 |
| 2167 // Android does not expose pread64 nor pwrite64. |
| 2168 #if !defined(OS_ANDROID) |
| 2169 |
| 2170 bool FullPwrite64(int fd, const char* buffer, size_t count, off64_t offset) { |
| 2171 while (count > 0) { |
| 2172 const ssize_t transfered = |
| 2173 HANDLE_EINTR(pwrite64(fd, buffer, count, offset)); |
| 2174 if (transfered <= 0 || static_cast<size_t>(transfered) > count) { |
| 2175 return false; |
| 2176 } |
| 2177 count -= transfered; |
| 2178 buffer += transfered; |
| 2179 offset += transfered; |
| 2180 } |
| 2181 return true; |
| 2182 } |
| 2183 |
| 2184 bool FullPread64(int fd, char* buffer, size_t count, off64_t offset) { |
| 2185 while (count > 0) { |
| 2186 const ssize_t transfered = HANDLE_EINTR(pread64(fd, buffer, count, offset)); |
| 2187 if (transfered <= 0 || static_cast<size_t>(transfered) > count) { |
| 2188 return false; |
| 2189 } |
| 2190 count -= transfered; |
| 2191 buffer += transfered; |
| 2192 offset += transfered; |
| 2193 } |
| 2194 return true; |
| 2195 } |
| 2196 |
// Set to true by ForwardPreadHandler when the pread64 trap fires; the
// Pread64 test below asserts on it to prove the trap path was taken.
bool pread_64_was_forwarded = false;

// Policy that routes pread64() through an UnsafeTrap handler which forwards
// the call to the kernel via SandboxBPF::ForwardSyscall(); everything else
// is allowed.
class TrapPread64Policy : public SandboxBPFDSLPolicy {
 public:
  TrapPread64Policy() {}
  virtual ~TrapPread64Policy() {}

  virtual ResultExpr EvaluateSyscall(int system_call_number) const override {
    // Set the global environment for unsafe traps once.
    if (system_call_number == MIN_SYSCALL) {
      EnableUnsafeTraps();
    }

    if (system_call_number == __NR_pread64) {
      return UnsafeTrap(ForwardPreadHandler, NULL);
    }
    return Allow();
  }

 private:
  // Trap handler: records that it ran, then re-issues the original pread64
  // with its untouched 64-bit arguments.
  static intptr_t ForwardPreadHandler(const struct arch_seccomp_data& args,
                                      void* aux) {
    BPF_ASSERT(args.nr == __NR_pread64);
    pread_64_was_forwarded = true;

    return SandboxBPF::ForwardSyscall(args);
  }

  DISALLOW_COPY_AND_ASSIGN(TrapPread64Policy);
};
| 2227 |
// pread(2) takes a 64 bits offset. On 32 bits systems, it will be split
// between two arguments. In this test, we make sure that ForwardSyscall() can
// forward it properly.  The offset is chosen to be larger than 2^32 so that
// both halves of the split argument matter.
BPF_TEST_C(SandboxBPF, Pread64, TrapPread64Policy) {
  ScopedTemporaryFile temp_file;
  const uint64_t kLargeOffset = (static_cast<uint64_t>(1) << 32) | 0xBEEF;
  const char kTestString[] = "This is a test!";
  BPF_ASSERT(FullPwrite64(
      temp_file.fd(), kTestString, sizeof(kTestString), kLargeOffset));

  // Read the data back through the trapped-and-forwarded pread64() and make
  // sure it round-trips intact.
  char read_test_string[sizeof(kTestString)] = {0};
  BPF_ASSERT(FullPread64(temp_file.fd(),
                         read_test_string,
                         sizeof(read_test_string),
                         kLargeOffset));
  BPF_ASSERT_EQ(0, memcmp(kTestString, read_test_string, sizeof(kTestString)));
  BPF_ASSERT(pread_64_was_forwarded);
}
| 2246 |
| 2247 #endif // !defined(OS_ANDROID) |
| 2248 |
// Thread body for the Tsync test: blocks until the main thread signals that
// the seccomp filter has been installed, then verifies that the filter was
// synchronized to this thread too (nanosleep must be blocked).
void* TsyncApplyToTwoThreadsFunc(void* cond_ptr) {
  base::WaitableEvent* event = static_cast<base::WaitableEvent*>(cond_ptr);

  // Wait for the main thread to signal that the filter has been applied.
  if (!event->IsSignaled()) {
    event->Wait();
  }

  BPF_ASSERT(event->IsSignaled());

  BlacklistNanosleepPolicy::AssertNanosleepFails();

  return NULL;
}
| 2263 |
// Verifies SECCOMP_FILTER_FLAG_TSYNC behavior: a filter installed on the
// main thread after a second thread already exists must also apply to that
// pre-existing thread.
SANDBOX_TEST(SandboxBPF, Tsync) {
  if (SandboxBPF::SupportsSeccompThreadFilterSynchronization() !=
      SandboxBPF::STATUS_AVAILABLE) {
    return;
  }

  base::WaitableEvent event(true, false);

  // Create a thread on which to invoke the blocked syscall.
  pthread_t thread;
  BPF_ASSERT_EQ(
      0, pthread_create(&thread, NULL, &TsyncApplyToTwoThreadsFunc, &event));

  // Check that nanosleep() still succeeds before the sandbox is engaged.
  const struct timespec ts = {0, 0};
  BPF_ASSERT_EQ(0, HANDLE_EINTR(syscall(__NR_nanosleep, &ts, NULL)));

  // Engage the sandbox.
  SandboxBPF sandbox;
  sandbox.SetSandboxPolicy(new BlacklistNanosleepPolicy());
  BPF_ASSERT(sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));

  // This thread should have the filter applied as well.
  BlacklistNanosleepPolicy::AssertNanosleepFails();

  // Signal the condition to invoke the system call.
  event.Signal();

  // Wait for the thread to finish.
  BPF_ASSERT_EQ(0, pthread_join(thread, NULL));
}
| 2295 |
| 2296 class AllowAllPolicy : public SandboxBPFDSLPolicy { |
| 2297 public: |
| 2298 AllowAllPolicy() {} |
| 2299 virtual ~AllowAllPolicy() {} |
| 2300 |
| 2301 virtual ResultExpr EvaluateSyscall(int sysno) const override { |
| 2302 return Allow(); |
| 2303 } |
| 2304 |
| 2305 private: |
| 2306 DISALLOW_COPY_AND_ASSIGN(AllowAllPolicy); |
| 2307 }; |
| 2308 |
// Starting the sandbox in PROCESS_SINGLE_THREADED mode while another
// thread is running must be refused with the death message below.
SANDBOX_DEATH_TEST(
    SandboxBPF,
    StartMultiThreadedAsSingleThreaded,
    DEATH_MESSAGE("Cannot start sandbox; process is already multi-threaded")) {
  // Make the process multi-threaded before attempting to start the sandbox.
  base::Thread thread("sandbox.linux.StartMultiThreadedAsSingleThreaded");
  BPF_ASSERT(thread.Start());

  SandboxBPF sandbox;
  sandbox.SetSandboxPolicy(new AllowAllPolicy());
  BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_SINGLE_THREADED));
}
| 2320 |
// Disabled under ThreadSanitizer: http://crbug.com/407357
#if !defined(THREAD_SANITIZER)
// Conversely, starting in PROCESS_MULTI_THREADED mode while the process is
// actually single-threaded must also be detected and refused.
SANDBOX_DEATH_TEST(
    SandboxBPF,
    StartSingleThreadedAsMultiThreaded,
    DEATH_MESSAGE(
        "Cannot start sandbox; process may be single-threaded when "
        "reported as not")) {
  SandboxBPF sandbox;
  sandbox.SetSandboxPolicy(new AllowAllPolicy());
  BPF_ASSERT(!sandbox.StartSandbox(SandboxBPF::PROCESS_MULTI_THREADED));
}
#endif  // !defined(THREAD_SANITIZER)
| 2334 |
// Stub SIGSYS handler passed to UnsafeTrap() below. The tests never invoke
// the trapped syscall, so this handler is never actually called; it simply
// reports failure (-1) if it ever were.
intptr_t NoOpHandler(const struct arch_seccomp_data& args, void*) {
  return -1;
}
| 2339 |
// Policy combining conditional DSL expressions (If/Switch on syscall
// arguments) with an UnsafeTrap(), to verify the two features coexist.
class UnsafeTrapWithCondPolicy : public SandboxBPFDSLPolicy {
 public:
  UnsafeTrapWithCondPolicy() {}
  virtual ~UnsafeTrapWithCondPolicy() {}

  virtual ResultExpr EvaluateSyscall(int sysno) const override {
    DCHECK(SandboxBPF::IsValidSyscallNumber(sysno));
    // Unsafe traps are only permitted in sandbox-debugging mode; enable it
    // here (without overwriting an existing value) and silence the warning
    // messages that mode produces.
    setenv(kSandboxDebuggingEnv, "t", 0);
    Die::SuppressInfoMessages(true);

    // Allow the syscalls the unsafe-trap machinery itself depends on.
    if (SandboxBPF::IsRequiredForUnsafeTrap(sysno))
      return Allow();

    switch (sysno) {
      case __NR_uname: {
        // Conditional expression: only a zero (NULL) first argument is
        // allowed; anything else fails with EPERM.
        const Arg<uint32_t> arg(0);
        return If(arg == 0, Allow()).Else(Error(EPERM));
      }
      case __NR_setgid: {
        // Switch expression: distinct errors for specific gid values,
        // EPERM for everything else.
        const Arg<uint32_t> arg(0);
        return Switch(arg)
            .Case(100, Error(ENOMEM))
            .Case(200, Error(ENOSYS))
            .Default(Error(EPERM));
      }
      case __NR_close:
      case __NR_exit_group:
      case __NR_write:
        // Needed by the test harness itself.
        return Allow();
      case __NR_getppid:
        // The unsafe trap under test; NoOpHandler is never invoked.
        return UnsafeTrap(NoOpHandler, NULL);
      default:
        return Error(EPERM);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(UnsafeTrapWithCondPolicy);
};
| 2379 |
// Exercises UnsafeTrapWithCondPolicy: each syscall below must fail with the
// exact errno the policy's conditional expressions dictate.
BPF_TEST_C(SandboxBPF, UnsafeTrapWithCond, UnsafeTrapWithCondPolicy) {
  // uname(NULL) is allowed by the policy; the kernel then rejects the NULL
  // buffer with EFAULT, proving the call reached the kernel.
  BPF_ASSERT_EQ(-1, syscall(__NR_uname, 0));
  BPF_ASSERT_EQ(EFAULT, errno);

  // A non-NULL argument is denied by the If() condition.
  BPF_ASSERT_EQ(-1, syscall(__NR_uname, 1));
  BPF_ASSERT_EQ(EPERM, errno);

  // Switch() cases: gid 100 -> ENOMEM, gid 200 -> ENOSYS.
  BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 100));
  BPF_ASSERT_EQ(ENOMEM, errno);

  BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 200));
  BPF_ASSERT_EQ(ENOSYS, errno);

  // Any other gid hits the Default() branch.
  BPF_ASSERT_EQ(-1, syscall(__NR_setgid, 300));
  BPF_ASSERT_EQ(EPERM, errno);
}
| 2396 |
| 2397 } // namespace |
| 2398 |
| 2399 } // namespace bpf_dsl |
| 2400 } // namespace sandbox |
OLD | NEW |