OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "sandbox/linux/seccomp-bpf/verifier.h" |
| 6 |
| 7 #include <string.h> |
| 8 |
| 9 #include <limits> |
| 10 |
| 11 #include "sandbox/linux/bpf_dsl/bpf_dsl.h" |
| 12 #include "sandbox/linux/bpf_dsl/bpf_dsl_impl.h" |
| 13 #include "sandbox/linux/bpf_dsl/policy_compiler.h" |
| 14 #include "sandbox/linux/seccomp-bpf/errorcode.h" |
| 15 #include "sandbox/linux/seccomp-bpf/linux_seccomp.h" |
| 16 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h" |
| 17 #include "sandbox/linux/seccomp-bpf/syscall_iterator.h" |
| 18 |
| 19 namespace sandbox { |
| 20 |
| 21 namespace { |
| 22 |
| 23 const uint64_t kLower32Bits = std::numeric_limits<uint32_t>::max(); |
| 24 const uint64_t kUpper32Bits = static_cast<uint64_t>(kLower32Bits) << 32; |
| 25 const uint64_t kFull64Bits = std::numeric_limits<uint64_t>::max(); |
| 26 |
// Mutable interpreter state threaded through Verifier::EvaluateBPF and the
// per-instruction helpers (Ld, Jmp, Ret, Alu) below.
struct State {
  State(const std::vector<struct sock_filter>& p,
        const struct arch_seccomp_data& d)
      : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {}
  const std::vector<struct sock_filter>& program;  // BPF program being run.
  const struct arch_seccomp_data& data;  // Synthetic syscall data under test.
  unsigned int ip;       // Index of the instruction currently being executed.
  uint32_t accumulator;  // The BPF accumulator register ("A").
  bool acc_is_valid;     // True once a BPF_LD has initialized the accumulator.

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(State);
};
| 40 |
| 41 uint32_t EvaluateErrorCode(bpf_dsl::PolicyCompiler* compiler, |
| 42 const ErrorCode& code, |
| 43 const struct arch_seccomp_data& data) { |
| 44 if (code.error_type() == ErrorCode::ET_SIMPLE || |
| 45 code.error_type() == ErrorCode::ET_TRAP) { |
| 46 return code.err(); |
| 47 } else if (code.error_type() == ErrorCode::ET_COND) { |
| 48 if (code.width() == ErrorCode::TP_32BIT && |
| 49 (data.args[code.argno()] >> 32) && |
| 50 (data.args[code.argno()] & 0xFFFFFFFF80000000ull) != |
| 51 0xFFFFFFFF80000000ull) { |
| 52 return compiler->Unexpected64bitArgument().err(); |
| 53 } |
| 54 bool equal = (data.args[code.argno()] & code.mask()) == code.value(); |
| 55 return EvaluateErrorCode( |
| 56 compiler, equal ? *code.passed() : *code.failed(), data); |
| 57 } else { |
| 58 return SECCOMP_RET_INVALID; |
| 59 } |
| 60 } |
| 61 |
// Recursively checks that "program" agrees with the policy's ErrorCode tree.
// For leaf codes (ET_SIMPLE/ET_TRAP), runs the BPF program on *data and
// compares its result against evaluating "root_code" directly. For
// conditional codes (ET_COND), generates test values for the inspected
// argument that exercise both the passed() and failed() branches, writing
// each value into *data before recursing. Returns false and sets *err on any
// mismatch or malformed error code.
bool VerifyErrorCode(bpf_dsl::PolicyCompiler* compiler,
                     const std::vector<struct sock_filter>& program,
                     struct arch_seccomp_data* data,
                     const ErrorCode& root_code,
                     const ErrorCode& code,
                     const char** err) {
  if (code.error_type() == ErrorCode::ET_SIMPLE ||
      code.error_type() == ErrorCode::ET_TRAP) {
    uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err);
    if (*err) {
      return false;
    } else if (computed_ret != EvaluateErrorCode(compiler, root_code, *data)) {
      // For efficiency's sake, we'd much rather compare "computed_ret"
      // against "code.err()". This works most of the time, but it doesn't
      // always work for nested conditional expressions. The test values
      // that we generate on the fly to probe expressions can trigger
      // code flow decisions in multiple nodes of the decision tree, and the
      // only way to compute the correct error code in that situation is by
      // calling EvaluateErrorCode().
      *err = "Exit code from BPF program doesn't match";
      return false;
    }
  } else if (code.error_type() == ErrorCode::ET_COND) {
    // Only six system-call arguments exist (args[0]..args[5]).
    if (code.argno() < 0 || code.argno() >= 6) {
      *err = "Invalid argument number in error code";
      return false;
    }

    // TODO(mdempsky): The test values generated here try to provide good
    // coverage for generated BPF instructions while avoiding combinatorial
    // explosion on large policies. Ideally we would instead take a fuzzing-like
    // approach and generate a bounded number of test cases regardless of policy
    // size.

    // Verify that we can check a value for simple equality.
    data->args[code.argno()] = code.value();
    if (!VerifyErrorCode(
            compiler, program, data, root_code, *code.passed(), err)) {
      return false;
    }

    // If mask ignores any bits, verify that setting those bits is still
    // detected as equality.
    uint64_t ignored_bits = ~code.mask();
    if (code.width() == ErrorCode::TP_32BIT) {
      // For 32-bit comparisons only the lower half of the mask is meaningful.
      ignored_bits = static_cast<uint32_t>(ignored_bits);
    }
    if ((ignored_bits & kLower32Bits) != 0) {
      data->args[code.argno()] = code.value() | (ignored_bits & kLower32Bits);
      if (!VerifyErrorCode(
              compiler, program, data, root_code, *code.passed(), err)) {
        return false;
      }
    }
    if ((ignored_bits & kUpper32Bits) != 0) {
      data->args[code.argno()] = code.value() | (ignored_bits & kUpper32Bits);
      if (!VerifyErrorCode(
              compiler, program, data, root_code, *code.passed(), err)) {
        return false;
      }
    }

    // Verify that changing bits included in the mask is detected as inequality.
    if ((code.mask() & kLower32Bits) != 0) {
      data->args[code.argno()] = code.value() ^ (code.mask() & kLower32Bits);
      if (!VerifyErrorCode(
              compiler, program, data, root_code, *code.failed(), err)) {
        return false;
      }
    }
    if ((code.mask() & kUpper32Bits) != 0) {
      data->args[code.argno()] = code.value() ^ (code.mask() & kUpper32Bits);
      if (!VerifyErrorCode(
              compiler, program, data, root_code, *code.failed(), err)) {
        return false;
      }
    }

    if (code.width() == ErrorCode::TP_32BIT) {
      // For 32-bit system call arguments, we emit additional instructions to
      // validate the upper 32-bits. Here we test that validation.

      // Arbitrary 64-bit values should be rejected.
      data->args[code.argno()] = 1ULL << 32;
      if (!VerifyErrorCode(compiler,
                           program,
                           data,
                           root_code,
                           compiler->Unexpected64bitArgument(),
                           err)) {
        return false;
      }

      // Upper 32-bits set without the MSB of the lower 32-bits set should be
      // rejected too.
      data->args[code.argno()] = kUpper32Bits;
      if (!VerifyErrorCode(compiler,
                           program,
                           data,
                           root_code,
                           compiler->Unexpected64bitArgument(),
                           err)) {
        return false;
      }
    }
  } else {
    *err = "Attempting to return invalid error code from BPF program";
    return false;
  }
  return true;
}
| 173 |
| 174 void Ld(State* state, const struct sock_filter& insn, const char** err) { |
| 175 if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS || |
| 176 insn.jt != 0 || insn.jf != 0) { |
| 177 *err = "Invalid BPF_LD instruction"; |
| 178 return; |
| 179 } |
| 180 if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) { |
| 181 // We only allow loading of properly aligned 32bit quantities. |
| 182 memcpy(&state->accumulator, |
| 183 reinterpret_cast<const char*>(&state->data) + insn.k, |
| 184 4); |
| 185 } else { |
| 186 *err = "Invalid operand in BPF_LD instruction"; |
| 187 return; |
| 188 } |
| 189 state->acc_is_valid = true; |
| 190 return; |
| 191 } |
| 192 |
| 193 void Jmp(State* state, const struct sock_filter& insn, const char** err) { |
| 194 if (BPF_OP(insn.code) == BPF_JA) { |
| 195 if (state->ip + insn.k + 1 >= state->program.size() || |
| 196 state->ip + insn.k + 1 <= state->ip) { |
| 197 compilation_failure: |
| 198 *err = "Invalid BPF_JMP instruction"; |
| 199 return; |
| 200 } |
| 201 state->ip += insn.k; |
| 202 } else { |
| 203 if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid || |
| 204 state->ip + insn.jt + 1 >= state->program.size() || |
| 205 state->ip + insn.jf + 1 >= state->program.size()) { |
| 206 goto compilation_failure; |
| 207 } |
| 208 switch (BPF_OP(insn.code)) { |
| 209 case BPF_JEQ: |
| 210 if (state->accumulator == insn.k) { |
| 211 state->ip += insn.jt; |
| 212 } else { |
| 213 state->ip += insn.jf; |
| 214 } |
| 215 break; |
| 216 case BPF_JGT: |
| 217 if (state->accumulator > insn.k) { |
| 218 state->ip += insn.jt; |
| 219 } else { |
| 220 state->ip += insn.jf; |
| 221 } |
| 222 break; |
| 223 case BPF_JGE: |
| 224 if (state->accumulator >= insn.k) { |
| 225 state->ip += insn.jt; |
| 226 } else { |
| 227 state->ip += insn.jf; |
| 228 } |
| 229 break; |
| 230 case BPF_JSET: |
| 231 if (state->accumulator & insn.k) { |
| 232 state->ip += insn.jt; |
| 233 } else { |
| 234 state->ip += insn.jf; |
| 235 } |
| 236 break; |
| 237 default: |
| 238 goto compilation_failure; |
| 239 } |
| 240 } |
| 241 } |
| 242 |
| 243 uint32_t Ret(State*, const struct sock_filter& insn, const char** err) { |
| 244 if (BPF_SRC(insn.code) != BPF_K) { |
| 245 *err = "Invalid BPF_RET instruction"; |
| 246 return 0; |
| 247 } |
| 248 return insn.k; |
| 249 } |
| 250 |
| 251 void Alu(State* state, const struct sock_filter& insn, const char** err) { |
| 252 if (BPF_OP(insn.code) == BPF_NEG) { |
| 253 state->accumulator = -state->accumulator; |
| 254 return; |
| 255 } else { |
| 256 if (BPF_SRC(insn.code) != BPF_K) { |
| 257 *err = "Unexpected source operand in arithmetic operation"; |
| 258 return; |
| 259 } |
| 260 switch (BPF_OP(insn.code)) { |
| 261 case BPF_ADD: |
| 262 state->accumulator += insn.k; |
| 263 break; |
| 264 case BPF_SUB: |
| 265 state->accumulator -= insn.k; |
| 266 break; |
| 267 case BPF_MUL: |
| 268 state->accumulator *= insn.k; |
| 269 break; |
| 270 case BPF_DIV: |
| 271 if (!insn.k) { |
| 272 *err = "Illegal division by zero"; |
| 273 break; |
| 274 } |
| 275 state->accumulator /= insn.k; |
| 276 break; |
| 277 case BPF_MOD: |
| 278 if (!insn.k) { |
| 279 *err = "Illegal division by zero"; |
| 280 break; |
| 281 } |
| 282 state->accumulator %= insn.k; |
| 283 break; |
| 284 case BPF_OR: |
| 285 state->accumulator |= insn.k; |
| 286 break; |
| 287 case BPF_XOR: |
| 288 state->accumulator ^= insn.k; |
| 289 break; |
| 290 case BPF_AND: |
| 291 state->accumulator &= insn.k; |
| 292 break; |
| 293 case BPF_LSH: |
| 294 if (insn.k > 32) { |
| 295 *err = "Illegal shift operation"; |
| 296 break; |
| 297 } |
| 298 state->accumulator <<= insn.k; |
| 299 break; |
| 300 case BPF_RSH: |
| 301 if (insn.k > 32) { |
| 302 *err = "Illegal shift operation"; |
| 303 break; |
| 304 } |
| 305 state->accumulator >>= insn.k; |
| 306 break; |
| 307 default: |
| 308 *err = "Invalid operator in arithmetic operation"; |
| 309 break; |
| 310 } |
| 311 } |
| 312 } |
| 313 |
| 314 } // namespace |
| 315 |
// Verifies that "program" implements "policy" by evaluating every system
// call number against both and comparing results. Returns true on success;
// on failure returns false and points *err at a static description of the
// mismatch.
bool Verifier::VerifyBPF(bpf_dsl::PolicyCompiler* compiler,
                         const std::vector<struct sock_filter>& program,
                         const bpf_dsl::SandboxBPFDSLPolicy& policy,
                         const char** err) {
  *err = NULL;
  for (uint32_t sysnum : SyscallSet::All()) {
    // We ideally want to iterate over the full system call range and values
    // just above and just below this range. This gives us the full result set
    // of the "evaluators".
    // On Intel systems, this can fail in a surprising way, as a cleared bit 30
    // indicates either i386 or x86-64; and a set bit 30 indicates x32. And
    // unless we pay attention to setting this bit correctly, an early check in
    // our BPF program will make us fail with a misleading error code.
    struct arch_seccomp_data data = {static_cast<int>(sysnum),
                                     static_cast<uint32_t>(SECCOMP_ARCH)};
#if defined(__i386__) || defined(__x86_64__)
#if defined(__x86_64__) && defined(__ILP32__)
    // On x32, only system call numbers with bit 30 set are meaningful.
    if (!(sysnum & 0x40000000u)) {
      continue;
    }
#else
    // On i386/x86-64, skip x32-flavored (bit 30 set) system call numbers.
    if (sysnum & 0x40000000u) {
      continue;
    }
#endif
#endif
    // Out-of-range system call numbers are checked against the policy's
    // catch-all InvalidSyscall() handler instead of a per-syscall evaluator.
    ErrorCode code = SyscallSet::IsValid(sysnum)
                         ? policy.EvaluateSyscall(sysnum)->Compile(compiler)
                         : policy.InvalidSyscall()->Compile(compiler);
    if (!VerifyErrorCode(compiler, program, &data, code, code, err)) {
      return false;
    }
  }
  return true;
}
| 351 |
| 352 uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program, |
| 353 const struct arch_seccomp_data& data, |
| 354 const char** err) { |
| 355 *err = NULL; |
| 356 if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) { |
| 357 *err = "Invalid program length"; |
| 358 return 0; |
| 359 } |
| 360 for (State state(program, data); !*err; ++state.ip) { |
| 361 if (state.ip >= program.size()) { |
| 362 *err = "Invalid instruction pointer in BPF program"; |
| 363 break; |
| 364 } |
| 365 const struct sock_filter& insn = program[state.ip]; |
| 366 switch (BPF_CLASS(insn.code)) { |
| 367 case BPF_LD: |
| 368 Ld(&state, insn, err); |
| 369 break; |
| 370 case BPF_JMP: |
| 371 Jmp(&state, insn, err); |
| 372 break; |
| 373 case BPF_RET: { |
| 374 uint32_t r = Ret(&state, insn, err); |
| 375 switch (r & SECCOMP_RET_ACTION) { |
| 376 case SECCOMP_RET_TRAP: |
| 377 case SECCOMP_RET_ERRNO: |
| 378 case SECCOMP_RET_TRACE: |
| 379 case SECCOMP_RET_ALLOW: |
| 380 break; |
| 381 case SECCOMP_RET_KILL: // We don't ever generate this |
| 382 case SECCOMP_RET_INVALID: // Should never show up in BPF program |
| 383 default: |
| 384 *err = "Unexpected return code found in BPF program"; |
| 385 return 0; |
| 386 } |
| 387 return r; |
| 388 } |
| 389 case BPF_ALU: |
| 390 Alu(&state, insn, err); |
| 391 break; |
| 392 default: |
| 393 *err = "Unexpected instruction in BPF program"; |
| 394 break; |
| 395 } |
| 396 } |
| 397 return 0; |
| 398 } |
| 399 |
| 400 } // namespace sandbox |
OLD | NEW |