OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <string.h> | 5 #include <string.h> |
6 | 6 |
7 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h" | 7 #include "sandbox/linux/seccomp-bpf/sandbox_bpf.h" |
8 #include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h" | 8 #include "sandbox/linux/seccomp-bpf/sandbox_bpf_policy.h" |
9 #include "sandbox/linux/seccomp-bpf/syscall_iterator.h" | 9 #include "sandbox/linux/seccomp-bpf/syscall_iterator.h" |
10 #include "sandbox/linux/seccomp-bpf/verifier.h" | 10 #include "sandbox/linux/seccomp-bpf/verifier.h" |
11 | 11 |
12 | |
13 namespace { | 12 namespace { |
14 | 13 |
15 using playground2::ErrorCode; | 14 using playground2::ErrorCode; |
16 using playground2::Sandbox; | 15 using playground2::Sandbox; |
17 using playground2::Verifier; | 16 using playground2::Verifier; |
18 using playground2::arch_seccomp_data; | 17 using playground2::arch_seccomp_data; |
19 | 18 |
20 struct State { | 19 struct State { |
21 State(const std::vector<struct sock_filter>& p, | 20 State(const std::vector<struct sock_filter>& p, |
22 const struct arch_seccomp_data& d) : | 21 const struct arch_seccomp_data& d) |
23 program(p), | 22 : program(p), data(d), ip(0), accumulator(0), acc_is_valid(false) {} |
24 data(d), | |
25 ip(0), | |
26 accumulator(0), | |
27 acc_is_valid(false) { | |
28 } | |
29 const std::vector<struct sock_filter>& program; | 23 const std::vector<struct sock_filter>& program; |
30 const struct arch_seccomp_data& data; | 24 const struct arch_seccomp_data& data; |
31 unsigned int ip; | 25 unsigned int ip; |
32 uint32_t accumulator; | 26 uint32_t accumulator; |
33 bool acc_is_valid; | 27 bool acc_is_valid; |
34 | 28 |
35 private: | 29 private: |
36 DISALLOW_IMPLICIT_CONSTRUCTORS(State); | 30 DISALLOW_IMPLICIT_CONSTRUCTORS(State); |
37 }; | 31 }; |
38 | 32 |
39 uint32_t EvaluateErrorCode(Sandbox *sandbox, const ErrorCode& code, | 33 uint32_t EvaluateErrorCode(Sandbox* sandbox, |
34 const ErrorCode& code, | |
40 const struct arch_seccomp_data& data) { | 35 const struct arch_seccomp_data& data) { |
41 if (code.error_type() == ErrorCode::ET_SIMPLE || | 36 if (code.error_type() == ErrorCode::ET_SIMPLE || |
42 code.error_type() == ErrorCode::ET_TRAP) { | 37 code.error_type() == ErrorCode::ET_TRAP) { |
43 return code.err(); | 38 return code.err(); |
44 } else if (code.error_type() == ErrorCode::ET_COND) { | 39 } else if (code.error_type() == ErrorCode::ET_COND) { |
45 if (code.width() == ErrorCode::TP_32BIT && | 40 if (code.width() == ErrorCode::TP_32BIT && |
46 (data.args[code.argno()] >> 32) && | 41 (data.args[code.argno()] >> 32) && |
47 (data.args[code.argno()] & 0xFFFFFFFF80000000ull) != | 42 (data.args[code.argno()] & 0xFFFFFFFF80000000ull) != |
48 0xFFFFFFFF80000000ull) { | 43 0xFFFFFFFF80000000ull) { |
49 return sandbox->Unexpected64bitArgument().err(); | 44 return sandbox->Unexpected64bitArgument().err(); |
50 } | 45 } |
51 switch (code.op()) { | 46 switch (code.op()) { |
52 case ErrorCode::OP_EQUAL: | 47 case ErrorCode::OP_EQUAL: |
53 return EvaluateErrorCode(sandbox, | 48 return EvaluateErrorCode(sandbox, |
54 (code.width() == ErrorCode::TP_32BIT | 49 (code.width() == ErrorCode::TP_32BIT |
55 ? uint32_t(data.args[code.argno()]) | 50 ? uint32_t(data.args[code.argno()]) |
Robert Sesek 2013/11/08 21:14:33: For another CL: nested ternary is unreadable.
jln (very slow on Chromium) 2013/11/08 21:22:37: Agreed!
56 : data.args[code.argno()]) == code.value() | 51 : data.args[code.argno()]) == code.value() |
57 ? *code.passed() | 52 ? *code.passed() |
58 : *code.failed(), | 53 : *code.failed(), |
59 data); | 54 data); |
60 case ErrorCode::OP_HAS_ALL_BITS: | 55 case ErrorCode::OP_HAS_ALL_BITS: |
61 return EvaluateErrorCode(sandbox, | 56 return EvaluateErrorCode(sandbox, |
62 ((code.width() == ErrorCode::TP_32BIT | 57 ((code.width() == ErrorCode::TP_32BIT |
63 ? uint32_t(data.args[code.argno()]) | 58 ? uint32_t(data.args[code.argno()]) |
64 : data.args[code.argno()]) & code.value()) | 59 : data.args[code.argno()]) & |
65 == code.value() | 60 code.value()) == code.value() |
66 ? *code.passed() | 61 ? *code.passed() |
67 : *code.failed(), | 62 : *code.failed(), |
68 data); | 63 data); |
69 case ErrorCode::OP_HAS_ANY_BITS: | 64 case ErrorCode::OP_HAS_ANY_BITS: |
70 return EvaluateErrorCode(sandbox, | 65 return EvaluateErrorCode(sandbox, |
71 (code.width() == ErrorCode::TP_32BIT | 66 (code.width() == ErrorCode::TP_32BIT |
72 ? uint32_t(data.args[code.argno()]) | 67 ? uint32_t(data.args[code.argno()]) |
73 : data.args[code.argno()]) & code.value() | 68 : data.args[code.argno()]) & |
74 ? *code.passed() | 69 code.value() |
75 : *code.failed(), | 70 ? *code.passed() |
76 data); | 71 : *code.failed(), |
77 default: | 72 data); |
78 return SECCOMP_RET_INVALID; | 73 default: |
74 return SECCOMP_RET_INVALID; | |
79 } | 75 } |
80 } else { | 76 } else { |
81 return SECCOMP_RET_INVALID; | 77 return SECCOMP_RET_INVALID; |
82 } | 78 } |
83 } | 79 } |
84 | 80 |
85 bool VerifyErrorCode(Sandbox *sandbox, | 81 bool VerifyErrorCode(Sandbox* sandbox, |
86 const std::vector<struct sock_filter>& program, | 82 const std::vector<struct sock_filter>& program, |
87 struct arch_seccomp_data *data, | 83 struct arch_seccomp_data* data, |
88 const ErrorCode& root_code, | 84 const ErrorCode& root_code, |
89 const ErrorCode& code, | 85 const ErrorCode& code, |
90 const char **err) { | 86 const char** err) { |
91 if (code.error_type() == ErrorCode::ET_SIMPLE || | 87 if (code.error_type() == ErrorCode::ET_SIMPLE || |
92 code.error_type() == ErrorCode::ET_TRAP) { | 88 code.error_type() == ErrorCode::ET_TRAP) { |
93 uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err); | 89 uint32_t computed_ret = Verifier::EvaluateBPF(program, *data, err); |
94 if (*err) { | 90 if (*err) { |
95 return false; | 91 return false; |
96 } else if (computed_ret != EvaluateErrorCode(sandbox, root_code, *data)) { | 92 } else if (computed_ret != EvaluateErrorCode(sandbox, root_code, *data)) { |
97 // For efficiency's sake, we'd much rather compare "computed_ret" | 93 // For efficiency's sake, we'd much rather compare "computed_ret" |
98 // against "code.err()". This works most of the time, but it doesn't | 94 // against "code.err()". This works most of the time, but it doesn't |
99 // always work for nested conditional expressions. The test values | 95 // always work for nested conditional expressions. The test values |
100 // that we generate on the fly to probe expressions can trigger | 96 // that we generate on the fly to probe expressions can trigger |
101 // code flow decisions in multiple nodes of the decision tree, and the | 97 // code flow decisions in multiple nodes of the decision tree, and the |
102 // only way to compute the correct error code in that situation is by | 98 // only way to compute the correct error code in that situation is by |
103 // calling EvaluateErrorCode(). | 99 // calling EvaluateErrorCode(). |
104 *err = "Exit code from BPF program doesn't match"; | 100 *err = "Exit code from BPF program doesn't match"; |
105 return false; | 101 return false; |
106 } | 102 } |
107 } else if (code.error_type() == ErrorCode::ET_COND) { | 103 } else if (code.error_type() == ErrorCode::ET_COND) { |
108 if (code.argno() < 0 || code.argno() >= 6) { | 104 if (code.argno() < 0 || code.argno() >= 6) { |
109 *err = "Invalid argument number in error code"; | 105 *err = "Invalid argument number in error code"; |
110 return false; | 106 return false; |
111 } | 107 } |
112 switch (code.op()) { | 108 switch (code.op()) { |
113 case ErrorCode::OP_EQUAL: | 109 case ErrorCode::OP_EQUAL: |
114 // Verify that we can check a 32bit value (or the LSB of a 64bit value) | 110 // Verify that we can check a 32bit value (or the LSB of a 64bit value) |
115 // for equality. | 111 // for equality. |
116 data->args[code.argno()] = code.value(); | 112 data->args[code.argno()] = code.value(); |
117 if (!VerifyErrorCode(sandbox, program, data, root_code, | 113 if (!VerifyErrorCode( |
118 *code.passed(), err)) { | 114 sandbox, program, data, root_code, *code.passed(), err)) { |
119 return false; | 115 return false; |
120 } | |
121 | |
122 // Change the value to no longer match and verify that this is detected | |
123 // as an inequality. | |
124 data->args[code.argno()] = code.value() ^ 0x55AA55AA; | |
125 if (!VerifyErrorCode(sandbox, program, data, root_code, | |
126 *code.failed(), err)) { | |
127 return false; | |
128 } | |
129 | |
130 // BPF programs can only ever operate on 32bit values. So, we have | |
131 // generated additional BPF instructions that inspect the MSB. Verify | |
132 // that they behave as intended. | |
133 if (code.width() == ErrorCode::TP_32BIT) { | |
134 if (code.value() >> 32) { | |
135 SANDBOX_DIE("Invalid comparison of a 32bit system call argument " | |
136 "against a 64bit constant; this test is always false."); | |
137 } | 116 } |
138 | 117 |
139 // If the system call argument was intended to be a 32bit parameter, | 118 // Change the value to no longer match and verify that this is detected |
140 // verify that it is a fatal error if a 64bit value is ever passed | 119 // as an inequality. |
141 // here. | 120 data->args[code.argno()] = code.value() ^ 0x55AA55AA; |
142 data->args[code.argno()] = 0x100000000ull; | 121 if (!VerifyErrorCode( |
143 if (!VerifyErrorCode(sandbox, program, data, root_code, | 122 sandbox, program, data, root_code, *code.failed(), err)) { |
144 sandbox->Unexpected64bitArgument(), | |
145 err)) { | |
146 return false; | 123 return false; |
147 } | 124 } |
148 } else { | 125 |
149 // If the system call argument was intended to be a 64bit parameter, | 126 // BPF programs can only ever operate on 32bit values. So, we have |
150 // verify that we can handle (in-)equality for the MSB. This is | 127 // generated additional BPF instructions that inspect the MSB. Verify |
151 // essentially the same test that we did earlier for the LSB. | 128 // that they behave as intended. |
152 // We only need to verify the behavior of the inequality test. We | 129 if (code.width() == ErrorCode::TP_32BIT) { |
153 // know that the equality test already passed, as unlike the kernel | 130 if (code.value() >> 32) { |
154 // the Verifier does operate on 64bit quantities. | 131 SANDBOX_DIE( |
155 data->args[code.argno()] = code.value() ^ 0x55AA55AA00000000ull; | 132 "Invalid comparison of a 32bit system call argument " |
156 if (!VerifyErrorCode(sandbox, program, data, root_code, | 133 "against a 64bit constant; this test is always false."); |
157 *code.failed(), err)) { | 134 } |
158 return false; | 135 |
136 // If the system call argument was intended to be a 32bit parameter, | |
137 // verify that it is a fatal error if a 64bit value is ever passed | |
138 // here. | |
139 data->args[code.argno()] = 0x100000000ull; | |
140 if (!VerifyErrorCode(sandbox, | |
141 program, | |
142 data, | |
143 root_code, | |
144 sandbox->Unexpected64bitArgument(), | |
145 err)) { | |
146 return false; | |
147 } | |
148 } else { | |
149 // If the system call argument was intended to be a 64bit parameter, | |
150 // verify that we can handle (in-)equality for the MSB. This is | |
151 // essentially the same test that we did earlier for the LSB. | |
152 // We only need to verify the behavior of the inequality test. We | |
153 // know that the equality test already passed, as unlike the kernel | |
154 // the Verifier does operate on 64bit quantities. | |
155 data->args[code.argno()] = code.value() ^ 0x55AA55AA00000000ull; | |
156 if (!VerifyErrorCode( | |
157 sandbox, program, data, root_code, *code.failed(), err)) { | |
158 return false; | |
159 } | |
159 } | 160 } |
160 } | 161 break; |
161 break; | 162 case ErrorCode::OP_HAS_ALL_BITS: |
162 case ErrorCode::OP_HAS_ALL_BITS: | 163 case ErrorCode::OP_HAS_ANY_BITS: |
163 case ErrorCode::OP_HAS_ANY_BITS: | 164 // A comprehensive test of bit values is difficult and potentially |
164 // A comprehensive test of bit values is difficult and potentially rather | 165 // rather |
165 // time-expensive. We avoid doing so at run-time and instead rely on the | 166 // time-expensive. We avoid doing so at run-time and instead rely on the |
166 // unittest for full testing. The test that we have here covers just the | 167 // unittest for full testing. The test that we have here covers just the |
167 // common cases. We test against the bitmask itself, all zeros and all | 168 // common cases. We test against the bitmask itself, all zeros and all |
168 // ones. | 169 // ones. |
169 { | 170 { |
170 // Testing "any" bits against a zero mask is always false. So, there | 171 // Testing "any" bits against a zero mask is always false. So, there |
171 // are some cases, where we expect tests to take the "failed()" branch | 172 // are some cases, where we expect tests to take the "failed()" branch |
172 // even though this is a test that normally should take "passed()". | 173 // even though this is a test that normally should take "passed()". |
173 const ErrorCode& passed = | 174 const ErrorCode& passed = |
174 (!code.value() && code.op() == ErrorCode::OP_HAS_ANY_BITS) || | 175 (!code.value() && code.op() == ErrorCode::OP_HAS_ANY_BITS) || |
175 | 176 |
176 // On a 32bit system, it is impossible to pass a 64bit value as a | 177 // On a 32bit system, it is impossible to pass a 64bit |
177 // system call argument. So, some additional tests always evaluate | 178 // value as a |
178 // as false. | 179 // system call argument. So, some additional tests always |
179 ((code.value() & ~uint64_t(uintptr_t(-1))) && | 180 // evaluate |
180 code.op() == ErrorCode::OP_HAS_ALL_BITS) || | 181 // as false. |
181 (code.value() && !(code.value() & uintptr_t(-1)) && | 182 ((code.value() & ~uint64_t(uintptr_t(-1))) && |
182 code.op() == ErrorCode::OP_HAS_ANY_BITS) | 183 code.op() == ErrorCode::OP_HAS_ALL_BITS) || |
184 (code.value() && !(code.value() & uintptr_t(-1)) && | |
185 code.op() == ErrorCode::OP_HAS_ANY_BITS) | |
186 ? *code.failed() | |
187 : *code.passed(); | |
183 | 188 |
184 ? *code.failed() : *code.passed(); | 189 // Similary, testing for "all" bits in a zero mask is always true. So, |
190 // some cases pass despite them normally failing. | |
191 const ErrorCode& failed = | |
192 !code.value() && code.op() == ErrorCode::OP_HAS_ALL_BITS | |
193 ? *code.passed() | |
194 : *code.failed(); | |
185 | 195 |
186 // Similary, testing for "all" bits in a zero mask is always true. So, | 196 data->args[code.argno()] = code.value() & uintptr_t(-1); |
187 // some cases pass despite them normally failing. | 197 if (!VerifyErrorCode( |
188 const ErrorCode& failed = | 198 sandbox, program, data, root_code, passed, err)) { |
189 !code.value() && code.op() == ErrorCode::OP_HAS_ALL_BITS | 199 return false; |
190 ? *code.passed() : *code.failed(); | 200 } |
191 | 201 data->args[code.argno()] = uintptr_t(-1); |
192 data->args[code.argno()] = code.value() & uintptr_t(-1); | 202 if (!VerifyErrorCode( |
193 if (!VerifyErrorCode(sandbox, program, data, root_code, passed, err)) { | 203 sandbox, program, data, root_code, passed, err)) { |
194 return false; | 204 return false; |
205 } | |
206 data->args[code.argno()] = 0; | |
207 if (!VerifyErrorCode( | |
208 sandbox, program, data, root_code, failed, err)) { | |
209 return false; | |
210 } | |
195 } | 211 } |
196 data->args[code.argno()] = uintptr_t(-1); | 212 break; |
197 if (!VerifyErrorCode(sandbox, program, data, root_code, passed, err)) { | 213 default: // TODO(markus): Need to add support for OP_GREATER |
198 return false; | 214 *err = "Unsupported operation in conditional error code"; |
199 } | 215 return false; |
200 data->args[code.argno()] = 0; | |
201 if (!VerifyErrorCode(sandbox, program, data, root_code, failed, err)) { | |
202 return false; | |
203 } | |
204 } | |
205 break; | |
206 default: // TODO(markus): Need to add support for OP_GREATER | |
207 *err = "Unsupported operation in conditional error code"; | |
208 return false; | |
209 } | 216 } |
210 } else { | 217 } else { |
211 *err = "Attempting to return invalid error code from BPF program"; | 218 *err = "Attempting to return invalid error code from BPF program"; |
212 return false; | 219 return false; |
213 } | 220 } |
214 return true; | 221 return true; |
215 } | 222 } |
216 | 223 |
217 void Ld(State *state, const struct sock_filter& insn, const char **err) { | 224 void Ld(State* state, const struct sock_filter& insn, const char** err) { |
218 if (BPF_SIZE(insn.code) != BPF_W || | 225 if (BPF_SIZE(insn.code) != BPF_W || BPF_MODE(insn.code) != BPF_ABS) { |
219 BPF_MODE(insn.code) != BPF_ABS) { | |
220 *err = "Invalid BPF_LD instruction"; | 226 *err = "Invalid BPF_LD instruction"; |
221 return; | 227 return; |
222 } | 228 } |
223 if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) { | 229 if (insn.k < sizeof(struct arch_seccomp_data) && (insn.k & 3) == 0) { |
224 // We only allow loading of properly aligned 32bit quantities. | 230 // We only allow loading of properly aligned 32bit quantities. |
225 memcpy(&state->accumulator, | 231 memcpy(&state->accumulator, |
226 reinterpret_cast<const char *>(&state->data) + insn.k, | 232 reinterpret_cast<const char*>(&state->data) + insn.k, |
227 4); | 233 4); |
228 } else { | 234 } else { |
229 *err = "Invalid operand in BPF_LD instruction"; | 235 *err = "Invalid operand in BPF_LD instruction"; |
230 return; | 236 return; |
231 } | 237 } |
232 state->acc_is_valid = true; | 238 state->acc_is_valid = true; |
233 return; | 239 return; |
234 } | 240 } |
235 | 241 |
236 void Jmp(State *state, const struct sock_filter& insn, const char **err) { | 242 void Jmp(State* state, const struct sock_filter& insn, const char** err) { |
237 if (BPF_OP(insn.code) == BPF_JA) { | 243 if (BPF_OP(insn.code) == BPF_JA) { |
238 if (state->ip + insn.k + 1 >= state->program.size() || | 244 if (state->ip + insn.k + 1 >= state->program.size() || |
239 state->ip + insn.k + 1 <= state->ip) { | 245 state->ip + insn.k + 1 <= state->ip) { |
240 compilation_failure: | 246 compilation_failure: |
241 *err = "Invalid BPF_JMP instruction"; | 247 *err = "Invalid BPF_JMP instruction"; |
242 return; | 248 return; |
243 } | 249 } |
244 state->ip += insn.k; | 250 state->ip += insn.k; |
245 } else { | 251 } else { |
246 if (BPF_SRC(insn.code) != BPF_K || | 252 if (BPF_SRC(insn.code) != BPF_K || !state->acc_is_valid || |
247 !state->acc_is_valid || | |
248 state->ip + insn.jt + 1 >= state->program.size() || | 253 state->ip + insn.jt + 1 >= state->program.size() || |
249 state->ip + insn.jf + 1 >= state->program.size()) { | 254 state->ip + insn.jf + 1 >= state->program.size()) { |
250 goto compilation_failure; | 255 goto compilation_failure; |
251 } | 256 } |
252 switch (BPF_OP(insn.code)) { | 257 switch (BPF_OP(insn.code)) { |
253 case BPF_JEQ: | 258 case BPF_JEQ: |
254 if (state->accumulator == insn.k) { | 259 if (state->accumulator == insn.k) { |
255 state->ip += insn.jt; | 260 state->ip += insn.jt; |
256 } else { | 261 } else { |
257 state->ip += insn.jf; | 262 state->ip += insn.jf; |
258 } | 263 } |
259 break; | 264 break; |
260 case BPF_JGT: | 265 case BPF_JGT: |
261 if (state->accumulator > insn.k) { | 266 if (state->accumulator > insn.k) { |
262 state->ip += insn.jt; | 267 state->ip += insn.jt; |
263 } else { | 268 } else { |
264 state->ip += insn.jf; | 269 state->ip += insn.jf; |
265 } | 270 } |
266 break; | 271 break; |
267 case BPF_JGE: | 272 case BPF_JGE: |
268 if (state->accumulator >= insn.k) { | 273 if (state->accumulator >= insn.k) { |
269 state->ip += insn.jt; | 274 state->ip += insn.jt; |
270 } else { | 275 } else { |
271 state->ip += insn.jf; | 276 state->ip += insn.jf; |
272 } | 277 } |
273 break; | 278 break; |
274 case BPF_JSET: | 279 case BPF_JSET: |
275 if (state->accumulator & insn.k) { | 280 if (state->accumulator & insn.k) { |
276 state->ip += insn.jt; | 281 state->ip += insn.jt; |
277 } else { | 282 } else { |
278 state->ip += insn.jf; | 283 state->ip += insn.jf; |
279 } | 284 } |
280 break; | 285 break; |
281 default: | 286 default: |
282 goto compilation_failure; | 287 goto compilation_failure; |
283 } | 288 } |
284 } | 289 } |
285 } | 290 } |
286 | 291 |
287 uint32_t Ret(State *, const struct sock_filter& insn, const char **err) { | 292 uint32_t Ret(State*, const struct sock_filter& insn, const char** err) { |
288 if (BPF_SRC(insn.code) != BPF_K) { | 293 if (BPF_SRC(insn.code) != BPF_K) { |
289 *err = "Invalid BPF_RET instruction"; | 294 *err = "Invalid BPF_RET instruction"; |
290 return 0; | 295 return 0; |
291 } | 296 } |
292 return insn.k; | 297 return insn.k; |
293 } | 298 } |
294 | 299 |
295 void Alu(State *state, const struct sock_filter& insn, const char **err) { | 300 void Alu(State* state, const struct sock_filter& insn, const char** err) { |
296 if (BPF_OP(insn.code) == BPF_NEG) { | 301 if (BPF_OP(insn.code) == BPF_NEG) { |
297 state->accumulator = -state->accumulator; | 302 state->accumulator = -state->accumulator; |
298 return; | 303 return; |
299 } else { | 304 } else { |
300 if (BPF_SRC(insn.code) != BPF_K) { | 305 if (BPF_SRC(insn.code) != BPF_K) { |
301 *err = "Unexpected source operand in arithmetic operation"; | 306 *err = "Unexpected source operand in arithmetic operation"; |
302 return; | 307 return; |
303 } | 308 } |
304 switch (BPF_OP(insn.code)) { | 309 switch (BPF_OP(insn.code)) { |
305 case BPF_ADD: | 310 case BPF_ADD: |
306 state->accumulator += insn.k; | 311 state->accumulator += insn.k; |
307 break; | |
308 case BPF_SUB: | |
309 state->accumulator -= insn.k; | |
310 break; | |
311 case BPF_MUL: | |
312 state->accumulator *= insn.k; | |
313 break; | |
314 case BPF_DIV: | |
315 if (!insn.k) { | |
316 *err = "Illegal division by zero"; | |
317 break; | 312 break; |
318 } | 313 case BPF_SUB: |
319 state->accumulator /= insn.k; | 314 state->accumulator -= insn.k; |
320 break; | |
321 case BPF_MOD: | |
322 if (!insn.k) { | |
323 *err = "Illegal division by zero"; | |
324 break; | 315 break; |
325 } | 316 case BPF_MUL: |
326 state->accumulator %= insn.k; | 317 state->accumulator *= insn.k; |
327 break; | |
328 case BPF_OR: | |
329 state->accumulator |= insn.k; | |
330 break; | |
331 case BPF_XOR: | |
332 state->accumulator ^= insn.k; | |
333 break; | |
334 case BPF_AND: | |
335 state->accumulator &= insn.k; | |
336 break; | |
337 case BPF_LSH: | |
338 if (insn.k > 32) { | |
339 *err = "Illegal shift operation"; | |
340 break; | 318 break; |
341 } | 319 case BPF_DIV: |
342 state->accumulator <<= insn.k; | 320 if (!insn.k) { |
343 break; | 321 *err = "Illegal division by zero"; |
344 case BPF_RSH: | 322 break; |
345 if (insn.k > 32) { | 323 } |
346 *err = "Illegal shift operation"; | 324 state->accumulator /= insn.k; |
347 break; | 325 break; |
348 } | 326 case BPF_MOD: |
349 state->accumulator >>= insn.k; | 327 if (!insn.k) { |
350 break; | 328 *err = "Illegal division by zero"; |
351 default: | 329 break; |
352 *err = "Invalid operator in arithmetic operation"; | 330 } |
353 break; | 331 state->accumulator %= insn.k; |
332 break; | |
333 case BPF_OR: | |
334 state->accumulator |= insn.k; | |
335 break; | |
336 case BPF_XOR: | |
337 state->accumulator ^= insn.k; | |
338 break; | |
339 case BPF_AND: | |
340 state->accumulator &= insn.k; | |
341 break; | |
342 case BPF_LSH: | |
343 if (insn.k > 32) { | |
344 *err = "Illegal shift operation"; | |
345 break; | |
346 } | |
347 state->accumulator <<= insn.k; | |
348 break; | |
349 case BPF_RSH: | |
350 if (insn.k > 32) { | |
351 *err = "Illegal shift operation"; | |
352 break; | |
353 } | |
354 state->accumulator >>= insn.k; | |
355 break; | |
356 default: | |
357 *err = "Invalid operator in arithmetic operation"; | |
358 break; | |
354 } | 359 } |
355 } | 360 } |
356 } | 361 } |
357 | 362 |
358 } // namespace | 363 } // namespace |
359 | 364 |
360 namespace playground2 { | 365 namespace playground2 { |
361 | 366 |
362 bool Verifier::VerifyBPF(Sandbox *sandbox, | 367 bool Verifier::VerifyBPF(Sandbox* sandbox, |
363 const std::vector<struct sock_filter>& program, | 368 const std::vector<struct sock_filter>& program, |
364 const SandboxBpfPolicy& policy, | 369 const SandboxBpfPolicy& policy, |
365 const char **err) { | 370 const char** err) { |
366 *err = NULL; | 371 *err = NULL; |
367 for (SyscallIterator iter(false); !iter.Done(); ) { | 372 for (SyscallIterator iter(false); !iter.Done();) { |
368 uint32_t sysnum = iter.Next(); | 373 uint32_t sysnum = iter.Next(); |
369 // We ideally want to iterate over the full system call range and values | 374 // We ideally want to iterate over the full system call range and values |
370 // just above and just below this range. This gives us the full result set | 375 // just above and just below this range. This gives us the full result set |
371 // of the "evaluators". | 376 // of the "evaluators". |
372 // On Intel systems, this can fail in a surprising way, as a cleared bit 30 | 377 // On Intel systems, this can fail in a surprising way, as a cleared bit 30 |
373 // indicates either i386 or x86-64; and a set bit 30 indicates x32. And | 378 // indicates either i386 or x86-64; and a set bit 30 indicates x32. And |
374 // unless we pay attention to setting this bit correctly, an early check in | 379 // unless we pay attention to setting this bit correctly, an early check in |
375 // our BPF program will make us fail with a misleading error code. | 380 // our BPF program will make us fail with a misleading error code. |
376 struct arch_seccomp_data data = { static_cast<int>(sysnum), | 381 struct arch_seccomp_data data = {static_cast<int>(sysnum), |
377 static_cast<uint32_t>(SECCOMP_ARCH) }; | 382 static_cast<uint32_t>(SECCOMP_ARCH)}; |
378 #if defined(__i386__) || defined(__x86_64__) | 383 #if defined(__i386__) || defined(__x86_64__) |
379 #if defined(__x86_64__) && defined(__ILP32__) | 384 #if defined(__x86_64__) && defined(__ILP32__) |
380 if (!(sysnum & 0x40000000u)) { | 385 if (!(sysnum & 0x40000000u)) { |
381 continue; | 386 continue; |
382 } | 387 } |
383 #else | 388 #else |
384 if (sysnum & 0x40000000u) { | 389 if (sysnum & 0x40000000u) { |
385 continue; | 390 continue; |
386 } | 391 } |
387 #endif | 392 #endif |
388 #endif | 393 #endif |
389 ErrorCode code = policy.EvaluateSyscall(sandbox, sysnum); | 394 ErrorCode code = policy.EvaluateSyscall(sandbox, sysnum); |
390 if (!VerifyErrorCode(sandbox, program, &data, code, code, err)) { | 395 if (!VerifyErrorCode(sandbox, program, &data, code, code, err)) { |
391 return false; | 396 return false; |
392 } | 397 } |
393 } | 398 } |
394 return true; | 399 return true; |
395 } | 400 } |
396 | 401 |
397 uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program, | 402 uint32_t Verifier::EvaluateBPF(const std::vector<struct sock_filter>& program, |
398 const struct arch_seccomp_data& data, | 403 const struct arch_seccomp_data& data, |
399 const char **err) { | 404 const char** err) { |
400 *err = NULL; | 405 *err = NULL; |
401 if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) { | 406 if (program.size() < 1 || program.size() >= SECCOMP_MAX_PROGRAM_SIZE) { |
402 *err = "Invalid program length"; | 407 *err = "Invalid program length"; |
403 return 0; | 408 return 0; |
404 } | 409 } |
405 for (State state(program, data); !*err; ++state.ip) { | 410 for (State state(program, data); !*err; ++state.ip) { |
406 if (state.ip >= program.size()) { | 411 if (state.ip >= program.size()) { |
407 *err = "Invalid instruction pointer in BPF program"; | 412 *err = "Invalid instruction pointer in BPF program"; |
408 break; | 413 break; |
409 } | 414 } |
410 const struct sock_filter& insn = program[state.ip]; | 415 const struct sock_filter& insn = program[state.ip]; |
411 switch (BPF_CLASS(insn.code)) { | 416 switch (BPF_CLASS(insn.code)) { |
412 case BPF_LD: | 417 case BPF_LD: |
413 Ld(&state, insn, err); | 418 Ld(&state, insn, err); |
414 break; | |
415 case BPF_JMP: | |
416 Jmp(&state, insn, err); | |
417 break; | |
418 case BPF_RET: { | |
419 uint32_t r = Ret(&state, insn, err); | |
420 switch (r & SECCOMP_RET_ACTION) { | |
421 case SECCOMP_RET_TRAP: | |
422 case SECCOMP_RET_ERRNO: | |
423 case SECCOMP_RET_ALLOW: | |
424 break; | 419 break; |
425 case SECCOMP_RET_KILL: // We don't ever generate this | 420 case BPF_JMP: |
426 case SECCOMP_RET_TRACE: // We don't ever generate this | 421 Jmp(&state, insn, err); |
427 case SECCOMP_RET_INVALID: // Should never show up in BPF program | 422 break; |
423 case BPF_RET: { | |
424 uint32_t r = Ret(&state, insn, err); | |
425 switch (r & SECCOMP_RET_ACTION) { | |
426 case SECCOMP_RET_TRAP: | |
427 case SECCOMP_RET_ERRNO: | |
428 case SECCOMP_RET_ALLOW: | |
429 break; | |
430 case SECCOMP_RET_KILL: // We don't ever generate this | |
431 case SECCOMP_RET_TRACE: // We don't ever generate this | |
432 case SECCOMP_RET_INVALID: // Should never show up in BPF program | |
433 default: | |
434 *err = "Unexpected return code found in BPF program"; | |
435 return 0; | |
436 } | |
437 return r; | |
438 } | |
439 case BPF_ALU: | |
440 Alu(&state, insn, err); | |
441 break; | |
428 default: | 442 default: |
429 *err = "Unexpected return code found in BPF program"; | 443 *err = "Unexpected instruction in BPF program"; |
430 return 0; | 444 break; |
431 } | |
432 return r; } | |
433 case BPF_ALU: | |
434 Alu(&state, insn, err); | |
435 break; | |
436 default: | |
437 *err = "Unexpected instruction in BPF program"; | |
438 break; | |
439 } | 445 } |
440 } | 446 } |
441 return 0; | 447 return 0; |
442 } | 448 } |
443 | 449 |
444 } // namespace | 450 } // namespace |
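
For reference, a minimal standalone sketch of the TP_32BIT sanity check that `EvaluateErrorCode()` performs above: a 64bit register is only acceptable for a 32bit comparison if its upper bits are all clear, or if it is the sign-extension of a negative 32bit value. The helper name and the test values below are illustrative, not taken from this CL.

```cpp
#include <cstdint>
#include <cstdio>

namespace {

// True iff a 64bit register value cannot legitimately carry a 32bit system
// call argument: some upper bit is set, and the value is not the
// sign-extension of a negative 32bit number.
bool IsUnexpected64bitArgument(uint64_t arg) {
  return (arg >> 32) != 0 &&
         (arg & 0xFFFFFFFF80000000ull) != 0xFFFFFFFF80000000ull;
}

}  // namespace

int main() {
  printf("%d\n", IsUnexpected64bitArgument(0x00000000DEADBEEFull));  // 0: plain 32bit value
  printf("%d\n", IsUnexpected64bitArgument(0xFFFFFFFFFFFFFFFEull));  // 0: sign-extended -2
  printf("%d\n", IsUnexpected64bitArgument(0x0000000100000000ull));  // 1: genuinely 64bit
  return 0;
}
```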
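
Likewise, a small self-contained illustration of the two zero-mask corner cases called out in the OP_HAS_ALL_BITS / OP_HAS_ANY_BITS comments: testing "any" bits against a zero mask can never pass, and testing for "all" bits of a zero mask can never fail, which is why the verifier swaps the expected passed()/failed() branches there. The helper names are hypothetical.

```cpp
#include <cstdint>
#include <cstdio>

namespace {

// "any bits": the argument shares at least one set bit with the mask.
bool HasAnyBits(uint64_t arg, uint64_t mask) { return (arg & mask) != 0; }

// "all bits": every set bit of the mask is also set in the argument.
bool HasAllBits(uint64_t arg, uint64_t mask) { return (arg & mask) == mask; }

}  // namespace

int main() {
  // With a zero mask, "any" never succeeds and "all" never fails,
  // regardless of the argument value.
  const uint64_t args[] = {0, 1, 0xFFFFFFFFFFFFFFFFull};
  for (uint64_t arg : args) {
    printf("arg=%016llx any=%d all=%d\n",
           static_cast<unsigned long long>(arg),
           HasAnyBits(arg, 0), HasAllBits(arg, 0));
  }
  return 0;
}
```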
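
The bit 30 handling in `VerifyBPF()` follows the Linux x32 convention (the kernel's `__X32_SYSCALL_BIT`, 0x40000000): set for x32 syscall numbers, clear for i386 and regular x86-64. A hedged sketch of that filtering decision, with an assumed helper name:

```cpp
#include <cstdint>
#include <cstdio>

namespace {

// Bit 30 of the syscall number selects the x32 ABI on Intel; i386 and the
// regular x86-64 ABI leave it clear.
const uint32_t kX32SyscallBit = 0x40000000u;

// Returns true if |sysnum| belongs to the ABI the filter was built for.
bool SyscallMatchesAbi(uint32_t sysnum, bool target_is_x32) {
  return ((sysnum & kX32SyscallBit) != 0) == target_is_x32;
}

}  // namespace

int main() {
  printf("%d\n", SyscallMatchesAbi(39, false));                   // 1: non-x32 number, non-x32 target
  printf("%d\n", SyscallMatchesAbi(39 | kX32SyscallBit, false));  // 0: x32 number, wrong ABI
  printf("%d\n", SyscallMatchesAbi(39 | kX32SyscallBit, true));   // 1: x32 number, x32 target
  return 0;
}
```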
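
Finally, for orientation, a hypothetical example of the kind of program `EvaluateBPF()` interprets: load the syscall number, allow one call, and return an errno for everything else. It uses only the standard `<linux/filter.h>` / `<linux/seccomp.h>` macros and sticks to the return actions the verifier accepts (ALLOW, ERRNO, TRAP); it is not output generated by this code.

```cpp
#include <stddef.h>

#include <cstdio>

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/syscall.h>

namespace {

// Hypothetical filter: allow getpid(2), fail everything else with EPERM (1).
struct sock_filter kExampleFilter[] = {
    // Accumulator = seccomp_data.nr; an aligned 32bit BPF_LD|BPF_W|BPF_ABS
    // load is the only form the verifier's Ld() accepts.
    BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
    // If the syscall is getpid, fall through to ALLOW; otherwise skip to ERRNO.
    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | 1 /* EPERM */),
};

}  // namespace

int main() {
  printf("filter has %zu instructions\n",
         sizeof(kExampleFilter) / sizeof(kExampleFilter[0]));
  return 0;
}
```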