OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // Some headers on Android are missing cdefs: crbug.com/172337. | 5 // Some headers on Android are missing cdefs: crbug.com/172337. |
6 // (We can't use OS_ANDROID here since build_config.h is not included). | 6 // (We can't use OS_ANDROID here since build_config.h is not included). |
7 #if defined(ANDROID) | 7 #if defined(ANDROID) |
8 #include <sys/cdefs.h> | 8 #include <sys/cdefs.h> |
9 #endif | 9 #endif |
10 | 10 |
(...skipping 20 matching lines...) |
31 #include "sandbox/linux/seccomp-bpf/syscall.h" | 31 #include "sandbox/linux/seccomp-bpf/syscall.h" |
32 #include "sandbox/linux/seccomp-bpf/syscall_iterator.h" | 32 #include "sandbox/linux/seccomp-bpf/syscall_iterator.h" |
33 #include "sandbox/linux/seccomp-bpf/verifier.h" | 33 #include "sandbox/linux/seccomp-bpf/verifier.h" |
34 | 34 |
35 namespace playground2 { | 35 namespace playground2 { |
36 | 36 |
37 namespace { | 37 namespace { |
38 | 38 |
39 const int kExpectedExitCode = 100; | 39 const int kExpectedExitCode = 100; |
40 | 40 |
41 template<class T> int popcount(T x); | 41 template <class T> |
42 template<> int popcount<unsigned int>(unsigned int x) { | 42 int popcount(T x); |
| 43 template <> |
| 44 int popcount<unsigned int>(unsigned int x) { |
43 return __builtin_popcount(x); | 45 return __builtin_popcount(x); |
44 } | 46 } |
45 template<> int popcount<unsigned long>(unsigned long x) { | 47 template <> |
| 48 int popcount<unsigned long>(unsigned long x) { |
46 return __builtin_popcountl(x); | 49 return __builtin_popcountl(x); |
47 } | 50 } |
48 template<> int popcount<unsigned long long>(unsigned long long x) { | 51 template <> |
| 52 int popcount<unsigned long long>(unsigned long long x) { |
49 return __builtin_popcountll(x); | 53 return __builtin_popcountll(x); |
50 } | 54 } |
51 | 55 |
52 void WriteFailedStderrSetupMessage(int out_fd) { | 56 void WriteFailedStderrSetupMessage(int out_fd) { |
53 const char* error_string = strerror(errno); | 57 const char* error_string = strerror(errno); |
54 static const char msg[] = "You have reproduced a puzzling issue.\n" | 58 static const char msg[] = |
55 "Please, report to crbug.com/152530!\n" | 59 "You have reproduced a puzzling issue.\n" |
56 "Failed to set up stderr: "; | 60 "Please, report to crbug.com/152530!\n" |
57 if (HANDLE_EINTR(write(out_fd, msg, sizeof(msg)-1)) > 0 && error_string && | 61 "Failed to set up stderr: "; |
| 62 if (HANDLE_EINTR(write(out_fd, msg, sizeof(msg) - 1)) > 0 && error_string && |
58 HANDLE_EINTR(write(out_fd, error_string, strlen(error_string))) > 0 && | 63 HANDLE_EINTR(write(out_fd, error_string, strlen(error_string))) > 0 && |
59 HANDLE_EINTR(write(out_fd, "\n", 1))) { | 64 HANDLE_EINTR(write(out_fd, "\n", 1))) { |
60 } | 65 } |
61 } | 66 } |
62 | 67 |
63 // We define a really simple sandbox policy. It is just good enough for us | 68 // We define a really simple sandbox policy. It is just good enough for us |
64 // to tell that the sandbox has actually been activated. | 69 // to tell that the sandbox has actually been activated. |
65 ErrorCode ProbeEvaluator(Sandbox *, int sysnum, void *) __attribute__((const)); | 70 ErrorCode ProbeEvaluator(Sandbox*, int sysnum, void*) __attribute__((const)); |
66 ErrorCode ProbeEvaluator(Sandbox *, int sysnum, void *) { | 71 ErrorCode ProbeEvaluator(Sandbox*, int sysnum, void*) { |
67 switch (sysnum) { | 72 switch (sysnum) { |
68 case __NR_getpid: | 73 case __NR_getpid: |
69 // Return EPERM so that we can check that the filter actually ran. | 74 // Return EPERM so that we can check that the filter actually ran. |
70 return ErrorCode(EPERM); | 75 return ErrorCode(EPERM); |
71 case __NR_exit_group: | 76 case __NR_exit_group: |
72 // Allow exit() with a non-default return code. | 77 // Allow exit() with a non-default return code. |
73 return ErrorCode(ErrorCode::ERR_ALLOWED); | 78 return ErrorCode(ErrorCode::ERR_ALLOWED); |
74 default: | 79 default: |
75 // Make everything else fail in an easily recognizable way. | 80 // Make everything else fail in an easily recognizable way. |
76 return ErrorCode(EINVAL); | 81 return ErrorCode(EINVAL); |
77 } | 82 } |
78 } | 83 } |
79 | 84 |
80 void ProbeProcess(void) { | 85 void ProbeProcess(void) { |
81 if (syscall(__NR_getpid) < 0 && errno == EPERM) { | 86 if (syscall(__NR_getpid) < 0 && errno == EPERM) { |
82 syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode)); | 87 syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode)); |
83 } | 88 } |
84 } | 89 } |
85 | 90 |
86 ErrorCode AllowAllEvaluator(Sandbox *, int sysnum, void *) { | 91 ErrorCode AllowAllEvaluator(Sandbox*, int sysnum, void*) { |
87 if (!Sandbox::IsValidSyscallNumber(sysnum)) { | 92 if (!Sandbox::IsValidSyscallNumber(sysnum)) { |
88 return ErrorCode(ENOSYS); | 93 return ErrorCode(ENOSYS); |
89 } | 94 } |
90 return ErrorCode(ErrorCode::ERR_ALLOWED); | 95 return ErrorCode(ErrorCode::ERR_ALLOWED); |
91 } | 96 } |
92 | 97 |
93 void TryVsyscallProcess(void) { | 98 void TryVsyscallProcess(void) { |
94 time_t current_time; | 99 time_t current_time; |
95 // time() is implemented as a vsyscall. With an older glibc, with | 100 // time() is implemented as a vsyscall. With an older glibc, with |
96 // vsyscall=emulate and some versions of the seccomp BPF patch | 101 // vsyscall=emulate and some versions of the seccomp BPF patch |
97 // we may get SIGKILL-ed. Detect this! | 102 // we may get SIGKILL-ed. Detect this! |
98 if (time(&current_time) != static_cast<time_t>(-1)) { | 103 if (time(&current_time) != static_cast<time_t>(-1)) { |
99 syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode)); | 104 syscall(__NR_exit_group, static_cast<intptr_t>(kExpectedExitCode)); |
100 } | 105 } |
101 } | 106 } |
102 | 107 |
103 bool IsSingleThreaded(int proc_fd) { | 108 bool IsSingleThreaded(int proc_fd) { |
104 if (proc_fd < 0) { | 109 if (proc_fd < 0) { |
105 // Cannot determine whether program is single-threaded. Hope for | 110 // Cannot determine whether program is single-threaded. Hope for |
106 // the best... | 111 // the best... |
107 return true; | 112 return true; |
108 } | 113 } |
109 | 114 |
110 struct stat sb; | 115 struct stat sb; |
111 int task = -1; | 116 int task = -1; |
112 if ((task = openat(proc_fd, "self/task", O_RDONLY|O_DIRECTORY)) < 0 || | 117 if ((task = openat(proc_fd, "self/task", O_RDONLY | O_DIRECTORY)) < 0 || |
113 fstat(task, &sb) != 0 || | 118 fstat(task, &sb) != 0 || sb.st_nlink != 3 || HANDLE_EINTR(close(task))) { |
114 sb.st_nlink != 3 || | |
115 HANDLE_EINTR(close(task))) { | |
116 if (task >= 0) { | 119 if (task >= 0) { |
117 if (HANDLE_EINTR(close(task))) { } | 120 if (HANDLE_EINTR(close(task))) { |
| 121 } |
118 } | 122 } |
119 return false; | 123 return false; |
120 } | 124 } |
121 return true; | 125 return true; |
122 } | 126 } |
123 | 127 |
124 bool IsDenied(const ErrorCode& code) { | 128 bool IsDenied(const ErrorCode& code) { |
125 return (code.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP || | 129 return (code.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP || |
126 (code.err() >= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MIN_ERRNO) && | 130 (code.err() >= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MIN_ERRNO) && |
127 code.err() <= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MAX_ERRNO)); | 131 code.err() <= (SECCOMP_RET_ERRNO + ErrorCode::ERR_MAX_ERRNO)); |
128 } | 132 } |
129 | 133 |
130 // Function that can be passed as a callback function to CodeGen::Traverse(). | 134 // Function that can be passed as a callback function to CodeGen::Traverse(). |
131 // Checks whether the "insn" returns an UnsafeTrap() ErrorCode. If so, it | 135 // Checks whether the "insn" returns an UnsafeTrap() ErrorCode. If so, it |
132 // sets the "bool" variable pointed to by "aux". | 136 // sets the "bool" variable pointed to by "aux". |
133 void CheckForUnsafeErrorCodes(Instruction *insn, void *aux) { | 137 void CheckForUnsafeErrorCodes(Instruction* insn, void* aux) { |
134 bool *is_unsafe = static_cast<bool *>(aux); | 138 bool* is_unsafe = static_cast<bool*>(aux); |
135 if (!*is_unsafe) { | 139 if (!*is_unsafe) { |
136 if (BPF_CLASS(insn->code) == BPF_RET && | 140 if (BPF_CLASS(insn->code) == BPF_RET && insn->k > SECCOMP_RET_TRAP && |
137 insn->k > SECCOMP_RET_TRAP && | |
138 insn->k - SECCOMP_RET_TRAP <= SECCOMP_RET_DATA) { | 141 insn->k - SECCOMP_RET_TRAP <= SECCOMP_RET_DATA) { |
139 const ErrorCode& err = | 142 const ErrorCode& err = |
140 Trap::ErrorCodeFromTrapId(insn->k & SECCOMP_RET_DATA); | 143 Trap::ErrorCodeFromTrapId(insn->k & SECCOMP_RET_DATA); |
141 if (err.error_type() != ErrorCode::ET_INVALID && !err.safe()) { | 144 if (err.error_type() != ErrorCode::ET_INVALID && !err.safe()) { |
142 *is_unsafe = true; | 145 *is_unsafe = true; |
143 } | 146 } |
144 } | 147 } |
145 } | 148 } |
146 } | 149 } |
147 | 150 |
148 // A Trap() handler that returns an "errno" value. The value is encoded | 151 // A Trap() handler that returns an "errno" value. The value is encoded |
149 // in the "aux" parameter. | 152 // in the "aux" parameter. |
150 intptr_t ReturnErrno(const struct arch_seccomp_data&, void *aux) { | 153 intptr_t ReturnErrno(const struct arch_seccomp_data&, void* aux) { |
151 // TrapFnc functions report error by following the native kernel convention | 154 // TrapFnc functions report error by following the native kernel convention |
152 // of returning an exit code in the range of -1..-4096. They do not try to | 155 // of returning an exit code in the range of -1..-4096. They do not try to |
153 // set errno themselves. The glibc wrapper that triggered the SIGSYS will | 156 // set errno themselves. The glibc wrapper that triggered the SIGSYS will |
154 // ultimately do so for us. | 157 // ultimately do so for us. |
155 int err = reinterpret_cast<intptr_t>(aux) & SECCOMP_RET_DATA; | 158 int err = reinterpret_cast<intptr_t>(aux) & SECCOMP_RET_DATA; |
156 return -err; | 159 return -err; |
157 } | 160 } |
158 | 161 |
159 // Function that can be passed as a callback function to CodeGen::Traverse(). | 162 // Function that can be passed as a callback function to CodeGen::Traverse(). |
160 // Checks whether the "insn" returns an errno value from a BPF filter. If so, | 163 // Checks whether the "insn" returns an errno value from a BPF filter. If so, |
161 // it rewrites the instruction to instead call a Trap() handler that does | 164 // it rewrites the instruction to instead call a Trap() handler that does |
162 // the same thing. "aux" is ignored. | 165 // the same thing. "aux" is ignored. |
163 void RedirectToUserspace(Instruction *insn, void *aux) { | 166 void RedirectToUserspace(Instruction* insn, void* aux) { |
164 // When inside an UnsafeTrap() callback, we want to allow all system calls. | 167 // When inside an UnsafeTrap() callback, we want to allow all system calls. |
165 // This means, we must conditionally disable the sandbox -- and that's not | 168 // This means, we must conditionally disable the sandbox -- and that's not |
166 // something that kernel-side BPF filters can do, as they cannot inspect | 169 // something that kernel-side BPF filters can do, as they cannot inspect |
167 // any state other than the syscall arguments. | 170 // any state other than the syscall arguments. |
168 // But if we redirect all error handlers to user-space, then we can easily | 171 // But if we redirect all error handlers to user-space, then we can easily |
169 // make this decision. | 172 // make this decision. |
170 // The performance penalty for this extra round-trip to user-space is not | 173 // The performance penalty for this extra round-trip to user-space is not |
171 // actually that bad, as we only ever pay it for denied system calls; and a | 174 // actually that bad, as we only ever pay it for denied system calls; and a |
172 // typical program has very few of these. | 175 // typical program has very few of these. |
173 Sandbox *sandbox = static_cast<Sandbox *>(aux); | 176 Sandbox* sandbox = static_cast<Sandbox*>(aux); |
174 if (BPF_CLASS(insn->code) == BPF_RET && | 177 if (BPF_CLASS(insn->code) == BPF_RET && |
175 (insn->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) { | 178 (insn->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) { |
176 insn->k = sandbox->Trap(ReturnErrno, | 179 insn->k = sandbox->Trap(ReturnErrno, |
177 reinterpret_cast<void *>(insn->k & SECCOMP_RET_DATA)).err(); | 180 reinterpret_cast<void*>(insn->k & SECCOMP_RET_DATA)).err(); |
178 } | 181 } |
179 } | 182 } |
180 | 183 |
181 // This wraps an existing policy and changes its behavior to match the changes | 184 // This wraps an existing policy and changes its behavior to match the changes |
182 // made by RedirectToUserspace(). This is part of the framework that allows BPF | 185 // made by RedirectToUserspace(). This is part of the framework that allows BPF |
183 // evaluation in userland. | 186 // evaluation in userland. |
184 // TODO(markus): document the code inside better. | 187 // TODO(markus): document the code inside better. |
185 class RedirectToUserSpacePolicyWrapper : public SandboxBpfPolicy { | 188 class RedirectToUserSpacePolicyWrapper : public SandboxBpfPolicy { |
186 public: | 189 public: |
187 explicit RedirectToUserSpacePolicyWrapper( | 190 explicit RedirectToUserSpacePolicyWrapper( |
188 const SandboxBpfPolicy* wrapped_policy) | 191 const SandboxBpfPolicy* wrapped_policy) |
189 : wrapped_policy_(wrapped_policy) { | 192 : wrapped_policy_(wrapped_policy) { |
190 DCHECK(wrapped_policy_); | 193 DCHECK(wrapped_policy_); |
191 } | 194 } |
192 | 195 |
193 virtual ErrorCode EvaluateSyscall(Sandbox* sandbox_compiler, | 196 virtual ErrorCode EvaluateSyscall(Sandbox* sandbox_compiler, |
194 int system_call_number) const OVERRIDE { | 197 int system_call_number) const OVERRIDE { |
195 ErrorCode err = | 198 ErrorCode err = |
196 wrapped_policy_->EvaluateSyscall(sandbox_compiler, system_call_number); | 199 wrapped_policy_->EvaluateSyscall(sandbox_compiler, system_call_number); |
197 if ((err.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) { | 200 if ((err.err() & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) { |
198 return sandbox_compiler->Trap(ReturnErrno, | 201 return sandbox_compiler->Trap( |
199 reinterpret_cast<void*>(err.err() & SECCOMP_RET_DATA)); | 202 ReturnErrno, reinterpret_cast<void*>(err.err() & SECCOMP_RET_DATA)); |
200 } | 203 } |
201 return err; | 204 return err; |
202 } | 205 } |
203 | 206 |
204 private: | 207 private: |
205 const SandboxBpfPolicy* wrapped_policy_; | 208 const SandboxBpfPolicy* wrapped_policy_; |
206 DISALLOW_COPY_AND_ASSIGN(RedirectToUserSpacePolicyWrapper); | 209 DISALLOW_COPY_AND_ASSIGN(RedirectToUserSpacePolicyWrapper); |
207 }; | 210 }; |
208 | 211 |
209 intptr_t BpfFailure(const struct arch_seccomp_data&, void *aux) { | 212 intptr_t BpfFailure(const struct arch_seccomp_data&, void* aux) { |
210 SANDBOX_DIE(static_cast<char *>(aux)); | 213 SANDBOX_DIE(static_cast<char*>(aux)); |
211 } | 214 } |
212 | 215 |
213 // This class allows compatibility with the old, deprecated SetSandboxPolicy. | 216 // This class allows compatibility with the old, deprecated SetSandboxPolicy. |
214 class CompatibilityPolicy : public SandboxBpfPolicy { | 217 class CompatibilityPolicy : public SandboxBpfPolicy { |
215 public: | 218 public: |
216 CompatibilityPolicy(Sandbox::EvaluateSyscall syscall_evaluator, void* aux) | 219 CompatibilityPolicy(Sandbox::EvaluateSyscall syscall_evaluator, void* aux) |
217 : syscall_evaluator_(syscall_evaluator), | 220 : syscall_evaluator_(syscall_evaluator), aux_(aux) { |
218 aux_(aux) { DCHECK(syscall_evaluator_); } | 221 DCHECK(syscall_evaluator_); |
| 222 } |
219 | 223 |
220 virtual ErrorCode EvaluateSyscall(Sandbox* sandbox_compiler, | 224 virtual ErrorCode EvaluateSyscall(Sandbox* sandbox_compiler, |
221 int system_call_number) const OVERRIDE { | 225 int system_call_number) const OVERRIDE { |
222 return syscall_evaluator_(sandbox_compiler, system_call_number, aux_); | 226 return syscall_evaluator_(sandbox_compiler, system_call_number, aux_); |
223 } | 227 } |
224 | 228 |
225 private: | 229 private: |
226 Sandbox::EvaluateSyscall syscall_evaluator_; | 230 Sandbox::EvaluateSyscall syscall_evaluator_; |
227 void* aux_; | 231 void* aux_; |
228 DISALLOW_COPY_AND_ASSIGN(CompatibilityPolicy); | 232 DISALLOW_COPY_AND_ASSIGN(CompatibilityPolicy); |
229 }; | 233 }; |
230 | 234 |
231 } // namespace | 235 } // namespace |
232 | 236 |
233 Sandbox::Sandbox() | 237 Sandbox::Sandbox() |
234 : quiet_(false), | 238 : quiet_(false), |
235 proc_fd_(-1), | 239 proc_fd_(-1), |
236 conds_(new Conds), | 240 conds_(new Conds), |
237 sandbox_has_started_(false) { | 241 sandbox_has_started_(false) {} |
238 } | |
239 | 242 |
240 Sandbox::~Sandbox() { | 243 Sandbox::~Sandbox() { |
241 // It is generally unsafe to call any memory allocator operations or to even | 244 // It is generally unsafe to call any memory allocator operations or to even |
242 // call arbitrary destructors after having installed a new policy. We just | 245 // call arbitrary destructors after having installed a new policy. We just |
243 // have no way to tell whether this policy would allow the system calls that | 246 // have no way to tell whether this policy would allow the system calls that |
244 // the constructors can trigger. | 247 // the constructors can trigger. |
245 // So, we normally destroy all of our complex state prior to starting the | 248 // So, we normally destroy all of our complex state prior to starting the |
246 // sandbox. But this won't happen, if the Sandbox object was created and | 249 // sandbox. But this won't happen, if the Sandbox object was created and |
247 // never actually used to set up a sandbox. So, just in case, we are | 250 // never actually used to set up a sandbox. So, just in case, we are |
248 // destroying any remaining state. | 251 // destroying any remaining state. |
249 // The "if ()" statements are technically superfluous. But let's be explicit | 252 // The "if ()" statements are technically superfluous. But let's be explicit |
250 // that we really don't want to run any code, when we already destroyed | 253 // that we really don't want to run any code, when we already destroyed |
251 // objects before setting up the sandbox. | 254 // objects before setting up the sandbox. |
252 if (conds_) { | 255 if (conds_) { |
253 delete conds_; | 256 delete conds_; |
254 } | 257 } |
255 } | 258 } |
256 | 259 |
257 bool Sandbox::IsValidSyscallNumber(int sysnum) { | 260 bool Sandbox::IsValidSyscallNumber(int sysnum) { |
258 return SyscallIterator::IsValid(sysnum); | 261 return SyscallIterator::IsValid(sysnum); |
259 } | 262 } |
260 | 263 |
261 | |
262 bool Sandbox::RunFunctionInPolicy(void (*code_in_sandbox)(), | 264 bool Sandbox::RunFunctionInPolicy(void (*code_in_sandbox)(), |
263 Sandbox::EvaluateSyscall syscall_evaluator, | 265 Sandbox::EvaluateSyscall syscall_evaluator, |
264 void *aux) { | 266 void* aux) { |
265 // Block all signals before forking a child process. This prevents an | 267 // Block all signals before forking a child process. This prevents an |
266 // attacker from manipulating our test by sending us an unexpected signal. | 268 // attacker from manipulating our test by sending us an unexpected signal. |
267 sigset_t old_mask, new_mask; | 269 sigset_t old_mask, new_mask; |
268 if (sigfillset(&new_mask) || | 270 if (sigfillset(&new_mask) || sigprocmask(SIG_BLOCK, &new_mask, &old_mask)) { |
269 sigprocmask(SIG_BLOCK, &new_mask, &old_mask)) { | |
270 SANDBOX_DIE("sigprocmask() failed"); | 271 SANDBOX_DIE("sigprocmask() failed"); |
271 } | 272 } |
272 int fds[2]; | 273 int fds[2]; |
273 if (pipe2(fds, O_NONBLOCK|O_CLOEXEC)) { | 274 if (pipe2(fds, O_NONBLOCK | O_CLOEXEC)) { |
274 SANDBOX_DIE("pipe() failed"); | 275 SANDBOX_DIE("pipe() failed"); |
275 } | 276 } |
276 | 277 |
277 if (fds[0] <= 2 || fds[1] <= 2) { | 278 if (fds[0] <= 2 || fds[1] <= 2) { |
278 SANDBOX_DIE("Process started without standard file descriptors"); | 279 SANDBOX_DIE("Process started without standard file descriptors"); |
279 } | 280 } |
280 | 281 |
281 pid_t pid = fork(); | 282 pid_t pid = fork(); |
282 if (pid < 0) { | 283 if (pid < 0) { |
283 // Die if we cannot fork(). We would probably fail a little later | 284 // Die if we cannot fork(). We would probably fail a little later |
(...skipping 69 matching lines...) |
353 | 354 |
354 // If we fail to support sandboxing, there might be an additional | 355 // If we fail to support sandboxing, there might be an additional |
355 // error message. If so, this was an entirely unexpected and fatal | 356 // error message. If so, this was an entirely unexpected and fatal |
356 // failure. We should report the failure and somebody must fix | 357 // failure. We should report the failure and somebody must fix |
357 // things. This is probably a security-critical bug in the sandboxing | 358 // things. This is probably a security-critical bug in the sandboxing |
358 // code. | 359 // code. |
359 if (!rc) { | 360 if (!rc) { |
360 char buf[4096]; | 361 char buf[4096]; |
361 ssize_t len = HANDLE_EINTR(read(fds[0], buf, sizeof(buf) - 1)); | 362 ssize_t len = HANDLE_EINTR(read(fds[0], buf, sizeof(buf) - 1)); |
362 if (len > 0) { | 363 if (len > 0) { |
363 while (len > 1 && buf[len-1] == '\n') { | 364 while (len > 1 && buf[len - 1] == '\n') { |
364 --len; | 365 --len; |
365 } | 366 } |
366 buf[len] = '\000'; | 367 buf[len] = '\000'; |
367 SANDBOX_DIE(buf); | 368 SANDBOX_DIE(buf); |
368 } | 369 } |
369 } | 370 } |
370 if (HANDLE_EINTR(close(fds[0]))) { | 371 if (HANDLE_EINTR(close(fds[0]))) { |
371 SANDBOX_DIE("close() failed"); | 372 SANDBOX_DIE("close() failed"); |
372 } | 373 } |
373 | 374 |
374 return rc; | 375 return rc; |
375 } | 376 } |
376 | 377 |
377 bool Sandbox::KernelSupportSeccompBPF() { | 378 bool Sandbox::KernelSupportSeccompBPF() { |
378 return | 379 return RunFunctionInPolicy(ProbeProcess, ProbeEvaluator, 0) && |
379 RunFunctionInPolicy(ProbeProcess, ProbeEvaluator, 0) && | 380 RunFunctionInPolicy(TryVsyscallProcess, AllowAllEvaluator, 0); |
380 RunFunctionInPolicy(TryVsyscallProcess, AllowAllEvaluator, 0); | |
381 } | 381 } |
382 | 382 |
383 Sandbox::SandboxStatus Sandbox::SupportsSeccompSandbox(int proc_fd) { | 383 Sandbox::SandboxStatus Sandbox::SupportsSeccompSandbox(int proc_fd) { |
384 // If the sandbox is currently active, we clearly must have support for | 384 // If the sandbox is currently active, we clearly must have support for |
385 // sandboxing. | 385 // sandboxing. |
386 if (status_ == STATUS_ENABLED) { | 386 if (status_ == STATUS_ENABLED) { |
387 return status_; | 387 return status_; |
388 } | 388 } |
389 | 389 |
390 // Even if the sandbox was previously available, something might have | 390 // Even if the sandbox was previously available, something might have |
(...skipping 23 matching lines...) |
414 if (status_ == STATUS_UNKNOWN) { | 414 if (status_ == STATUS_UNKNOWN) { |
415 // We create our own private copy of a "Sandbox" object. This ensures that | 415 // We create our own private copy of a "Sandbox" object. This ensures that |
416 // the object does not have any policies configured, that might interfere | 416 // the object does not have any policies configured, that might interfere |
417 // with the tests done by "KernelSupportSeccompBPF()". | 417 // with the tests done by "KernelSupportSeccompBPF()". |
418 Sandbox sandbox; | 418 Sandbox sandbox; |
419 | 419 |
420 // By setting "quiet_ = true" we suppress messages for expected and benign | 420 // By setting "quiet_ = true" we suppress messages for expected and benign |
421 // failures (e.g. if the current kernel lacks support for BPF filters). | 421 // failures (e.g. if the current kernel lacks support for BPF filters). |
422 sandbox.quiet_ = true; | 422 sandbox.quiet_ = true; |
423 sandbox.set_proc_fd(proc_fd); | 423 sandbox.set_proc_fd(proc_fd); |
424 status_ = sandbox.KernelSupportSeccompBPF() | 424 status_ = sandbox.KernelSupportSeccompBPF() ? STATUS_AVAILABLE |
425 ? STATUS_AVAILABLE : STATUS_UNSUPPORTED; | 425 : STATUS_UNSUPPORTED; |
426 | 426 |
427 // As we are performing our tests from a child process, the run-time | 427 // As we are performing our tests from a child process, the run-time |
428 // environment that is visible to the sandbox is always guaranteed to be | 428 // environment that is visible to the sandbox is always guaranteed to be |
429 // single-threaded. Let's check here whether the caller is single- | 429 // single-threaded. Let's check here whether the caller is single- |
430 // threaded. Otherwise, we mark the sandbox as temporarily unavailable. | 430 // threaded. Otherwise, we mark the sandbox as temporarily unavailable. |
431 if (status_ == STATUS_AVAILABLE && !IsSingleThreaded(proc_fd)) { | 431 if (status_ == STATUS_AVAILABLE && !IsSingleThreaded(proc_fd)) { |
432 status_ = STATUS_UNAVAILABLE; | 432 status_ = STATUS_UNAVAILABLE; |
433 } | 433 } |
434 } | 434 } |
435 return status_; | 435 return status_; |
436 } | 436 } |
437 | 437 |
438 void Sandbox::set_proc_fd(int proc_fd) { | 438 void Sandbox::set_proc_fd(int proc_fd) { proc_fd_ = proc_fd; } |
439 proc_fd_ = proc_fd; | |
440 } | |
441 | 439 |
442 void Sandbox::StartSandbox() { | 440 void Sandbox::StartSandbox() { |
443 if (status_ == STATUS_UNSUPPORTED || status_ == STATUS_UNAVAILABLE) { | 441 if (status_ == STATUS_UNSUPPORTED || status_ == STATUS_UNAVAILABLE) { |
444 SANDBOX_DIE("Trying to start sandbox, even though it is known to be " | 442 SANDBOX_DIE( |
445 "unavailable"); | 443 "Trying to start sandbox, even though it is known to be " |
| 444 "unavailable"); |
446 } else if (sandbox_has_started_ || !conds_) { | 445 } else if (sandbox_has_started_ || !conds_) { |
447 SANDBOX_DIE("Cannot repeatedly start sandbox. Create a separate Sandbox " | 446 SANDBOX_DIE( |
448 "object instead."); | 447 "Cannot repeatedly start sandbox. Create a separate Sandbox " |
| 448 "object instead."); |
449 } | 449 } |
450 if (proc_fd_ < 0) { | 450 if (proc_fd_ < 0) { |
451 proc_fd_ = open("/proc", O_RDONLY|O_DIRECTORY); | 451 proc_fd_ = open("/proc", O_RDONLY | O_DIRECTORY); |
452 } | 452 } |
453 if (proc_fd_ < 0) { | 453 if (proc_fd_ < 0) { |
454 // For now, continue in degraded mode, if we can't access /proc. | 454 // For now, continue in degraded mode, if we can't access /proc. |
455 // In the future, we might want to tighten this requirement. | 455 // In the future, we might want to tighten this requirement. |
456 } | 456 } |
457 if (!IsSingleThreaded(proc_fd_)) { | 457 if (!IsSingleThreaded(proc_fd_)) { |
458 SANDBOX_DIE("Cannot start sandbox, if process is already multi-threaded"); | 458 SANDBOX_DIE("Cannot start sandbox, if process is already multi-threaded"); |
459 } | 459 } |
460 | 460 |
461 // We no longer need access to any files in /proc. We want to do this | 461 // We no longer need access to any files in /proc. We want to do this |
462 // before installing the filters, just in case that our policy denies | 462 // before installing the filters, just in case that our policy denies |
463 // close(). | 463 // close(). |
464 if (proc_fd_ >= 0) { | 464 if (proc_fd_ >= 0) { |
465 if (HANDLE_EINTR(close(proc_fd_))) { | 465 if (HANDLE_EINTR(close(proc_fd_))) { |
466 SANDBOX_DIE("Failed to close file descriptor for /proc"); | 466 SANDBOX_DIE("Failed to close file descriptor for /proc"); |
467 } | 467 } |
468 proc_fd_ = -1; | 468 proc_fd_ = -1; |
469 } | 469 } |
470 | 470 |
471 // Install the filters. | 471 // Install the filters. |
472 InstallFilter(); | 472 InstallFilter(); |
473 | 473 |
474 // We are now inside the sandbox. | 474 // We are now inside the sandbox. |
475 status_ = STATUS_ENABLED; | 475 status_ = STATUS_ENABLED; |
476 } | 476 } |
477 | 477 |
478 void Sandbox::PolicySanityChecks(SandboxBpfPolicy* policy) { | 478 void Sandbox::PolicySanityChecks(SandboxBpfPolicy* policy) { |
479 for (SyscallIterator iter(true); !iter.Done(); ) { | 479 for (SyscallIterator iter(true); !iter.Done();) { |
480 uint32_t sysnum = iter.Next(); | 480 uint32_t sysnum = iter.Next(); |
481 if (!IsDenied(policy->EvaluateSyscall(this, sysnum))) { | 481 if (!IsDenied(policy->EvaluateSyscall(this, sysnum))) { |
482 SANDBOX_DIE("Policies should deny system calls that are outside the " | 482 SANDBOX_DIE( |
483 "expected range (typically MIN_SYSCALL..MAX_SYSCALL)"); | 483 "Policies should deny system calls that are outside the " |
| 484 "expected range (typically MIN_SYSCALL..MAX_SYSCALL)"); |
484 } | 485 } |
485 } | 486 } |
486 return; | 487 return; |
487 } | 488 } |
488 | 489 |
489 // Deprecated API, supported with a wrapper to the new API. | 490 // Deprecated API, supported with a wrapper to the new API. |
490 void Sandbox::SetSandboxPolicyDeprecated(EvaluateSyscall syscall_evaluator, | 491 void Sandbox::SetSandboxPolicyDeprecated(EvaluateSyscall syscall_evaluator, |
491 void* aux) { | 492 void* aux) { |
492 if (sandbox_has_started_ || !conds_) { | 493 if (sandbox_has_started_ || !conds_) { |
493 SANDBOX_DIE("Cannot change policy after sandbox has started"); | 494 SANDBOX_DIE("Cannot change policy after sandbox has started"); |
(...skipping 16 matching lines...) |
510 // policies that are set with SetSandboxPolicy(). This means, as soon as | 511 // policies that are set with SetSandboxPolicy(). This means, as soon as |
511 // the sandbox is active, we shouldn't be relying on libraries that could | 512 // the sandbox is active, we shouldn't be relying on libraries that could |
512 // be making system calls. This, for example, means we should avoid | 513 // be making system calls. This, for example, means we should avoid |
513 // using the heap and we should avoid using STL functions. | 514 // using the heap and we should avoid using STL functions. |
514 // Temporarily copy the contents of the "program" vector into a | 515 // Temporarily copy the contents of the "program" vector into a |
515 // stack-allocated array; and then explicitly destroy that object. | 516 // stack-allocated array; and then explicitly destroy that object. |
516 // This makes sure we don't ex- or implicitly call new/delete after we | 517 // This makes sure we don't ex- or implicitly call new/delete after we |
517 // installed the BPF filter program in the kernel. Depending on the | 518 // installed the BPF filter program in the kernel. Depending on the |
518 // system memory allocator that is in effect, these operators can result | 519 // system memory allocator that is in effect, these operators can result |
519 // in system calls to things like munmap() or brk(). | 520 // in system calls to things like munmap() or brk(). |
520 Program *program = AssembleFilter(false /* force_verification */); | 521 Program* program = AssembleFilter(false /* force_verification */); |
521 | 522 |
522 struct sock_filter bpf[program->size()]; | 523 struct sock_filter bpf[program->size()]; |
523 const struct sock_fprog prog = { | 524 const struct sock_fprog prog = {static_cast<unsigned short>(program->size()), |
524 static_cast<unsigned short>(program->size()), bpf }; | 525 bpf}; |
525 memcpy(bpf, &(*program)[0], sizeof(bpf)); | 526 memcpy(bpf, &(*program)[0], sizeof(bpf)); |
526 delete program; | 527 delete program; |
527 | 528 |
528 // Make an attempt to release memory that is no longer needed here, rather | 529 // Make an attempt to release memory that is no longer needed here, rather |
529 // than in the destructor. Try to avoid as much as possible to presume of | 530 // than in the destructor. Try to avoid as much as possible to presume of |
530 // what will be possible to do in the new (sandboxed) execution environment. | 531 // what will be possible to do in the new (sandboxed) execution environment. |
531 delete conds_; | 532 delete conds_; |
532 conds_ = NULL; | 533 conds_ = NULL; |
533 policy_.reset(); | 534 policy_.reset(); |
534 | 535 |
535 // Install BPF filter program | 536 // Install BPF filter program |
536 if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { | 537 if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { |
537 SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to enable no-new-privs"); | 538 SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to enable no-new-privs"); |
538 } else { | 539 } else { |
539 if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) { | 540 if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) { |
540 SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to turn on BPF filters"); | 541 SANDBOX_DIE(quiet_ ? NULL : "Kernel refuses to turn on BPF filters"); |
541 } | 542 } |
542 } | 543 } |
543 | 544 |
544 sandbox_has_started_ = true; | 545 sandbox_has_started_ = true; |
545 | 546 |
546 return; | 547 return; |
547 } | 548 } |
548 | 549 |
549 Sandbox::Program *Sandbox::AssembleFilter(bool force_verification) { | 550 Sandbox::Program* Sandbox::AssembleFilter(bool force_verification) { |
550 #if !defined(NDEBUG) | 551 #if !defined(NDEBUG) |
551 force_verification = true; | 552 force_verification = true; |
552 #endif | 553 #endif |
553 | 554 |
554 // Verify that the user pushed a policy. | 555 // Verify that the user pushed a policy. |
555 DCHECK(policy_); | 556 DCHECK(policy_); |
556 | 557 |
557 // Assemble the BPF filter program. | 558 // Assemble the BPF filter program. |
558 CodeGen *gen = new CodeGen(); | 559 CodeGen* gen = new CodeGen(); |
559 if (!gen) { | 560 if (!gen) { |
560 SANDBOX_DIE("Out of memory"); | 561 SANDBOX_DIE("Out of memory"); |
561 } | 562 } |
562 | 563 |
563 // If the architecture doesn't match SECCOMP_ARCH, disallow the | 564 // If the architecture doesn't match SECCOMP_ARCH, disallow the |
564 // system call. | 565 // system call. |
565 Instruction *tail; | 566 Instruction* tail; |
566 Instruction *head = | 567 Instruction* head = gen->MakeInstruction( |
567 gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, SECCOMP_ARCH_IDX, | 568 BPF_LD + BPF_W + BPF_ABS, |
568 tail = | 569 SECCOMP_ARCH_IDX, |
569 gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, SECCOMP_ARCH, | 570 tail = gen->MakeInstruction( |
570 NULL, | 571 BPF_JMP + BPF_JEQ + BPF_K, |
571 gen->MakeInstruction(BPF_RET+BPF_K, | 572 SECCOMP_ARCH, |
572 Kill("Invalid audit architecture in BPF filter")))); | 573 NULL, |
| 574 gen->MakeInstruction( |
| 575 BPF_RET + BPF_K, |
| 576 Kill("Invalid audit architecture in BPF filter")))); |
573 | 577 |
574 bool has_unsafe_traps = false; | 578 bool has_unsafe_traps = false; |
575 { | 579 { |
576 // Evaluate all possible system calls and group their ErrorCodes into | 580 // Evaluate all possible system calls and group their ErrorCodes into |
577 // ranges of identical codes. | 581 // ranges of identical codes. |
578 Ranges ranges; | 582 Ranges ranges; |
579 FindRanges(&ranges); | 583 FindRanges(&ranges); |
580 | 584 |
581 // Compile the system call ranges to an optimized BPF jumptable | 585 // Compile the system call ranges to an optimized BPF jumptable |
582 Instruction *jumptable = | 586 Instruction* jumptable = |
583 AssembleJumpTable(gen, ranges.begin(), ranges.end()); | 587 AssembleJumpTable(gen, ranges.begin(), ranges.end()); |
584 | 588 |
585 // If there is at least one UnsafeTrap() in our program, the entire sandbox | 589 // If there is at least one UnsafeTrap() in our program, the entire sandbox |
586 // is unsafe. We need to modify the program so that all non- | 590 // is unsafe. We need to modify the program so that all non- |
587 // SECCOMP_RET_ALLOW ErrorCodes are handled in user-space. This will then | 591 // SECCOMP_RET_ALLOW ErrorCodes are handled in user-space. This will then |
588 // allow us to temporarily disable sandboxing rules inside of callbacks to | 592 // allow us to temporarily disable sandboxing rules inside of callbacks to |
589 // UnsafeTrap(). | 593 // UnsafeTrap(). |
590 gen->Traverse(jumptable, CheckForUnsafeErrorCodes, &has_unsafe_traps); | 594 gen->Traverse(jumptable, CheckForUnsafeErrorCodes, &has_unsafe_traps); |
591 | 595 |
592 // Grab the system call number, so that we can implement jump tables. | 596 // Grab the system call number, so that we can implement jump tables. |
593 Instruction *load_nr = | 597 Instruction* load_nr = |
594 gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, SECCOMP_NR_IDX); | 598 gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, SECCOMP_NR_IDX); |
595 | 599 |
596 // If our BPF program has unsafe jumps, enable support for them. This | 600 // If our BPF program has unsafe jumps, enable support for them. This |
597 // test happens very early in the BPF filter program. Even before we | 601 // test happens very early in the BPF filter program. Even before we |
598 // consider looking at system call numbers. | 602 // consider looking at system call numbers. |
599 // As support for unsafe jumps essentially defeats all the security | 603 // As support for unsafe jumps essentially defeats all the security |
600 // measures that the sandbox provides, we print a big warning message -- | 604 // measures that the sandbox provides, we print a big warning message -- |
601 // and of course, we make sure to only ever enable this feature if it | 605 // and of course, we make sure to only ever enable this feature if it |
602 // is actually requested by the sandbox policy. | 606 // is actually requested by the sandbox policy. |
603 if (has_unsafe_traps) { | 607 if (has_unsafe_traps) { |
604 if (SandboxSyscall(-1) == -1 && errno == ENOSYS) { | 608 if (SandboxSyscall(-1) == -1 && errno == ENOSYS) { |
605 SANDBOX_DIE("Support for UnsafeTrap() has not yet been ported to this " | 609 SANDBOX_DIE( |
606 "architecture"); | 610 "Support for UnsafeTrap() has not yet been ported to this " |
| 611 "architecture"); |
607 } | 612 } |
608 | 613 |
609 if (!policy_->EvaluateSyscall(this, __NR_rt_sigprocmask). | 614 if (!policy_->EvaluateSyscall(this, __NR_rt_sigprocmask) |
610 Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) || | 615 .Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) || |
611 !policy_->EvaluateSyscall(this, __NR_rt_sigreturn). | 616 !policy_->EvaluateSyscall(this, __NR_rt_sigreturn) |
612 Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) | 617 .Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) |
613 #if defined(__NR_sigprocmask) | 618 #if defined(__NR_sigprocmask) |
614 || !policy_->EvaluateSyscall(this, __NR_sigprocmask). | 619 || |
615 Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) | 620 !policy_->EvaluateSyscall(this, __NR_sigprocmask) |
| 621 .Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) |
616 #endif | 622 #endif |
617 #if defined(__NR_sigreturn) | 623 #if defined(__NR_sigreturn) |
618 || !policy_->EvaluateSyscall(this, __NR_sigreturn). | 624 || |
619 Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) | 625 !policy_->EvaluateSyscall(this, __NR_sigreturn) |
| 626 .Equals(ErrorCode(ErrorCode::ERR_ALLOWED)) |
620 #endif | 627 #endif |
621 ) { | 628 ) { |
622 SANDBOX_DIE("Invalid seccomp policy; if using UnsafeTrap(), you must " | 629 SANDBOX_DIE( |
623 "unconditionally allow sigreturn() and sigprocmask()"); | 630 "Invalid seccomp policy; if using UnsafeTrap(), you must " |
| 631 "unconditionally allow sigreturn() and sigprocmask()"); |
624 } | 632 } |
625 | 633 |
626 if (!Trap::EnableUnsafeTrapsInSigSysHandler()) { | 634 if (!Trap::EnableUnsafeTrapsInSigSysHandler()) { |
627 // We should never be able to get here, as UnsafeTrap() should never | 635 // We should never be able to get here, as UnsafeTrap() should never |
628 // actually return a valid ErrorCode object unless the user set the | 636 // actually return a valid ErrorCode object unless the user set the |
629 // CHROME_SANDBOX_DEBUGGING environment variable; and therefore, | 637 // CHROME_SANDBOX_DEBUGGING environment variable; and therefore, |
630 // "has_unsafe_traps" would always be false. But better double-check | 638 // "has_unsafe_traps" would always be false. But better double-check |
631 // than enabling dangerous code. | 639 // than enabling dangerous code. |
632 SANDBOX_DIE("We'd rather die than enable unsafe traps"); | 640 SANDBOX_DIE("We'd rather die than enable unsafe traps"); |
633 } | 641 } |
634 gen->Traverse(jumptable, RedirectToUserspace, this); | 642 gen->Traverse(jumptable, RedirectToUserspace, this); |
635 | 643 |
636 // Allow system calls, if they originate from our magic return address | 644 // Allow system calls, if they originate from our magic return address |
637 // (which we can query by calling SandboxSyscall(-1)). | 645 // (which we can query by calling SandboxSyscall(-1)). |
638 uintptr_t syscall_entry_point = | 646 uintptr_t syscall_entry_point = |
639 static_cast<uintptr_t>(SandboxSyscall(-1)); | 647 static_cast<uintptr_t>(SandboxSyscall(-1)); |
640 uint32_t low = static_cast<uint32_t>(syscall_entry_point); | 648 uint32_t low = static_cast<uint32_t>(syscall_entry_point); |
641 #if __SIZEOF_POINTER__ > 4 | 649 #if __SIZEOF_POINTER__ > 4 |
642 uint32_t hi = static_cast<uint32_t>(syscall_entry_point >> 32); | 650 uint32_t hi = static_cast<uint32_t>(syscall_entry_point >> 32); |
643 #endif | 651 #endif |
644 | 652 |
645 // BPF cannot do native 64bit comparisons. On 64bit architectures, we | 653 // BPF cannot do native 64bit comparisons. On 64bit architectures, we |
646 // have to compare both 32bit halves of the instruction pointer. If they | 654 // have to compare both 32bit halves of the instruction pointer. If they |
647 // match what we expect, we return ERR_ALLOWED. If either or both don't | 655 // match what we expect, we return ERR_ALLOWED. If either or both don't |
648 // match, we continue evaluating the rest of the sandbox policy. | 656 // match, we continue evaluating the rest of the sandbox policy. |
649 Instruction *escape_hatch = | 657 Instruction* escape_hatch = gen->MakeInstruction( |
650 gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, SECCOMP_IP_LSB_IDX, | 658 BPF_LD + BPF_W + BPF_ABS, |
651 gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, low, | 659 SECCOMP_IP_LSB_IDX, |
| 660 gen->MakeInstruction( |
| 661 BPF_JMP + BPF_JEQ + BPF_K, |
| 662 low, |
652 #if __SIZEOF_POINTER__ > 4 | 663 #if __SIZEOF_POINTER__ > 4 |
653 gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, SECCOMP_IP_MSB_IDX, | 664 gen->MakeInstruction( |
654 gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, hi, | 665 BPF_LD + BPF_W + BPF_ABS, |
| 666 SECCOMP_IP_MSB_IDX, |
| 667 gen->MakeInstruction( |
| 668 BPF_JMP + BPF_JEQ + BPF_K, |
| 669 hi, |
655 #endif | 670 #endif |
656 gen->MakeInstruction(BPF_RET+BPF_K, ErrorCode(ErrorCode::ERR_ALLOWED)), | 671 gen->MakeInstruction(BPF_RET + BPF_K, |
| 672 ErrorCode(ErrorCode::ERR_ALLOWED)), |
657 #if __SIZEOF_POINTER__ > 4 | 673 #if __SIZEOF_POINTER__ > 4 |
658 load_nr)), | 674 load_nr)), |
659 #endif | 675 #endif |
660 load_nr)); | 676 load_nr)); |
661 gen->JoinInstructions(tail, escape_hatch); | 677 gen->JoinInstructions(tail, escape_hatch); |
662 } else { | 678 } else { |
663 gen->JoinInstructions(tail, load_nr); | 679 gen->JoinInstructions(tail, load_nr); |
664 } | 680 } |
665 tail = load_nr; | 681 tail = load_nr; |
666 | 682 |
667 // On Intel architectures, verify that system call numbers are in the | 683 // On Intel architectures, verify that system call numbers are in the |
668 // expected number range. The older i386 and x86-64 APIs clear bit 30 | 684 // expected number range. The older i386 and x86-64 APIs clear bit 30 |
669 // on all system calls. The newer x32 API always sets bit 30. | 685 // on all system calls. The newer x32 API always sets bit 30. |
670 #if defined(__i386__) || defined(__x86_64__) | 686 #if defined(__i386__) || defined(__x86_64__) |
671 Instruction *invalidX32 = | 687 Instruction* invalidX32 = gen->MakeInstruction( |
672 gen->MakeInstruction(BPF_RET+BPF_K, | 688 BPF_RET + BPF_K, Kill("Illegal mixing of system call ABIs").err_); |
673 Kill("Illegal mixing of system call ABIs").err_); | 689 Instruction* checkX32 = |
674 Instruction *checkX32 = | |
675 #if defined(__x86_64__) && defined(__ILP32__) | 690 #if defined(__x86_64__) && defined(__ILP32__) |
676 gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K, 0x40000000, 0, invalidX32); | 691 gen->MakeInstruction( |
| 692 BPF_JMP + BPF_JSET + BPF_K, 0x40000000, 0, invalidX32); |
677 #else | 693 #else |
678 gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K, 0x40000000, invalidX32, 0); | 694 gen->MakeInstruction( |
| 695 BPF_JMP + BPF_JSET + BPF_K, 0x40000000, invalidX32, 0); |
679 #endif | 696 #endif |
680 gen->JoinInstructions(tail, checkX32); | 697 gen->JoinInstructions(tail, checkX32); |
681 tail = checkX32; | 698 tail = checkX32; |
682 #endif | 699 #endif |
683 | 700 |
684 // Append jump table to our pre-amble | 701 // Append jump table to our pre-amble |
685 gen->JoinInstructions(tail, jumptable); | 702 gen->JoinInstructions(tail, jumptable); |
686 } | 703 } |
687 | 704 |
688 // Turn the DAG into a vector of instructions. | 705 // Turn the DAG into a vector of instructions. |
689 Program *program = new Program(); | 706 Program* program = new Program(); |
690 gen->Compile(head, program); | 707 gen->Compile(head, program); |
691 delete gen; | 708 delete gen; |
692 | 709 |
693 // Make sure compilation resulted in BPF program that executes | 710 // Make sure compilation resulted in BPF program that executes |
694 // correctly. Otherwise, there is an internal error in our BPF compiler. | 711 // correctly. Otherwise, there is an internal error in our BPF compiler. |
695 // There is really nothing the caller can do until the bug is fixed. | 712 // There is really nothing the caller can do until the bug is fixed. |
696 if (force_verification) { | 713 if (force_verification) { |
697 // Verification is expensive. We only perform this step, if we are | 714 // Verification is expensive. We only perform this step, if we are |
698 // compiled in debug mode, or if the caller explicitly requested | 715 // compiled in debug mode, or if the caller explicitly requested |
699 // verification. | 716 // verification. |
700 VerifyProgram(*program, has_unsafe_traps); | 717 VerifyProgram(*program, has_unsafe_traps); |
701 } | 718 } |
702 | 719 |
703 return program; | 720 return program; |
704 } | 721 } |
705 | 722 |
706 void Sandbox::VerifyProgram(const Program& program, bool has_unsafe_traps) { | 723 void Sandbox::VerifyProgram(const Program& program, bool has_unsafe_traps) { |
707 // If we previously rewrote the BPF program so that it calls user-space | 724 // If we previously rewrote the BPF program so that it calls user-space |
708 // whenever we return an "errno" value from the filter, then we have to | 725 // whenever we return an "errno" value from the filter, then we have to |
709 // wrap our system call evaluator to perform the same operation. Otherwise, | 726 // wrap our system call evaluator to perform the same operation. Otherwise, |
710 // the verifier would also report a mismatch in return codes. | 727 // the verifier would also report a mismatch in return codes. |
711 scoped_ptr<const RedirectToUserSpacePolicyWrapper> redirected_policy( | 728 scoped_ptr<const RedirectToUserSpacePolicyWrapper> redirected_policy( |
712 new RedirectToUserSpacePolicyWrapper(policy_.get())); | 729 new RedirectToUserSpacePolicyWrapper(policy_.get())); |
713 | 730 |
714 const char* err = NULL; | 731 const char* err = NULL; |
715 if (!Verifier::VerifyBPF( | 732 if (!Verifier::VerifyBPF(this, |
716 this, | 733 program, |
717 program, | 734 has_unsafe_traps ? *redirected_policy : *policy_, |
718 has_unsafe_traps ? *redirected_policy : *policy_, | 735 &err)) { |
719 &err)) { | |
720 CodeGen::PrintProgram(program); | 736 CodeGen::PrintProgram(program); |
721 SANDBOX_DIE(err); | 737 SANDBOX_DIE(err); |
722 } | 738 } |
723 } | 739 } |
724 | 740 |
725 void Sandbox::FindRanges(Ranges *ranges) { | 741 void Sandbox::FindRanges(Ranges* ranges) { |
726 // Please note that "struct seccomp_data" defines system calls as a signed | 742 // Please note that "struct seccomp_data" defines system calls as a signed |
727 // int32_t, but BPF instructions always operate on unsigned quantities. We | 743 // int32_t, but BPF instructions always operate on unsigned quantities. We |
728 // deal with this disparity by enumerating from MIN_SYSCALL to MAX_SYSCALL, | 744 // deal with this disparity by enumerating from MIN_SYSCALL to MAX_SYSCALL, |
729 // and then verifying that the rest of the number range (both positive and | 745 // and then verifying that the rest of the number range (both positive and |
730 // negative) all return the same ErrorCode. | 746 // negative) all return the same ErrorCode. |
731 uint32_t old_sysnum = 0; | 747 uint32_t old_sysnum = 0; |
732 ErrorCode old_err = policy_->EvaluateSyscall(this, old_sysnum); | 748 ErrorCode old_err = policy_->EvaluateSyscall(this, old_sysnum); |
733 ErrorCode invalid_err = policy_->EvaluateSyscall(this, MIN_SYSCALL - 1); | 749 ErrorCode invalid_err = policy_->EvaluateSyscall(this, MIN_SYSCALL - 1); |
734 | 750 |
735 for (SyscallIterator iter(false); !iter.Done(); ) { | 751 for (SyscallIterator iter(false); !iter.Done();) { |
736 uint32_t sysnum = iter.Next(); | 752 uint32_t sysnum = iter.Next(); |
737 ErrorCode err = policy_->EvaluateSyscall(this, static_cast<int>(sysnum)); | 753 ErrorCode err = policy_->EvaluateSyscall(this, static_cast<int>(sysnum)); |
738 if (!iter.IsValid(sysnum) && !invalid_err.Equals(err)) { | 754 if (!iter.IsValid(sysnum) && !invalid_err.Equals(err)) { |
739 // A proper sandbox policy should always treat system calls outside of | 755 // A proper sandbox policy should always treat system calls outside of |
740 // the range MIN_SYSCALL..MAX_SYSCALL (i.e. anything that returns | 756 // the range MIN_SYSCALL..MAX_SYSCALL (i.e. anything that returns |
741 // "false" for SyscallIterator::IsValid()) identically. Typically, all | 757 // "false" for SyscallIterator::IsValid()) identically. Typically, all |
742 // of these system calls would be denied with the same ErrorCode. | 758 // of these system calls would be denied with the same ErrorCode. |
743 SANDBOX_DIE("Invalid seccomp policy"); | 759 SANDBOX_DIE("Invalid seccomp policy"); |
744 } | 760 } |
745 if (!err.Equals(old_err) || iter.Done()) { | 761 if (!err.Equals(old_err) || iter.Done()) { |
746 ranges->push_back(Range(old_sysnum, sysnum - 1, old_err)); | 762 ranges->push_back(Range(old_sysnum, sysnum - 1, old_err)); |
747 old_sysnum = sysnum; | 763 old_sysnum = sysnum; |
748 old_err = err; | 764 old_err = err; |
749 } | 765 } |
750 } | 766 } |
751 } | 767 } |
752 | 768 |
753 Instruction *Sandbox::AssembleJumpTable(CodeGen *gen, | 769 Instruction* Sandbox::AssembleJumpTable(CodeGen* gen, |
754 Ranges::const_iterator start, | 770 Ranges::const_iterator start, |
755 Ranges::const_iterator stop) { | 771 Ranges::const_iterator stop) { |
756 // We convert the list of system call ranges into jump table that performs | 772 // We convert the list of system call ranges into jump table that performs |
757 // a binary search over the ranges. | 773 // a binary search over the ranges. |
758 // As a sanity check, we need to have at least one distinct range for us | 774 // As a sanity check, we need to have at least one distinct range for us |
759 // to be able to build a jump table. | 775 // to be able to build a jump table. |
760 if (stop - start <= 0) { | 776 if (stop - start <= 0) { |
761 SANDBOX_DIE("Invalid set of system call ranges"); | 777 SANDBOX_DIE("Invalid set of system call ranges"); |
762 } else if (stop - start == 1) { | 778 } else if (stop - start == 1) { |
763 // If we have narrowed things down to a single range object, we can | 779 // If we have narrowed things down to a single range object, we can |
764 // return from the BPF filter program. | 780 // return from the BPF filter program. |
765 return RetExpression(gen, start->err); | 781 return RetExpression(gen, start->err); |
766 } | 782 } |
767 | 783 |
768 // Pick the range object that is located at the mid point of our list. | 784 // Pick the range object that is located at the mid point of our list. |
769 // We compare our system call number against the lowest valid system call | 785 // We compare our system call number against the lowest valid system call |
770 // number in this range object. If our number is lower, it is outside of | 786 // number in this range object. If our number is lower, it is outside of |
771 // this range object. If it is greater or equal, it might be inside. | 787 // this range object. If it is greater or equal, it might be inside. |
772 Ranges::const_iterator mid = start + (stop - start)/2; | 788 Ranges::const_iterator mid = start + (stop - start) / 2; |
773 | 789 |
774 // Sub-divide the list of ranges and continue recursively. | 790 // Sub-divide the list of ranges and continue recursively. |
775 Instruction *jf = AssembleJumpTable(gen, start, mid); | 791 Instruction* jf = AssembleJumpTable(gen, start, mid); |
776 Instruction *jt = AssembleJumpTable(gen, mid, stop); | 792 Instruction* jt = AssembleJumpTable(gen, mid, stop); |
777 return gen->MakeInstruction(BPF_JMP+BPF_JGE+BPF_K, mid->from, jt, jf); | 793 return gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K, mid->from, jt, jf); |
778 } | 794 } |
779 | 795 |
780 Instruction *Sandbox::RetExpression(CodeGen *gen, const ErrorCode& err) { | 796 Instruction* Sandbox::RetExpression(CodeGen* gen, const ErrorCode& err) { |
781 if (err.error_type_ == ErrorCode::ET_COND) { | 797 if (err.error_type_ == ErrorCode::ET_COND) { |
782 return CondExpression(gen, err); | 798 return CondExpression(gen, err); |
783 } else { | 799 } else { |
784 return gen->MakeInstruction(BPF_RET+BPF_K, err); | 800 return gen->MakeInstruction(BPF_RET + BPF_K, err); |
785 } | 801 } |
786 } | 802 } |
787 | 803 |
788 Instruction *Sandbox::CondExpression(CodeGen *gen, const ErrorCode& cond) { | 804 Instruction* Sandbox::CondExpression(CodeGen* gen, const ErrorCode& cond) { |
789 // We can only inspect the six system call arguments that are passed in | 805 // We can only inspect the six system call arguments that are passed in |
790 // CPU registers. | 806 // CPU registers. |
791 if (cond.argno_ < 0 || cond.argno_ >= 6) { | 807 if (cond.argno_ < 0 || cond.argno_ >= 6) { |
792 SANDBOX_DIE("Internal compiler error; invalid argument number " | 808 SANDBOX_DIE( |
793 "encountered"); | 809 "Internal compiler error; invalid argument number " |
| 810 "encountered"); |
794 } | 811 } |
795 | 812 |
796 // BPF programs operate on 32bit entities. Load both halves of the 64bit | 813 // BPF programs operate on 32bit entities. Load both halves of the 64bit |
797 // system call argument and then generate suitable conditional statements. | 814 // system call argument and then generate suitable conditional statements. |
798 Instruction *msb_head = | 815 Instruction* msb_head = gen->MakeInstruction( |
799 gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, | 816 BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_MSB_IDX(cond.argno_)); |
800 SECCOMP_ARG_MSB_IDX(cond.argno_)); | 817 Instruction* msb_tail = msb_head; |
801 Instruction *msb_tail = msb_head; | 818 Instruction* lsb_head = gen->MakeInstruction( |
802 Instruction *lsb_head = | 819 BPF_LD + BPF_W + BPF_ABS, SECCOMP_ARG_LSB_IDX(cond.argno_)); |
803 gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, | 820 Instruction* lsb_tail = lsb_head; |
804 SECCOMP_ARG_LSB_IDX(cond.argno_)); | |
805 Instruction *lsb_tail = lsb_head; | |
806 | 821 |
807 // Emit a suitable comparison statement. | 822 // Emit a suitable comparison statement. |
808 switch (cond.op_) { | 823 switch (cond.op_) { |
809 case ErrorCode::OP_EQUAL: | 824 case ErrorCode::OP_EQUAL: |
810 // Compare the least significant bits for equality | 825 // Compare the least significant bits for equality |
811 lsb_tail = gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, | 826 lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, |
812 static_cast<uint32_t>(cond.value_), | 827 static_cast<uint32_t>(cond.value_), |
813 RetExpression(gen, *cond.passed_), | 828 RetExpression(gen, *cond.passed_), |
814 RetExpression(gen, *cond.failed_)); | 829 RetExpression(gen, *cond.failed_)); |
815 gen->JoinInstructions(lsb_head, lsb_tail); | 830 gen->JoinInstructions(lsb_head, lsb_tail); |
816 | 831 |
817 // If we are looking at a 64bit argument, we need to also compare the | 832 // If we are looking at a 64bit argument, we need to also compare the |
818 // most significant bits. | 833 // most significant bits. |
819 if (cond.width_ == ErrorCode::TP_64BIT) { | 834 if (cond.width_ == ErrorCode::TP_64BIT) { |
820 msb_tail = gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, | 835 msb_tail = |
821 static_cast<uint32_t>(cond.value_ >> 32), | 836 gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, |
822 lsb_head, | 837 static_cast<uint32_t>(cond.value_ >> 32), |
823 RetExpression(gen, *cond.failed_)); | 838 lsb_head, |
824 gen->JoinInstructions(msb_head, msb_tail); | 839 RetExpression(gen, *cond.failed_)); |
825 } | 840 gen->JoinInstructions(msb_head, msb_tail); |
826 break; | 841 } |
827 case ErrorCode::OP_HAS_ALL_BITS: | 842 break; |
828 // Check the bits in the LSB half of the system call argument. Our | 843 case ErrorCode::OP_HAS_ALL_BITS: |
829 // OP_HAS_ALL_BITS operator passes iff all of the bits are set. This is | 844 // Check the bits in the LSB half of the system call argument. Our |
830 // different from the kernel's BPF_JSET operation, which passes if any of | 845 // OP_HAS_ALL_BITS operator passes iff all of the bits are set. This is |
831 // the bits are set. | 846 // different from the kernel's BPF_JSET operation, which passes if any of |
832 // Of course, if there is only a single set bit (or none at all), then | 847 // the bits are set. |
833 // things get easier. | 848 // Of course, if there is only a single set bit (or none at all), then |
834 { | 849 // things get easier. |
835 uint32_t lsb_bits = static_cast<uint32_t>(cond.value_); | 850 { |
836 int lsb_bit_count = popcount(lsb_bits); | 851 uint32_t lsb_bits = static_cast<uint32_t>(cond.value_); |
837 if (lsb_bit_count == 0) { | 852 int lsb_bit_count = popcount(lsb_bits); |
838 // No bits are set in the LSB half. The test will always pass. | 853 if (lsb_bit_count == 0) { |
839 lsb_head = RetExpression(gen, *cond.passed_); | 854 // No bits are set in the LSB half. The test will always pass. |
840 lsb_tail = NULL; | 855 lsb_head = RetExpression(gen, *cond.passed_); |
841 } else if (lsb_bit_count == 1) { | 856 lsb_tail = NULL; |
842 // Exactly one bit is set in the LSB half. We can use the BPF_JSET | 857 } else if (lsb_bit_count == 1) { |
843 // operator. | 858 // Exactly one bit is set in the LSB half. We can use the BPF_JSET |
844 lsb_tail = gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K, | 859 // operator. |
845 lsb_bits, | 860 lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K, |
846 RetExpression(gen, *cond.passed_), | |
847 RetExpression(gen, *cond.failed_)); | |
848 gen->JoinInstructions(lsb_head, lsb_tail); | |
849 } else { | |
850 // More than one bit is set in the LSB half. We need to combine | |
851 // BPF_AND and BPF_JEQ to test whether all of these bits are in fact | |
852 // set in the system call argument. | |
853 gen->JoinInstructions(lsb_head, | |
854 gen->MakeInstruction(BPF_ALU+BPF_AND+BPF_K, | |
855 lsb_bits, | |
856 lsb_tail = gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, | |
857 lsb_bits, | 861 lsb_bits, |
858 RetExpression(gen, *cond.passed_), | 862 RetExpression(gen, *cond.passed_), |
859 RetExpression(gen, *cond.failed_)))); | 863 RetExpression(gen, *cond.failed_)); |
| 864 gen->JoinInstructions(lsb_head, lsb_tail); |
| 865 } else { |
| 866 // More than one bit is set in the LSB half. We need to combine |
| 867 // BPF_AND and BPF_JEQ to test whether all of these bits are in fact |
| 868 // set in the system call argument. |
| 869 gen->JoinInstructions( |
| 870 lsb_head, |
| 871 gen->MakeInstruction(BPF_ALU + BPF_AND + BPF_K, |
| 872 lsb_bits, |
| 873 lsb_tail = gen->MakeInstruction( |
| 874 BPF_JMP + BPF_JEQ + BPF_K, |
| 875 lsb_bits, |
| 876 RetExpression(gen, *cond.passed_), |
| 877 RetExpression(gen, *cond.failed_)))); |
| 878 } |
860 } | 879 } |
861 } | |
862 | 880 |
863 // If we are looking at a 64bit argument, we need to also check the bits | 881 // If we are looking at a 64bit argument, we need to also check the bits |
864 // in the MSB half of the system call argument. | 882 // in the MSB half of the system call argument. |
865 if (cond.width_ == ErrorCode::TP_64BIT) { | 883 if (cond.width_ == ErrorCode::TP_64BIT) { |
866 uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32); | 884 uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32); |
867 int msb_bit_count = popcount(msb_bits); | 885 int msb_bit_count = popcount(msb_bits); |
868 if (msb_bit_count == 0) { | 886 if (msb_bit_count == 0) { |
869 // No bits are set in the MSB half. The test will always pass. | 887 // No bits are set in the MSB half. The test will always pass. |
870 msb_head = lsb_head; | 888 msb_head = lsb_head; |
871 } else if (msb_bit_count == 1) { | 889 } else if (msb_bit_count == 1) { |
872 // Exactly one bit is set in the MSB half. We can use the BPF_JSET | 890 // Exactly one bit is set in the MSB half. We can use the BPF_JSET |
873 // operator. | 891 // operator. |
874 msb_tail = gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K, | 892 msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K, |
875 msb_bits, | 893 msb_bits, |
876 lsb_head, | 894 lsb_head, |
877 RetExpression(gen, *cond.failed_)); | 895 RetExpression(gen, *cond.failed_)); |
878 gen->JoinInstructions(msb_head, msb_tail); | 896 gen->JoinInstructions(msb_head, msb_tail); |
879 } else { | 897 } else { |
880 // More than one bit is set in the MSB half. We need to combine | 898 // More than one bit is set in the MSB half. We need to combine |
881 // BPF_AND and BPF_JEQ to test whether all of these bits are in fact | 899 // BPF_AND and BPF_JEQ to test whether all of these bits are in fact |
882 // set in the system call argument. | 900 // set in the system call argument. |
883 gen->JoinInstructions(msb_head, | 901 gen->JoinInstructions( |
884 gen->MakeInstruction(BPF_ALU+BPF_AND+BPF_K, | 902 msb_head, |
885 msb_bits, | 903 gen->MakeInstruction( |
886 gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, | 904 BPF_ALU + BPF_AND + BPF_K, |
887 msb_bits, | 905 msb_bits, |
888 lsb_head, | 906 gen->MakeInstruction(BPF_JMP + BPF_JEQ + BPF_K, |
889 RetExpression(gen, *cond.failed_)))); | 907 msb_bits, |
| 908 lsb_head, |
| 909 RetExpression(gen, *cond.failed_)))); |
| 910 } |
890 } | 911 } |
891 } | 912 break; |
892 break; | 913 case ErrorCode::OP_HAS_ANY_BITS: |
893 case ErrorCode::OP_HAS_ANY_BITS: | 914 // Check the bits in the LSB half of the system call argument. Our |
894 // Check the bits in the LSB half of the system call argument. Our | 915 // OP_HAS_ANY_BITS operator passes iff any of the bits are set. This maps |
895 // OP_HAS_ANY_BITS operator passes iff any of the bits are set. This maps | 916 // nicely to the kernel's BPF_JSET operation. |
896 // nicely to the kernel's BPF_JSET operation. | 917 { |
897 { | 918 uint32_t lsb_bits = static_cast<uint32_t>(cond.value_); |
898 uint32_t lsb_bits = static_cast<uint32_t>(cond.value_); | 919 if (!lsb_bits) { |
899 if (!lsb_bits) { | 920 // No bits are set in the LSB half. The test will always fail. |
900 // No bits are set in the LSB half. The test will always fail. | 921 lsb_head = RetExpression(gen, *cond.failed_); |
901 lsb_head = RetExpression(gen, *cond.failed_); | 922 lsb_tail = NULL; |
902 lsb_tail = NULL; | 923 } else { |
903 } else { | 924 lsb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K, |
904 lsb_tail = gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K, | 925 lsb_bits, |
905 lsb_bits, | 926 RetExpression(gen, *cond.passed_), |
906 RetExpression(gen, *cond.passed_), | 927 RetExpression(gen, *cond.failed_)); |
907 RetExpression(gen, *cond.failed_)); | 928 gen->JoinInstructions(lsb_head, lsb_tail); |
908 gen->JoinInstructions(lsb_head, lsb_tail); | 929 } |
909 } | 930 } |
910 } | |
911 | 931 |
912 // If we are looking at a 64bit argument, we need to also check the bits | 932 // If we are looking at a 64bit argument, we need to also check the bits |
913 // in the MSB half of the system call argument. | 933 // in the MSB half of the system call argument. |
914 if (cond.width_ == ErrorCode::TP_64BIT) { | 934 if (cond.width_ == ErrorCode::TP_64BIT) { |
915 uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32); | 935 uint32_t msb_bits = static_cast<uint32_t>(cond.value_ >> 32); |
916 if (!msb_bits) { | 936 if (!msb_bits) { |
917 // No bits are set in the MSB half. The test will always fail. | 937 // No bits are set in the MSB half. The test will always fail. |
918 msb_head = lsb_head; | 938 msb_head = lsb_head; |
919 } else { | 939 } else { |
920 msb_tail = gen->MakeInstruction(BPF_JMP+BPF_JSET+BPF_K, | 940 msb_tail = gen->MakeInstruction(BPF_JMP + BPF_JSET + BPF_K, |
921 msb_bits, | 941 msb_bits, |
922 RetExpression(gen, *cond.passed_), | 942 RetExpression(gen, *cond.passed_), |
923 lsb_head); | 943 lsb_head); |
924 gen->JoinInstructions(msb_head, msb_tail); | 944 gen->JoinInstructions(msb_head, msb_tail); |
| 945 } |
925 } | 946 } |
926 } | 947 break; |
927 break; | 948 default: |
928 default: | 949 // TODO(markus): Need to add support for OP_GREATER |
929 // TODO(markus): Need to add support for OP_GREATER | 950 SANDBOX_DIE("Not implemented"); |
930 SANDBOX_DIE("Not implemented"); | 951 break; |
931 break; | |
932 } | 952 } |
933 | 953 |
934 // Ensure that we never pass a 64bit value when we only expect a 32bit | 954 // Ensure that we never pass a 64bit value when we only expect a 32bit |
935 // value. This is somewhat complicated by the fact that on 64bit systems, | 955 // value. This is somewhat complicated by the fact that on 64bit systems, |
936 // callers could legitimately pass in a non-zero value in the MSB, iff the | 956 // callers could legitimately pass in a non-zero value in the MSB, iff the |
937 // LSB has been sign-extended into the MSB. | 957 // LSB has been sign-extended into the MSB. |
938 if (cond.width_ == ErrorCode::TP_32BIT) { | 958 if (cond.width_ == ErrorCode::TP_32BIT) { |
939 if (cond.value_ >> 32) { | 959 if (cond.value_ >> 32) { |
940 SANDBOX_DIE("Invalid comparison of a 32bit system call argument " | 960 SANDBOX_DIE( |
941 "against a 64bit constant; this test is always false."); | 961 "Invalid comparison of a 32bit system call argument " |
| 962 "against a 64bit constant; this test is always false."); |
942 } | 963 } |
943 | 964 |
944 Instruction *invalid_64bit = RetExpression(gen, Unexpected64bitArgument()); | 965 Instruction* invalid_64bit = RetExpression(gen, Unexpected64bitArgument()); |
945 #if __SIZEOF_POINTER__ > 4 | 966 #if __SIZEOF_POINTER__ > 4 |
946 invalid_64bit = | 967 invalid_64bit = gen->MakeInstruction( |
947 gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, 0xFFFFFFFF, | 968 BPF_JMP + BPF_JEQ + BPF_K, |
948 gen->MakeInstruction(BPF_LD+BPF_W+BPF_ABS, | 969 0xFFFFFFFF, |
949 SECCOMP_ARG_LSB_IDX(cond.argno_), | 970 gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS, |
950 gen->MakeInstruction(BPF_JMP+BPF_JGE+BPF_K, 0x80000000, | 971 SECCOMP_ARG_LSB_IDX(cond.argno_), |
951 lsb_head, | 972 gen->MakeInstruction(BPF_JMP + BPF_JGE + BPF_K, |
952 invalid_64bit)), | 973 0x80000000, |
953 invalid_64bit); | 974 lsb_head, |
954 #endif | 975 invalid_64bit)), |
| 976 invalid_64bit); |
| 977 #endif |
955 gen->JoinInstructions( | 978 gen->JoinInstructions( |
956 msb_tail, | 979 msb_tail, |
957 gen->MakeInstruction(BPF_JMP+BPF_JEQ+BPF_K, 0, | 980 gen->MakeInstruction( |
958 lsb_head, | 981 BPF_JMP + BPF_JEQ + BPF_K, 0, lsb_head, invalid_64bit)); |
959 invalid_64bit)); | |
960 } | 982 } |
961 | 983 |
962 return msb_head; | 984 return msb_head; |
963 } | 985 } |
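// To make the control flow above concrete, here is a hand-written sketch of
// the fragment CondExpression() would produce for a 64-bit OP_EQUAL test such
// as "arg0 == 0x100000002", with "allow" as the passed result and EPERM as
// the failed result.  It assumes a little-endian seccomp_data layout and the
// classic-BPF helpers from <linux/filter.h> and <stddef.h>; the duplicated
// failure return would normally be shared by the code generator, and the
// surrounding architecture and syscall-number checks are omitted, so treat it
// as an approximation rather than literal output.
static const struct sock_filter kArg0Equals64BitSketch[] = {
    // 0: A = high word of arg0 (offset valid for little-endian layouts).
    BPF_STMT(BPF_LD + BPF_W + BPF_ABS, offsetof(struct seccomp_data, args[0]) + 4),
    // 1: high word == 0x1?  Fall through if so, else jump to the failure return.
    BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x1, 0, 3),
    // 2: A = low word of arg0.
    BPF_STMT(BPF_LD + BPF_W + BPF_ABS, offsetof(struct seccomp_data, args[0])),
    // 3: low word == 0x2?  Fall through if so, else jump to the failure return.
    BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 0x2, 0, 1),
    // 4: passed.
    BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW),
    // 5: failed.
    BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | EPERM),
};
// For OP_HAS_ALL_BITS with more than one bit set, the equality tests at 1 and
// 3 would instead be preceded by BPF_ALU+BPF_AND+BPF_K with the same mask, so
// that the comparison only succeeds when every requested bit is present.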
964 | 986 |
965 ErrorCode Sandbox::Unexpected64bitArgument() { | 987 ErrorCode Sandbox::Unexpected64bitArgument() { |
966 return Kill("Unexpected 64bit argument detected"); | 988 return Kill("Unexpected 64bit argument detected"); |
967 } | 989 } |
968 | 990 |
969 ErrorCode Sandbox::Trap(Trap::TrapFnc fnc, const void *aux) { | 991 ErrorCode Sandbox::Trap(Trap::TrapFnc fnc, const void* aux) { |
970 return Trap::MakeTrap(fnc, aux, true /* Safe Trap */); | 992 return Trap::MakeTrap(fnc, aux, true /* Safe Trap */); |
971 } | 993 } |
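// Usage sketch for Trap(): the handler below is illustrative and not part of
// this file; what is taken from the real interface is Trap::TrapFnc's
// (args, aux) signature, plus the assumed convention that the handler's
// return value becomes the apparent result of the intercepted system call,
// with negative values read as -errno.
static intptr_t EmulateAsEperm(const struct arch_seccomp_data& args, void* aux) {
  // |aux| is whatever pointer was passed to Trap(); unused in this sketch.
  return -EPERM;  // make the intercepted system call appear to fail
}
// A policy's evaluator would then return sandbox->Trap(EmulateAsEperm, NULL)
// for every system call it wants to intercept safely.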
972 | 994 |
973 ErrorCode Sandbox::UnsafeTrap(Trap::TrapFnc fnc, const void *aux) { | 995 ErrorCode Sandbox::UnsafeTrap(Trap::TrapFnc fnc, const void* aux) { |
974 return Trap::MakeTrap(fnc, aux, false /* Unsafe Trap */); | 996 return Trap::MakeTrap(fnc, aux, false /* Unsafe Trap */); |
975 } | 997 } |
976 | 998 |
977 intptr_t Sandbox::ForwardSyscall(const struct arch_seccomp_data& args) { | 999 intptr_t Sandbox::ForwardSyscall(const struct arch_seccomp_data& args) { |
978 return SandboxSyscall(args.nr, | 1000 return SandboxSyscall(args.nr, |
979 static_cast<intptr_t>(args.args[0]), | 1001 static_cast<intptr_t>(args.args[0]), |
980 static_cast<intptr_t>(args.args[1]), | 1002 static_cast<intptr_t>(args.args[1]), |
981 static_cast<intptr_t>(args.args[2]), | 1003 static_cast<intptr_t>(args.args[2]), |
982 static_cast<intptr_t>(args.args[3]), | 1004 static_cast<intptr_t>(args.args[3]), |
983 static_cast<intptr_t>(args.args[4]), | 1005 static_cast<intptr_t>(args.args[4]), |
984 static_cast<intptr_t>(args.args[5])); | 1006 static_cast<intptr_t>(args.args[5])); |
985 } | 1007 } |
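// Usage sketch for pairing UnsafeTrap() with ForwardSyscall(): an unsafe trap
// handler can inspect the intercepted call and then re-issue it unchanged.
// The handler name is illustrative; the (args, aux) signature is
// Trap::TrapFnc's, args.nr / args.args[] are the same fields consumed by
// ForwardSyscall() above, and the sketch assumes ForwardSyscall() is callable
// without a Sandbox instance, as its member-free definition suggests.
static intptr_t InspectAndForward(const struct arch_seccomp_data& args, void* aux) {
  // A real handler might log args.nr or rewrite arguments first; this sketch
  // simply executes the original system call and returns its result.
  return Sandbox::ForwardSyscall(args);
}
// Handlers that forward like this are registered through UnsafeTrap() rather
// than Trap(), since system calls issued from a safe trap handler remain
// subject to the sandbox policy.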
986 | 1008 |
987 ErrorCode Sandbox::Cond(int argno, ErrorCode::ArgType width, | 1009 ErrorCode Sandbox::Cond(int argno, |
988 ErrorCode::Operation op, uint64_t value, | 1010 ErrorCode::ArgType width, |
989 const ErrorCode& passed, const ErrorCode& failed) { | 1011 ErrorCode::Operation op, |
990 return ErrorCode(argno, width, op, value, | 1012 uint64_t value, |
| 1013 const ErrorCode& passed, |
| 1014 const ErrorCode& failed) { |
| 1015 return ErrorCode(argno, |
| 1016 width, |
| 1017 op, |
| 1018 value, |
991 &*conds_->insert(passed).first, | 1019 &*conds_->insert(passed).first, |
992 &*conds_->insert(failed).first); | 1020 &*conds_->insert(failed).first); |
993 } | 1021 } |
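// Usage sketch for Cond(): the system call, argument index, and return values
// below are illustrative (a madvise()-style rule of the kind seccomp-bpf
// policies commonly use), and the sketch assumes <errno.h>, <sys/mman.h> and
// the syscall-number headers provide EPERM, MADV_DONTNEED and __NR_madvise.
static ErrorCode ExampleEvaluator(Sandbox* sandbox, int sysno, void* aux) {
  if (sysno == __NR_madvise) {
    // Allow madvise(addr, len, MADV_DONTNEED); fail any other advice value
    // with EPERM instead of killing the process.
    return sandbox->Cond(2, ErrorCode::TP_32BIT, ErrorCode::OP_EQUAL,
                         MADV_DONTNEED,
                         ErrorCode(ErrorCode::ERR_ALLOWED),
                         ErrorCode(EPERM));
  }
  return ErrorCode(ErrorCode::ERR_ALLOWED);
}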
994 | 1022 |
995 ErrorCode Sandbox::Kill(const char *msg) { | 1023 ErrorCode Sandbox::Kill(const char* msg) { |
996 return Trap(BpfFailure, const_cast<char *>(msg)); | 1024 return Trap(BpfFailure, const_cast<char*>(msg)); |
997 } | 1025 } |
998 | 1026 |
999 Sandbox::SandboxStatus Sandbox::status_ = STATUS_UNKNOWN; | 1027 Sandbox::SandboxStatus Sandbox::status_ = STATUS_UNKNOWN; |
1000 | 1028 |
1001 } // namespace playground2 | 1029 } // namespace playground2 |