OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "sandbox/linux/seccomp-bpf/codegen.h" | 5 #include "sandbox/linux/seccomp-bpf/codegen.h" |
6 | 6 |
7 #include <linux/filter.h> | 7 #include <linux/filter.h> |
8 | 8 |
9 #include <set> | 9 #include <set> |
10 | 10 |
11 #include "base/logging.h" | 11 #include "base/logging.h" |
12 #include "sandbox/linux/seccomp-bpf/basicblock.h" | 12 #include "sandbox/linux/seccomp-bpf/basicblock.h" |
13 #include "sandbox/linux/seccomp-bpf/die.h" | 13 #include "sandbox/linux/seccomp-bpf/die.h" |
14 #include "sandbox/linux/seccomp-bpf/instruction.h" | 14 #include "sandbox/linux/seccomp-bpf/instruction.h" |
15 | 15 |
16 namespace sandbox { | 16 namespace sandbox { |
17 | 17 |
| 18 // Unfortunately this needs to be defined out-of-line because inline |
| 19 // initializing a static member to "nullptr" requires "constexpr", |
| 20 // which is currently banned by the Chromium style guide. |
| 21 const CodeGen::Node CodeGen::kNullNode = nullptr; |
| 22 |
18 CodeGen::CodeGen() : compiled_(false) {} | 23 CodeGen::CodeGen() : compiled_(false) {} |
19 | 24 |
20 CodeGen::~CodeGen() { | 25 CodeGen::~CodeGen() { |
21 for (Instructions::iterator iter = instructions_.begin(); | 26 for (Instructions::iterator iter = instructions_.begin(); |
22 iter != instructions_.end(); | 27 iter != instructions_.end(); |
23 ++iter) { | 28 ++iter) { |
24 delete *iter; | 29 delete *iter; |
25 } | 30 } |
26 for (BasicBlocks::iterator iter = basic_blocks_.begin(); | 31 for (BasicBlocks::iterator iter = basic_blocks_.begin(); |
27 iter != basic_blocks_.end(); | 32 iter != basic_blocks_.end(); |
28 ++iter) { | 33 ++iter) { |
29 delete *iter; | 34 delete *iter; |
30 } | 35 } |
31 } | 36 } |
32 | 37 |
33 Instruction* CodeGen::MakeInstruction(uint16_t code, | 38 CodeGen::Node CodeGen::MakeInstruction(uint16_t code, |
34 uint32_t k, | 39 uint32_t k, |
35 Instruction* next) { | 40 Node jt, |
36 // We can handle non-jumping instructions and "always" jumps. Both of | 41 Node jf) { |
37 // them are followed by exactly one "next" instruction. | 42 Node insn; |
38 // We allow callers to defer specifying "next", but then they must call | 43 if (BPF_CLASS(code) == BPF_JMP) { |
39 // "joinInstructions" later. | 44 CHECK_NE(kNullNode, jt); |
40 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_JA) { | 45 if (BPF_OP(code) == BPF_JA) { |
41 SANDBOX_DIE( | 46 CHECK_EQ(kNullNode, jf); |
42 "Must provide both \"true\" and \"false\" branch " | 47 } else { |
43 "for a BPF_JMP"); | 48 CHECK_NE(kNullNode, jf); |
| 49 } |
| 50 insn = new Instruction(code, k, jt, jf); |
| 51 } else { |
| 52 if (BPF_CLASS(code) == BPF_RET) { |
| 53 CHECK_EQ(kNullNode, jt); |
| 54 } else { |
| 55 CHECK_NE(kNullNode, jt); |
| 56 } |
| 57 CHECK_EQ(kNullNode, jf); |
| 58 insn = new Instruction(code, k, jt); |
44 } | 59 } |
45 if (next && BPF_CLASS(code) == BPF_RET) { | |
46 SANDBOX_DIE("Cannot append instructions after a return statement"); | |
47 } | |
48 if (BPF_CLASS(code) == BPF_JMP) { | |
49 // "Always" jumps use the "true" branch target, only. | |
50 Instruction* insn = new Instruction(code, 0, next, NULL); | |
51 instructions_.push_back(insn); | |
52 return insn; | |
53 } else { | |
54 // Non-jumping instructions do not use any of the branch targets. | |
55 Instruction* insn = new Instruction(code, k, next); | |
56 instructions_.push_back(insn); | |
57 return insn; | |
58 } | |
59 } | |
60 | |
61 Instruction* CodeGen::MakeInstruction(uint16_t code, | |
62 uint32_t k, | |
63 Instruction* jt, | |
64 Instruction* jf) { | |
65 // We can handle all conditional jumps. They are followed by both a | |
66 // "true" and a "false" branch. | |
67 if (BPF_CLASS(code) != BPF_JMP || BPF_OP(code) == BPF_JA) { | |
68 SANDBOX_DIE("Expected a BPF_JMP instruction"); | |
69 } | |
70 if (!jt || !jf) { | |
71 SANDBOX_DIE("Branches must jump to a valid instruction"); | |
72 } | |
73 Instruction* insn = new Instruction(code, k, jt, jf); | |
74 instructions_.push_back(insn); | 60 instructions_.push_back(insn); |
75 return insn; | 61 return insn; |
76 } | 62 } |
77 | 63 |
78 void CodeGen::FindBranchTargets(const Instruction& instructions, | 64 void CodeGen::FindBranchTargets(const Instruction& instructions, |
79 BranchTargets* branch_targets) { | 65 BranchTargets* branch_targets) { |
80 // Follow all possible paths through the "instructions" graph and compute | 66 // Follow all possible paths through the "instructions" graph and compute |
81 // a list of branch targets. This will later be needed to compute the | 67 // a list of branch targets. This will later be needed to compute the |
82 // boundaries of basic blocks. | 68 // boundaries of basic blocks. |
83 // We maintain a set of all instructions that we have previously seen. This | 69 // We maintain a set of all instructions that we have previously seen. This |
(...skipping 516 matching lines...) |
600 CutGraphIntoBasicBlocks(instructions, branch_targets, &all_blocks); | 586 CutGraphIntoBasicBlocks(instructions, branch_targets, &all_blocks); |
601 MergeTails(&all_blocks); | 587 MergeTails(&all_blocks); |
602 BasicBlocks basic_blocks; | 588 BasicBlocks basic_blocks; |
603 TopoSortBasicBlocks(first_block, all_blocks, &basic_blocks); | 589 TopoSortBasicBlocks(first_block, all_blocks, &basic_blocks); |
604 ComputeRelativeJumps(&basic_blocks, all_blocks); | 590 ComputeRelativeJumps(&basic_blocks, all_blocks); |
605 ConcatenateBasicBlocks(basic_blocks, program); | 591 ConcatenateBasicBlocks(basic_blocks, program); |
606 return; | 592 return; |
607 } | 593 } |
608 | 594 |
609 } // namespace sandbox | 595 } // namespace sandbox |
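
For context, a minimal sketch (not part of this change) of how the consolidated MakeInstruction() overload in the new code might be driven. It builds a tiny arch-check graph bottom-up, since every jt/jf handed to MakeInstruction() must already exist. The kernel headers and the SECCOMP_RET_*/AUDIT_ARCH_X86_64 constants are assumptions, not something this diff defines, and whether the header also provides kNullNode default arguments for jt/jf is not visible here, so both are passed explicitly.

#include <stddef.h>

#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#include "sandbox/linux/seccomp-bpf/codegen.h"

// Sketch only: LD arch; JEQ AUDIT_ARCH_X86_64 ? allow : kill.
sandbox::CodeGen::Node BuildArchCheck(sandbox::CodeGen* gen) {
  typedef sandbox::CodeGen CodeGen;
  // BPF_RET has no successors, so both jt and jf stay kNullNode
  // (the new code CHECKs exactly that).
  CodeGen::Node kill = gen->MakeInstruction(
      BPF_RET + BPF_K, SECCOMP_RET_KILL,
      CodeGen::kNullNode, CodeGen::kNullNode);
  CodeGen::Node allow = gen->MakeInstruction(
      BPF_RET + BPF_K, SECCOMP_RET_ALLOW,
      CodeGen::kNullNode, CodeGen::kNullNode);
  // A conditional jump needs both branch targets to be non-null.
  CodeGen::Node check_arch = gen->MakeInstruction(
      BPF_JMP + BPF_JEQ + BPF_K, AUDIT_ARCH_X86_64, allow, kill);
  // A non-jumping instruction reuses jt as its single "next" node;
  // jf must remain kNullNode.
  return gen->MakeInstruction(BPF_LD + BPF_W + BPF_ABS,
                              offsetof(struct seccomp_data, arch),
                              check_arch, CodeGen::kNullNode);
}

The returned head node would then be passed to CodeGen::Compile() to produce the flat BPF program; the exact Compile() signature is not shown in this diff.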