Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(86)

Side by Side Diff: sandbox/linux/seccomp-bpf/codegen.cc

Issue 66723007: Make sandbox/linux/seccomp-bpf/ follow the style guide. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: (empty) rebase Created 7 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « sandbox/linux/seccomp-bpf/codegen.h ('k') | sandbox/linux/seccomp-bpf/die.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <stdio.h> 5 #include <stdio.h>
6 6
7 #include "sandbox/linux/seccomp-bpf/codegen.h" 7 #include "sandbox/linux/seccomp-bpf/codegen.h"
8 8
9
10 namespace { 9 namespace {
11 10
12 // Helper function for Traverse(). 11 // Helper function for Traverse().
13 void TraverseRecursively(std::set<playground2::Instruction *> *visited, 12 void TraverseRecursively(std::set<playground2::Instruction*>* visited,
14 playground2::Instruction *instruction) { 13 playground2::Instruction* instruction) {
15 if (visited->find(instruction) == visited->end()) { 14 if (visited->find(instruction) == visited->end()) {
16 visited->insert(instruction); 15 visited->insert(instruction);
17 switch (BPF_CLASS(instruction->code)) { 16 switch (BPF_CLASS(instruction->code)) {
18 case BPF_JMP: 17 case BPF_JMP:
19 if (BPF_OP(instruction->code) != BPF_JA) { 18 if (BPF_OP(instruction->code) != BPF_JA) {
20 TraverseRecursively(visited, instruction->jf_ptr); 19 TraverseRecursively(visited, instruction->jf_ptr);
21 } 20 }
22 TraverseRecursively(visited, instruction->jt_ptr); 21 TraverseRecursively(visited, instruction->jt_ptr);
23 break; 22 break;
24 case BPF_RET: 23 case BPF_RET:
25 break; 24 break;
26 default: 25 default:
27 TraverseRecursively(visited, instruction->next); 26 TraverseRecursively(visited, instruction->next);
28 break; 27 break;
29 } 28 }
30 } 29 }
31 } 30 }
32 31
33 } // namespace 32 } // namespace
34 33
35 namespace playground2 { 34 namespace playground2 {
36 35
37 CodeGen::CodeGen() 36 CodeGen::CodeGen() : compiled_(false) {}
38 : compiled_(false) {
39 }
40 37
41 CodeGen::~CodeGen() { 38 CodeGen::~CodeGen() {
42 for (Instructions::iterator iter = instructions_.begin(); 39 for (Instructions::iterator iter = instructions_.begin();
43 iter != instructions_.end(); 40 iter != instructions_.end();
44 ++iter) { 41 ++iter) {
45 delete *iter; 42 delete *iter;
46 } 43 }
47 for (BasicBlocks::iterator iter = basic_blocks_.begin(); 44 for (BasicBlocks::iterator iter = basic_blocks_.begin();
48 iter != basic_blocks_.end(); 45 iter != basic_blocks_.end();
49 ++iter) { 46 ++iter) {
50 delete *iter; 47 delete *iter;
51 } 48 }
52 } 49 }
53 50
54 void CodeGen::PrintProgram(const Sandbox::Program& program) { 51 void CodeGen::PrintProgram(const Sandbox::Program& program) {
55 for (Sandbox::Program::const_iterator iter = program.begin(); 52 for (Sandbox::Program::const_iterator iter = program.begin();
56 iter != program.end(); 53 iter != program.end();
57 ++iter) { 54 ++iter) {
58 int ip = (int)(iter - program.begin()); 55 int ip = (int)(iter - program.begin());
59 fprintf(stderr, "%3d) ", ip); 56 fprintf(stderr, "%3d) ", ip);
60 switch (BPF_CLASS(iter->code)) { 57 switch (BPF_CLASS(iter->code)) {
61 case BPF_LD: 58 case BPF_LD:
62 if (iter->code == BPF_LD+BPF_W+BPF_ABS) { 59 if (iter->code == BPF_LD + BPF_W + BPF_ABS) {
63 fprintf(stderr, "LOAD %d // ", (int)iter->k); 60 fprintf(stderr, "LOAD %d // ", (int)iter->k);
64 if (iter->k == offsetof(struct arch_seccomp_data, nr)) { 61 if (iter->k == offsetof(struct arch_seccomp_data, nr)) {
65 fprintf(stderr, "System call number\n"); 62 fprintf(stderr, "System call number\n");
66 } else if (iter->k == offsetof(struct arch_seccomp_data, arch)) { 63 } else if (iter->k == offsetof(struct arch_seccomp_data, arch)) {
67 fprintf(stderr, "Architecture\n"); 64 fprintf(stderr, "Architecture\n");
68 } else if (iter->k == offsetof(struct arch_seccomp_data, 65 } else if (iter->k ==
69 instruction_pointer)) { 66 offsetof(struct arch_seccomp_data, instruction_pointer)) {
70 fprintf(stderr, "Instruction pointer (LSB)\n"); 67 fprintf(stderr, "Instruction pointer (LSB)\n");
71 } else if (iter->k == offsetof(struct arch_seccomp_data, 68 } else if (iter->k ==
72 instruction_pointer) + 4) { 69 offsetof(struct arch_seccomp_data, instruction_pointer) +
73 fprintf(stderr, "Instruction pointer (MSB)\n"); 70 4) {
74 } else if (iter->k >= offsetof(struct arch_seccomp_data, args) && 71 fprintf(stderr, "Instruction pointer (MSB)\n");
75 iter->k < offsetof(struct arch_seccomp_data, args)+48 && 72 } else if (iter->k >= offsetof(struct arch_seccomp_data, args) &&
76 (iter->k-offsetof(struct arch_seccomp_data, args))%4 == 0) { 73 iter->k < offsetof(struct arch_seccomp_data, args) + 48 &&
77 fprintf(stderr, "Argument %d (%cSB)\n", 74 (iter->k - offsetof(struct arch_seccomp_data, args)) % 4 ==
78 (int)(iter->k-offsetof(struct arch_seccomp_data, args))/8, 75 0) {
79 (iter->k-offsetof(struct arch_seccomp_data, 76 fprintf(
80 args))%8 ? 'M' : 'L'); 77 stderr,
78 "Argument %d (%cSB)\n",
79 (int)(iter->k - offsetof(struct arch_seccomp_data, args)) / 8,
80 (iter->k - offsetof(struct arch_seccomp_data, args)) % 8 ? 'M'
81 : 'L');
82 } else {
83 fprintf(stderr, "???\n");
84 }
85 } else {
86 fprintf(stderr, "LOAD ???\n");
87 }
88 break;
89 case BPF_JMP:
90 if (BPF_OP(iter->code) == BPF_JA) {
91 fprintf(stderr, "JMP %d\n", ip + iter->k + 1);
92 } else {
93 fprintf(stderr, "if A %s 0x%x; then JMP %d else JMP %d\n",
94 BPF_OP(iter->code) == BPF_JSET ? "&" :
95 BPF_OP(iter->code) == BPF_JEQ ? "==" :
96 BPF_OP(iter->code) == BPF_JGE ? ">=" :
97 BPF_OP(iter->code) == BPF_JGT ? ">" : "???",
98 (int)iter->k,
99 ip + iter->jt + 1, ip + iter->jf + 1);
100 }
101 break;
102 case BPF_RET:
103 fprintf(stderr, "RET 0x%x // ", iter->k);
104 if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP) {
105 fprintf(stderr, "Trap #%d\n", iter->k & SECCOMP_RET_DATA);
106 } else if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
107 fprintf(stderr, "errno = %d\n", iter->k & SECCOMP_RET_DATA);
108 } else if (iter->k == SECCOMP_RET_ALLOW) {
109 fprintf(stderr, "Allowed\n");
81 } else { 110 } else {
82 fprintf(stderr, "???\n"); 111 fprintf(stderr, "???\n");
83 } 112 }
84 } else { 113 break;
85 fprintf(stderr, "LOAD ???\n"); 114 case BPF_ALU:
86 } 115 fprintf(stderr, BPF_OP(iter->code) == BPF_NEG
87 break; 116 ? "A := -A\n" : "A := A %s 0x%x\n",
88 case BPF_JMP: 117 BPF_OP(iter->code) == BPF_ADD ? "+" :
89 if (BPF_OP(iter->code) == BPF_JA) { 118 BPF_OP(iter->code) == BPF_SUB ? "-" :
90 fprintf(stderr, "JMP %d\n", ip + iter->k + 1); 119 BPF_OP(iter->code) == BPF_MUL ? "*" :
91 } else { 120 BPF_OP(iter->code) == BPF_DIV ? "/" :
92 fprintf(stderr, "if A %s 0x%x; then JMP %d else JMP %d\n", 121 BPF_OP(iter->code) == BPF_MOD ? "%" :
93 BPF_OP(iter->code) == BPF_JSET ? "&" : 122 BPF_OP(iter->code) == BPF_OR ? "|" :
94 BPF_OP(iter->code) == BPF_JEQ ? "==" : 123 BPF_OP(iter->code) == BPF_XOR ? "^" :
95 BPF_OP(iter->code) == BPF_JGE ? ">=" : 124 BPF_OP(iter->code) == BPF_AND ? "&" :
96 BPF_OP(iter->code) == BPF_JGT ? ">" : "???", 125 BPF_OP(iter->code) == BPF_LSH ? "<<" :
97 (int)iter->k, 126 BPF_OP(iter->code) == BPF_RSH ? ">>" : "???",
98 ip + iter->jt + 1, ip + iter->jf + 1); 127 (int)iter->k);
99 } 128 break;
100 break; 129 default:
101 case BPF_RET:
102 fprintf(stderr, "RET 0x%x // ", iter->k);
103 if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_TRAP) {
104 fprintf(stderr, "Trap #%d\n", iter->k & SECCOMP_RET_DATA);
105 } else if ((iter->k & SECCOMP_RET_ACTION) == SECCOMP_RET_ERRNO) {
106 fprintf(stderr, "errno = %d\n", iter->k & SECCOMP_RET_DATA);
107 } else if (iter->k == SECCOMP_RET_ALLOW) {
108 fprintf(stderr, "Allowed\n");
109 } else {
110 fprintf(stderr, "???\n"); 130 fprintf(stderr, "???\n");
111 } 131 break;
112 break;
113 case BPF_ALU:
114 fprintf(stderr, BPF_OP(iter->code) == BPF_NEG
115 ? "A := -A\n" : "A := A %s 0x%x\n",
116 BPF_OP(iter->code) == BPF_ADD ? "+" :
117 BPF_OP(iter->code) == BPF_SUB ? "-" :
118 BPF_OP(iter->code) == BPF_MUL ? "*" :
119 BPF_OP(iter->code) == BPF_DIV ? "/" :
120 BPF_OP(iter->code) == BPF_MOD ? "%" :
121 BPF_OP(iter->code) == BPF_OR ? "|" :
122 BPF_OP(iter->code) == BPF_XOR ? "^" :
123 BPF_OP(iter->code) == BPF_AND ? "&" :
124 BPF_OP(iter->code) == BPF_LSH ? "<<" :
125 BPF_OP(iter->code) == BPF_RSH ? ">>" : "???",
126 (int)iter->k);
127 break;
128 default:
129 fprintf(stderr, "???\n");
130 break;
131 } 132 }
132 } 133 }
133 return; 134 return;
134 } 135 }
135 136
136 Instruction *CodeGen::MakeInstruction(uint16_t code, uint32_t k, 137 Instruction* CodeGen::MakeInstruction(uint16_t code,
137 Instruction *next) { 138 uint32_t k,
139 Instruction* next) {
138 // We can handle non-jumping instructions and "always" jumps. Both of 140 // We can handle non-jumping instructions and "always" jumps. Both of
139 // them are followed by exactly one "next" instruction. 141 // them are followed by exactly one "next" instruction.
140 // We allow callers to defer specifying "next", but then they must call 142 // We allow callers to defer specifying "next", but then they must call
141 // "joinInstructions" later. 143 // "joinInstructions" later.
142 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_JA) { 144 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_JA) {
143 SANDBOX_DIE("Must provide both \"true\" and \"false\" branch " 145 SANDBOX_DIE(
144 "for a BPF_JMP"); 146 "Must provide both \"true\" and \"false\" branch "
147 "for a BPF_JMP");
145 } 148 }
146 if (next && BPF_CLASS(code) == BPF_RET) { 149 if (next && BPF_CLASS(code) == BPF_RET) {
147 SANDBOX_DIE("Cannot append instructions after a return statement"); 150 SANDBOX_DIE("Cannot append instructions after a return statement");
148 } 151 }
149 if (BPF_CLASS(code) == BPF_JMP) { 152 if (BPF_CLASS(code) == BPF_JMP) {
150 // "Always" jumps use the "true" branch target, only. 153 // "Always" jumps use the "true" branch target, only.
151 Instruction *insn = new Instruction(code, 0, next, NULL); 154 Instruction* insn = new Instruction(code, 0, next, NULL);
152 instructions_.push_back(insn); 155 instructions_.push_back(insn);
153 return insn; 156 return insn;
154 } else { 157 } else {
155 // Non-jumping instructions do not use any of the branch targets. 158 // Non-jumping instructions do not use any of the branch targets.
156 Instruction *insn = new Instruction(code, k, next); 159 Instruction* insn = new Instruction(code, k, next);
157 instructions_.push_back(insn); 160 instructions_.push_back(insn);
158 return insn; 161 return insn;
159 } 162 }
160 } 163 }
161 164
162 Instruction *CodeGen::MakeInstruction(uint16_t code, const ErrorCode& err) { 165 Instruction* CodeGen::MakeInstruction(uint16_t code, const ErrorCode& err) {
163 if (BPF_CLASS(code) != BPF_RET) { 166 if (BPF_CLASS(code) != BPF_RET) {
164 SANDBOX_DIE("ErrorCodes can only be used in return expressions"); 167 SANDBOX_DIE("ErrorCodes can only be used in return expressions");
165 } 168 }
166 if (err.error_type_ != ErrorCode::ET_SIMPLE && 169 if (err.error_type_ != ErrorCode::ET_SIMPLE &&
167 err.error_type_ != ErrorCode::ET_TRAP) { 170 err.error_type_ != ErrorCode::ET_TRAP) {
168 SANDBOX_DIE("ErrorCode is not suitable for returning from a BPF program"); 171 SANDBOX_DIE("ErrorCode is not suitable for returning from a BPF program");
169 } 172 }
170 return MakeInstruction(code, err.err_); 173 return MakeInstruction(code, err.err_);
171 } 174 }
172 175
173 Instruction *CodeGen::MakeInstruction(uint16_t code, uint32_t k, 176 Instruction* CodeGen::MakeInstruction(uint16_t code,
174 Instruction *jt, Instruction *jf) { 177 uint32_t k,
178 Instruction* jt,
179 Instruction* jf) {
175 // We can handle all conditional jumps. They are followed by both a 180 // We can handle all conditional jumps. They are followed by both a
176 // "true" and a "false" branch. 181 // "true" and a "false" branch.
177 if (BPF_CLASS(code) != BPF_JMP || BPF_OP(code) == BPF_JA) { 182 if (BPF_CLASS(code) != BPF_JMP || BPF_OP(code) == BPF_JA) {
178 SANDBOX_DIE("Expected a BPF_JMP instruction"); 183 SANDBOX_DIE("Expected a BPF_JMP instruction");
179 } 184 }
180 if (!jt && !jf) { 185 if (!jt && !jf) {
181 // We allow callers to defer specifying exactly one of the branch 186 // We allow callers to defer specifying exactly one of the branch
182 // targets. It must then be set later by calling "JoinInstructions". 187 // targets. It must then be set later by calling "JoinInstructions".
183 SANDBOX_DIE("Branches must jump to a valid instruction"); 188 SANDBOX_DIE("Branches must jump to a valid instruction");
184 } 189 }
185 Instruction *insn = new Instruction(code, k, jt, jf); 190 Instruction* insn = new Instruction(code, k, jt, jf);
186 instructions_.push_back(insn); 191 instructions_.push_back(insn);
187 return insn; 192 return insn;
188 } 193 }
189 194
190 void CodeGen::JoinInstructions(Instruction *head, Instruction *tail) { 195 void CodeGen::JoinInstructions(Instruction* head, Instruction* tail) {
191 // Merge two instructions, or set the branch target for an "always" jump. 196 // Merge two instructions, or set the branch target for an "always" jump.
192 // This function should be called if the caller didn't initially provide 197 // This function should be called if the caller didn't initially provide
193 // a value for "next" when creating the instruction. 198 // a value for "next" when creating the instruction.
194 if (BPF_CLASS(head->code) == BPF_JMP) { 199 if (BPF_CLASS(head->code) == BPF_JMP) {
195 if (BPF_OP(head->code) == BPF_JA) { 200 if (BPF_OP(head->code) == BPF_JA) {
196 if (head->jt_ptr) { 201 if (head->jt_ptr) {
197 SANDBOX_DIE("Cannot append instructions in the middle of a sequence"); 202 SANDBOX_DIE("Cannot append instructions in the middle of a sequence");
198 } 203 }
199 head->jt_ptr = tail; 204 head->jt_ptr = tail;
200 } else { 205 } else {
201 if (!head->jt_ptr && head->jf_ptr) { 206 if (!head->jt_ptr && head->jf_ptr) {
202 head->jt_ptr = tail; 207 head->jt_ptr = tail;
203 } else if (!head->jf_ptr && head->jt_ptr) { 208 } else if (!head->jf_ptr && head->jt_ptr) {
204 head->jf_ptr = tail; 209 head->jf_ptr = tail;
205 } else { 210 } else {
206 SANDBOX_DIE("Cannot append instructions after a jump"); 211 SANDBOX_DIE("Cannot append instructions after a jump");
207 } 212 }
208 } 213 }
209 } else if (BPF_CLASS(head->code) == BPF_RET) { 214 } else if (BPF_CLASS(head->code) == BPF_RET) {
210 SANDBOX_DIE("Cannot append instructions after a return statement"); 215 SANDBOX_DIE("Cannot append instructions after a return statement");
211 } else if (head->next) { 216 } else if (head->next) {
212 SANDBOX_DIE("Cannot append instructions in the middle of a sequence"); 217 SANDBOX_DIE("Cannot append instructions in the middle of a sequence");
213 } else { 218 } else {
214 head->next = tail; 219 head->next = tail;
215 } 220 }
216 return; 221 return;
217 } 222 }
218 223
219 void CodeGen::Traverse(Instruction *instruction, 224 void CodeGen::Traverse(Instruction* instruction,
220 void (*fnc)(Instruction *, void *), void *aux) { 225 void (*fnc)(Instruction*, void*),
221 std::set<Instruction *> visited; 226 void* aux) {
227 std::set<Instruction*> visited;
222 TraverseRecursively(&visited, instruction); 228 TraverseRecursively(&visited, instruction);
223 for (std::set<Instruction *>::const_iterator iter = visited.begin(); 229 for (std::set<Instruction*>::const_iterator iter = visited.begin();
224 iter != visited.end(); 230 iter != visited.end();
225 ++iter) { 231 ++iter) {
226 fnc(*iter, aux); 232 fnc(*iter, aux);
227 } 233 }
228 } 234 }
229 235
230 void CodeGen::FindBranchTargets(const Instruction& instructions, 236 void CodeGen::FindBranchTargets(const Instruction& instructions,
231 BranchTargets *branch_targets) { 237 BranchTargets* branch_targets) {
232 // Follow all possible paths through the "instructions" graph and compute 238 // Follow all possible paths through the "instructions" graph and compute
233 // a list of branch targets. This will later be needed to compute the 239 // a list of branch targets. This will later be needed to compute the
234 // boundaries of basic blocks. 240 // boundaries of basic blocks.
235 // We maintain a set of all instructions that we have previously seen. This 241 // We maintain a set of all instructions that we have previously seen. This
236 // set ultimately converges on all instructions in the program. 242 // set ultimately converges on all instructions in the program.
237 std::set<const Instruction *> seen_instructions; 243 std::set<const Instruction*> seen_instructions;
238 Instructions stack; 244 Instructions stack;
239 for (const Instruction *insn = &instructions; insn; ) { 245 for (const Instruction* insn = &instructions; insn;) {
240 seen_instructions.insert(insn); 246 seen_instructions.insert(insn);
241 if (BPF_CLASS(insn->code) == BPF_JMP) { 247 if (BPF_CLASS(insn->code) == BPF_JMP) {
242 // Found a jump. Increase count of incoming edges for each of the jump 248 // Found a jump. Increase count of incoming edges for each of the jump
243 // targets. 249 // targets.
244 ++(*branch_targets)[insn->jt_ptr]; 250 ++(*branch_targets)[insn->jt_ptr];
245 if (BPF_OP(insn->code) != BPF_JA) { 251 if (BPF_OP(insn->code) != BPF_JA) {
246 ++(*branch_targets)[insn->jf_ptr]; 252 ++(*branch_targets)[insn->jf_ptr];
247 stack.push_back(const_cast<Instruction *>(insn)); 253 stack.push_back(const_cast<Instruction*>(insn));
248 } 254 }
249 // Start a recursive descent for depth-first traversal. 255 // Start a recursive descent for depth-first traversal.
250 if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) { 256 if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) {
251 // We haven't seen the "true" branch yet. Traverse it now. We have 257 // We haven't seen the "true" branch yet. Traverse it now. We have
252 // already remembered the "false" branch on the stack and will 258 // already remembered the "false" branch on the stack and will
253 // traverse it later. 259 // traverse it later.
254 insn = insn->jt_ptr; 260 insn = insn->jt_ptr;
255 continue; 261 continue;
256 } else { 262 } else {
257 // Now try traversing the "false" branch. 263 // Now try traversing the "false" branch.
258 insn = NULL; 264 insn = NULL;
259 } 265 }
260 } else { 266 } else {
261 // This is a non-jump instruction, just continue to the next instruction 267 // This is a non-jump instruction, just continue to the next instruction
262 // (if any). It's OK if "insn" becomes NULL when reaching a return 268 // (if any). It's OK if "insn" becomes NULL when reaching a return
263 // instruction. 269 // instruction.
264 if (!insn->next != (BPF_CLASS(insn->code) == BPF_RET)) { 270 if (!insn->next != (BPF_CLASS(insn->code) == BPF_RET)) {
265 SANDBOX_DIE("Internal compiler error; return instruction must be at " 271 SANDBOX_DIE(
266 "the end of the BPF program"); 272 "Internal compiler error; return instruction must be at "
273 "the end of the BPF program");
267 } 274 }
268 if (seen_instructions.find(insn->next) == seen_instructions.end()) { 275 if (seen_instructions.find(insn->next) == seen_instructions.end()) {
269 insn = insn->next; 276 insn = insn->next;
270 } else { 277 } else {
271 // We have seen this instruction before. That could happen if it is 278 // We have seen this instruction before. That could happen if it is
272 // a branch target. No need to continue processing. 279 // a branch target. No need to continue processing.
273 insn = NULL; 280 insn = NULL;
274 } 281 }
275 } 282 }
276 while (!insn && !stack.empty()) { 283 while (!insn && !stack.empty()) {
277 // We are done processing all the way to a leaf node, backtrack up the 284 // We are done processing all the way to a leaf node, backtrack up the
278 // stack to any branches that we haven't processed yet. By definition, 285 // stack to any branches that we haven't processed yet. By definition,
279 // this has to be a "false" branch, as we always process the "true" 286 // this has to be a "false" branch, as we always process the "true"
280 // branches right away. 287 // branches right away.
281 insn = stack.back(); 288 insn = stack.back();
282 stack.pop_back(); 289 stack.pop_back();
283 if (seen_instructions.find(insn->jf_ptr) == seen_instructions.end()) { 290 if (seen_instructions.find(insn->jf_ptr) == seen_instructions.end()) {
284 // We haven't seen the "false" branch yet. So, that's where we'll 291 // We haven't seen the "false" branch yet. So, that's where we'll
285 // go now. 292 // go now.
286 insn = insn->jf_ptr; 293 insn = insn->jf_ptr;
287 } else { 294 } else {
288 // We have seen both the "true" and the "false" branch, continue 295 // We have seen both the "true" and the "false" branch, continue
289 // up the stack. 296 // up the stack.
290 if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) { 297 if (seen_instructions.find(insn->jt_ptr) == seen_instructions.end()) {
291 SANDBOX_DIE("Internal compiler error; cannot find all " 298 SANDBOX_DIE(
292 "branch targets"); 299 "Internal compiler error; cannot find all "
300 "branch targets");
293 } 301 }
294 insn = NULL; 302 insn = NULL;
295 } 303 }
296 } 304 }
297 } 305 }
298 return; 306 return;
299 } 307 }
300 308
301 BasicBlock *CodeGen::MakeBasicBlock(Instruction *head, 309 BasicBlock* CodeGen::MakeBasicBlock(Instruction* head, Instruction* tail) {
302 Instruction *tail) {
303 // Iterate over all the instructions between "head" and "tail" and 310 // Iterate over all the instructions between "head" and "tail" and
304 // insert them into a new basic block. 311 // insert them into a new basic block.
305 BasicBlock *bb = new BasicBlock; 312 BasicBlock* bb = new BasicBlock;
306 for (;; head = head->next) { 313 for (;; head = head->next) {
307 bb->instructions.push_back(head); 314 bb->instructions.push_back(head);
308 if (head == tail) { 315 if (head == tail) {
309 break; 316 break;
310 } 317 }
311 if (BPF_CLASS(head->code) == BPF_JMP) { 318 if (BPF_CLASS(head->code) == BPF_JMP) {
312 SANDBOX_DIE("Found a jump inside of a basic block"); 319 SANDBOX_DIE("Found a jump inside of a basic block");
313 } 320 }
314 } 321 }
315 basic_blocks_.push_back(bb); 322 basic_blocks_.push_back(bb);
316 return bb; 323 return bb;
317 } 324 }
318 325
319 void CodeGen::AddBasicBlock(Instruction *head, 326 void CodeGen::AddBasicBlock(Instruction* head,
320 Instruction *tail, 327 Instruction* tail,
321 const BranchTargets& branch_targets, 328 const BranchTargets& branch_targets,
322 TargetsToBlocks *basic_blocks, 329 TargetsToBlocks* basic_blocks,
323 BasicBlock **firstBlock) { 330 BasicBlock** firstBlock) {
324 // Add a new basic block to "basic_blocks". Also set "firstBlock", if it 331 // Add a new basic block to "basic_blocks". Also set "firstBlock", if it
325 // has not been set before. 332 // has not been set before.
326 BranchTargets::const_iterator iter = branch_targets.find(head); 333 BranchTargets::const_iterator iter = branch_targets.find(head);
327 if ((iter == branch_targets.end()) != !*firstBlock || 334 if ((iter == branch_targets.end()) != !*firstBlock ||
328 !*firstBlock != basic_blocks->empty()) { 335 !*firstBlock != basic_blocks->empty()) {
329 SANDBOX_DIE("Only the very first basic block should have no " 336 SANDBOX_DIE(
330 "incoming jumps"); 337 "Only the very first basic block should have no "
338 "incoming jumps");
331 } 339 }
332 BasicBlock *bb = MakeBasicBlock(head, tail); 340 BasicBlock* bb = MakeBasicBlock(head, tail);
333 if (!*firstBlock) { 341 if (!*firstBlock) {
334 *firstBlock = bb; 342 *firstBlock = bb;
335 } 343 }
336 (*basic_blocks)[head] = bb; 344 (*basic_blocks)[head] = bb;
337 return; 345 return;
338 } 346 }
339 347
340 BasicBlock *CodeGen::CutGraphIntoBasicBlocks( 348 BasicBlock* CodeGen::CutGraphIntoBasicBlocks(
341 Instruction *instructions, const BranchTargets& branch_targets, 349 Instruction* instructions,
342 TargetsToBlocks *basic_blocks) { 350 const BranchTargets& branch_targets,
351 TargetsToBlocks* basic_blocks) {
343 // Textbook implementation of a basic block generator. All basic blocks 352 // Textbook implementation of a basic block generator. All basic blocks
344 // start with a branch target and end with either a return statement or 353 // start with a branch target and end with either a return statement or
345 // a jump (or are followed by an instruction that forms the beginning of a 354 // a jump (or are followed by an instruction that forms the beginning of a
346 // new block). Both conditional and "always" jumps are supported. 355 // new block). Both conditional and "always" jumps are supported.
347 BasicBlock *first_block = NULL; 356 BasicBlock* first_block = NULL;
348 std::set<const Instruction *> seen_instructions; 357 std::set<const Instruction*> seen_instructions;
349 Instructions stack; 358 Instructions stack;
350 Instruction *tail = NULL; 359 Instruction* tail = NULL;
351 Instruction *head = instructions; 360 Instruction* head = instructions;
352 for (Instruction *insn = head; insn; ) { 361 for (Instruction* insn = head; insn;) {
353 if (seen_instructions.find(insn) != seen_instructions.end()) { 362 if (seen_instructions.find(insn) != seen_instructions.end()) {
354 // We somehow went in a circle. This should never be possible. Not even 363 // We somehow went in a circle. This should never be possible. Not even
355 // cyclic graphs are supposed to confuse us this much. 364 // cyclic graphs are supposed to confuse us this much.
356 SANDBOX_DIE("Internal compiler error; cannot compute basic blocks"); 365 SANDBOX_DIE("Internal compiler error; cannot compute basic blocks");
357 } 366 }
358 seen_instructions.insert(insn); 367 seen_instructions.insert(insn);
359 if (tail && branch_targets.find(insn) != branch_targets.end()) { 368 if (tail && branch_targets.find(insn) != branch_targets.end()) {
360 // We reached a branch target. Start a new basic block (this means, 369 // We reached a branch target. Start a new basic block (this means,
361 // flushing the previous basic block first). 370 // flushing the previous basic block first).
362 AddBasicBlock(head, tail, branch_targets, basic_blocks, &first_block); 371 AddBasicBlock(head, tail, branch_targets, basic_blocks, &first_block);
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
403 } 412 }
404 } 413 }
405 return first_block; 414 return first_block;
406 } 415 }
407 416
408 // We define a comparator that inspects the sequence of instructions in our 417 // We define a comparator that inspects the sequence of instructions in our
409 // basic block and any blocks referenced by this block. This function can be 418 // basic block and any blocks referenced by this block. This function can be
410 // used in a "less" comparator for the purpose of storing pointers to basic 419 // used in a "less" comparator for the purpose of storing pointers to basic
411 // blocks in STL containers; this gives an easy option to use STL to find 420 // blocks in STL containers; this gives an easy option to use STL to find
412 // shared tail sequences of basic blocks. 421 // shared tail sequences of basic blocks.
413 static int PointerCompare(const BasicBlock *block1, const BasicBlock *block2, 422 static int PointerCompare(const BasicBlock* block1,
423 const BasicBlock* block2,
414 const TargetsToBlocks& blocks) { 424 const TargetsToBlocks& blocks) {
415 // Return <0, 0, or >0 depending on the ordering of "block1" and "block2". 425 // Return <0, 0, or >0 depending on the ordering of "block1" and "block2".
416 // If we are looking at the exact same block, this is trivial and we don't 426 // If we are looking at the exact same block, this is trivial and we don't
417 // need to do a full comparison. 427 // need to do a full comparison.
418 if (block1 == block2) { 428 if (block1 == block2) {
419 return 0; 429 return 0;
420 } 430 }
421 431
422 // We compare the sequence of instructions in both basic blocks. 432 // We compare the sequence of instructions in both basic blocks.
423 const Instructions& insns1 = block1->instructions; 433 const Instructions& insns1 = block1->instructions;
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after
479 } 489 }
480 } else { 490 } else {
481 return insn1.k - insn2.k; 491 return insn1.k - insn2.k;
482 } 492 }
483 } else { 493 } else {
484 return insn1.code - insn2.code; 494 return insn1.code - insn2.code;
485 } 495 }
486 } 496 }
487 } 497 }
488 498
489 void CodeGen::MergeTails(TargetsToBlocks *blocks) { 499 void CodeGen::MergeTails(TargetsToBlocks* blocks) {
490 // We enter all of our basic blocks into a set using the BasicBlock::Less() 500 // We enter all of our basic blocks into a set using the BasicBlock::Less()
491 // comparator. This naturally causes blocks with identical tails of 501 // comparator. This naturally causes blocks with identical tails of
492 // instructions to map to the same entry in the set. Whenever we discover 502 // instructions to map to the same entry in the set. Whenever we discover
493 // that a particular chain of instructions is already in the set, we merge 503 // that a particular chain of instructions is already in the set, we merge
494 // the basic blocks and update the pointer in the "blocks" map. 504 // the basic blocks and update the pointer in the "blocks" map.
495 // Returns the number of unique basic blocks. 505 // Returns the number of unique basic blocks.
496 // N.B. We don't merge instructions on a granularity that is finer than 506 // N.B. We don't merge instructions on a granularity that is finer than
497 // a basic block. In practice, this is sufficiently rare that we don't 507 // a basic block. In practice, this is sufficiently rare that we don't
498 // incur a big cost. 508 // incur a big cost.
499 // Similarly, we currently don't merge anything other than tails. In 509 // Similarly, we currently don't merge anything other than tails. In
500 // the future, we might decide to revisit this decision and attempt to 510 // the future, we might decide to revisit this decision and attempt to
501 // merge arbitrary sub-sequences of instructions. 511 // merge arbitrary sub-sequences of instructions.
502 BasicBlock::Less<TargetsToBlocks> less(*blocks, PointerCompare); 512 BasicBlock::Less<TargetsToBlocks> less(*blocks, PointerCompare);
503 typedef std::set<BasicBlock *, BasicBlock::Less<TargetsToBlocks> > Set; 513 typedef std::set<BasicBlock*, BasicBlock::Less<TargetsToBlocks> > Set;
504 Set seen_basic_blocks(less); 514 Set seen_basic_blocks(less);
505 for (TargetsToBlocks::iterator iter = blocks->begin(); 515 for (TargetsToBlocks::iterator iter = blocks->begin(); iter != blocks->end();
506 iter != blocks->end();
507 ++iter) { 516 ++iter) {
508 BasicBlock *bb = iter->second; 517 BasicBlock* bb = iter->second;
509 Set::const_iterator entry = seen_basic_blocks.find(bb); 518 Set::const_iterator entry = seen_basic_blocks.find(bb);
510 if (entry == seen_basic_blocks.end()) { 519 if (entry == seen_basic_blocks.end()) {
511 // This is the first time we see this particular sequence of 520 // This is the first time we see this particular sequence of
512 // instructions. Enter the basic block into the set of known 521 // instructions. Enter the basic block into the set of known
513 // basic blocks. 522 // basic blocks.
514 seen_basic_blocks.insert(bb); 523 seen_basic_blocks.insert(bb);
515 } else { 524 } else {
516 // We have previously seen another basic block that defines the same 525 // We have previously seen another basic block that defines the same
517 // sequence of instructions. Merge the two blocks and update the 526 // sequence of instructions. Merge the two blocks and update the
518 // pointer in the "blocks" map. 527 // pointer in the "blocks" map.
519 iter->second = *entry; 528 iter->second = *entry;
520 } 529 }
521 } 530 }
522 } 531 }
523 532
524 void CodeGen::ComputeIncomingBranches(BasicBlock *block, 533 void CodeGen::ComputeIncomingBranches(BasicBlock* block,
525 const TargetsToBlocks& targets_to_blocks, 534 const TargetsToBlocks& targets_to_blocks,
526 IncomingBranches *incoming_branches) { 535 IncomingBranches* incoming_branches) {
527 // We increment the number of incoming branches each time we encounter a 536 // We increment the number of incoming branches each time we encounter a
528 // basic block. But we only traverse recursively the very first time we 537 // basic block. But we only traverse recursively the very first time we
529 // encounter a new block. This is necessary to make topological sorting 538 // encounter a new block. This is necessary to make topological sorting
530 // work correctly. 539 // work correctly.
531 if (++(*incoming_branches)[block] == 1) { 540 if (++(*incoming_branches)[block] == 1) {
532 Instruction *last_insn = block->instructions.back(); 541 Instruction* last_insn = block->instructions.back();
533 if (BPF_CLASS(last_insn->code) == BPF_JMP) { 542 if (BPF_CLASS(last_insn->code) == BPF_JMP) {
534 ComputeIncomingBranches( 543 ComputeIncomingBranches(targets_to_blocks.find(last_insn->jt_ptr)->second,
535 targets_to_blocks.find(last_insn->jt_ptr)->second, 544 targets_to_blocks,
536 targets_to_blocks, incoming_branches); 545 incoming_branches);
537 if (BPF_OP(last_insn->code) != BPF_JA) { 546 if (BPF_OP(last_insn->code) != BPF_JA) {
538 ComputeIncomingBranches( 547 ComputeIncomingBranches(
539 targets_to_blocks.find(last_insn->jf_ptr)->second, 548 targets_to_blocks.find(last_insn->jf_ptr)->second,
540 targets_to_blocks, incoming_branches); 549 targets_to_blocks,
550 incoming_branches);
541 } 551 }
542 } else if (BPF_CLASS(last_insn->code) != BPF_RET) { 552 } else if (BPF_CLASS(last_insn->code) != BPF_RET) {
543 ComputeIncomingBranches(targets_to_blocks.find(last_insn->next)->second, 553 ComputeIncomingBranches(targets_to_blocks.find(last_insn->next)->second,
544 targets_to_blocks, incoming_branches); 554 targets_to_blocks,
555 incoming_branches);
545 } 556 }
546 } 557 }
547 } 558 }
548 559
549 void CodeGen::TopoSortBasicBlocks(BasicBlock *first_block, 560 void CodeGen::TopoSortBasicBlocks(BasicBlock* first_block,
550 const TargetsToBlocks& blocks, 561 const TargetsToBlocks& blocks,
551 BasicBlocks *basic_blocks) { 562 BasicBlocks* basic_blocks) {
552 // Textbook implementation of a toposort. We keep looking for basic blocks 563 // Textbook implementation of a toposort. We keep looking for basic blocks
553 // that don't have any incoming branches (initially, this is just the 564 // that don't have any incoming branches (initially, this is just the
554 // "first_block") and add them to the topologically sorted list of 565 // "first_block") and add them to the topologically sorted list of
555 // "basic_blocks". As we do so, we remove outgoing branches. This potentially 566 // "basic_blocks". As we do so, we remove outgoing branches. This potentially
556 // ends up making our descendants eligible for the sorted list. The 567 // ends up making our descendants eligible for the sorted list. The
557 // sorting algorithm terminates when there are no more basic blocks that have 568 // sorting algorithm terminates when there are no more basic blocks that have
558 // no incoming branches. If we didn't move all blocks from the set of 569 // no incoming branches. If we didn't move all blocks from the set of
559 // "unordered_blocks" to the sorted list of "basic_blocks", there must have 570 // "unordered_blocks" to the sorted list of "basic_blocks", there must have
560 // been a cyclic dependency. This should never happen in a BPF program, as 571 // been a cyclic dependency. This should never happen in a BPF program, as
561 // well-formed BPF programs only ever have forward branches. 572 // well-formed BPF programs only ever have forward branches.
562 IncomingBranches unordered_blocks; 573 IncomingBranches unordered_blocks;
563 ComputeIncomingBranches(first_block, blocks, &unordered_blocks); 574 ComputeIncomingBranches(first_block, blocks, &unordered_blocks);
564 575
565 std::set<BasicBlock *> heads; 576 std::set<BasicBlock*> heads;
566 for (;;) { 577 for (;;) {
567 // Move block from "unordered_blocks" to "basic_blocks". 578 // Move block from "unordered_blocks" to "basic_blocks".
568 basic_blocks->push_back(first_block); 579 basic_blocks->push_back(first_block);
569 580
570 // Inspect last instruction in the basic block. This is typically either a 581 // Inspect last instruction in the basic block. This is typically either a
571 // jump or a return statement. But it could also be a "normal" instruction 582 // jump or a return statement. But it could also be a "normal" instruction
572 // that is followed by a jump target. 583 // that is followed by a jump target.
573 Instruction *last_insn = first_block->instructions.back(); 584 Instruction* last_insn = first_block->instructions.back();
574 if (BPF_CLASS(last_insn->code) == BPF_JMP) { 585 if (BPF_CLASS(last_insn->code) == BPF_JMP) {
575 // Remove outgoing branches. This might end up moving our descendants 586 // Remove outgoing branches. This might end up moving our descendants
576 // into set of "head" nodes that no longer have any incoming branches. 587 // into set of "head" nodes that no longer have any incoming branches.
577 TargetsToBlocks::const_iterator iter; 588 TargetsToBlocks::const_iterator iter;
578 if (BPF_OP(last_insn->code) != BPF_JA) { 589 if (BPF_OP(last_insn->code) != BPF_JA) {
579 iter = blocks.find(last_insn->jf_ptr); 590 iter = blocks.find(last_insn->jf_ptr);
580 if (!--unordered_blocks[iter->second]) { 591 if (!--unordered_blocks[iter->second]) {
581 heads.insert(iter->second); 592 heads.insert(iter->second);
582 } 593 }
583 } 594 }
584 iter = blocks.find(last_insn->jt_ptr); 595 iter = blocks.find(last_insn->jt_ptr);
585 if (!--unordered_blocks[iter->second]) { 596 if (!--unordered_blocks[iter->second]) {
586 first_block = iter->second; 597 first_block = iter->second;
587 continue; 598 continue;
588 } 599 }
589 } else if (BPF_CLASS(last_insn->code) != BPF_RET) { 600 } else if (BPF_CLASS(last_insn->code) != BPF_RET) {
590 // We encountered an instruction that doesn't change code flow. Try to 601 // We encountered an instruction that doesn't change code flow. Try to
591 // pick the next "first_block" from "last_insn->next", if possible. 602 // pick the next "first_block" from "last_insn->next", if possible.
592 TargetsToBlocks::const_iterator iter; 603 TargetsToBlocks::const_iterator iter;
593 iter = blocks.find(last_insn->next); 604 iter = blocks.find(last_insn->next);
594 if (!--unordered_blocks[iter->second]) { 605 if (!--unordered_blocks[iter->second]) {
595 first_block = iter->second; 606 first_block = iter->second;
596 continue; 607 continue;
597 } else { 608 } else {
598 // Our basic block is supposed to be followed by "last_insn->next", 609 // Our basic block is supposed to be followed by "last_insn->next",
599 // but dependencies prevent this from happening. Insert a BPF_JA 610 // but dependencies prevent this from happening. Insert a BPF_JA
600 // instruction to correct the code flow. 611 // instruction to correct the code flow.
601 Instruction *ja = MakeInstruction(BPF_JMP+BPF_JA, 0, last_insn->next); 612 Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, last_insn->next);
602 first_block->instructions.push_back(ja); 613 first_block->instructions.push_back(ja);
603 last_insn->next = ja; 614 last_insn->next = ja;
604 } 615 }
605 } 616 }
606 if (heads.empty()) { 617 if (heads.empty()) {
607 if (unordered_blocks.size() != basic_blocks->size()) { 618 if (unordered_blocks.size() != basic_blocks->size()) {
608 SANDBOX_DIE("Internal compiler error; cyclic graph detected"); 619 SANDBOX_DIE("Internal compiler error; cyclic graph detected");
609 } 620 }
610 return; 621 return;
611 } 622 }
612 // Proceed by picking an arbitrary node from the set of basic blocks that 623 // Proceed by picking an arbitrary node from the set of basic blocks that
613 // do not have any incoming branches. 624 // do not have any incoming branches.
614 first_block = *heads.begin(); 625 first_block = *heads.begin();
615 heads.erase(heads.begin()); 626 heads.erase(heads.begin());
616 } 627 }
617 } 628 }
618 629
619 void CodeGen::ComputeRelativeJumps(BasicBlocks *basic_blocks, 630 void CodeGen::ComputeRelativeJumps(BasicBlocks* basic_blocks,
620 const TargetsToBlocks& targets_to_blocks) { 631 const TargetsToBlocks& targets_to_blocks) {
621 // While we previously used pointers in jt_ptr and jf_ptr to link jump 632 // While we previously used pointers in jt_ptr and jf_ptr to link jump
622 // instructions to their targets, we now convert these jumps to relative 633 // instructions to their targets, we now convert these jumps to relative
623 // jumps that are suitable for loading the BPF program into the kernel. 634 // jumps that are suitable for loading the BPF program into the kernel.
624 int offset = 0; 635 int offset = 0;
625 636
626 // Since we just completed a toposort, all jump targets are guaranteed to 637 // Since we just completed a toposort, all jump targets are guaranteed to
627 // go forward. This means, iterating over the basic blocks in reverse makes 638 // go forward. This means, iterating over the basic blocks in reverse makes
628 // it trivial to compute the correct offsets. 639 // it trivial to compute the correct offsets.
629 BasicBlock *bb = NULL; 640 BasicBlock* bb = NULL;
630 BasicBlock *last_bb = NULL; 641 BasicBlock* last_bb = NULL;
631 for (BasicBlocks::reverse_iterator iter = basic_blocks->rbegin(); 642 for (BasicBlocks::reverse_iterator iter = basic_blocks->rbegin();
632 iter != basic_blocks->rend(); 643 iter != basic_blocks->rend();
633 ++iter) { 644 ++iter) {
634 last_bb = bb; 645 last_bb = bb;
635 bb = *iter; 646 bb = *iter;
636 Instruction *insn = bb->instructions.back(); 647 Instruction* insn = bb->instructions.back();
637 if (BPF_CLASS(insn->code) == BPF_JMP) { 648 if (BPF_CLASS(insn->code) == BPF_JMP) {
638 // Basic block ended in a jump instruction. We can now compute the 649 // Basic block ended in a jump instruction. We can now compute the
639 // appropriate offsets. 650 // appropriate offsets.
640 if (BPF_OP(insn->code) == BPF_JA) { 651 if (BPF_OP(insn->code) == BPF_JA) {
641 // "Always" jumps use the 32bit "k" field for the offset, instead 652 // "Always" jumps use the 32bit "k" field for the offset, instead
642 // of the 8bit "jt" and "jf" fields. 653 // of the 8bit "jt" and "jf" fields.
643 int jmp = 654 int jmp = offset - targets_to_blocks.find(insn->jt_ptr)->second->offset;
644 offset - targets_to_blocks.find(insn->jt_ptr)->second->offset; 655 insn->k = jmp;
645 insn->k = jmp;
646 insn->jt = insn->jf = 0; 656 insn->jt = insn->jf = 0;
647 } else { 657 } else {
648 // The offset computations for conditional jumps are just the same 658 // The offset computations for conditional jumps are just the same
649 // as for "always" jumps. 659 // as for "always" jumps.
650 int jt = offset-targets_to_blocks.find(insn->jt_ptr)->second->offset; 660 int jt = offset - targets_to_blocks.find(insn->jt_ptr)->second->offset;
651 int jf = offset-targets_to_blocks.find(insn->jf_ptr)->second->offset; 661 int jf = offset - targets_to_blocks.find(insn->jf_ptr)->second->offset;
652 662
653 // There is an added complication, because conditional relative jumps 663 // There is an added complication, because conditional relative jumps
654 // can only jump at most 255 instructions forward. If we have to jump 664 // can only jump at most 255 instructions forward. If we have to jump
655 // further, insert an extra "always" jump. 665 // further, insert an extra "always" jump.
656 Instructions::size_type jmp = bb->instructions.size(); 666 Instructions::size_type jmp = bb->instructions.size();
657 if (jt > 255 || (jt == 255 && jf > 255)) { 667 if (jt > 255 || (jt == 255 && jf > 255)) {
658 Instruction *ja = MakeInstruction(BPF_JMP+BPF_JA, 0, insn->jt_ptr); 668 Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, insn->jt_ptr);
659 bb->instructions.push_back(ja); 669 bb->instructions.push_back(ja);
660 ja->k = jt; 670 ja->k = jt;
661 ja->jt = ja->jf = 0; 671 ja->jt = ja->jf = 0;
662 672
663 // The newly inserted "always" jump, of course, requires us to adjust 673 // The newly inserted "always" jump, of course, requires us to adjust
664 // the jump targets in the original conditional jump. 674 // the jump targets in the original conditional jump.
665 jt = 0; 675 jt = 0;
666 ++jf; 676 ++jf;
667 } 677 }
668 if (jf > 255) { 678 if (jf > 255) {
669 Instruction *ja = MakeInstruction(BPF_JMP+BPF_JA, 0, insn->jf_ptr); 679 Instruction* ja = MakeInstruction(BPF_JMP + BPF_JA, 0, insn->jf_ptr);
670 bb->instructions.insert(bb->instructions.begin() + jmp, ja); 680 bb->instructions.insert(bb->instructions.begin() + jmp, ja);
671 ja->k = jf; 681 ja->k = jf;
672 ja->jt = ja->jf = 0; 682 ja->jt = ja->jf = 0;
673 683
674 // Again, we have to adjust the jump targets in the original 684 // Again, we have to adjust the jump targets in the original
675 // conditional jump. 685 // conditional jump.
676 ++jt; 686 ++jt;
677 jf = 0; 687 jf = 0;
678 } 688 }
679 689
680 // Now we can finally set the relative jump targets in the conditional 690 // Now we can finally set the relative jump targets in the conditional
681 // jump instruction. Afterwards, we must no longer access the jt_ptr 691 // jump instruction. Afterwards, we must no longer access the jt_ptr
682 // and jf_ptr fields. 692 // and jf_ptr fields.
683 insn->jt = jt; 693 insn->jt = jt;
684 insn->jf = jf; 694 insn->jf = jf;
685 } 695 }
686 } else if (BPF_CLASS(insn->code) != BPF_RET && 696 } else if (BPF_CLASS(insn->code) != BPF_RET &&
687 targets_to_blocks.find(insn->next)->second != last_bb) { 697 targets_to_blocks.find(insn->next)->second != last_bb) {
688 SANDBOX_DIE("Internal compiler error; invalid basic block encountered"); 698 SANDBOX_DIE("Internal compiler error; invalid basic block encountered");
689 } 699 }
690 700
691 // Proceed to next basic block. 701 // Proceed to next basic block.
692 offset += bb->instructions.size(); 702 offset += bb->instructions.size();
693 bb->offset = offset; 703 bb->offset = offset;
694 } 704 }
695 return; 705 return;
696 } 706 }
697 707
698 void CodeGen::ConcatenateBasicBlocks(const BasicBlocks& basic_blocks, 708 void CodeGen::ConcatenateBasicBlocks(const BasicBlocks& basic_blocks,
699 Sandbox::Program *program) { 709 Sandbox::Program* program) {
700 // Our basic blocks have been sorted and relative jump offsets have been 710 // Our basic blocks have been sorted and relative jump offsets have been
701 // computed. The last remaining step is for all the instructions in our 711 // computed. The last remaining step is for all the instructions in our
702 // basic blocks to be concatenated into a BPF program. 712 // basic blocks to be concatenated into a BPF program.
703 program->clear(); 713 program->clear();
704 for (BasicBlocks::const_iterator bb_iter = basic_blocks.begin(); 714 for (BasicBlocks::const_iterator bb_iter = basic_blocks.begin();
705 bb_iter != basic_blocks.end(); 715 bb_iter != basic_blocks.end();
706 ++bb_iter) { 716 ++bb_iter) {
707 const BasicBlock& bb = **bb_iter; 717 const BasicBlock& bb = **bb_iter;
708 for (Instructions::const_iterator insn_iter = bb.instructions.begin(); 718 for (Instructions::const_iterator insn_iter = bb.instructions.begin();
709 insn_iter != bb.instructions.end(); 719 insn_iter != bb.instructions.end();
710 ++insn_iter) { 720 ++insn_iter) {
711 const Instruction& insn = **insn_iter; 721 const Instruction& insn = **insn_iter;
712 program->push_back( 722 program->push_back(
713 (struct sock_filter) { insn.code, insn.jt, insn.jf, insn.k }); 723 (struct sock_filter) {insn.code, insn.jt, insn.jf, insn.k});
714 } 724 }
715 } 725 }
716 return; 726 return;
717 } 727 }
718 728
719 void CodeGen::Compile(Instruction *instructions, Sandbox::Program *program) { 729 void CodeGen::Compile(Instruction* instructions, Sandbox::Program* program) {
720 if (compiled_) { 730 if (compiled_) {
721 SANDBOX_DIE("Cannot call Compile() multiple times. Create a new code " 731 SANDBOX_DIE(
722 "generator instead"); 732 "Cannot call Compile() multiple times. Create a new code "
733 "generator instead");
723 } 734 }
724 compiled_ = true; 735 compiled_ = true;
725 736
726 BranchTargets branch_targets; 737 BranchTargets branch_targets;
727 FindBranchTargets(*instructions, &branch_targets); 738 FindBranchTargets(*instructions, &branch_targets);
728 TargetsToBlocks all_blocks; 739 TargetsToBlocks all_blocks;
729 BasicBlock *first_block = 740 BasicBlock* first_block =
730 CutGraphIntoBasicBlocks(instructions, branch_targets, &all_blocks); 741 CutGraphIntoBasicBlocks(instructions, branch_targets, &all_blocks);
731 MergeTails(&all_blocks); 742 MergeTails(&all_blocks);
732 BasicBlocks basic_blocks; 743 BasicBlocks basic_blocks;
733 TopoSortBasicBlocks(first_block, all_blocks, &basic_blocks); 744 TopoSortBasicBlocks(first_block, all_blocks, &basic_blocks);
734 ComputeRelativeJumps(&basic_blocks, all_blocks); 745 ComputeRelativeJumps(&basic_blocks, all_blocks);
735 ConcatenateBasicBlocks(basic_blocks, program); 746 ConcatenateBasicBlocks(basic_blocks, program);
736 return; 747 return;
737 } 748 }
738 749
739 } // namespace 750 } // namespace
OLDNEW
« no previous file with comments | « sandbox/linux/seccomp-bpf/codegen.h ('k') | sandbox/linux/seccomp-bpf/die.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698