1 /* | |
2 * Copyright (c) 2012 The Native Client Authors. All rights reserved. | |
3 * Use of this source code is governed by a BSD-style license that can be | |
4 * found in the LICENSE file. | |
5 */ | |
6 | |
7 /* | |
8 * nc_jumps.c - Validate where valid jumps can occur. | |
9 */ | |
10 | |
11 #include <assert.h> | |
12 #include <stdlib.h> | |
13 | |
14 #include "native_client/src/trusted/validator/x86/ncval_reg_sfi/nc_jumps.h" | |
15 | |
16 #include "native_client/src/shared/platform/nacl_log.h" | |
17 #include "native_client/src/trusted/validator/x86/decoder/nc_inst_state_internal
.h" | |
18 #include "native_client/src/trusted/validator/x86/decoder/nc_inst_trans.h" | |
19 #include "native_client/src/trusted/validator/x86/ncval_reg_sfi/address_sets.h" | |
20 #include "native_client/src/trusted/validator/x86/ncval_reg_sfi/ncvalidate_iter.
h" | |
21 #include "native_client/src/trusted/validator/x86/ncval_reg_sfi/ncvalidate_iter_
internal.h" | |
22 | |
23 /* To turn on debugging of instruction decoding, change value of | |
24 * DEBUGGING to 1. | |
25 */ | |
26 #define DEBUGGING 0 | |
27 | |
28 #include "native_client/src/shared/utils/debugging.h" | |
29 | |
30 #include "native_client/src/trusted/validator/x86/decoder/ncop_exps_inl.c" | |
31 #include "native_client/src/trusted/validator/x86/decoder/nc_inst_iter_inl.c" | |
32 #include "native_client/src/trusted/validator/x86/ncval_reg_sfi/address_sets_inl
.c" | |
33 | |
34 Bool NACL_FLAGS_identity_mask = FALSE; | |
35 | |
36 static INLINE uint8_t NaClGetJumpMask(NaClValidatorState* vstate) { | |
37 return NACL_FLAGS_identity_mask | |
38 ? (uint8_t) 0xFF | |
39 : (uint8_t) (~vstate->bundle_mask); | |
40 } | |
41 | |
42 /* Generates a jump validator. */ | |
43 Bool NaClJumpValidatorInitialize(NaClValidatorState* vstate) { | |
44 NaClJumpSets* jump_sets = &vstate->jump_sets; | |
45 jump_sets->actual_targets = NaClAddressSetCreate(vstate->codesize); | |
46 jump_sets->possible_targets = NaClAddressSetCreate(vstate->codesize); | |
47 jump_sets->removed_targets = NaClAddressSetCreate(vstate->codesize); | |
48 if (jump_sets->actual_targets == NULL || | |
49 jump_sets->possible_targets == NULL || | |
50 jump_sets->removed_targets == NULL) { | |
51 NaClValidatorMessage(LOG_ERROR, vstate, "unable to allocate jump sets"); | |
52 NaClJumpValidatorCleanUp(vstate); | |
53 return FALSE; | |
54 } | |
55 jump_sets->set_array_size = NaClAddressSetArraySize(vstate->codesize); | |
56 return TRUE; | |
57 } | |
58 | |
/* Record that there is an explicit jump from the from_address to the
 * to_address, for the validation defined by the validator state.
 * Targets inside the code segment are recorded in actual_targets and
 * cross-checked later (in NaClJumpValidatorSummarize) against the set
 * of instruction boundaries. Targets outside the segment are accepted
 * only when bundle-aligned, or when the instruction is unchanged under
 * dynamic code replacement.
 * Parameters:
 *   vstate - The state of the validator.
 *   inst - The instruction that does the jump.
 *   jump_offset - The jump offset, relative to the end of the instruction.
 */
static void NaClAddJumpToJumpSets(NaClValidatorState* vstate,
                                  NaClInstState* inst,
                                  NaClPcNumber jump_offset) {
  /* Jump offsets are relative to the address of the next instruction. */
  NaClPcAddress to_address = inst->inst_addr + inst->bytes.length + jump_offset;
  /* If the address is in the code segment, assume good (unless we later find it
   * jumping into a pseudo instruction). Otherwise, only allow if 0 mod 32.
   */
  DEBUG(NaClLog(LOG_INFO, "Add jump to jump sets: %"
                NACL_PRIxNaClPcAddress" -> %"NACL_PRIxNaClPcAddress"\n",
                inst->inst_addr, to_address));
  if (to_address < vstate->codesize) {
    /* Remember address for checking later. */
    DEBUG(NaClLog(LOG_INFO, "Add jump to target: %"NACL_PRIxNaClPcAddress
                  " -> %"NACL_PRIxNaClPcAddress"\n",
                  inst->inst_addr, to_address));
    NaClAddressSetAddInline(vstate->jump_sets.actual_targets,
                            to_address, vstate);
  } else if ((to_address & vstate->bundle_mask) == 0) {
    /* Allow bundle-aligned jump. If the jump overflows or underflows the
     * 4GB untrusted address space it will hit the guard regions. The largest
     * potential jump offset is +/-2GB. We could allow direct jumps only within
     * the 4GB untrusted address space, but there is no need for this
     * restriction and it would make validation judgements position-dependent.
     */
  } else if (inst->unchanged) {
    /* If we are replacing this instruction during dynamic code modification
     * and it has not changed, the jump target must be valid because the
     * instruction has been previously validated. However, we may be only
     * replacing a subsection of the code segment and therefore may not have
     * information about instruction boundaries outside of the code being
     * replaced. Therefore, we allow unaligned direct jumps outside of the code
     * being validated if and only if the instruction is unchanged.
     * If dynamic code replacement is not being performed, inst->unchanged
     * should always be false.
     */
  } else {
    /* Out-of-segment, unaligned, and not exempted: report unless running
     * in the (deliberately unsafe) single-instruction testing mode.
     */
    if (!NACL_FLAGS_unsafe_single_inst_mode) {
      NaClValidatorInstMessage(LOG_ERROR, vstate, inst,
                               "Instruction jumps to bad address\n");
    }
  }
}
108 | |
109 static Bool NaClExtractBinaryOperandIndices( | |
110 NaClInstState* inst, | |
111 int* op_1, | |
112 int* op_2) { | |
113 uint32_t index; | |
114 NaClExpVector* nodes = NaClInstStateExpVector(inst); | |
115 *op_1 = -1; | |
116 *op_2 = -1; | |
117 | |
118 for (index = 0; index < nodes->number_expr_nodes; ++index) { | |
119 if (OperandReference == nodes->node[index].kind) { | |
120 if (-1 == *op_1) { | |
121 *op_1 = index + 1; | |
122 } else { | |
123 *op_2 = index + 1; | |
124 return TRUE; | |
125 } | |
126 } | |
127 } | |
128 return FALSE; | |
129 } | |
130 | |
/* Returns the 32-bit register for instructions of the form
 *
 *    and %reg32, MASK
 *
 * where MASK is all 1's except for the alignment mask bits, which must be zero.
 *
 * It is assumed that opcode 0x83 is used for the AND operation, and hence, the
 * mask is a single byte.
 *
 * The instruction examined is "distance" instructions back from the
 * current instruction of the validator's iterator.
 *
 * Returns RegUnknown if the instruction doesn't match the form listed above.
 */
static NaClOpKind NaClGetAndMaskReg32(NaClValidatorState* vstate,
                                      size_t distance) {
  NaClInstState* state;
  const NaClInst* inst;
  int op_1, op_2;
  NaClExpVector* nodes;
  NaClExp* node;
  uint8_t mask;
  NaClOpKind reg32;
  NaClInstIter* iter = vstate->cur_iter;

  /* Get the corresponding and instruction. */
  if (!NaClInstIterHasLookbackStateInline(iter, distance)) return RegUnknown;
  state = NaClInstIterGetLookbackStateInline(iter, distance);
  inst = NaClInstStateInst(state);
  /* Must be an AND encoded with opcode byte 0x83 (the first byte after
   * any prefixes), i.e. the immediate-byte form.
   */
  if ((InstAnd != inst->name) ||
      (state->num_opcode_bytes == 0) ||
      (0x83 != state->bytes.byte[state->num_prefix_bytes])) return RegUnknown;
  DEBUG(NaClLog(LOG_INFO, "inst(%d): and mask: ", (int) distance);
        NaClInstStateInstPrint(NaClLogGetGio(), state));

  /* Extract the values of the two operands for the and. */
  if (!NaClExtractBinaryOperandIndices(state, &op_1, &op_2)) return RegUnknown;

  /* Extract the destination register of the and. */
  nodes = NaClInstStateExpVector(state);
  node = &nodes->node[op_1];
  if (ExprRegister != node->kind) return RegUnknown;

  reg32 = NaClGetExpRegisterInline(node);
  DEBUG(NaClLog(LOG_INFO, "and mask reg = %s\n", NaClOpKindName(reg32)));

  /* Check that the mask is ok. */
  mask = NaClGetJumpMask(vstate);
  DEBUG(NaClLog(LOG_INFO, "mask = %"NACL_PRIx8"\n", mask));

  assert(0xf0 == mask || 0xe0 == mask); /* alignment must be either 16 or 32. */
  node = &nodes->node[op_2];
  /* Technically the operand is a signed value, but "mask" has not been sign
   * extended, so treat the value as an unsigned byte.
   */
  if (ExprConstant != node->kind || mask != NaClGetExprUnsignedValue(node))
    return RegUnknown;
  DEBUG(NaClLog(LOG_INFO, "is mask constant\n"));

  return reg32;
}
189 | |
190 /* Returns true if the 64-bit register reg64 set by an instruction of the form | |
191 * | |
192 * add %reg64 %RBASE | |
193 * | |
194 * The instruction checked is the "distance" instruction from the current | |
195 * instruction being looked at by the specified iterator. | |
196 */ | |
197 static Bool NaClIsAddRbaseToReg64(NaClValidatorState* vstate, | |
198 size_t distance, | |
199 NaClOpKind reg64) { | |
200 NaClInstState* state; | |
201 const NaClInst* inst; | |
202 int op_1, op_2; | |
203 NaClExpVector* nodes; | |
204 NaClExp* node; | |
205 NaClOpKind reg; | |
206 NaClInstIter* iter = vstate->cur_iter; | |
207 | |
208 /* Get the corresponding instruction. */ | |
209 if (!NaClInstIterHasLookbackStateInline(iter, distance)) return FALSE; | |
210 state = NaClInstIterGetLookbackStateInline(iter, distance); | |
211 inst = NaClInstStateInst(state); | |
212 if (InstAdd != inst->name) return FALSE; | |
213 DEBUG(NaClLog(LOG_INFO, "inst(%d): add rbase: ", (int) distance); | |
214 NaClInstStateInstPrint(NaClLogGetGio(), state)); | |
215 | |
216 /* Extract the values of the two operands for the and. */ | |
217 if (!NaClExtractBinaryOperandIndices(state, &op_1, &op_2)) return FALSE; | |
218 | |
219 /* Extract the destination register of the and. */ | |
220 nodes = NaClInstStateExpVector(state); | |
221 node = &nodes->node[op_1]; | |
222 if (ExprRegister != node->kind) return FALSE; | |
223 | |
224 /* Check that destination register matches wanted register. */ | |
225 reg = NaClGetExpRegisterInline(node); | |
226 if (reg != reg64) return FALSE; | |
227 | |
228 /* Check that source register is the base register. */ | |
229 return NaClGetExpVectorRegister(nodes, op_2) == vstate->base_register; | |
230 } | |
231 | |
232 /* Checks if an indirect jump (in 64-bit mode) is native client compliant. | |
233 * | |
234 * Expects pattern: | |
235 * | |
236 * and %REG32, MASK | |
237 * add %REG64, %RBASE | |
238 * jmp %REG64 | |
239 * | |
240 * where MASK is all 1/s except for the alignment mask bits, which must be zero. | |
241 * | |
242 * REG32 is the corresponding 32-bit register that whose value will get zero | |
243 * extended by the AND operation into the corresponding 64-bit register REG64. | |
244 * | |
245 * It is assumed that opcode 0x83 is used for the AND operation, and hence, the | |
246 * mask is a single byte. | |
247 * | |
248 * Note: applies to all kinds of jumps and calls. | |
249 * | |
250 * Parameters: | |
251 * vstate - The state of the validator. | |
252 * reg - The register used in the jump instruction. | |
253 */ | |
254 static void NaClAddRegisterJumpIndirect64(NaClValidatorState* vstate, | |
255 NaClExp* reg) { | |
256 NaClOpKind jump_reg, and_reg32, and_reg64; | |
257 | |
258 /* Do the following block exactly once. Use loop so that "break" can | |
259 * be used for premature exit of block. | |
260 */ | |
261 do { | |
262 /* Check that jump register is 64-bit. */ | |
263 if (!NaClHasBit(reg->flags, NACL_EFLAG(ExprSize64))) break; | |
264 jump_reg = NaClGetExpRegisterInline(reg); | |
265 if (RegUnknown == jump_reg) break; | |
266 DEBUG(NaClLog(LOG_INFO, "checking indirect jump: "); | |
267 NaClInstStateInstPrint(NaClLogGetGio(), vstate->cur_inst_state); | |
268 gprintf(NaClLogGetGio(), "jump_reg = %s\n", | |
269 NaClOpKindName(jump_reg))); | |
270 | |
271 /* Check that sequence begins with an appropriate and instruction. */ | |
272 and_reg32 = NaClGetAndMaskReg32(vstate, 2); | |
273 if (RegUnknown == and_reg32) break; | |
274 | |
275 /* Get corresponding 64-bit register for 32-bit result of 'and', | |
276 * and make sure it matches the jump register. | |
277 */ | |
278 and_reg64 = NaClGet64For32BitReg(and_reg32); | |
279 if (and_reg64 != jump_reg) break; | |
280 | |
281 /* Check that the middle instruction is an appropriate add instruction. */ | |
282 if (!NaClIsAddRbaseToReg64(vstate, 1, and_reg64)) break; | |
283 | |
284 /* If reached, indirect jump is properly masked. */ | |
285 DEBUG(NaClLog(LOG_INFO, "Protect indirect jump instructions\n")); | |
286 NaClMarkInstructionJumpIllegal( | |
287 vstate, NaClInstIterGetLookbackStateInline(vstate->cur_iter, 1)); | |
288 NaClMarkInstructionJumpIllegal(vstate, vstate->cur_inst_state); | |
289 return; | |
290 } while(0); | |
291 | |
292 /* If reached, mask was not found. */ | |
293 NaClValidatorInstMessage(LOG_ERROR, vstate, vstate->cur_inst_state, | |
294 "Invalid indirect jump\n"); | |
295 } | |
296 | |
297 /* Checks if an indirect jump (in 32-bit mode) is native client compliant. | |
298 * | |
299 * Expects pattern: | |
300 * and %REG, MASK | |
301 * jmp %REG | |
302 * | |
303 * where the MASK is all 1's except for the alignment mask bits, which must | |
304 * be zero. | |
305 * | |
306 * It is assumed that opcode 0x83 is used for the AND operation, and hence, the | |
307 * mask is a single byte. | |
308 * | |
309 * Note: applies to all kinds of jumps and calls. | |
310 * | |
311 * Parameters: | |
312 * state - The state of the validator. | |
313 * reg - The register used in the jump instruction. | |
314 */ | |
315 static void NaClAddRegisterJumpIndirect32(NaClValidatorState* vstate, | |
316 NaClExp* reg) { | |
317 NaClOpKind jump_reg, and_reg; | |
318 | |
319 /* Do the following block exactly once. Use loop so that "break" can | |
320 * be used for premature exit of block. | |
321 */ | |
322 do { | |
323 /* Check that jump register is 32-bit. */ | |
324 if (!NaClHasBit(reg->flags, NACL_EFLAG(ExprSize32))) break; | |
325 jump_reg = NaClGetExpRegisterInline(reg); | |
326 if (RegUnknown == jump_reg) break; | |
327 DEBUG(NaClLog(LOG_INFO, "checking indirect jump: "); | |
328 NaClInstStateInstPrint(NaClLogGetGio(), vstate->cur_inst_state); | |
329 gprintf(NaClLogGetGio(), "jump_reg = %s\n", | |
330 NaClOpKindName(jump_reg))); | |
331 | |
332 /* Check that sequence begins with an appropriate and instruction. */ | |
333 and_reg = NaClGetAndMaskReg32(vstate, 1); | |
334 if (jump_reg != and_reg) break; | |
335 | |
336 /* If reached, indirect jump is properly masked. */ | |
337 DEBUG(NaClLog(LOG_INFO, "Protect register jump indirect\n")); | |
338 NaClMarkInstructionJumpIllegal(vstate, vstate->cur_inst_state); | |
339 return; | |
340 } while(0); | |
341 | |
342 /* If reached, mask was not found. */ | |
343 NaClValidatorInstMessage(LOG_ERROR, vstate, vstate->cur_inst_state, | |
344 "Invalid indirect jump\n"); | |
345 } | |
346 | |
347 /* Given a jump statement, add the corresponding (explicit) jump value | |
348 * to the set of actual jump targets. | |
349 * Parameters: | |
350 * vstate - The state of the validator. | |
351 */ | |
352 static void NaClAddExprJumpTarget(NaClValidatorState* vstate) { | |
353 uint32_t i; | |
354 NaClInstState* inst_state = vstate->cur_inst_state; | |
355 NaClExpVector* vector = vstate->cur_inst_vector; | |
356 DEBUG(NaClLog(LOG_INFO, "jump checking: "); | |
357 NaClInstStateInstPrint(NaClLogGetGio(), inst_state)); | |
358 for (i = 0; i < vector->number_expr_nodes; ++i) { | |
359 NaClExp* node = &vector->node[i]; | |
360 if (!NaClHasBit(node->flags, NACL_EFLAG(ExprJumpTarget))) | |
361 continue; | |
362 switch (node->kind) { | |
363 case ExprRegister: | |
364 if (64 == NACL_TARGET_SUBARCH) { | |
365 NaClAddRegisterJumpIndirect64(vstate, node); | |
366 } else { | |
367 NaClAddRegisterJumpIndirect32(vstate, node); | |
368 } | |
369 break; | |
370 case ExprConstant: | |
371 /* Direct jump. */ | |
372 NaClAddJumpToJumpSets(vstate, inst_state, | |
373 (NaClPcNumber) NaClGetExprSignedValue(node)); | |
374 break; | |
375 default: | |
376 NaClValidatorInstMessage( | |
377 LOG_ERROR, vstate, inst_state, | |
378 "Jump not native client compliant\n"); | |
379 } | |
380 } | |
381 } | |
382 | |
/* Given an instruction corresponding to a call, validate that the generated
 * return address is safe.
 * Parameters:
 *   vstate - The state of the validator.
 */
static void NaClValidateCallAlignment(NaClValidatorState* vstate) {
  /* The return is safe only if it begins at an aligned address (since
   * return instructions are not explicit jumps).
   */
  NaClPcAddress next_addr = vstate->cur_inst_state->inst_addr
      + NaClInstStateLength(vstate->cur_inst_state);
  if (next_addr & vstate->bundle_mask) {
    /* The return address is not bundle aligned. Compute the user-visible
     * (printable) form of that address for the diagnostic below.
     */
    NaClPcAddress printable_next_addr =
        NaClInstStatePrintableAddress(vstate->cur_inst_state) +
        NaClInstStateLength(vstate->cur_inst_state);
    /* NOTE: Previously the validator recorded an error for call instructions
     * that were not aligned against the end of a bundle, as these, while
     * safe, are not correct with the current code generation idioms.
     * This #if defined(ERROR_ON_CALL_BUNDLE_ALIGNMENT) was added to allow
     * experimentation with different call/return idioms.
     */
    if (!NACL_FLAGS_unsafe_single_inst_mode) {
      NaClValidatorInstMessage(
#if defined(ERROR_ON_CALL_BUNDLE_ALIGNMENT)
          LOG_ERROR,
#else
          LOG_WARNING,
#endif
          vstate, vstate->cur_inst_state,
          "Bad call alignment, return pc = %"NACL_PRIxNaClPcAddress"\n",
          printable_next_addr);
    }
  }
}
417 | |
418 /* TODO(ncbray) prove that all instructions the decoder generates are in the | |
419 * code segment and remove this check. | |
420 */ | |
421 static INLINE Bool NaClInstructionInCodeSegment(NaClValidatorState* vstate, | |
422 NaClInstState *inst) { | |
423 return inst->inst_addr < vstate->codesize; | |
424 } | |
425 | |
426 /* Record that the given address of the given instruction is the beginning of | |
427 * a disassembled instruction. | |
428 * Parameters: | |
429 * vstate - The state of the validator. | |
430 * inst - The instruction. | |
431 */ | |
432 static void NaClRememberInstructionBoundary(NaClValidatorState* vstate, | |
433 NaClInstState* inst) { | |
434 if (!NaClInstructionInCodeSegment(vstate, inst)) { | |
435 NaClValidatorInstMessage(LOG_ERROR, vstate, inst, | |
436 "Instruction pc out of range\n"); | |
437 } else { | |
438 DEBUG(NaClLog(LOG_INFO, | |
439 "Add possible jump address: %"NACL_PRIxNaClPcAddress"\n", | |
440 inst->inst_addr)); | |
441 NaClAddressSetAddInline(vstate->jump_sets.possible_targets, inst->inst_addr, | |
442 vstate); | |
443 } | |
444 } | |
445 | |
/* Validator action that only records the current instruction's boundary
 * (no jump-target checking).
 */
void NaClJumpValidatorRememberIpOnly(NaClValidatorState* vstate) {
  NaClRememberInstructionBoundary(vstate, vstate->cur_inst_state);
}
449 | |
450 void NaClJumpValidator(NaClValidatorState* vstate) { | |
451 NaClRememberInstructionBoundary(vstate, vstate->cur_inst_state); | |
452 if (vstate->cur_inst->flags & | |
453 (NACL_IFLAG(JumpInstruction) | NACL_IFLAG(ConditionalJump))) { | |
454 NaClAddExprJumpTarget(vstate); | |
455 if (vstate->cur_inst->name == InstCall) { | |
456 NaClValidateCallAlignment(vstate); | |
457 } | |
458 } | |
459 } | |
460 | |
461 /* Returns true if the given address corresponds to the beginning | |
462 * of an atomic sequence of instructions, and hence can be branched to. | |
463 */ | |
464 static Bool IsNaClReachableAddress(NaClValidatorState* vstate, | |
465 NaClPcAddress addr) { | |
466 DEBUG(NaClLog(LOG_INFO, "possible contains: %d\n", | |
467 (int) NaClAddressSetContains(vstate->jump_sets.possible_targets, | |
468 addr, vstate))); | |
469 DEBUG(NaClLog(LOG_INFO, "removed contains: %d\n", | |
470 (int) NaClAddressSetContains(vstate->jump_sets.removed_targets, | |
471 addr, vstate))); | |
472 return NaClAddressSetContains(vstate->jump_sets.possible_targets, | |
473 addr, vstate) && | |
474 !NaClAddressSetContains(vstate->jump_sets.removed_targets, addr, vstate); | |
475 } | |
476 | |
/* Final pass over the collected jump sets: verifies that every explicit
 * jump target is a reachable instruction boundary, and that every bundle
 * boundary in the code segment is reachable.
 */
void NaClJumpValidatorSummarize(NaClValidatorState* vstate) {
  /* Check that any explicit jump is to a possible (atomic) sequence
   * of disassembled instructions.
   */
  NaClJumpSets* jump_sets;
  NaClPcAddress addr;
  size_t i;
  if (vstate->quit) return;
  jump_sets = &vstate->jump_sets;
  NaClValidatorMessage(
      LOG_INFO, vstate,
      "Checking jump targets: %"NACL_PRIxNaClPcAddress
      " to %"NACL_PRIxNaClPcAddress"\n",
      vstate->vbase, vstate->vbase + vstate->codesize);

  /* (Low level) Walk the collected sets to find address that correspond
   * to branches into an atomic sequence of instructions. Each byte of a
   * set covers 8 consecutive addresses, so whole bytes are compared first
   * and individual bits only examined when a problem byte is found.
   */
  for (i = 0; i < jump_sets->set_array_size; ++i) {
    /* A problem bit is an actual target that is either not an
     * instruction boundary or was removed (interior of an atomic
     * sequence).
     */
    uint8_t problem = jump_sets->actual_targets[i] &
        (~jump_sets->possible_targets[i] |
         jump_sets->removed_targets[i]);
    if (problem) {
      /* Some bit in this range is a problem, so we will convert back
       * to code like the above and test each bit separately.
       */
      NaClPcAddress j;
      NaClPcAddress base = (i << 3);  /* first address covered by byte i. */
      for (j = 0; j < 8; ++j) {
        addr = base + j;
        if (addr < vstate->codesize) {
          if (NaClAddressSetContains(jump_sets->actual_targets, addr, vstate)) {
            DEBUG(NaClLog(LOG_INFO,
                          "Checking jump address: %"NACL_PRIxNaClPcAddress"\n",
                          addr));
            if (!IsNaClReachableAddress(vstate, addr)) {
              NaClValidatorPcAddressMessage(LOG_ERROR, vstate, addr,
                                            "Bad jump target\n");
            }
          }
        }
      }
    }
  }

  /* Check that all block boundaries are accessable at an aligned address. */
  NaClValidatorMessage(
      LOG_INFO, vstate, "Checking that basic blocks are aligned\n");
  if (vstate->vbase & vstate->bundle_mask) {
    NaClValidatorMessage(LOG_ERROR, vstate,
                         "Code segment starts at 0x%"NACL_PRIxNaClPcAddress", "
                         "which isn't aligned properly.\n",
                         vstate->vbase);
  } else {
    /* Every bundle start must be a reachable instruction boundary. */
    for (addr = 0; addr < vstate->codesize; addr += vstate->bundle_size) {
      DEBUG(NaClLog(LOG_INFO,
                    "Checking block address: %"NACL_PRIxNaClPcAddress"\n",
                    addr));
      if (!IsNaClReachableAddress(vstate, addr)) {
        NaClValidatorPcAddressMessage(LOG_ERROR, vstate, addr,
                                      "Bad basic block alignment.\n");
      }
    }
  }
}
542 | |
543 void NaClJumpValidatorCleanUp(NaClValidatorState* vstate) { | |
544 if (NULL != vstate) { | |
545 NaClJumpSets* jump_sets = &vstate->jump_sets; | |
546 NaClAddressSetDestroy(jump_sets->actual_targets); | |
547 NaClAddressSetDestroy(jump_sets->possible_targets); | |
548 NaClAddressSetDestroy(jump_sets->removed_targets); | |
549 jump_sets->actual_targets = NULL; | |
550 jump_sets->possible_targets = NULL; | |
551 jump_sets->removed_targets = NULL; | |
552 } | |
553 } | |
554 | |
555 static INLINE void NaClMarkInstructionJumpIllegalInline( | |
556 struct NaClValidatorState* vstate, | |
557 struct NaClInstState* inst) { | |
558 if (!NaClInstructionInCodeSegment(vstate, inst)) { | |
559 /* ERROR instruction out of range. | |
560 * Note: Not reported here, because this will already be reported by | |
561 * the call to NaClRememberIp in JumpValidator. | |
562 */ | |
563 } else { | |
564 DEBUG(NaClLog(LOG_INFO, | |
565 "Mark instruction as jump illegal: %"NACL_PRIxNaClPcAddress | |
566 "\n", | |
567 inst->inst_addr)); | |
568 NaClAddressSetAddInline(vstate->jump_sets.removed_targets, inst->inst_addr, | |
569 vstate); | |
570 } | |
571 } | |
572 | |
/* Public (out-of-line) wrapper for NaClMarkInstructionJumpIllegalInline. */
void NaClMarkInstructionJumpIllegal(struct NaClValidatorState* vstate,
                                    struct NaClInstState* inst) {
  NaClMarkInstructionJumpIllegalInline(vstate, inst);
}
577 | |
578 void NaClMarkInstructionsJumpRangeIllegal(struct NaClValidatorState* vstate, | |
579 int distance) { | |
580 int i; | |
581 for (i = 0; i < distance; i++) { | |
582 struct NaClInstState* inst = | |
583 NaClInstIterGetLookbackStateInline(vstate->cur_iter, i); | |
584 NaClMarkInstructionJumpIllegalInline(vstate, inst); | |
585 } | |
586 } | |
587 | |
588 void NaClMarkInstructionJumpIllegalLookback( | |
589 struct NaClInstIter* iter, | |
590 struct NaClValidatorState* state, | |
591 size_t n) { | |
592 NaClMarkInstructionJumpIllegal( | |
593 state, | |
594 (n == 0) | |
595 ? state->cur_inst_state | |
596 : NaClInstIterGetLookbackStateInline(iter, n)); | |
597 } | |