Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(4)

Side by Side Diff: src/trusted/validator_arm/validator.cc

Issue 7799013: Initial Thumb2 Sandbox (naclrev 6680) Base URL: svn://svn.chromium.org/native_client/trunk/src/native_client
Patch Set: fix comma Created 9 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright 2009 The Native Client Authors. All rights reserved. 2 * Copyright (c) 2011 The Native Client Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can 3 * Use of this source code is governed by a BSD-style license that can
4 * be found in the LICENSE file. 4 * be found in the LICENSE file.
5 * Copyright 2009, Google Inc. 5 * Copyright (c) 2011, Google Inc.
6 */ 6 */
7 7
8 #include "native_client/src/trusted/service_runtime/nacl_config.h" 8 #include "native_client/src/trusted/service_runtime/nacl_config.h"
9 #include "native_client/src/trusted/validator_arm/validator.h" 9 #include "native_client/src/trusted/validator_arm/validator.h"
10 #include "native_client/src/include/nacl_macros.h" 10 #include "native_client/src/include/nacl_macros.h"
11 11 #include <assert.h>
12 using nacl_arm_dec::Instruction; 12 using nacl_arm_dec::Instruction;
13 using nacl_arm_dec::ClassDecoder; 13 using nacl_arm_dec::ClassDecoder;
14 using nacl_arm_dec::Register; 14 using nacl_arm_dec::Register;
15 using nacl_arm_dec::RegisterList; 15 using nacl_arm_dec::RegisterList;
16 using nacl_arm_dec::kRegisterNone; 16 using nacl_arm_dec::kRegisterNone;
17 using nacl_arm_dec::kRegisterPc; 17 using nacl_arm_dec::kRegisterPc;
18 using nacl_arm_dec::kRegisterLink; 18 using nacl_arm_dec::kRegisterLink;
19 19
20 using std::vector; 20 using std::vector;
21 21
22 namespace nacl_arm_val { 22 namespace nacl_arm_val {
23 23
24 /********************************************************* 24 /*********************************************************
25 * Implementations of patterns used in the first pass. 25 * Implementations of patterns used in the first pass.
26 * 26 *
27 * N.B. IF YOU ADD A PATTERN HERE, REGISTER IT BELOW. 27 * N.B. IF YOU ADD A PATTERN HERE, REGISTER IT BELOW.
28 * See the list in apply_patterns. 28 * See the list in apply_patterns.
29 *********************************************************/ 29 *********************************************************/
30 30
31 // A possible result from a validator pattern. 31 // A few convenience items for return values.
32 enum PatternMatch { 32 PatternMatch NO_MATCH = {NO_MATCH_MODE, 0};
33 // The pattern does not apply to the instructions it was given. 33 PatternMatch PATTERN_UNSAFE = {PATTERN_UNSAFE_MODE, 0};
34 NO_MATCH, 34 PatternMatch PATTERN_SAFE_3 = {PATTERN_SAFE_MODE, 3};
35 // The pattern matches, and is safe; do not allow jumps to split it. 35 PatternMatch PATTERN_SAFE_2 = {PATTERN_SAFE_MODE, 2};
36 PATTERN_SAFE, 36 PatternMatch PATTERN_SAFE_1 = {PATTERN_SAFE_MODE, 1};
37 // The pattern matches, and has detected a problem.
38 PATTERN_UNSAFE
39 };
40 37
41 /* 38 /*
42 * Ensures that all stores use a safe base address. A base address is safe if 39 * Ensures that all stores use a safe base address. A base address is safe if
43 * it 40 * it
44 * 1. Has specific bits masked off by its immediate predecessor, or 41 * 1. Has specific bits masked off by its immediate predecessor, or
45 * 2. Is predicated on those bits being clear, as tested by its immediate 42 * 2. Is predicated on those bits being clear, as tested by its immediate
46 * predecessor, or 43 * predecessor, or
47 * 3. Is in a register defined as always containing a safe address. 44 * 3. Is in a register defined as always containing a safe address.
48 * 45 *
49 * This pattern concerns itself with case #1, early-exiting if it finds #2. 46 * This pattern concerns itself with case #1, early-exiting if it finds #2.
50 */ 47 */
51 static PatternMatch check_store_mask(const SfiValidator &sfi, 48 static PatternMatch check_store_mask(const SfiValidator &sfi,
52 const DecodedInstruction &first, 49 DecodedInstruction insns[],
53 const DecodedInstruction &second,
54 ProblemSink *out) { 50 ProblemSink *out) {
51 const DecodedInstruction first = insns[kMaxPattern - 2];
52 const DecodedInstruction second = insns[kMaxPattern - 1];
53
55 if (second.base_address_register() == kRegisterNone /* not a store */ 54 if (second.base_address_register() == kRegisterNone /* not a store */
56 || sfi.is_data_address_register(second.base_address_register())) { 55 || sfi.is_data_address_register(second.base_address_register())) {
57 return NO_MATCH; 56 return NO_MATCH;
58 } 57 }
59 58
60 if (first.defines(second.base_address_register()) 59 if (first.defines(second.base_address_register())
61 && first.clears_bits(sfi.data_address_mask()) 60 && first.clears_bits(sfi.data_address_mask())
62 && first.always_precedes(second)) { 61 && first.always_precedes(second)) {
63 return PATTERN_SAFE; 62 return PATTERN_SAFE_2;
64 } 63 }
65 64
66 if (first.sets_Z_if_bits_clear(second.base_address_register(), 65 if (first.sets_Z_if_bits_clear(second.base_address_register(),
67 sfi.data_address_mask()) 66 sfi.data_address_mask())
68 && second.is_conditional_on(first)) { 67 && second.is_conditional_on(first)) {
69 return PATTERN_SAFE; 68 return PATTERN_SAFE_2;
70 } 69 }
71 70
72 out->report_problem(second.addr(), second.safety(), kProblemUnsafeStore); 71 out->report_problem(second.addr(), second.safety(), kProblemUnsafeStore);
73 return PATTERN_UNSAFE; 72 return PATTERN_UNSAFE;
74 } 73 }
75 74
76 /* 75 /*
77 * Ensures that all indirect branches use a safe destination address. A 76 * Ensures that all indirect branches use a safe destination address. A
78 * destination address is safe if it has specific bits masked off by its 77 * destination address is safe if it has specific bits masked off by its
79 * immediate predecessor. 78 * immediate predecessor.
80 */ 79 */
81 static PatternMatch check_branch_mask(const SfiValidator &sfi, 80 static PatternMatch check_branch_mask(const SfiValidator &sfi,
82 const DecodedInstruction &first, 81 DecodedInstruction insns[],
83 const DecodedInstruction &second,
84 ProblemSink *out) { 82 ProblemSink *out) {
83 const DecodedInstruction zeroth = insns[kMaxPattern - 3];
84 const DecodedInstruction first = insns[kMaxPattern - 2];
85 const DecodedInstruction second = insns[kMaxPattern - 1];
85 if (second.branch_target_register() == kRegisterNone) return NO_MATCH; 86 if (second.branch_target_register() == kRegisterNone) return NO_MATCH;
86 87
87 if (first.defines(second.branch_target_register()) 88
89 if ((sfi.code_address_ormask() == 0)
90 && first.defines(second.branch_target_register())
88 && first.clears_bits(sfi.code_address_mask()) 91 && first.clears_bits(sfi.code_address_mask())
89 && first.always_precedes(second)) { 92 && first.always_precedes(second)) {
90 return PATTERN_SAFE; 93 return PATTERN_SAFE_2;
94 }
95
96 if (first.defines(second.branch_target_register())
97 && first.sets_bits(sfi.code_address_ormask())
98 && first.always_precedes(second)
99 && zeroth.defines(second.branch_target_register())
100 && zeroth.clears_bits(sfi.code_address_mask())
101 && zeroth.always_precedes(first)) {
102 return PATTERN_SAFE_3;
91 } 103 }
92 104
93 out->report_problem(second.addr(), second.safety(), kProblemUnsafeBranch); 105 out->report_problem(second.addr(), second.safety(), kProblemUnsafeBranch);
94 return PATTERN_UNSAFE; 106 return PATTERN_UNSAFE;
95 } 107 }
96 108
97 /* 109 /*
98 * Verifies that any instructions that update a data-address register are 110 * Verifies that any instructions that update a data-address register are
99 * immediately followed by a mask. 111 * immediately followed by a mask.
100 */ 112 */
101 static PatternMatch check_data_register_update(const SfiValidator &sfi, 113 static PatternMatch check_data_register_update(const SfiValidator &sfi,
102 const DecodedInstruction &first, 114 DecodedInstruction insns[],
103 const DecodedInstruction &second,
104 ProblemSink *out) { 115 ProblemSink *out) {
116 const DecodedInstruction first = insns[kMaxPattern - 2];
117 const DecodedInstruction second = insns[kMaxPattern - 1];
105 if (!first.defines_any(sfi.data_address_registers())) return NO_MATCH; 118 if (!first.defines_any(sfi.data_address_registers())) return NO_MATCH;
106 119
107 // A single safe data register update doesn't affect control flow. 120 // A single safe data register update doesn't affect control flow.
108 if (first.clears_bits(sfi.data_address_mask())) return NO_MATCH; 121 if (first.clears_bits(sfi.data_address_mask())) return NO_MATCH;
109 122
110 // Exempt updates due to writeback 123 // Exempt updates due to writeback
111 RegisterList data_addr_defs = first.defs() & sfi.data_address_registers(); 124 RegisterList data_addr_defs = first.defs() & sfi.data_address_registers();
112 if ((first.immediate_addressing_defs() & sfi.data_address_registers()) 125 if ((first.immediate_addressing_defs() & sfi.data_address_registers())
113 == data_addr_defs) { 126 == data_addr_defs) {
114 return NO_MATCH; 127 return NO_MATCH;
115 } 128 }
116 129
117 if (second.defines_all(data_addr_defs) 130 if (second.defines_all(data_addr_defs)
118 && second.clears_bits(sfi.data_address_mask()) 131 && second.clears_bits(sfi.data_address_mask())
119 && second.always_follows(first)) { 132 && second.always_follows(first)) {
120 return PATTERN_SAFE; 133 return PATTERN_SAFE_2;
121 } 134 }
122 135
123 out->report_problem(first.addr(), first.safety(), kProblemUnsafeDataWrite); 136 out->report_problem(first.addr(), first.safety(), kProblemUnsafeDataWrite);
124 return PATTERN_UNSAFE; 137 return PATTERN_UNSAFE;
125 } 138 }
126 139
127 /* 140 /*
128 * Checks the location of linking branches -- to be useful, they must be in 141 * Checks the location of linking branches -- to be useful, they must be in
129 * the last bundle slot. 142 * the last bundle slot.
130 * 143 *
131 * This is not a security check per se, more of a guard against Stupid Compiler 144 * This is not a security check per se, more of a guard against Stupid Compiler
132 * Tricks. 145 * Tricks.
133 */ 146 */
147 // This function is currently thumb-unsafe due to the -4. Will re-enable when it
148 // is 100% clear how to handle, as this is not a safety issue.
149 // TODO(mrm) re-enable
bsy 2011/09/16 18:35:52 s/mrm/jasonwkim/ cannot assign TODO to people who
150 #if 0
134 static PatternMatch check_call_position(const SfiValidator &sfi, 151 static PatternMatch check_call_position(const SfiValidator &sfi,
135 const DecodedInstruction &inst, 152 const DecodedInstruction &inst,
136 ProblemSink *out) { 153 ProblemSink *out) {
137 // Identify linking branches through their definitions: 154 // Identify linking branches through their definitions:
138 if (inst.defines_all(kRegisterPc + kRegisterLink)) { 155 if (inst.defines_all(kRegisterPc + kRegisterLink)) {
139 uint32_t last_slot = sfi.bundle_for_address(inst.addr()).end_addr() - 4; 156 uint32_t last_slot = sfi.bundle_for_address(inst.addr()).end_addr() - 4;
140 if (inst.addr() != last_slot) { 157 if (inst.addr() != last_slot) {
141 out->report_problem(inst.addr(), inst.safety(), kProblemMisalignedCall); 158 out->report_problem(inst.addr(), inst.safety(), kProblemMisalignedCall);
142 return PATTERN_UNSAFE; 159 return PATTERN_UNSAFE;
143 } 160 }
144 } 161 }
145 return NO_MATCH; 162 return NO_MATCH;
146 } 163 }
164 #endif
147 165
148 /* 166 /*
149 * Checks for instructions that alter any read-only register. 167 * Checks for instructions that alter any read-only register.
150 */ 168 */
151 static PatternMatch check_read_only(const SfiValidator &sfi, 169 static PatternMatch check_read_only(const SfiValidator &sfi,
152 const DecodedInstruction &inst, 170 DecodedInstruction insns[],
153 ProblemSink *out) { 171 ProblemSink *out) {
172 DecodedInstruction inst = insns[kMaxPattern - 1];
154 if (inst.defines_any(sfi.read_only_registers())) { 173 if (inst.defines_any(sfi.read_only_registers())) {
155 out->report_problem(inst.addr(), inst.safety(), kProblemReadOnlyRegister); 174 out->report_problem(inst.addr(), inst.safety(), kProblemReadOnlyRegister);
156 return PATTERN_UNSAFE; 175 return PATTERN_UNSAFE;
157 } 176 }
158 177
159 return NO_MATCH; 178 return NO_MATCH;
160 } 179 }
161 180
162 /* 181 /*
163 * Checks writes to r15 from instructions that aren't branches. 182 * Checks writes to r15 from instructions that aren't branches.
164 */ 183 */
165 static PatternMatch check_pc_writes(const SfiValidator &sfi, 184 static PatternMatch check_pc_writes(const SfiValidator &sfi,
166 const DecodedInstruction &inst, 185 DecodedInstruction insns[],
167 ProblemSink *out) { 186 ProblemSink *out) {
187 DecodedInstruction inst = insns[kMaxPattern - 1];
168 if (inst.is_relative_branch() 188 if (inst.is_relative_branch()
169 || inst.branch_target_register() != kRegisterNone) { 189 || inst.branch_target_register() != kRegisterNone) {
170 // It's a branch. 190 // It's a branch.
171 return NO_MATCH; 191 return NO_MATCH;
172 } 192 }
173 193
174 if (!inst.defines(nacl_arm_dec::kRegisterPc)) return NO_MATCH; 194 if (!inst.defines(nacl_arm_dec::kRegisterPc)) return NO_MATCH;
175 195 // TODO(mrm) The clears_bits thing here doesn't save us, secvuln
bsy 2011/09/16 18:35:52 explain/expand on this please. if there is really
176 if (inst.clears_bits(sfi.code_address_mask())) { 196 if (inst.clears_bits(sfi.code_address_mask())) {
177 return PATTERN_SAFE; 197 return PATTERN_SAFE_1;
178 } else { 198 } else {
179 out->report_problem(inst.addr(), inst.safety(), kProblemUnsafeBranch); 199 out->report_problem(inst.addr(), inst.safety(), kProblemUnsafeBranch);
180 return PATTERN_UNSAFE; 200 return PATTERN_UNSAFE;
181 } 201 }
182 } 202 }
183 203
204 /*
205 * Groups IT blocks together, sets appropriate condition flags on them.
206 */
207 static PatternMatch check_it(const SfiValidator &sfi,
208 DecodedInstruction insns[],
209 ProblemSink *out) {
210 UNREFERENCED_PARAMETER(sfi);
211 DecodedInstruction insn_it = insns[0];
212 nacl_arm_dec::ITCond it = insn_it.it();
213 if (!it)
214 return NO_MATCH; // Not an IT instruction.
215 Instruction::Condition cond = insn_it.condition();
216 if ((cond == Instruction::AL) || (cond == Instruction::UNCONDITIONAL)) {
217 out->report_problem(insn_it.addr(), insn_it.safety(),
218 kProblemUnconditionalIT);
219 return PATTERN_UNSAFE; // This is nonsense, but encodeable.
220 }
221 // This is an IT instruction. We inform the affected instructions
222 // of their affectedness, and accept the pattern.
223 uint8_t conddex = 0;
224 for (; nacl_arm_dec::it_select(conddex, it) != nacl_arm_dec::NONE; conddex++)
225 if (nacl_arm_dec::it_select(conddex, it) == nacl_arm_dec::THEN)
226 insns[conddex + 1].set_condition(cond);
227 else if (nacl_arm_dec::it_select(conddex, it) == nacl_arm_dec::ELSE)
228 insns[conddex + 1].set_condition((Instruction::Condition)(cond ^ 1));
229 else return PATTERN_UNSAFE; /* Should never be reached */
230 // Check that this insn is allowed at this point in an IT
231 switch (insns[conddex + 1].it_safe()) {
232 default:
233 case nacl_arm_dec::NEVER: return PATTERN_UNSAFE;
234 case nacl_arm_dec::END:
235 if (nacl_arm_dec::it_select(conddex + 1, it) != nacl_arm_dec::NONE)
236 return PATTERN_UNSAFE;
237 case nacl_arm_dec::ALWAYS: {} // Fall through, we're fine.
238 }
239 PatternMatch p = {PATTERN_SAFE_MODE, -(conddex + 1)};
240 return p;
241 }
184 /********************************************************* 242 /*********************************************************
185 * 243 *
186 * Implementation of SfiValidator itself. 244 * Implementation of SfiValidator itself.
187 * 245 *
188 *********************************************************/ 246 *********************************************************/
189 247
190 SfiValidator::SfiValidator(uint32_t bytes_per_bundle, 248 SfiValidator::SfiValidator(uint32_t bytes_per_bundle,
191 uint32_t code_region_bytes, 249 uint32_t code_region_bytes,
192 uint32_t data_region_bytes, 250 uint32_t data_region_bytes,
193 RegisterList read_only_registers, 251 RegisterList read_only_registers,
194 RegisterList data_address_registers) 252 RegisterList data_address_registers,
253 bool thumb)
195 : bytes_per_bundle_(bytes_per_bundle), 254 : bytes_per_bundle_(bytes_per_bundle),
196 data_address_mask_(~(data_region_bytes - 1)), 255 data_address_mask_(~(data_region_bytes - 1)),
197 code_address_mask_(~(code_region_bytes - 1) | (bytes_per_bundle - 1)), 256 code_address_mask_(thumb
257 ? ~(code_region_bytes - 1)
258 : ~(code_region_bytes - 1) | (bytes_per_bundle - 1)),
259 code_address_ormask_(thumb
260 ? (bytes_per_bundle - 1)
261 : 0),
198 code_region_bytes_(code_region_bytes), 262 code_region_bytes_(code_region_bytes),
199 read_only_registers_(read_only_registers), 263 read_only_registers_(read_only_registers),
200 data_address_registers_(data_address_registers), 264 data_address_registers_(data_address_registers),
201 decode_state_(nacl_arm_dec::init_decode()) {} 265 thumb_(thumb ? 1 : 0),
266 decode_state_(nacl_arm_dec::init_decode(thumb)) {}
202 267
203 bool SfiValidator::validate(const vector<CodeSegment> &segments, 268 bool SfiValidator::validate(const vector<CodeSegment> &segments,
204 ProblemSink *out) { 269 ProblemSink *out) {
205 uint32_t base = segments[0].begin_addr(); 270 uint32_t base = segments[0].begin_addr();
206 uint32_t size = segments.back().end_addr() - base; 271 uint32_t size = segments.back().end_addr() - base;
207 AddressSet branches(base, size); 272 AddressSet branches(base, size);
208 AddressSet critical(base, size); 273 AddressSet critical(base, size);
209 274
210 bool complete_success = true; 275 bool complete_success = true;
211 276
212 for (vector<CodeSegment>::const_iterator it = segments.begin(); 277 for (vector<CodeSegment>::const_iterator it = segments.begin();
213 it != segments.end(); ++it) { 278 it != segments.end(); ++it) {
214 complete_success &= validate_fallthrough(*it, out, &branches, &critical); 279 complete_success &= validate_fallthrough(*it, out, &branches, &critical);
215 280
216 if (!out->should_continue()) { 281 if (!out->should_continue()) {
217 return false; 282 return false;
218 } 283 }
219 } 284 }
220 285
221 complete_success &= validate_branches(segments, branches, critical, out); 286 complete_success &= validate_branches(segments, branches, critical, out);
222 287
223 return complete_success; 288 return complete_success;
224 } 289 }
225 290
226 bool SfiValidator::validate_fallthrough(const CodeSegment &segment, 291 bool SfiValidator::validate_fallthrough(const CodeSegment &segment,
227 ProblemSink *out, 292 ProblemSink *out,
228 AddressSet *branches, 293 AddressSet *branches,
229 AddressSet *critical) { 294 AddressSet *critical) {
230 bool complete_success = true; 295 bool complete_success = true;
296 // The list of initial patterns to run (during the first window)
297 static const Pattern pre_patterns[] = {&check_it};
298
299 // The list of patterns to run in the second window.
300 static const Pattern patterns[] = {
301 &check_read_only,
302 &check_pc_writes,
303 // TODO(mrm) re-enable (see comment on actual function)
bsy 2011/09/16 18:35:52 "
jasonwkim 2011/09/16 20:09:17 All TODO(mrm) have been replaced
304 // &check_call_position,
305 &check_store_mask,
306 &check_branch_mask,
307 &check_data_register_update,
308 };
231 309
232 nacl_arm_dec::Forbidden initial_decoder; 310 nacl_arm_dec::Forbidden initial_decoder;
233 // Initialize the previous instruction to a scary BKPT, so patterns all fail. 311 // Initialize the previous instruction to a scary BKPT, so patterns all fail.
234 DecodedInstruction pred( 312 DecodedInstruction bkpt(
235 0, // Virtual address 0, which will be in a different bundle; 313 0, // Virtual address 0, which will be in a different bundle;
236 Instruction(0xE1277777), // The literal-pool-header BKPT instruction; 314 Instruction(0xE1277777), // The literal-pool-header BKPT instruction;
237 initial_decoder); // and ensure that it decodes as Forbidden. 315 initial_decoder); // and ensure that it decodes as Forbidden.
238 316 // TODO(mrm) Figure out how to make this vary automatically
239 for (uint32_t va = segment.begin_addr(); va != segment.end_addr(); va += 4) { 317 DecodedInstruction insns[kMaxPattern] = {bkpt, bkpt, bkpt, bkpt, bkpt};
240 DecodedInstruction inst(va, segment[va], 318 DecodedInstruction pre_insns[kMaxPattern] = {bkpt, bkpt, bkpt, bkpt, bkpt};
319 uint8_t insn_size = 0;
320 uint8_t flush = 0;
321 uint32_t va = segment.begin_addr();
322 uint32_t va_code = va + thumb_;
323 while (flush < kMaxPattern) {
324 va_code = va + thumb_;
325 if (va != segment.end_addr()) {
326 DecodedInstruction inst(va_code, segment[va],
241 nacl_arm_dec::decode(segment[va], decode_state_)); 327 nacl_arm_dec::decode(segment[va], decode_state_));
242 328 pre_insns[kMaxPattern - 1] = inst;
243 if (inst.safety() != nacl_arm_dec::MAY_BE_SAFE) { 329 insn_size = inst.size();
244 out->report_problem(va, inst.safety(), kProblemUnsafe); 330 if (inst.safety() != nacl_arm_dec::MAY_BE_SAFE) {
331 out->report_problem(va_code, inst.safety(), kProblemUnsafe);
332 if (!out->should_continue()) {
333 return false;
334 }
335 complete_success = false;
336 }
337 } else {
338 pre_insns[kMaxPattern - 1] = bkpt;
339 flush++;
340 }
341 if (va > segment.end_addr()) {
342 out->report_problem(va_code - insn_size, nacl_arm_dec::FORBIDDEN,
343 kProblemStraddlesSegment);
344 complete_success = false;
245 if (!out->should_continue()) { 345 if (!out->should_continue()) {
246 return false; 346 return false;
247 } 347 }
248 complete_success = false; 348 }
349 complete_success &= apply_patterns(pre_insns, pre_patterns,
350 NACL_ARRAY_SIZE(pre_patterns),
351 critical, out);
352 complete_success &= apply_patterns(insns, patterns,
353 NACL_ARRAY_SIZE(patterns), critical, out);
354 if (!out->should_continue()) return false;
355
356 if (insns[kMaxPattern - 1].is_relative_branch()) {
357 branches->add(insns[kMaxPattern - 1].addr());
249 } 358 }
250 359
251 complete_success &= apply_patterns(inst, out); 360 if (pre_insns[kMaxPattern - 1].is_literal_pool_head()
252 if (!out->should_continue()) return false; 361 && is_bundle_head(pre_insns[kMaxPattern - 1].addr())) {
362 // Add each instruction in this bundle to the critical set.
363 // Note, we increment va by 2 every time to deal with
364 // variable width instructions.
365 va = pre_insns[kMaxPattern - 1].addr() - thumb_;
366 uint32_t last_data_addr =
367 bundle_for_address(va + thumb_).end_addr()
368 - thumb_;
369 // last_data_addr cannot go beyond our segment
370 if (last_data_addr > segment.end_addr())
371 last_data_addr = segment.end_addr();
253 372
254 complete_success &= apply_patterns(pred, inst, critical, out); 373 assert((last_data_addr - va <= bytes_per_bundle_ &&
255 if (!out->should_continue()) return false; 374 ((last_data_addr - va) & 1) == 0) &&
375 "va and last_data_addr must be even (even in thumb2");
256 376
257 if (inst.is_relative_branch()) {
258 branches->add(inst.addr());
259 }
260 377
261 if (inst.is_literal_pool_head() 378 for (; va != last_data_addr; va += 2) {
262 && is_bundle_head(inst.addr())) {
263 // Add each instruction in this bundle to the critical set.
264 uint32_t last_data_addr = bundle_for_address(va).end_addr();
265 for (; va != last_data_addr; va += 4) {
266 critical->add(va); 379 critical->add(va);
267 } 380 }
268 381 // Wipe our slate clean with unsafe breakpoints
382 for (uint8_t i = 0; i < kMaxPattern - 1; i++) {
383 insns[i] = bkpt;
384 pre_insns[i] = bkpt;
385 }
269 // Decrement the virtual address by one instruction, so the for 386 // Decrement the virtual address by one instruction, so the for
270 // loop can bump it back forward. This is slightly dirty. 387 // loop can bump it back forward. This is slightly dirty.
271 va -= 4; 388 va -= insn_size;
272 } 389 }
273 390 // Pushback
274 pred = inst; 391 for (uint8_t i = 0; i < kMaxPattern - 1; i++)
392 insns[i] = insns[i + 1];
393 insns[kMaxPattern - 1] = pre_insns[0];
394 for (uint8_t i = 0; i < kMaxPattern - 1; i++)
395 pre_insns[i] = pre_insns[i + 1];
396 if (flush == 0)
397 va += insn_size;
275 } 398 }
276 399
277 return complete_success; 400 return complete_success;
278 } 401 }
279 402
280 static bool address_contained(uint32_t va, const vector<CodeSegment> &segs) { 403 static bool address_contained(uint32_t va, const vector<CodeSegment> &segs) {
281 for (vector<CodeSegment>::const_iterator it = segs.begin(); it != segs.end(); 404 for (vector<CodeSegment>::const_iterator it = segs.begin(); it != segs.end();
282 ++it) { 405 ++it) {
283 if (it->contains_address(va)) return true; 406 if (it->contains_address(va)) return true;
284 } 407 }
285 return false; 408 return false;
286 } 409 }
287 410
288 bool SfiValidator::validate_branches(const vector<CodeSegment> &segments, 411 bool SfiValidator::validate_branches(const vector<CodeSegment> &segments,
289 const AddressSet &branches, 412 const AddressSet &branches,
290 const AddressSet &critical, 413 const AddressSet &critical,
291 ProblemSink *out) { 414 ProblemSink *out) {
292 bool complete_success = true; 415 bool complete_success = true;
416 const uint32_t low_bitmask = 0xFFFFFFFE;
293 417
294 vector<CodeSegment>::const_iterator seg_it = segments.begin(); 418 vector<CodeSegment>::const_iterator seg_it = segments.begin();
295 419
296 for (AddressSet::Iterator it = branches.begin(); it != branches.end(); ++it) { 420 for (AddressSet::Iterator it = branches.begin(); it != branches.end(); ++it) {
297 uint32_t va = *it; 421 uint32_t va = *it;
298 422
299 // Invariant: all addresses in branches are covered by some segment; 423 // Invariant: all addresses in branches are covered by some segment;
300 // segments are in sorted order. 424 // segments are in sorted order.
301 while (!seg_it->contains_address(va)) { 425 while (!seg_it->contains_address(va)) {
302 ++seg_it; 426 ++seg_it;
303 } 427 }
304 428
305 const CodeSegment &segment = *seg_it; 429 const CodeSegment &segment = *seg_it;
306 430
307 DecodedInstruction inst(va, segment[va], 431 DecodedInstruction inst(va, segment[va],
308 nacl_arm_dec::decode(segment[va], decode_state_)); 432 nacl_arm_dec::decode(segment[va], decode_state_));
309 433
310 // We know it is_relative_branch(), so we can simply call: 434 // We know it is_relative_branch(), so we can simply call:
311 uint32_t target_va = inst.branch_target(); 435 uint32_t target_va = inst.branch_target();
312 if (address_contained(target_va, segments)) { 436 if (address_contained(target_va & low_bitmask, segments)) {
313 if (critical.contains(target_va)) { 437 if (critical.contains(target_va & low_bitmask)) {
314 out->report_problem(va, inst.safety(), kProblemBranchSplitsPattern, 438 out->report_problem(va, inst.safety(), kProblemBranchSplitsPattern,
315 target_va); 439 target_va);
316 if (!out->should_continue()) { 440 if (!out->should_continue()) {
317 return false; 441 return false;
318 } 442 }
319 complete_success = false; 443 complete_success = false;
320 } 444 }
321 } else if ((target_va & code_address_mask()) == 0) { 445 } else if ((((target_va & (~code_address_mask()))
322 // Allow bundle-aligned, in-range direct jump. 446 | code_address_ormask()) & low_bitmask) ==
447 (target_va & low_bitmask)) {
448 // If the masking operations would not modify the va, it is allowed
449 // Subtlety: A non-register branch cannot transition between thumb and
450 // non-thumb mode, so we intentionally omit the low bit.
323 } else { 451 } else {
324 out->report_problem(va, inst.safety(), kProblemBranchInvalidDest, 452 out->report_problem(va | thumb_, inst.safety(),
325 target_va); 453 kProblemBranchInvalidDest, target_va);
326 if (!out->should_continue()) { 454 if (!out->should_continue()) {
327 return false; 455 return false;
328 } 456 }
329 complete_success = false; 457 complete_success = false;
330 } 458 }
331 } 459 }
332 460
333 return complete_success; 461 return complete_success;
334 } 462 }
335 463
336 bool SfiValidator::apply_patterns(const DecodedInstruction &inst, 464 bool SfiValidator::apply_patterns(DecodedInstruction insns[],
465 const Pattern patterns[], unsigned int nPatterns, AddressSet *critical,
337 ProblemSink *out) { 466 ProblemSink *out) {
338 // Single-instruction patterns
339 typedef PatternMatch (*OneInstPattern)(const SfiValidator &,
340 const DecodedInstruction &,
341 ProblemSink *out);
342 static const OneInstPattern one_inst_patterns[] = {
343 &check_read_only,
344 &check_pc_writes,
345 &check_call_position,
346 };
347 467
348 bool complete_success = true; 468 bool complete_success = true;
349 469
350 for (uint32_t i = 0; i < NACL_ARRAY_SIZE(one_inst_patterns); i++) { 470 for (uint32_t i = 0; i < nPatterns; i++) {
351 PatternMatch r = one_inst_patterns[i](*this, inst, out); 471 PatternMatch r = patterns[i](*this, insns, out);
352 switch (r) { 472 switch (r.pm_mode) {
353 case PATTERN_SAFE: 473 case NO_MATCH_MODE: break;
354 case NO_MATCH:
355 break;
356 474
357 case PATTERN_UNSAFE: 475 case PATTERN_UNSAFE_MODE:
358 complete_success = false;
359 break;
360 }
361 }
362
363 return complete_success;
364 }
365
366 bool SfiValidator::apply_patterns(const DecodedInstruction &first,
367 const DecodedInstruction &second, AddressSet *critical, ProblemSink *out) {
368 // Type for two-instruction pattern functions
369 typedef PatternMatch (*TwoInstPattern)(const SfiValidator &,
370 const DecodedInstruction &first,
371 const DecodedInstruction &second,
372 ProblemSink *out);
373
374 // The list of patterns -- defined in static functions up top.
375 static const TwoInstPattern two_inst_patterns[] = {
376 &check_store_mask,
377 &check_branch_mask,
378 &check_data_register_update,
379 };
380
381 bool complete_success = true;
382
383 for (uint32_t i = 0; i < NACL_ARRAY_SIZE(two_inst_patterns); i++) {
384 PatternMatch r = two_inst_patterns[i](*this, first, second, out);
385 switch (r) {
386 case NO_MATCH: break;
387
388 case PATTERN_UNSAFE:
389 // Pattern is in charge of reporting specific issue. 476 // Pattern is in charge of reporting specific issue.
390 complete_success = false; 477 complete_success = false;
391 break; 478 break;
392 479
393 case PATTERN_SAFE: 480 case PATTERN_SAFE_MODE:
394 if (bundle_for_address(first.addr()) 481 bool in_bundle = true;
395 != bundle_for_address(second.addr())) { 482 if (r.size >= 0) {
396 complete_success = false; 483 for (int8_t i = kMaxPattern - 2; i >= kMaxPattern - r.size; i--) {
397 out->report_problem(first.addr(), first.safety(), 484 in_bundle &= (bundle_for_address(insns[i].addr()) ==
398 kProblemPatternCrossesBundle); 485 bundle_for_address(insns[i + 1].addr()));
486 critical->add(insns[i + 1].addr());
487 }
488 if (!in_bundle) {
489 complete_success = false;
490 out->report_problem(insns[kMaxPattern-r.size].addr(),
491 insns[kMaxPattern-r.size].safety(),
492 kProblemPatternCrossesBundle);
493 }
399 } else { 494 } else {
400 critical->add(second.addr()); 495 for (int8_t i = 1; i < (-r.size); i++) {
496 in_bundle &= (bundle_for_address(insns[i].addr()) ==
497 bundle_for_address(insns[i - 1].addr()));
498 critical->add(insns[i].addr());
499 }
500 if (!in_bundle) {
501 complete_success = false;
502 out->report_problem(insns[0].addr(),
503 insns[0].safety(),
504 kProblemPatternCrossesBundle);
505 }
401 } 506 }
402 break; 507 break;
403 } 508 }
404 } 509 }
405 return complete_success; 510 return complete_success;
406 } 511 }
407 512
408 bool SfiValidator::is_data_address_register(Register r) const { 513 bool SfiValidator::is_data_address_register(Register r) const {
409 return data_address_registers_[r]; 514 return data_address_registers_[r];
410 } 515 }
411 516
412 const Bundle SfiValidator::bundle_for_address(uint32_t address) const { 517 const Bundle SfiValidator::bundle_for_address(uint32_t address) const {
413 uint32_t base = address - (address % bytes_per_bundle_); 518 // TODO(mrm) Is it a security flaw that this can point at below entry?
414 return Bundle(base, bytes_per_bundle_); 519 uint32_t shift_address = address - code_address_ormask_;
520 uint32_t base = shift_address - (shift_address % bytes_per_bundle_);
521 return Bundle(base + code_address_ormask_, bytes_per_bundle_);
415 } 522 }
416 523
417 bool SfiValidator::is_bundle_head(uint32_t address) const { 524 bool SfiValidator::is_bundle_head(uint32_t address) const {
418 return (address % bytes_per_bundle_) == 0; 525 return (address % bytes_per_bundle_) == code_address_ormask_;
419 } 526 }
420 527
421 528
422 /* 529 /*
423 * We eagerly compute both safety and defs here, because it turns out to be 530 * We eagerly compute both safety and defs here, because it turns out to be
424 * faster by 10% than doing either lazily and memoizing the result. 531 * faster by 10% than doing either lazily and memoizing the result.
425 */ 532 */
426 DecodedInstruction::DecodedInstruction(uint32_t vaddr, 533 DecodedInstruction::DecodedInstruction(uint32_t vaddr,
427 Instruction inst, 534 Instruction inst,
428 const ClassDecoder &decoder) 535 const ClassDecoder &decoder)
429 : vaddr_(vaddr), 536 : vaddr_(vaddr),
430 inst_(inst), 537 inst_(inst),
431 decoder_(&decoder), 538 decoder_(&decoder),
432 safety_(decoder.safety(inst_)), 539 safety_(decoder.safety(inst_)),
433 defs_(decoder.defs(inst_)) 540 defs_(decoder.defs(inst_)),
541 condition_(Instruction::UNCONDITIONAL)
434 {} 542 {}
435 543
436 } // namespace 544 } // namespace
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698