OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2009 The Native Client Authors. All rights reserved. | 2 * Copyright (c) 2011 The Native Client Authors. All rights reserved. |
3 * Use of this source code is governed by a BSD-style license that can | 3 * Use of this source code is governed by a BSD-style license that can |
4 * be found in the LICENSE file. | 4 * be found in the LICENSE file. |
5 * Copyright 2009, Google Inc. | 5 * Copyright (c) 2011, Google Inc. |
6 */ | 6 */ |
7 | 7 |
8 #include "native_client/src/trusted/service_runtime/nacl_config.h" | 8 #include "native_client/src/trusted/service_runtime/nacl_config.h" |
9 #include "native_client/src/trusted/validator_arm/validator.h" | 9 #include "native_client/src/trusted/validator_arm/validator.h" |
10 #include "native_client/src/include/nacl_macros.h" | 10 #include "native_client/src/include/nacl_macros.h" |
11 | 11 |
12 using nacl_arm_dec::Instruction; | 12 using nacl_arm_dec::Instruction; |
13 using nacl_arm_dec::ClassDecoder; | 13 using nacl_arm_dec::ClassDecoder; |
14 using nacl_arm_dec::Register; | 14 using nacl_arm_dec::Register; |
15 using nacl_arm_dec::RegisterList; | 15 using nacl_arm_dec::RegisterList; |
16 using nacl_arm_dec::kRegisterNone; | 16 using nacl_arm_dec::kRegisterNone; |
17 using nacl_arm_dec::kRegisterPc; | 17 using nacl_arm_dec::kRegisterPc; |
18 using nacl_arm_dec::kRegisterLink; | 18 using nacl_arm_dec::kRegisterLink; |
19 | 19 |
20 using std::vector; | 20 using std::vector; |
21 | 21 |
22 namespace nacl_arm_val { | 22 namespace nacl_arm_val { |
23 | 23 |
24 /********************************************************* | 24 /********************************************************* |
25 * Implementations of patterns used in the first pass. | 25 * Implementations of patterns used in the first pass. |
26 * | 26 * |
27 * N.B. IF YOU ADD A PATTERN HERE, REGISTER IT BELOW. | 27 * N.B. IF YOU ADD A PATTERN HERE, REGISTER IT BELOW. |
28 * See the list in apply_patterns. | 28 * See the list in apply_patterns. |
29 *********************************************************/ | 29 *********************************************************/ |
30 | 30 |
31 // A possible result from a validator pattern. | 31 // A few convenience items for return values. |
32 enum PatternMatch { | 32 PatternMatch NO_MATCH = {NO_MATCH_MODE, 0}; |
33 // The pattern does not apply to the instructions it was given. | 33 PatternMatch PATTERN_UNSAFE = {PATTERN_UNSAFE_MODE, 0}; |
34 NO_MATCH, | 34 PatternMatch PATTERN_SAFE_3 = {PATTERN_SAFE_MODE, 3}; |
35 // The pattern matches, and is safe; do not allow jumps to split it. | 35 PatternMatch PATTERN_SAFE_2 = {PATTERN_SAFE_MODE, 2}; |
36 PATTERN_SAFE, | 36 PatternMatch PATTERN_SAFE_1 = {PATTERN_SAFE_MODE, 1}; |
37 // The pattern matches, and has detected a problem. | |
38 PATTERN_UNSAFE | |
39 }; | |
40 | 37 |
41 /* | 38 /* |
42 * Ensures that all stores use a safe base address. A base address is safe if | 39 * Ensures that all stores use a safe base address. A base address is safe if |
43 * it | 40 * it |
44 * 1. Has specific bits masked off by its immediate predecessor, or | 41 * 1. Has specific bits masked off by its immediate predecessor, or |
45 * 2. Is predicated on those bits being clear, as tested by its immediate | 42 * 2. Is predicated on those bits being clear, as tested by its immediate |
46 * predecessor, or | 43 * predecessor, or |
47 * 3. Is in a register defined as always containing a safe address. | 44 * 3. Is in a register defined as always containing a safe address. |
48 * | 45 * |
49 * This pattern concerns itself with case #1, early-exiting if it finds #2. | 46 * This pattern concerns itself with case #1, early-exiting if it finds #2. |
50 */ | 47 */ |
51 static PatternMatch check_store_mask(const SfiValidator &sfi, | 48 static PatternMatch check_store_mask(const SfiValidator &sfi, |
52 const DecodedInstruction &first, | 49 DecodedInstruction insns[], |
53 const DecodedInstruction &second, | |
54 ProblemSink *out) { | 50 ProblemSink *out) { |
51 const DecodedInstruction first = insns[kMaxPattern - 2]; | |
52 const DecodedInstruction second = insns[kMaxPattern - 1]; | |
53 | |
55 if (second.base_address_register() == kRegisterNone /* not a store */ | 54 if (second.base_address_register() == kRegisterNone /* not a store */ |
56 || sfi.is_data_address_register(second.base_address_register())) { | 55 || sfi.is_data_address_register(second.base_address_register())) { |
57 return NO_MATCH; | 56 return NO_MATCH; |
58 } | 57 } |
59 | 58 |
60 if (first.defines(second.base_address_register()) | 59 if (first.defines(second.base_address_register()) |
61 && first.clears_bits(sfi.data_address_mask()) | 60 && first.clears_bits(sfi.data_address_mask()) |
62 && first.always_precedes(second)) { | 61 && first.always_precedes(second)) { |
63 return PATTERN_SAFE; | 62 return PATTERN_SAFE_2; |
64 } | 63 } |
65 | 64 |
66 if (first.sets_Z_if_bits_clear(second.base_address_register(), | 65 if (first.sets_Z_if_bits_clear(second.base_address_register(), |
67 sfi.data_address_mask()) | 66 sfi.data_address_mask()) |
68 && second.is_conditional_on(first)) { | 67 && second.is_conditional_on(first)) { |
69 return PATTERN_SAFE; | 68 return PATTERN_SAFE_2; |
70 } | 69 } |
71 | 70 |
72 out->report_problem(second.addr(), second.safety(), kProblemUnsafeStore); | 71 out->report_problem(second.addr(), second.safety(), kProblemUnsafeStore); |
73 return PATTERN_UNSAFE; | 72 return PATTERN_UNSAFE; |
74 } | 73 } |
75 | 74 |
76 /* | 75 /* |
77 * Ensures that all indirect branches use a safe destination address. A | 76 * Ensures that all indirect branches use a safe destination address. A |
78 * destination address is safe if it has specific bits masked off by its | 77 * destination address is safe if it has specific bits masked off by its |
79 * immediate predecessor. | 78 * immediate predecessor. |
80 */ | 79 */ |
81 static PatternMatch check_branch_mask(const SfiValidator &sfi, | 80 static PatternMatch check_branch_mask(const SfiValidator &sfi, |
82 const DecodedInstruction &first, | 81 DecodedInstruction insns[], |
83 const DecodedInstruction &second, | |
84 ProblemSink *out) { | 82 ProblemSink *out) { |
83 const DecodedInstruction zeroth = insns[kMaxPattern - 3]; | |
84 const DecodedInstruction first = insns[kMaxPattern - 2]; | |
85 const DecodedInstruction second = insns[kMaxPattern - 1]; | |
85 if (second.branch_target_register() == kRegisterNone) return NO_MATCH; | 86 if (second.branch_target_register() == kRegisterNone) return NO_MATCH; |
86 | 87 |
87 if (first.defines(second.branch_target_register()) | 88 |
89 if ((sfi.code_address_ormask() == 0) | |
90 && first.defines(second.branch_target_register()) | |
88 && first.clears_bits(sfi.code_address_mask()) | 91 && first.clears_bits(sfi.code_address_mask()) |
89 && first.always_precedes(second)) { | 92 && first.always_precedes(second)) { |
90 return PATTERN_SAFE; | 93 return PATTERN_SAFE_2; |
94 } | |
95 | |
96 if (first.defines(second.branch_target_register()) | |
97 && first.sets_bits(sfi.code_address_ormask()) | |
98 && first.always_precedes(second) | |
99 && zeroth.defines(second.branch_target_register()) | |
100 && zeroth.clears_bits(sfi.code_address_mask()) | |
101 && zeroth.always_precedes(first)) { | |
102 return PATTERN_SAFE_3; | |
91 } | 103 } |
92 | 104 |
93 out->report_problem(second.addr(), second.safety(), kProblemUnsafeBranch); | 105 out->report_problem(second.addr(), second.safety(), kProblemUnsafeBranch); |
94 return PATTERN_UNSAFE; | 106 return PATTERN_UNSAFE; |
95 } | 107 } |
96 | 108 |
97 /* | 109 /* |
98 * Verifies that any instructions that update a data-address register are | 110 * Verifies that any instructions that update a data-address register are |
99 * immediately followed by a mask. | 111 * immediately followed by a mask. |
100 */ | 112 */ |
101 static PatternMatch check_data_register_update(const SfiValidator &sfi, | 113 static PatternMatch check_data_register_update(const SfiValidator &sfi, |
102 const DecodedInstruction &first, | 114 DecodedInstruction insns[], |
103 const DecodedInstruction &second, | |
104 ProblemSink *out) { | 115 ProblemSink *out) { |
116 const DecodedInstruction first = insns[kMaxPattern - 2]; | |
117 const DecodedInstruction second = insns[kMaxPattern - 1]; | |
105 if (!first.defines_any(sfi.data_address_registers())) return NO_MATCH; | 118 if (!first.defines_any(sfi.data_address_registers())) return NO_MATCH; |
106 | 119 |
107 // A single safe data register update doesn't affect control flow. | 120 // A single safe data register update doesn't affect control flow. |
108 if (first.clears_bits(sfi.data_address_mask())) return NO_MATCH; | 121 if (first.clears_bits(sfi.data_address_mask())) return NO_MATCH; |
109 | 122 |
110 // Exempt updates due to writeback | 123 // Exempt updates due to writeback |
111 RegisterList data_addr_defs = first.defs() & sfi.data_address_registers(); | 124 RegisterList data_addr_defs = first.defs() & sfi.data_address_registers(); |
112 if ((first.immediate_addressing_defs() & sfi.data_address_registers()) | 125 if ((first.immediate_addressing_defs() & sfi.data_address_registers()) |
113 == data_addr_defs) { | 126 == data_addr_defs) { |
114 return NO_MATCH; | 127 return NO_MATCH; |
115 } | 128 } |
116 | 129 |
117 if (second.defines_all(data_addr_defs) | 130 if (second.defines_all(data_addr_defs) |
118 && second.clears_bits(sfi.data_address_mask()) | 131 && second.clears_bits(sfi.data_address_mask()) |
119 && second.always_follows(first)) { | 132 && second.always_follows(first)) { |
120 return PATTERN_SAFE; | 133 return PATTERN_SAFE_2; |
121 } | 134 } |
122 | 135 |
123 out->report_problem(first.addr(), first.safety(), kProblemUnsafeDataWrite); | 136 out->report_problem(first.addr(), first.safety(), kProblemUnsafeDataWrite); |
124 return PATTERN_UNSAFE; | 137 return PATTERN_UNSAFE; |
125 } | 138 } |
126 | 139 |
127 /* | 140 /* |
128 * Checks the location of linking branches -- to be useful, they must be in | 141 * Checks the location of linking branches -- to be useful, they must be in |
129 * the last bundle slot. | 142 * the last bundle slot. |
130 * | 143 * |
131 * This is not a security check per se, more of a guard against Stupid Compiler | 144 * This is not a security check per se, more of a guard against Stupid Compiler |
132 * Tricks. | 145 * Tricks. |
133 */ | 146 */ |
147 // This function is currently thumb-unsafe due to the -4. Will re-enable when it | |
148 // is 100% clear how to handle, as this is not a safety issue. | |
149 // TODO(mrm) re-enable | |
150 #if 0 | |
134 static PatternMatch check_call_position(const SfiValidator &sfi, | 151 static PatternMatch check_call_position(const SfiValidator &sfi, |
135 const DecodedInstruction &inst, | 152 const DecodedInstruction &inst, |
136 ProblemSink *out) { | 153 ProblemSink *out) { |
137 // Identify linking branches through their definitions: | 154 // Identify linking branches through their definitions: |
138 if (inst.defines_all(kRegisterPc + kRegisterLink)) { | 155 if (inst.defines_all(kRegisterPc + kRegisterLink)) { |
139 uint32_t last_slot = sfi.bundle_for_address(inst.addr()).end_addr() - 4; | 156 uint32_t last_slot = sfi.bundle_for_address(inst.addr()).end_addr() - 4; |
140 if (inst.addr() != last_slot) { | 157 if (inst.addr() != last_slot) { |
141 out->report_problem(inst.addr(), inst.safety(), kProblemMisalignedCall); | 158 out->report_problem(inst.addr(), inst.safety(), kProblemMisalignedCall); |
142 return PATTERN_UNSAFE; | 159 return PATTERN_UNSAFE; |
143 } | 160 } |
144 } | 161 } |
145 return NO_MATCH; | 162 return NO_MATCH; |
146 } | 163 } |
164 #endif | |
147 | 165 |
148 /* | 166 /* |
149 * Checks for instructions that alter any read-only register. | 167 * Checks for instructions that alter any read-only register. |
150 */ | 168 */ |
151 static PatternMatch check_read_only(const SfiValidator &sfi, | 169 static PatternMatch check_read_only(const SfiValidator &sfi, |
152 const DecodedInstruction &inst, | 170 DecodedInstruction insns[], |
153 ProblemSink *out) { | 171 ProblemSink *out) { |
172 DecodedInstruction inst = insns[kMaxPattern - 1]; | |
154 if (inst.defines_any(sfi.read_only_registers())) { | 173 if (inst.defines_any(sfi.read_only_registers())) { |
155 out->report_problem(inst.addr(), inst.safety(), kProblemReadOnlyRegister); | 174 out->report_problem(inst.addr(), inst.safety(), kProblemReadOnlyRegister); |
156 return PATTERN_UNSAFE; | 175 return PATTERN_UNSAFE; |
157 } | 176 } |
158 | 177 |
159 return NO_MATCH; | 178 return NO_MATCH; |
160 } | 179 } |
161 | 180 |
162 /* | 181 /* |
163 * Checks writes to r15 from instructions that aren't branches. | 182 * Checks writes to r15 from instructions that aren't branches. |
164 */ | 183 */ |
165 static PatternMatch check_pc_writes(const SfiValidator &sfi, | 184 static PatternMatch check_pc_writes(const SfiValidator &sfi, |
166 const DecodedInstruction &inst, | 185 DecodedInstruction insns[], |
167 ProblemSink *out) { | 186 ProblemSink *out) { |
187 DecodedInstruction inst = insns[kMaxPattern - 1]; | |
168 if (inst.is_relative_branch() | 188 if (inst.is_relative_branch() |
169 || inst.branch_target_register() != kRegisterNone) { | 189 || inst.branch_target_register() != kRegisterNone) { |
170 // It's a branch. | 190 // It's a branch. |
171 return NO_MATCH; | 191 return NO_MATCH; |
172 } | 192 } |
173 | 193 |
174 if (!inst.defines(nacl_arm_dec::kRegisterPc)) return NO_MATCH; | 194 if (!inst.defines(nacl_arm_dec::kRegisterPc)) return NO_MATCH; |
175 | 195 // TODO(mrm) The clears_bits thing here doesn't save us, secvuln |
176 if (inst.clears_bits(sfi.code_address_mask())) { | 196 if (inst.clears_bits(sfi.code_address_mask())) { |
177 return PATTERN_SAFE; | 197 return PATTERN_SAFE_1; |
178 } else { | 198 } else { |
179 out->report_problem(inst.addr(), inst.safety(), kProblemUnsafeBranch); | 199 out->report_problem(inst.addr(), inst.safety(), kProblemUnsafeBranch); |
180 return PATTERN_UNSAFE; | 200 return PATTERN_UNSAFE; |
181 } | 201 } |
182 } | 202 } |
183 | 203 |
204 /* | |
205 * Groups IT blocks together, sets appropriate condition flags on them. | |
206 */ | |
207 static PatternMatch check_it(const SfiValidator &sfi, | |
208 DecodedInstruction insns[], | |
209 ProblemSink *out) { | |
210 UNREFERENCED_PARAMETER(sfi); | |
211 DecodedInstruction insn_it = insns[0]; | |
212 nacl_arm_dec::ITCond it = insn_it.it(); | |
213 if (!it) | |
214 return NO_MATCH; // Not an IT instruction. | |
215 Instruction::Condition cond = insn_it.condition(); | |
216 if ((cond == Instruction::AL) || (cond == Instruction::UNCONDITIONAL)) { | |
217 out->report_problem(insn_it.addr(), insn_it.safety(), | |
218 kProblemUnconditionalIT); | |
219 return PATTERN_UNSAFE; // This is nonsense, but encodeable. | |
220 } | |
Karl
2011/08/30 19:53:52
Nit. blank line before commented section?
| |
221 // This is an IT instruction. We inform the affected instructions | |
222 // of their affectedness, and accept the pattern. | |
223 uint8_t conddex = 0; | |
224 for (; nacl_arm_dec::it_select(conddex, it) != nacl_arm_dec::NONE; conddex++) | |
225 if (nacl_arm_dec::it_select(conddex, it) == nacl_arm_dec::THEN) | |
226 insns[conddex + 1].set_condition(cond); | |
227 else if (nacl_arm_dec::it_select(conddex, it) == nacl_arm_dec::ELSE) | |
228 insns[conddex + 1].set_condition((Instruction::Condition)(cond ^ 1)); | |
229 else return PATTERN_UNSAFE; /* Should never be reached */ | |
Karl
2011/08/30 19:53:52
Nit. Return should be on separate line.
| |
230 // Check that this insn is allowed at this point in an IT | |
231 switch (insns[conddex + 1].it_safe()) { | |
232 default: | |
233 case nacl_arm_dec::NEVER: return PATTERN_UNSAFE; | |
Karl
2011/08/30 19:53:52
Nit. Return on separate line.
| |
234 case nacl_arm_dec::END: | |
235 if (nacl_arm_dec::it_select(conddex + 1, it) != nacl_arm_dec::NONE) | |
236 return PATTERN_UNSAFE; | |
Karl
2011/08/30 19:53:52
Not clear to me that intentional fall through expe
| |
237 case nacl_arm_dec::ALWAYS: {} // Fall through, we're fine. | |
Karl
2011/08/30 19:53:52
Why use {} instead of break?
| |
238 } | |
239 PatternMatch p = {PATTERN_SAFE_MODE, -(conddex + 1)}; | |
240 return p; | |
241 } | |
184 /********************************************************* | 242 /********************************************************* |
185 * | 243 * |
186 * Implementation of SfiValidator itself. | 244 * Implementation of SfiValidator itself. |
187 * | 245 * |
188 *********************************************************/ | 246 *********************************************************/ |
189 | 247 |
190 SfiValidator::SfiValidator(uint32_t bytes_per_bundle, | 248 SfiValidator::SfiValidator(uint32_t bytes_per_bundle, |
191 uint32_t code_region_bytes, | 249 uint32_t code_region_bytes, |
192 uint32_t data_region_bytes, | 250 uint32_t data_region_bytes, |
193 RegisterList read_only_registers, | 251 RegisterList read_only_registers, |
194 RegisterList data_address_registers) | 252 RegisterList data_address_registers, |
253 uint8_t thumb) | |
195 : bytes_per_bundle_(bytes_per_bundle), | 254 : bytes_per_bundle_(bytes_per_bundle), |
196 data_address_mask_(~(data_region_bytes - 1)), | 255 data_address_mask_(~(data_region_bytes - 1)), |
197 code_address_mask_(~(code_region_bytes - 1) | (bytes_per_bundle - 1)), | 256 code_address_mask_(thumb |
257 ? ~(code_region_bytes - 1) | |
258 : ~(code_region_bytes - 1) | (bytes_per_bundle - 1)), | |
259 code_address_ormask_(thumb | |
260 ? (bytes_per_bundle - 1) | |
261 : 0), | |
198 code_region_bytes_(code_region_bytes), | 262 code_region_bytes_(code_region_bytes), |
199 read_only_registers_(read_only_registers), | 263 read_only_registers_(read_only_registers), |
200 data_address_registers_(data_address_registers), | 264 data_address_registers_(data_address_registers), |
201 decode_state_(nacl_arm_dec::init_decode()) {} | 265 thumb_(thumb), |
266 decode_state_(nacl_arm_dec::init_decode(thumb)) {} | |
202 | 267 |
203 bool SfiValidator::validate(const vector<CodeSegment> &segments, | 268 bool SfiValidator::validate(const vector<CodeSegment> &segments, |
204 ProblemSink *out) { | 269 ProblemSink *out) { |
205 uint32_t base = segments[0].begin_addr(); | 270 uint32_t base = segments[0].begin_addr(); |
206 uint32_t size = segments.back().end_addr() - base; | 271 uint32_t size = segments.back().end_addr() - base; |
207 AddressSet branches(base, size); | 272 AddressSet branches(base, size); |
208 AddressSet critical(base, size); | 273 AddressSet critical(base, size); |
209 | 274 |
210 bool complete_success = true; | 275 bool complete_success = true; |
211 | 276 |
212 for (vector<CodeSegment>::const_iterator it = segments.begin(); | 277 for (vector<CodeSegment>::const_iterator it = segments.begin(); |
213 it != segments.end(); ++it) { | 278 it != segments.end(); ++it) { |
214 complete_success &= validate_fallthrough(*it, out, &branches, &critical); | 279 complete_success &= validate_fallthrough(*it, out, &branches, &critical); |
215 | 280 |
216 if (!out->should_continue()) { | 281 if (!out->should_continue()) { |
217 return false; | 282 return false; |
218 } | 283 } |
219 } | 284 } |
220 | 285 |
221 complete_success &= validate_branches(segments, branches, critical, out); | 286 complete_success &= validate_branches(segments, branches, critical, out); |
222 | 287 |
223 return complete_success; | 288 return complete_success; |
224 } | 289 } |
225 | 290 |
226 bool SfiValidator::validate_fallthrough(const CodeSegment &segment, | 291 bool SfiValidator::validate_fallthrough(const CodeSegment &segment, |
227 ProblemSink *out, | 292 ProblemSink *out, |
228 AddressSet *branches, | 293 AddressSet *branches, |
229 AddressSet *critical) { | 294 AddressSet *critical) { |
230 bool complete_success = true; | 295 bool complete_success = true; |
296 // The list of initial patterns to run (during the first window) | |
Karl
2011/08/30 19:53:52
Indent comment to match code.
| |
297 static const Pattern pre_patterns[] = {&check_it}; | |
298 | |
299 // The list of patterns to run in the second window. | |
Karl
2011/08/30 19:53:52
Indent comment to match code. (apply to all commen
| |
300 static const Pattern patterns[] = { | |
301 &check_read_only, | |
302 &check_pc_writes, | |
303 // TODO(mrm) re-enable (see comment on actual function) | |
304 // &check_call_position, | |
305 &check_store_mask, | |
306 &check_branch_mask, | |
307 &check_data_register_update, | |
308 }; | |
231 | 309 |
232 nacl_arm_dec::Forbidden initial_decoder; | 310 nacl_arm_dec::Forbidden initial_decoder; |
233 // Initialize the previous instruction to a scary BKPT, so patterns all fail. | 311 // Initialize the previous instruction to a scary BKPT, so patterns all fail. |
234 DecodedInstruction pred( | 312 DecodedInstruction bkpt( |
235 0, // Virtual address 0, which will be in a different bundle; | 313 0, // Virtual address 0, which will be in a different bundle; |
236 Instruction(0xE1277777), // The literal-pool-header BKPT instruction; | 314 Instruction(0xE1277777), // The literal-pool-header BKPT instruction; |
237 initial_decoder); // and ensure that it decodes as Forbidden. | 315 initial_decoder); // and ensure that it decodes as Forbidden. |
238 | 316 // TODO(mrm) Figure out how to make this vary automatically |
239 for (uint32_t va = segment.begin_addr(); va != segment.end_addr(); va += 4) { | 317 DecodedInstruction insns[kMaxPattern] = {bkpt, bkpt, bkpt, bkpt, bkpt}; |
240 DecodedInstruction inst(va, segment[va], | 318 DecodedInstruction pre_insns[kMaxPattern] = {bkpt, bkpt, bkpt, bkpt, bkpt}; |
319 uint8_t insn_size = 0; | |
320 uint8_t flush = 0; | |
321 uint32_t va = segment.begin_addr(); | |
322 while (flush < kMaxPattern) { | |
323 uint32_t va_code = va + (thumb_ ? 1 : 0); | |
324 if (va != segment.end_addr()) { | |
325 DecodedInstruction inst(va_code, segment[va], | |
241 nacl_arm_dec::decode(segment[va], decode_state_)); | 326 nacl_arm_dec::decode(segment[va], decode_state_)); |
242 | 327 pre_insns[kMaxPattern - 1] = inst; |
243 if (inst.safety() != nacl_arm_dec::MAY_BE_SAFE) { | 328 insn_size = inst.size(); |
244 out->report_problem(va, inst.safety(), kProblemUnsafe); | 329 if (inst.safety() != nacl_arm_dec::MAY_BE_SAFE) { |
330 out->report_problem(va_code, inst.safety(), kProblemUnsafe); | |
331 if (!out->should_continue()) { | |
332 return false; | |
333 } | |
334 complete_success = false; | |
335 } | |
336 } else { | |
337 pre_insns[kMaxPattern - 1] = bkpt; | |
338 flush++; | |
339 } | |
340 if (va > segment.end_addr()) { | |
341 out->report_problem(va_code - insn_size, nacl_arm_dec::FORBIDDEN, | |
342 kProblemStraddlesSegment); | |
343 complete_success = false; | |
245 if (!out->should_continue()) { | 344 if (!out->should_continue()) { |
246 return false; | 345 return false; |
247 } | 346 } |
248 complete_success = false; | 347 } |
348 complete_success &= apply_patterns(pre_insns, pre_patterns, | |
349 NACL_ARRAY_SIZE(pre_patterns), | |
350 critical, out); | |
351 complete_success &= apply_patterns(insns, patterns, | |
352 NACL_ARRAY_SIZE(patterns), critical, out); | |
353 if (!out->should_continue()) return false; | |
354 | |
355 if (insns[kMaxPattern - 1].is_relative_branch()) { | |
356 branches->add(insns[kMaxPattern - 1].addr()); | |
249 } | 357 } |
250 | 358 |
251 complete_success &= apply_patterns(inst, out); | 359 if (pre_insns[kMaxPattern - 1].is_literal_pool_head() |
252 if (!out->should_continue()) return false; | 360 && is_bundle_head(pre_insns[kMaxPattern - 1].addr())) { |
253 | |
254 complete_success &= apply_patterns(pred, inst, critical, out); | |
255 if (!out->should_continue()) return false; | |
256 | |
257 if (inst.is_relative_branch()) { | |
258 branches->add(inst.addr()); | |
259 } | |
260 | |
261 if (inst.is_literal_pool_head() | |
262 && is_bundle_head(inst.addr())) { | |
263 // Add each instruction in this bundle to the critical set. | 361 // Add each instruction in this bundle to the critical set. |
264 uint32_t last_data_addr = bundle_for_address(va).end_addr(); | 362 // Note, we increment va by 1 every time to deal with |
265 for (; va != last_data_addr; va += 4) { | 363 // variable width instructions. |
364 va = pre_insns[kMaxPattern - 1].addr() - (thumb_ ? 1 : 0); | |
 365 uint32_t last_data_addr = bundle_for_address(va + (thumb_ ? 1 : 0)).end_addr() - 1; 
366 // last_data_addr cannot go beyond our segment | |
367 if (last_data_addr > segment.end_addr()) | |
368 last_data_addr = segment.end_addr(); | |
369 for (; va != last_data_addr; va += 1) { | |
266 critical->add(va); | 370 critical->add(va); |
267 } | 371 } |
268 | 372 // Wipe our slate clean with unsafe breakpoints |
373 for (uint8_t i = 0; i < kMaxPattern - 1; i++) { | |
374 insns[i] = bkpt; | |
375 pre_insns[i] = bkpt; | |
376 } | |
269 // Decrement the virtual address by one instruction, so the for | 377 // Decrement the virtual address by one instruction, so the for |
270 // loop can bump it back forward. This is slightly dirty. | 378 // loop can bump it back forward. This is slightly dirty. |
271 va -= 4; | 379 va -= insn_size; |
272 } | 380 } |
273 | 381 // Pushback |
274 pred = inst; | 382 for (uint8_t i = 0; i < kMaxPattern - 1; i++) |
383 insns[i] = insns[i + 1]; | |
384 insns[kMaxPattern - 1] = pre_insns[0]; | |
385 for (uint8_t i = 0; i < kMaxPattern - 1; i++) | |
386 pre_insns[i] = pre_insns[i + 1]; | |
387 if (flush == 0) | |
388 va += insn_size; | |
275 } | 389 } |
276 | 390 |
277 return complete_success; | 391 return complete_success; |
278 } | 392 } |
279 | 393 |
280 static bool address_contained(uint32_t va, const vector<CodeSegment> &segs) { | 394 static bool address_contained(uint32_t va, const vector<CodeSegment> &segs) { |
281 for (vector<CodeSegment>::const_iterator it = segs.begin(); it != segs.end(); | 395 for (vector<CodeSegment>::const_iterator it = segs.begin(); it != segs.end(); |
282 ++it) { | 396 ++it) { |
283 if (it->contains_address(va)) return true; | 397 if (it->contains_address(va)) return true; |
284 } | 398 } |
285 return false; | 399 return false; |
286 } | 400 } |
287 | 401 |
288 bool SfiValidator::validate_branches(const vector<CodeSegment> &segments, | 402 bool SfiValidator::validate_branches(const vector<CodeSegment> &segments, |
289 const AddressSet &branches, | 403 const AddressSet &branches, |
290 const AddressSet &critical, | 404 const AddressSet &critical, |
291 ProblemSink *out) { | 405 ProblemSink *out) { |
292 bool complete_success = true; | 406 bool complete_success = true; |
407 const uint32_t low_bitmask = 0xFFFFFFFE; | |
293 | 408 |
294 vector<CodeSegment>::const_iterator seg_it = segments.begin(); | 409 vector<CodeSegment>::const_iterator seg_it = segments.begin(); |
295 | 410 |
296 for (AddressSet::Iterator it = branches.begin(); it != branches.end(); ++it) { | 411 for (AddressSet::Iterator it = branches.begin(); it != branches.end(); ++it) { |
297 uint32_t va = *it; | 412 uint32_t va = *it; |
298 | 413 |
299 // Invariant: all addresses in branches are covered by some segment; | 414 // Invariant: all addresses in branches are covered by some segment; |
300 // segments are in sorted order. | 415 // segments are in sorted order. |
301 while (!seg_it->contains_address(va)) { | 416 while (!seg_it->contains_address(va)) { |
302 ++seg_it; | 417 ++seg_it; |
303 } | 418 } |
304 | 419 |
305 const CodeSegment &segment = *seg_it; | 420 const CodeSegment &segment = *seg_it; |
306 | 421 |
307 DecodedInstruction inst(va, segment[va], | 422 DecodedInstruction inst(va, segment[va], |
308 nacl_arm_dec::decode(segment[va], decode_state_)); | 423 nacl_arm_dec::decode(segment[va], decode_state_)); |
309 | 424 |
310 // We know it is_relative_branch(), so we can simply call: | 425 // We know it is_relative_branch(), so we can simply call: |
311 uint32_t target_va = inst.branch_target(); | 426 uint32_t target_va = inst.branch_target(); |
312 if (address_contained(target_va, segments)) { | 427 if (address_contained(target_va & low_bitmask, segments)) { |
313 if (critical.contains(target_va)) { | 428 if (critical.contains(target_va & low_bitmask)) { |
314 out->report_problem(va, inst.safety(), kProblemBranchSplitsPattern, | 429 out->report_problem(va, inst.safety(), kProblemBranchSplitsPattern, |
315 target_va); | 430 target_va); |
316 if (!out->should_continue()) { | 431 if (!out->should_continue()) { |
317 return false; | 432 return false; |
318 } | 433 } |
319 complete_success = false; | 434 complete_success = false; |
320 } | 435 } |
321 } else if ((target_va & code_address_mask()) == 0) { | 436 } else if ((((target_va & (~code_address_mask())) |
322 // Allow bundle-aligned, in-range direct jump. | 437 | code_address_ormask()) & low_bitmask) == |
438 (target_va & low_bitmask)) { | |
439 // If the masking operations would not modify the va, it is allowed | |
440 // Subltety: A non-register branch cannot transition between thumb and | |
441 // non-thumb mode, so we intentionally omit the low bit. | |
323 } else { | 442 } else { |
324 out->report_problem(va, inst.safety(), kProblemBranchInvalidDest, | 443 out->report_problem(va | thumb_, inst.safety(), |
325 target_va); | 444 kProblemBranchInvalidDest, target_va); |
326 if (!out->should_continue()) { | 445 if (!out->should_continue()) { |
327 return false; | 446 return false; |
328 } | 447 } |
329 complete_success = false; | 448 complete_success = false; |
330 } | 449 } |
331 } | 450 } |
332 | 451 |
333 return complete_success; | 452 return complete_success; |
334 } | 453 } |
335 | 454 |
336 bool SfiValidator::apply_patterns(const DecodedInstruction &inst, | 455 bool SfiValidator::apply_patterns(DecodedInstruction insns[], |
456 const Pattern patterns[], unsigned int nPatterns, AddressSet *critical, | |
337 ProblemSink *out) { | 457 ProblemSink *out) { |
338 // Single-instruction patterns | |
339 typedef PatternMatch (*OneInstPattern)(const SfiValidator &, | |
340 const DecodedInstruction &, | |
341 ProblemSink *out); | |
342 static const OneInstPattern one_inst_patterns[] = { | |
343 &check_read_only, | |
344 &check_pc_writes, | |
345 &check_call_position, | |
346 }; | |
347 | 458 |
348 bool complete_success = true; | 459 bool complete_success = true; |
349 | 460 |
350 for (uint32_t i = 0; i < NACL_ARRAY_SIZE(one_inst_patterns); i++) { | 461 for (uint32_t i = 0; i < nPatterns; i++) { |
351 PatternMatch r = one_inst_patterns[i](*this, inst, out); | 462 PatternMatch r = patterns[i](*this, insns, out); |
352 switch (r) { | 463 switch (r.pm_mode) { |
353 case PATTERN_SAFE: | 464 case NO_MATCH_MODE: break; |
354 case NO_MATCH: | |
355 break; | |
356 | 465 |
357 case PATTERN_UNSAFE: | 466 case PATTERN_UNSAFE_MODE: |
358 complete_success = false; | |
359 break; | |
360 } | |
361 } | |
362 | |
363 return complete_success; | |
364 } | |
365 | |
366 bool SfiValidator::apply_patterns(const DecodedInstruction &first, | |
367 const DecodedInstruction &second, AddressSet *critical, ProblemSink *out) { | |
368 // Type for two-instruction pattern functions | |
369 typedef PatternMatch (*TwoInstPattern)(const SfiValidator &, | |
370 const DecodedInstruction &first, | |
371 const DecodedInstruction &second, | |
372 ProblemSink *out); | |
373 | |
374 // The list of patterns -- defined in static functions up top. | |
375 static const TwoInstPattern two_inst_patterns[] = { | |
376 &check_store_mask, | |
377 &check_branch_mask, | |
378 &check_data_register_update, | |
379 }; | |
380 | |
381 bool complete_success = true; | |
382 | |
383 for (uint32_t i = 0; i < NACL_ARRAY_SIZE(two_inst_patterns); i++) { | |
384 PatternMatch r = two_inst_patterns[i](*this, first, second, out); | |
385 switch (r) { | |
386 case NO_MATCH: break; | |
387 | |
388 case PATTERN_UNSAFE: | |
389 // Pattern is in charge of reporting specific issue. | 467 // Pattern is in charge of reporting specific issue. |
390 complete_success = false; | 468 complete_success = false; |
391 break; | 469 break; |
392 | 470 |
393 case PATTERN_SAFE: | 471 case PATTERN_SAFE_MODE: |
394 if (bundle_for_address(first.addr()) | 472 bool in_bundle = true; |
395 != bundle_for_address(second.addr())) { | 473 if (r.size >= 0) { |
396 complete_success = false; | 474 for (int8_t i = kMaxPattern - 2; i >= kMaxPattern - r.size; i--) { |
397 out->report_problem(first.addr(), first.safety(), | 475 in_bundle &= (bundle_for_address(insns[i].addr()) == |
398 kProblemPatternCrossesBundle); | 476 bundle_for_address(insns[i + 1].addr())); |
477 critical->add(insns[i + 1].addr()); | |
478 } | |
479 if (!in_bundle) { | |
480 complete_success = false; | |
481 out->report_problem(insns[kMaxPattern-r.size].addr(), | |
482 insns[kMaxPattern-r.size].safety(), | |
483 kProblemPatternCrossesBundle); | |
484 } | |
399 } else { | 485 } else { |
400 critical->add(second.addr()); | 486 for (int8_t i = 1; i < (-r.size); i++) { |
487 in_bundle &= (bundle_for_address(insns[i].addr()) == | |
488 bundle_for_address(insns[i - 1].addr())); | |
489 critical->add(insns[i].addr()); | |
490 } | |
491 if (!in_bundle) { | |
492 complete_success = false; | |
493 out->report_problem(insns[0].addr(), | |
494 insns[0].safety(), | |
495 kProblemPatternCrossesBundle); | |
496 } | |
401 } | 497 } |
402 break; | 498 break; |
403 } | 499 } |
404 } | 500 } |
405 return complete_success; | 501 return complete_success; |
406 } | 502 } |
407 | 503 |
408 bool SfiValidator::is_data_address_register(Register r) const { | 504 bool SfiValidator::is_data_address_register(Register r) const { |
409 return data_address_registers_[r]; | 505 return data_address_registers_[r]; |
410 } | 506 } |
411 | 507 |
412 const Bundle SfiValidator::bundle_for_address(uint32_t address) const { | 508 const Bundle SfiValidator::bundle_for_address(uint32_t address) const { |
413 uint32_t base = address - (address % bytes_per_bundle_); | 509 // TODO(mrm) Is it a security flaw that this can point at below entry? |
414 return Bundle(base, bytes_per_bundle_); | 510 uint32_t shift_address = address - code_address_ormask_; |
511 uint32_t base = shift_address - (shift_address % bytes_per_bundle_); | |
512 return Bundle(base + code_address_ormask_, bytes_per_bundle_); | |
415 } | 513 } |
416 | 514 |
417 bool SfiValidator::is_bundle_head(uint32_t address) const { | 515 bool SfiValidator::is_bundle_head(uint32_t address) const { |
418 return (address % bytes_per_bundle_) == 0; | 516 return (address % bytes_per_bundle_) == code_address_ormask_; |
419 } | 517 } |
420 | 518 |
421 | 519 |
422 /* | 520 /* |
423 * We eagerly compute both safety and defs here, because it turns out to be | 521 * We eagerly compute both safety and defs here, because it turns out to be |
424 * faster by 10% than doing either lazily and memoizing the result. | 522 * faster by 10% than doing either lazily and memoizing the result. |
425 */ | 523 */ |
426 DecodedInstruction::DecodedInstruction(uint32_t vaddr, | 524 DecodedInstruction::DecodedInstruction(uint32_t vaddr, |
427 Instruction inst, | 525 Instruction inst, |
428 const ClassDecoder &decoder) | 526 const ClassDecoder &decoder) |
429 : vaddr_(vaddr), | 527 : vaddr_(vaddr), |
430 inst_(inst), | 528 inst_(inst), |
431 decoder_(&decoder), | 529 decoder_(&decoder), |
432 safety_(decoder.safety(inst_)), | 530 safety_(decoder.safety(inst_)), |
433 defs_(decoder.defs(inst_)) | 531 defs_(decoder.defs(inst_)), |
532 condition_(Instruction::UNCONDITIONAL) | |
434 {} | 533 {} |
435 | 534 |
436 } // namespace | 535 } // namespace |
OLD | NEW |