OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #if V8_TARGET_ARCH_ARM | 9 #if V8_TARGET_ARCH_ARM |
10 | 10 |
(...skipping 18 matching lines...) |
29 } | 29 } |
30 | 30 |
31 | 31 |
32 void MacroAssembler::Jump(Register target, Condition cond) { | 32 void MacroAssembler::Jump(Register target, Condition cond) { |
33 bx(target, cond); | 33 bx(target, cond); |
34 } | 34 } |
35 | 35 |
36 | 36 |
37 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, | 37 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, |
38 Condition cond) { | 38 Condition cond) { |
39 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 39 DCHECK(RelocInfo::IsCodeTarget(rmode)); |
40 mov(pc, Operand(target, rmode), LeaveCC, cond); | 40 mov(pc, Operand(target, rmode), LeaveCC, cond); |
41 } | 41 } |
42 | 42 |
43 | 43 |
44 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, | 44 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, |
45 Condition cond) { | 45 Condition cond) { |
46 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | 46 DCHECK(!RelocInfo::IsCodeTarget(rmode)); |
47 Jump(reinterpret_cast<intptr_t>(target), rmode, cond); | 47 Jump(reinterpret_cast<intptr_t>(target), rmode, cond); |
48 } | 48 } |
49 | 49 |
50 | 50 |
51 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, | 51 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, |
52 Condition cond) { | 52 Condition cond) { |
53 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 53 DCHECK(RelocInfo::IsCodeTarget(rmode)); |
54 // 'code' is always generated ARM code, never THUMB code | 54 // 'code' is always generated ARM code, never THUMB code |
55 AllowDeferredHandleDereference embedding_raw_address; | 55 AllowDeferredHandleDereference embedding_raw_address; |
56 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); | 56 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); |
57 } | 57 } |
58 | 58 |
59 | 59 |
60 int MacroAssembler::CallSize(Register target, Condition cond) { | 60 int MacroAssembler::CallSize(Register target, Condition cond) { |
61 return kInstrSize; | 61 return kInstrSize; |
62 } | 62 } |
63 | 63 |
64 | 64 |
65 void MacroAssembler::Call(Register target, Condition cond) { | 65 void MacroAssembler::Call(Register target, Condition cond) { |
66 // Block constant pool for the call instruction sequence. | 66 // Block constant pool for the call instruction sequence. |
67 BlockConstPoolScope block_const_pool(this); | 67 BlockConstPoolScope block_const_pool(this); |
68 Label start; | 68 Label start; |
69 bind(&start); | 69 bind(&start); |
70 blx(target, cond); | 70 blx(target, cond); |
71 ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); | 71 DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start)); |
72 } | 72 } |
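The DCHECK above ties CallSize() to the bytes actually emitted; blocking the constant pool is what makes that prediction safe, since a pool dumped mid-sequence would change the length. A minimal standalone C++ sketch of that invariant (illustrative names, not V8 API):

    #include <cassert>
    #include <cstddef>

    struct SketchAssembler {
      static const size_t kInstrSize = 4;  // fixed-width ARM encoding
      size_t pc = 0;                       // bytes emitted so far
      size_t CallSize() const { return kInstrSize; }  // blx is one instr
      void Call() {
        size_t start = pc;
        pc += kInstrSize;                  // emit blx
        assert(pc - start == CallSize());  // prediction == emission
      }
    };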
73 | 73 |
74 | 74 |
75 int MacroAssembler::CallSize( | 75 int MacroAssembler::CallSize( |
76 Address target, RelocInfo::Mode rmode, Condition cond) { | 76 Address target, RelocInfo::Mode rmode, Condition cond) { |
77 Instr mov_instr = cond | MOV | LeaveCC; | 77 Instr mov_instr = cond | MOV | LeaveCC; |
78 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); | 78 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); |
79 return kInstrSize + | 79 return kInstrSize + |
80 mov_operand.instructions_required(this, mov_instr) * kInstrSize; | 80 mov_operand.instructions_required(this, mov_instr) * kInstrSize; |
81 } | 81 } |
(...skipping 49 matching lines...) |
131 | 131 |
132 // Statement positions are expected to be recorded when the target | 132 // Statement positions are expected to be recorded when the target |
133 // address is loaded. The mov method will automatically record | 133 // address is loaded. The mov method will automatically record |
134 // positions when pc is the target. Since this is not the case here, | 134 // positions when pc is the target. Since this is not the case here, |
135 // we have to do it explicitly. | 135 // we have to do it explicitly. |
136 positions_recorder()->WriteRecordedPositions(); | 136 positions_recorder()->WriteRecordedPositions(); |
137 | 137 |
138 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode)); | 138 mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode)); |
139 blx(ip, cond); | 139 blx(ip, cond); |
140 | 140 |
141 ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); | 141 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); |
142 if (mode == NEVER_INLINE_TARGET_ADDRESS) { | 142 if (mode == NEVER_INLINE_TARGET_ADDRESS) { |
143 set_predictable_code_size(old_predictable_code_size); | 143 set_predictable_code_size(old_predictable_code_size); |
144 } | 144 } |
145 } | 145 } |
146 | 146 |
147 | 147 |
148 int MacroAssembler::CallSize(Handle<Code> code, | 148 int MacroAssembler::CallSize(Handle<Code> code, |
149 RelocInfo::Mode rmode, | 149 RelocInfo::Mode rmode, |
150 TypeFeedbackId ast_id, | 150 TypeFeedbackId ast_id, |
151 Condition cond) { | 151 Condition cond) { |
152 AllowDeferredHandleDereference using_raw_address; | 152 AllowDeferredHandleDereference using_raw_address; |
153 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); | 153 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); |
154 } | 154 } |
155 | 155 |
156 | 156 |
157 void MacroAssembler::Call(Handle<Code> code, | 157 void MacroAssembler::Call(Handle<Code> code, |
158 RelocInfo::Mode rmode, | 158 RelocInfo::Mode rmode, |
159 TypeFeedbackId ast_id, | 159 TypeFeedbackId ast_id, |
160 Condition cond, | 160 Condition cond, |
161 TargetAddressStorageMode mode) { | 161 TargetAddressStorageMode mode) { |
162 Label start; | 162 Label start; |
163 bind(&start); | 163 bind(&start); |
164 ASSERT(RelocInfo::IsCodeTarget(rmode)); | 164 DCHECK(RelocInfo::IsCodeTarget(rmode)); |
165 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { | 165 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { |
166 SetRecordedAstId(ast_id); | 166 SetRecordedAstId(ast_id); |
167 rmode = RelocInfo::CODE_TARGET_WITH_ID; | 167 rmode = RelocInfo::CODE_TARGET_WITH_ID; |
168 } | 168 } |
169 // 'code' is always generated ARM code, never THUMB code | 169 // 'code' is always generated ARM code, never THUMB code |
170 AllowDeferredHandleDereference embedding_raw_address; | 170 AllowDeferredHandleDereference embedding_raw_address; |
171 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode); | 171 Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode); |
172 } | 172 } |
173 | 173 |
174 | 174 |
(...skipping 40 matching lines...) |
215 mov(ip, Operand(handle)); | 215 mov(ip, Operand(handle)); |
216 push(ip); | 216 push(ip); |
217 } | 217 } |
218 | 218 |
219 | 219 |
220 void MacroAssembler::Move(Register dst, Handle<Object> value) { | 220 void MacroAssembler::Move(Register dst, Handle<Object> value) { |
221 AllowDeferredHandleDereference smi_check; | 221 AllowDeferredHandleDereference smi_check; |
222 if (value->IsSmi()) { | 222 if (value->IsSmi()) { |
223 mov(dst, Operand(value)); | 223 mov(dst, Operand(value)); |
224 } else { | 224 } else { |
225 ASSERT(value->IsHeapObject()); | 225 DCHECK(value->IsHeapObject()); |
226 if (isolate()->heap()->InNewSpace(*value)) { | 226 if (isolate()->heap()->InNewSpace(*value)) { |
227 Handle<Cell> cell = isolate()->factory()->NewCell(value); | 227 Handle<Cell> cell = isolate()->factory()->NewCell(value); |
228 mov(dst, Operand(cell)); | 228 mov(dst, Operand(cell)); |
229 ldr(dst, FieldMemOperand(dst, Cell::kValueOffset)); | 229 ldr(dst, FieldMemOperand(dst, Cell::kValueOffset)); |
230 } else { | 230 } else { |
231 mov(dst, Operand(value)); | 231 mov(dst, Operand(value)); |
232 } | 232 } |
233 } | 233 } |
234 } | 234 } |
235 | 235 |
(...skipping 11 matching lines...) |
247 } | 247 } |
248 } | 248 } |
249 | 249 |
250 | 250 |
251 void MacroAssembler::Mls(Register dst, Register src1, Register src2, | 251 void MacroAssembler::Mls(Register dst, Register src1, Register src2, |
252 Register srcA, Condition cond) { | 252 Register srcA, Condition cond) { |
253 if (CpuFeatures::IsSupported(MLS)) { | 253 if (CpuFeatures::IsSupported(MLS)) { |
254 CpuFeatureScope scope(this, MLS); | 254 CpuFeatureScope scope(this, MLS); |
255 mls(dst, src1, src2, srcA, cond); | 255 mls(dst, src1, src2, srcA, cond); |
256 } else { | 256 } else { |
257 ASSERT(!srcA.is(ip)); | 257 DCHECK(!srcA.is(ip)); |
258 mul(ip, src1, src2, LeaveCC, cond); | 258 mul(ip, src1, src2, LeaveCC, cond); |
259 sub(dst, srcA, ip, LeaveCC, cond); | 259 sub(dst, srcA, ip, LeaveCC, cond); |
260 } | 260 } |
261 } | 261 } |
262 | 262 |
263 | 263 |
264 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, | 264 void MacroAssembler::And(Register dst, Register src1, const Operand& src2, |
265 Condition cond) { | 265 Condition cond) { |
266 if (!src2.is_reg() && | 266 if (!src2.is_reg() && |
267 !src2.must_output_reloc_info(this) && | 267 !src2.must_output_reloc_info(this) && |
268 src2.immediate() == 0) { | 268 src2.immediate() == 0) { |
269 mov(dst, Operand::Zero(), LeaveCC, cond); | 269 mov(dst, Operand::Zero(), LeaveCC, cond); |
270 } else if (!(src2.instructions_required(this) == 1) && | 270 } else if (!(src2.instructions_required(this) == 1) && |
271 !src2.must_output_reloc_info(this) && | 271 !src2.must_output_reloc_info(this) && |
272 CpuFeatures::IsSupported(ARMv7) && | 272 CpuFeatures::IsSupported(ARMv7) && |
273 IsPowerOf2(src2.immediate() + 1)) { | 273 IsPowerOf2(src2.immediate() + 1)) { |
274 ubfx(dst, src1, 0, | 274 ubfx(dst, src1, 0, |
275 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); | 275 WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); |
276 } else { | 276 } else { |
277 and_(dst, src1, src2, LeaveCC, cond); | 277 and_(dst, src1, src2, LeaveCC, cond); |
278 } | 278 } |
279 } | 279 } |
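The ubfx branch above relies on a bit trick: if src2.immediate() + 1 is a power of two, the immediate is a contiguous run of low bits, so the AND is exactly an unsigned bit-field extract of WhichPowerOf2(immediate + 1) bits starting at bit 0. A hedged sketch of the equivalence (GCC/Clang builtin, masks of width 1..31):

    #include <cassert>
    #include <cstdint>

    uint32_t AndViaUbfx(uint32_t src, uint32_t mask) {
      // mask must be 2^width - 1, i.e. 0b0...01...1, width in [1, 31].
      assert(mask != 0 && mask != 0xFFFFFFFFu && ((mask + 1) & mask) == 0);
      unsigned width = __builtin_popcount(mask);
      return (src << (32 - width)) >> (32 - width);  // ubfx src, 0, width
    }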
280 | 280 |
281 | 281 |
282 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, | 282 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width, |
283 Condition cond) { | 283 Condition cond) { |
284 ASSERT(lsb < 32); | 284 DCHECK(lsb < 32); |
285 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 285 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { |
286 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 286 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); |
287 and_(dst, src1, Operand(mask), LeaveCC, cond); | 287 and_(dst, src1, Operand(mask), LeaveCC, cond); |
288 if (lsb != 0) { | 288 if (lsb != 0) { |
289 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond); | 289 mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond); |
290 } | 290 } |
291 } else { | 291 } else { |
292 ubfx(dst, src1, lsb, width, cond); | 292 ubfx(dst, src1, lsb, width, cond); |
293 } | 293 } |
294 } | 294 } |
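The fallback mask above, (1 << (width + lsb)) - 1 - ((1 << lsb) - 1), selects exactly bits [lsb, lsb + width); the LSR then moves the field down to bit 0. The same computation in plain C++ (assumes lsb + width < 32, as the callers here do):

    #include <cstdint>

    uint32_t UbfxFallback(uint32_t src, int lsb, int width) {
      uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
      uint32_t field = src & mask;  // and_(dst, src1, Operand(mask))
      return field >> lsb;          // mov(dst, Operand(dst, LSR, lsb))
    }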
295 | 295 |
296 | 296 |
297 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, | 297 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, |
298 Condition cond) { | 298 Condition cond) { |
299 ASSERT(lsb < 32); | 299 DCHECK(lsb < 32); |
300 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 300 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { |
301 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 301 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); |
302 and_(dst, src1, Operand(mask), LeaveCC, cond); | 302 and_(dst, src1, Operand(mask), LeaveCC, cond); |
303 int shift_up = 32 - lsb - width; | 303 int shift_up = 32 - lsb - width; |
304 int shift_down = lsb + shift_up; | 304 int shift_down = lsb + shift_up; |
305 if (shift_up != 0) { | 305 if (shift_up != 0) { |
306 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond); | 306 mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond); |
307 } | 307 } |
308 if (shift_down != 0) { | 308 if (shift_down != 0) { |
309 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond); | 309 mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond); |
310 } | 310 } |
311 } else { | 311 } else { |
312 sbfx(dst, src1, lsb, width, cond); | 312 sbfx(dst, src1, lsb, width, cond); |
313 } | 313 } |
314 } | 314 } |
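The two shifts in the fallback implement sign extension: LSL by shift_up top-justifies the field so its top bit lands in bit 31, and the ASR by shift_down (always 32 - width) drags that bit back down as the sign. A sketch (relies on arithmetic right shift of negative values, as on ARM):

    #include <cstdint>

    int32_t SbfxFallback(uint32_t src, int lsb, int width) {
      uint32_t mask = ((1u << (width + lsb)) - 1) - ((1u << lsb) - 1);
      uint32_t field = src & mask;
      int shift_up = 32 - lsb - width;       // top-justify the field
      int shift_down = lsb + shift_up;       // always 32 - width
      return static_cast<int32_t>(field << shift_up) >> shift_down;
    }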
315 | 315 |
316 | 316 |
317 void MacroAssembler::Bfi(Register dst, | 317 void MacroAssembler::Bfi(Register dst, |
318 Register src, | 318 Register src, |
319 Register scratch, | 319 Register scratch, |
320 int lsb, | 320 int lsb, |
321 int width, | 321 int width, |
322 Condition cond) { | 322 Condition cond) { |
323 ASSERT(0 <= lsb && lsb < 32); | 323 DCHECK(0 <= lsb && lsb < 32); |
324 ASSERT(0 <= width && width < 32); | 324 DCHECK(0 <= width && width < 32); |
325 ASSERT(lsb + width < 32); | 325 DCHECK(lsb + width < 32); |
326 ASSERT(!scratch.is(dst)); | 326 DCHECK(!scratch.is(dst)); |
327 if (width == 0) return; | 327 if (width == 0) return; |
328 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 328 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { |
329 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 329 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); |
330 bic(dst, dst, Operand(mask)); | 330 bic(dst, dst, Operand(mask)); |
331 and_(scratch, src, Operand((1 << width) - 1)); | 331 and_(scratch, src, Operand((1 << width) - 1)); |
332 mov(scratch, Operand(scratch, LSL, lsb)); | 332 mov(scratch, Operand(scratch, LSL, lsb)); |
333 orr(dst, dst, scratch); | 333 orr(dst, dst, scratch); |
334 } else { | 334 } else { |
335 bfi(dst, src, lsb, width, cond); | 335 bfi(dst, src, lsb, width, cond); |
336 } | 336 } |
337 } | 337 } |
338 | 338 |
339 | 339 |
340 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, | 340 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, |
341 Condition cond) { | 341 Condition cond) { |
342 ASSERT(lsb < 32); | 342 DCHECK(lsb < 32); |
343 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 343 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { |
344 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); | 344 int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1); |
345 bic(dst, src, Operand(mask)); | 345 bic(dst, src, Operand(mask)); |
346 } else { | 346 } else { |
347 Move(dst, src, cond); | 347 Move(dst, src, cond); |
348 bfc(dst, lsb, width, cond); | 348 bfc(dst, lsb, width, cond); |
349 } | 349 } |
350 } | 350 } |
351 | 351 |
352 | 352 |
353 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, | 353 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, |
354 Condition cond) { | 354 Condition cond) { |
355 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { | 355 if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { |
356 ASSERT(!dst.is(pc) && !src.rm().is(pc)); | 356 DCHECK(!dst.is(pc) && !src.rm().is(pc)); |
357 ASSERT((satpos >= 0) && (satpos <= 31)); | 357 DCHECK((satpos >= 0) && (satpos <= 31)); |
358 | 358 |
359 // These asserts are required to ensure compatibility with the ARMv7 | 359 // These asserts are required to ensure compatibility with the ARMv7 |
360 // implementation. | 360 // implementation. |
361 ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL)); | 361 DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL)); |
362 ASSERT(src.rs().is(no_reg)); | 362 DCHECK(src.rs().is(no_reg)); |
363 | 363 |
364 Label done; | 364 Label done; |
365 int satval = (1 << satpos) - 1; | 365 int satval = (1 << satpos) - 1; |
366 | 366 |
367 if (cond != al) { | 367 if (cond != al) { |
368 b(NegateCondition(cond), &done); // Skip saturate if !condition. | 368 b(NegateCondition(cond), &done); // Skip saturate if !condition. |
369 } | 369 } |
370 if (!(src.is_reg() && dst.is(src.rm()))) { | 370 if (!(src.is_reg() && dst.is(src.rm()))) { |
371 mov(dst, src); | 371 mov(dst, src); |
372 } | 372 } |
373 tst(dst, Operand(~satval)); | 373 tst(dst, Operand(~satval)); |
374 b(eq, &done); | 374 b(eq, &done); |
375 mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative. | 375 mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative. |
376 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive. | 376 mov(dst, Operand(satval), LeaveCC, pl); // satval if positive. |
377 bind(&done); | 377 bind(&done); |
378 } else { | 378 } else { |
379 usat(dst, satpos, src, cond); | 379 usat(dst, satpos, src, cond); |
380 } | 380 } |
381 } | 381 } |
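Semantically, the emulation clamps a signed input into [0, 2^satpos - 1]: after the tst, the mi path writes 0 for negative inputs and the pl path writes satval for too-large ones. The same computation as a standalone sketch:

    #include <cstdint>

    uint32_t UsatValue(int32_t value, int satpos) {
      uint32_t satval = (1u << satpos) - 1;  // largest result, satpos <= 31
      if (value < 0) return 0;               // the "mi" arm
      if (static_cast<uint32_t>(value) > satval) return satval;  // "pl" arm
      return static_cast<uint32_t>(value);   // already in range
    }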
382 | 382 |
383 | 383 |
384 void MacroAssembler::Load(Register dst, | 384 void MacroAssembler::Load(Register dst, |
385 const MemOperand& src, | 385 const MemOperand& src, |
386 Representation r) { | 386 Representation r) { |
387 ASSERT(!r.IsDouble()); | 387 DCHECK(!r.IsDouble()); |
388 if (r.IsInteger8()) { | 388 if (r.IsInteger8()) { |
389 ldrsb(dst, src); | 389 ldrsb(dst, src); |
390 } else if (r.IsUInteger8()) { | 390 } else if (r.IsUInteger8()) { |
391 ldrb(dst, src); | 391 ldrb(dst, src); |
392 } else if (r.IsInteger16()) { | 392 } else if (r.IsInteger16()) { |
393 ldrsh(dst, src); | 393 ldrsh(dst, src); |
394 } else if (r.IsUInteger16()) { | 394 } else if (r.IsUInteger16()) { |
395 ldrh(dst, src); | 395 ldrh(dst, src); |
396 } else { | 396 } else { |
397 ldr(dst, src); | 397 ldr(dst, src); |
398 } | 398 } |
399 } | 399 } |
400 | 400 |
401 | 401 |
402 void MacroAssembler::Store(Register src, | 402 void MacroAssembler::Store(Register src, |
403 const MemOperand& dst, | 403 const MemOperand& dst, |
404 Representation r) { | 404 Representation r) { |
405 ASSERT(!r.IsDouble()); | 405 DCHECK(!r.IsDouble()); |
406 if (r.IsInteger8() || r.IsUInteger8()) { | 406 if (r.IsInteger8() || r.IsUInteger8()) { |
407 strb(src, dst); | 407 strb(src, dst); |
408 } else if (r.IsInteger16() || r.IsUInteger16()) { | 408 } else if (r.IsInteger16() || r.IsUInteger16()) { |
409 strh(src, dst); | 409 strh(src, dst); |
410 } else { | 410 } else { |
411 if (r.IsHeapObject()) { | 411 if (r.IsHeapObject()) { |
412 AssertNotSmi(src); | 412 AssertNotSmi(src); |
413 } else if (r.IsSmi()) { | 413 } else if (r.IsSmi()) { |
414 AssertSmi(src); | 414 AssertSmi(src); |
415 } | 415 } |
(...skipping 22 matching lines...) |
438 Heap::RootListIndex index, | 438 Heap::RootListIndex index, |
439 Condition cond) { | 439 Condition cond) { |
440 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); | 440 str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); |
441 } | 441 } |
442 | 442 |
443 | 443 |
444 void MacroAssembler::InNewSpace(Register object, | 444 void MacroAssembler::InNewSpace(Register object, |
445 Register scratch, | 445 Register scratch, |
446 Condition cond, | 446 Condition cond, |
447 Label* branch) { | 447 Label* branch) { |
448 ASSERT(cond == eq || cond == ne); | 448 DCHECK(cond == eq || cond == ne); |
449 and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); | 449 and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); |
450 cmp(scratch, Operand(ExternalReference::new_space_start(isolate()))); | 450 cmp(scratch, Operand(ExternalReference::new_space_start(isolate()))); |
451 b(cond, branch); | 451 b(cond, branch); |
452 } | 452 } |
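The test works because V8's new space is an aligned, power-of-two sized region: masking any address inside it with new_space_mask recovers new_space_start, so a single AND plus compare decides membership. A one-line sketch of the trick:

    #include <cstdint>

    // Assumes 'start' is 'mask'-aligned and the space spans exactly the
    // addresses where (addr & mask) == start, as the heap layout guarantees.
    bool InNewSpaceSketch(uintptr_t addr, uintptr_t start, uintptr_t mask) {
      return (addr & mask) == start;
    }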
453 | 453 |
454 | 454 |
455 void MacroAssembler::RecordWriteField( | 455 void MacroAssembler::RecordWriteField( |
456 Register object, | 456 Register object, |
457 int offset, | 457 int offset, |
458 Register value, | 458 Register value, |
459 Register dst, | 459 Register dst, |
460 LinkRegisterStatus lr_status, | 460 LinkRegisterStatus lr_status, |
461 SaveFPRegsMode save_fp, | 461 SaveFPRegsMode save_fp, |
462 RememberedSetAction remembered_set_action, | 462 RememberedSetAction remembered_set_action, |
463 SmiCheck smi_check, | 463 SmiCheck smi_check, |
464 PointersToHereCheck pointers_to_here_check_for_value) { | 464 PointersToHereCheck pointers_to_here_check_for_value) { |
465 // First, check if a write barrier is even needed. The tests below | 465 // First, check if a write barrier is even needed. The tests below |
466 // catch stores of Smis. | 466 // catch stores of Smis. |
467 Label done; | 467 Label done; |
468 | 468 |
469 // Skip barrier if writing a smi. | 469 // Skip barrier if writing a smi. |
470 if (smi_check == INLINE_SMI_CHECK) { | 470 if (smi_check == INLINE_SMI_CHECK) { |
471 JumpIfSmi(value, &done); | 471 JumpIfSmi(value, &done); |
472 } | 472 } |
473 | 473 |
474 // Although the object register is tagged, the offset is relative to the start | 474 // Although the object register is tagged, the offset is relative to the start |
475 // of the object, so the offset must be a multiple of kPointerSize. | 475 // of the object, so the offset must be a multiple of kPointerSize. |
476 ASSERT(IsAligned(offset, kPointerSize)); | 476 DCHECK(IsAligned(offset, kPointerSize)); |
477 | 477 |
478 add(dst, object, Operand(offset - kHeapObjectTag)); | 478 add(dst, object, Operand(offset - kHeapObjectTag)); |
479 if (emit_debug_code()) { | 479 if (emit_debug_code()) { |
480 Label ok; | 480 Label ok; |
481 tst(dst, Operand((1 << kPointerSizeLog2) - 1)); | 481 tst(dst, Operand((1 << kPointerSizeLog2) - 1)); |
482 b(eq, &ok); | 482 b(eq, &ok); |
483 stop("Unaligned cell in write barrier"); | 483 stop("Unaligned cell in write barrier"); |
484 bind(&ok); | 484 bind(&ok); |
485 } | 485 } |
486 | 486 |
(...skipping 92 matching lines...) |
579 // tag is shifted away. | 579 // tag is shifted away. |
580 void MacroAssembler::RecordWrite( | 580 void MacroAssembler::RecordWrite( |
581 Register object, | 581 Register object, |
582 Register address, | 582 Register address, |
583 Register value, | 583 Register value, |
584 LinkRegisterStatus lr_status, | 584 LinkRegisterStatus lr_status, |
585 SaveFPRegsMode fp_mode, | 585 SaveFPRegsMode fp_mode, |
586 RememberedSetAction remembered_set_action, | 586 RememberedSetAction remembered_set_action, |
587 SmiCheck smi_check, | 587 SmiCheck smi_check, |
588 PointersToHereCheck pointers_to_here_check_for_value) { | 588 PointersToHereCheck pointers_to_here_check_for_value) { |
589 ASSERT(!object.is(value)); | 589 DCHECK(!object.is(value)); |
590 if (emit_debug_code()) { | 590 if (emit_debug_code()) { |
591 ldr(ip, MemOperand(address)); | 591 ldr(ip, MemOperand(address)); |
592 cmp(ip, value); | 592 cmp(ip, value); |
593 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 593 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
594 } | 594 } |
595 | 595 |
596 if (remembered_set_action == OMIT_REMEMBERED_SET && | 596 if (remembered_set_action == OMIT_REMEMBERED_SET && |
597 !FLAG_incremental_marking) { | 597 !FLAG_incremental_marking) { |
598 return; | 598 return; |
599 } | 599 } |
(...skipping 66 matching lines...) |
666 // Store pointer to buffer and increment buffer top. | 666 // Store pointer to buffer and increment buffer top. |
667 str(address, MemOperand(scratch, kPointerSize, PostIndex)); | 667 str(address, MemOperand(scratch, kPointerSize, PostIndex)); |
668 // Write back new top of buffer. | 668 // Write back new top of buffer. |
669 str(scratch, MemOperand(ip)); | 669 str(scratch, MemOperand(ip)); |
670 // Call stub on end of buffer. | 670 // Call stub on end of buffer. |
671 // Check for end of buffer. | 671 // Check for end of buffer. |
672 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); | 672 tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); |
673 if (and_then == kFallThroughAtEnd) { | 673 if (and_then == kFallThroughAtEnd) { |
674 b(eq, &done); | 674 b(eq, &done); |
675 } else { | 675 } else { |
676 ASSERT(and_then == kReturnAtEnd); | 676 DCHECK(and_then == kReturnAtEnd); |
677 Ret(eq); | 677 Ret(eq); |
678 } | 678 } |
679 push(lr); | 679 push(lr); |
680 StoreBufferOverflowStub store_buffer_overflow = | 680 StoreBufferOverflowStub store_buffer_overflow = |
681 StoreBufferOverflowStub(isolate(), fp_mode); | 681 StoreBufferOverflowStub(isolate(), fp_mode); |
682 CallStub(&store_buffer_overflow); | 682 CallStub(&store_buffer_overflow); |
683 pop(lr); | 683 pop(lr); |
684 bind(&done); | 684 bind(&done); |
685 if (and_then == kReturnAtEnd) { | 685 if (and_then == kReturnAtEnd) { |
686 Ret(); | 686 Ret(); |
687 } | 687 } |
688 } | 688 } |
689 | 689 |
690 | 690 |
691 void MacroAssembler::PushFixedFrame(Register marker_reg) { | 691 void MacroAssembler::PushFixedFrame(Register marker_reg) { |
692 ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code()); | 692 DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code()); |
693 stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | | 693 stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | |
694 cp.bit() | | 694 cp.bit() | |
695 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | | 695 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | |
696 fp.bit() | | 696 fp.bit() | |
697 lr.bit()); | 697 lr.bit()); |
698 } | 698 } |
699 | 699 |
700 | 700 |
701 void MacroAssembler::PopFixedFrame(Register marker_reg) { | 701 void MacroAssembler::PopFixedFrame(Register marker_reg) { |
702 ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code()); | 702 DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code()); |
703 ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | | 703 ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | |
704 cp.bit() | | 704 cp.bit() | |
705 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | | 705 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) | |
706 fp.bit() | | 706 fp.bit() | |
707 lr.bit()); | 707 lr.bit()); |
708 } | 708 } |
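stm/ldm encode their register list as a 16-bit mask (bit n selects register rn) and always place lower-numbered registers at lower addresses, which is why the DCHECK insists the marker register's code is below cp's: only then does the marker land beneath cp/pp/fp/lr in the fixed frame. A sketch of assembling such a list (register codes are illustrative inputs):

    #include <cstdint>

    uint32_t FixedFrameList(int marker_code, int cp_code, int pp_code,
                            int fp_code, int lr_code, bool with_pp) {
      uint32_t list = (1u << cp_code) | (1u << fp_code) | (1u << lr_code);
      if (with_pp) list |= 1u << pp_code;  // ool constant pool pointer
      if (marker_code >= 0) list |= 1u << marker_code;  // optional marker
      return list;                         // bit n selects register rn
    }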
709 | 709 |
710 | 710 |
711 // Push and pop all registers that can hold pointers. | 711 // Push and pop all registers that can hold pointers. |
712 void MacroAssembler::PushSafepointRegisters() { | 712 void MacroAssembler::PushSafepointRegisters() { |
713 // Safepoints expect a block of contiguous register values starting with r0: | 713 // Safepoints expect a block of contiguous register values starting with r0: |
714 ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters); | 714 DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters); |
715 // Safepoints expect a block of kNumSafepointRegisters values on the | 715 // Safepoints expect a block of kNumSafepointRegisters values on the |
716 // stack, so adjust the stack for unsaved registers. | 716 // stack, so adjust the stack for unsaved registers. |
717 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 717 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
718 ASSERT(num_unsaved >= 0); | 718 DCHECK(num_unsaved >= 0); |
719 sub(sp, sp, Operand(num_unsaved * kPointerSize)); | 719 sub(sp, sp, Operand(num_unsaved * kPointerSize)); |
720 stm(db_w, sp, kSafepointSavedRegisters); | 720 stm(db_w, sp, kSafepointSavedRegisters); |
721 } | 721 } |
722 | 722 |
723 | 723 |
724 void MacroAssembler::PopSafepointRegisters() { | 724 void MacroAssembler::PopSafepointRegisters() { |
725 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 725 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
726 ldm(ia_w, sp, kSafepointSavedRegisters); | 726 ldm(ia_w, sp, kSafepointSavedRegisters); |
727 add(sp, sp, Operand(num_unsaved * kPointerSize)); | 727 add(sp, sp, Operand(num_unsaved * kPointerSize)); |
728 } | 728 } |
729 | 729 |
730 | 730 |
731 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { | 731 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { |
732 str(src, SafepointRegisterSlot(dst)); | 732 str(src, SafepointRegisterSlot(dst)); |
733 } | 733 } |
734 | 734 |
735 | 735 |
736 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { | 736 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { |
737 ldr(dst, SafepointRegisterSlot(src)); | 737 ldr(dst, SafepointRegisterSlot(src)); |
738 } | 738 } |
739 | 739 |
740 | 740 |
741 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 741 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
742 // The registers are pushed starting with the highest encoding, | 742 // The registers are pushed starting with the highest encoding, |
743 // which means that lowest encodings are closest to the stack pointer. | 743 // which means that lowest encodings are closest to the stack pointer. |
744 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); | 744 DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters); |
745 return reg_code; | 745 return reg_code; |
746 } | 746 } |
747 | 747 |
748 | 748 |
749 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { | 749 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { |
750 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); | 750 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); |
751 } | 751 } |
752 | 752 |
753 | 753 |
754 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { | 754 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { |
755 // Number of d-regs not known at snapshot time. | 755 // Number of d-regs not known at snapshot time. |
756 ASSERT(!serializer_enabled()); | 756 DCHECK(!serializer_enabled()); |
757 // General purpose registers are pushed last on the stack. | 757 // General purpose registers are pushed last on the stack. |
758 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; | 758 int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; |
759 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; | 759 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; |
760 return MemOperand(sp, doubles_size + register_offset); | 760 return MemOperand(sp, doubles_size + register_offset); |
761 } | 761 } |
762 | 762 |
763 | 763 |
764 void MacroAssembler::Ldrd(Register dst1, Register dst2, | 764 void MacroAssembler::Ldrd(Register dst1, Register dst2, |
765 const MemOperand& src, Condition cond) { | 765 const MemOperand& src, Condition cond) { |
766 ASSERT(src.rm().is(no_reg)); | 766 DCHECK(src.rm().is(no_reg)); |
767 ASSERT(!dst1.is(lr)); // r14. | 767 DCHECK(!dst1.is(lr)); // r14. |
768 | 768 |
769 // V8 does not use this addressing mode, so the fallback code | 769 // V8 does not use this addressing mode, so the fallback code |
770 // below doesn't support it yet. | 770 // below doesn't support it yet. |
771 ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex)); | 771 DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex)); |
772 | 772 |
773 // Generate two ldr instructions if ldrd is not available. | 773 // Generate two ldr instructions if ldrd is not available. |
774 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && | 774 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && |
775 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) { | 775 (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) { |
776 CpuFeatureScope scope(this, ARMv7); | 776 CpuFeatureScope scope(this, ARMv7); |
777 ldrd(dst1, dst2, src, cond); | 777 ldrd(dst1, dst2, src, cond); |
778 } else { | 778 } else { |
779 if ((src.am() == Offset) || (src.am() == NegOffset)) { | 779 if ((src.am() == Offset) || (src.am() == NegOffset)) { |
780 MemOperand src2(src); | 780 MemOperand src2(src); |
781 src2.set_offset(src2.offset() + 4); | 781 src2.set_offset(src2.offset() + 4); |
782 if (dst1.is(src.rn())) { | 782 if (dst1.is(src.rn())) { |
783 ldr(dst2, src2, cond); | 783 ldr(dst2, src2, cond); |
784 ldr(dst1, src, cond); | 784 ldr(dst1, src, cond); |
785 } else { | 785 } else { |
786 ldr(dst1, src, cond); | 786 ldr(dst1, src, cond); |
787 ldr(dst2, src2, cond); | 787 ldr(dst2, src2, cond); |
788 } | 788 } |
789 } else { // PostIndex or NegPostIndex. | 789 } else { // PostIndex or NegPostIndex. |
790 ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex)); | 790 DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex)); |
791 if (dst1.is(src.rn())) { | 791 if (dst1.is(src.rn())) { |
792 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond); | 792 ldr(dst2, MemOperand(src.rn(), 4, Offset), cond); |
793 ldr(dst1, src, cond); | 793 ldr(dst1, src, cond); |
794 } else { | 794 } else { |
795 MemOperand src2(src); | 795 MemOperand src2(src); |
796 src2.set_offset(src2.offset() - 4); | 796 src2.set_offset(src2.offset() - 4); |
797 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond); | 797 ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond); |
798 ldr(dst2, src2, cond); | 798 ldr(dst2, src2, cond); |
799 } | 799 } |
800 } | 800 } |
801 } | 801 } |
802 } | 802 } |
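When ldrd is unavailable, the two single-word loads must be ordered around a possible alias: if dst1 is the base register, loading it first would destroy the address needed for the second load, so the order flips. A sketch of the rule:

    #include <cstdint>

    void LdrdFallback(const uint32_t* base, uint32_t* dst1, uint32_t* dst2,
                      bool dst1_aliases_base) {
      if (dst1_aliases_base) {
        *dst2 = base[1];  // second word first; base still valid
        *dst1 = base[0];  // clobbering the base is now harmless
      } else {
        *dst1 = base[0];  // natural order is fine without aliasing
        *dst2 = base[1];
      }
    }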
803 | 803 |
804 | 804 |
805 void MacroAssembler::Strd(Register src1, Register src2, | 805 void MacroAssembler::Strd(Register src1, Register src2, |
806 const MemOperand& dst, Condition cond) { | 806 const MemOperand& dst, Condition cond) { |
807 ASSERT(dst.rm().is(no_reg)); | 807 DCHECK(dst.rm().is(no_reg)); |
808 ASSERT(!src1.is(lr)); // r14. | 808 DCHECK(!src1.is(lr)); // r14. |
809 | 809 |
810 // V8 does not use this addressing mode, so the fallback code | 810 // V8 does not use this addressing mode, so the fallback code |
811 // below doesn't support it yet. | 811 // below doesn't support it yet. |
812 ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); | 812 DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex)); |
813 | 813 |
814 // Generate two str instructions if strd is not available. | 814 // Generate two str instructions if strd is not available. |
815 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && | 815 if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() && |
816 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) { | 816 (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) { |
817 CpuFeatureScope scope(this, ARMv7); | 817 CpuFeatureScope scope(this, ARMv7); |
818 strd(src1, src2, dst, cond); | 818 strd(src1, src2, dst, cond); |
819 } else { | 819 } else { |
820 MemOperand dst2(dst); | 820 MemOperand dst2(dst); |
821 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { | 821 if ((dst.am() == Offset) || (dst.am() == NegOffset)) { |
822 dst2.set_offset(dst2.offset() + 4); | 822 dst2.set_offset(dst2.offset() + 4); |
823 str(src1, dst, cond); | 823 str(src1, dst, cond); |
824 str(src2, dst2, cond); | 824 str(src2, dst2, cond); |
825 } else { // PostIndex or NegPostIndex. | 825 } else { // PostIndex or NegPostIndex. |
826 ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); | 826 DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); |
827 dst2.set_offset(dst2.offset() - 4); | 827 dst2.set_offset(dst2.offset() - 4); |
828 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); | 828 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); |
829 str(src2, dst2, cond); | 829 str(src2, dst2, cond); |
830 } | 830 } |
831 } | 831 } |
832 } | 832 } |
833 | 833 |
834 | 834 |
835 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { | 835 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { |
836 // If needed, restore wanted bits of FPSCR. | 836 // If needed, restore wanted bits of FPSCR. |
(...skipping 109 matching lines...) |
946 } else { | 946 } else { |
947 vmov(dst, VmovIndexLo, src); | 947 vmov(dst, VmovIndexLo, src); |
948 } | 948 } |
949 } | 949 } |
950 | 950 |
951 | 951 |
952 void MacroAssembler::LoadConstantPoolPointerRegister() { | 952 void MacroAssembler::LoadConstantPoolPointerRegister() { |
953 if (FLAG_enable_ool_constant_pool) { | 953 if (FLAG_enable_ool_constant_pool) { |
954 int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize - | 954 int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize - |
955 pc_offset() - Instruction::kPCReadOffset; | 955 pc_offset() - Instruction::kPCReadOffset; |
956 ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset)); | 956 DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset)); |
957 ldr(pp, MemOperand(pc, constant_pool_offset)); | 957 ldr(pp, MemOperand(pc, constant_pool_offset)); |
958 } | 958 } |
959 } | 959 } |
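The offset arithmetic accounts for ARM's pc read-ahead: an ldr that uses pc as its base sees the current instruction's address plus 8 (Instruction::kPCReadOffset), so reaching the Code object's constant pool slot must subtract both the bytes already emitted and that bias. A sketch of the computation (parameter names are illustrative):

    int ConstantPoolPcOffset(int pool_offset_in_code, int header_size,
                             int pc_offset, int pc_read_offset /* 8 */) {
      return pool_offset_in_code - header_size - pc_offset - pc_read_offset;
    }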
960 | 960 |
961 | 961 |
962 void MacroAssembler::StubPrologue() { | 962 void MacroAssembler::StubPrologue() { |
963 PushFixedFrame(); | 963 PushFixedFrame(); |
964 Push(Smi::FromInt(StackFrame::STUB)); | 964 Push(Smi::FromInt(StackFrame::STUB)); |
965 // Adjust FP to point to saved FP. | 965 // Adjust FP to point to saved FP. |
966 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 966 add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
(...skipping 63 matching lines...) |
1030 mov(sp, fp); | 1030 mov(sp, fp); |
1031 frame_ends = pc_offset(); | 1031 frame_ends = pc_offset(); |
1032 ldm(ia_w, sp, fp.bit() | lr.bit()); | 1032 ldm(ia_w, sp, fp.bit() | lr.bit()); |
1033 } | 1033 } |
1034 return frame_ends; | 1034 return frame_ends; |
1035 } | 1035 } |
1036 | 1036 |
1037 | 1037 |
1038 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { | 1038 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { |
1039 // Set up the frame structure on the stack. | 1039 // Set up the frame structure on the stack. |
1040 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); | 1040 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); |
1041 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); | 1041 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); |
1042 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); | 1042 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); |
1043 Push(lr, fp); | 1043 Push(lr, fp); |
1044 mov(fp, Operand(sp)); // Set up new frame pointer. | 1044 mov(fp, Operand(sp)); // Set up new frame pointer. |
1045 // Reserve room for saved entry sp and code object. | 1045 // Reserve room for saved entry sp and code object. |
1046 sub(sp, sp, Operand(ExitFrameConstants::kFrameSize)); | 1046 sub(sp, sp, Operand(ExitFrameConstants::kFrameSize)); |
1047 if (emit_debug_code()) { | 1047 if (emit_debug_code()) { |
1048 mov(ip, Operand::Zero()); | 1048 mov(ip, Operand::Zero()); |
1049 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 1049 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
1050 } | 1050 } |
1051 if (FLAG_enable_ool_constant_pool) { | 1051 if (FLAG_enable_ool_constant_pool) { |
1052 str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); | 1052 str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); |
(...skipping 15 matching lines...) |
1068 // DwVfpRegister::kMaxNumRegisters * kDoubleSize, | 1068 // DwVfpRegister::kMaxNumRegisters * kDoubleSize, |
1069 // since the sp slot, code slot and constant pool slot (if | 1069 // since the sp slot, code slot and constant pool slot (if |
1070 // FLAG_enable_ool_constant_pool) were pushed after the fp. | 1070 // FLAG_enable_ool_constant_pool) were pushed after the fp. |
1071 } | 1071 } |
1072 | 1072 |
1073 // Reserve room for the return address and stack space, and align the frame | 1073 // Reserve room for the return address and stack space, and align the frame |
1074 // in preparation for calling the runtime function. | 1074 // in preparation for calling the runtime function. |
1075 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 1075 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); |
1076 sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); | 1076 sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); |
1077 if (frame_alignment > 0) { | 1077 if (frame_alignment > 0) { |
1078 ASSERT(IsPowerOf2(frame_alignment)); | 1078 DCHECK(IsPowerOf2(frame_alignment)); |
1079 and_(sp, sp, Operand(-frame_alignment)); | 1079 and_(sp, sp, Operand(-frame_alignment)); |
1080 } | 1080 } |
1081 | 1081 |
1082 // Set the exit frame sp value to point just before the return address | 1082 // Set the exit frame sp value to point just before the return address |
1083 // location. | 1083 // location. |
1084 add(ip, sp, Operand(kPointerSize)); | 1084 add(ip, sp, Operand(kPointerSize)); |
1085 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 1085 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
1086 } | 1086 } |
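The and_(sp, sp, Operand(-frame_alignment)) rounds the stack pointer down to the required multiple; for a power-of-two alignment, -frame_alignment is the complement of frame_alignment - 1, so the AND simply clears the low bits. A sketch:

    #include <cassert>
    #include <cstdint>

    uintptr_t AlignDown(uintptr_t sp, uintptr_t frame_alignment) {
      assert((frame_alignment & (frame_alignment - 1)) == 0);  // power of 2
      return sp & ~(frame_alignment - 1);  // same bits as sp & -alignment
    }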
1087 | 1087 |
1088 | 1088 |
(...skipping 98 matching lines...) |
1187 | 1187 |
1188 // Check whether the expected and actual argument counts match. If not, | 1188 // Check whether the expected and actual argument counts match. If not, |
1189 // set up registers according to contract with ArgumentsAdaptorTrampoline: | 1189 // set up registers according to contract with ArgumentsAdaptorTrampoline: |
1190 // r0: actual arguments count | 1190 // r0: actual arguments count |
1191 // r1: function (passed through to callee) | 1191 // r1: function (passed through to callee) |
1192 // r2: expected arguments count | 1192 // r2: expected arguments count |
1193 | 1193 |
1194 // The code below is made a lot easier because the calling code already sets | 1194 // The code below is made a lot easier because the calling code already sets |
1195 // up actual and expected registers according to the contract if values are | 1195 // up actual and expected registers according to the contract if values are |
1196 // passed in registers. | 1196 // passed in registers. |
1197 ASSERT(actual.is_immediate() || actual.reg().is(r0)); | 1197 DCHECK(actual.is_immediate() || actual.reg().is(r0)); |
1198 ASSERT(expected.is_immediate() || expected.reg().is(r2)); | 1198 DCHECK(expected.is_immediate() || expected.reg().is(r2)); |
1199 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3)); | 1199 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3)); |
1200 | 1200 |
1201 if (expected.is_immediate()) { | 1201 if (expected.is_immediate()) { |
1202 ASSERT(actual.is_immediate()); | 1202 DCHECK(actual.is_immediate()); |
1203 if (expected.immediate() == actual.immediate()) { | 1203 if (expected.immediate() == actual.immediate()) { |
1204 definitely_matches = true; | 1204 definitely_matches = true; |
1205 } else { | 1205 } else { |
1206 mov(r0, Operand(actual.immediate())); | 1206 mov(r0, Operand(actual.immediate())); |
1207 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; | 1207 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
1208 if (expected.immediate() == sentinel) { | 1208 if (expected.immediate() == sentinel) { |
1209 // Don't worry about adapting arguments for builtins that | 1209 // Don't worry about adapting arguments for builtins that |
1210 // don't want that done. Skip adaptation code by making it look | 1210 // don't want that done. Skip adaptation code by making it look |
1211 // like we have a match between expected and actual number of | 1211 // like we have a match between expected and actual number of |
1212 // arguments. | 1212 // arguments. |
(...skipping 36 matching lines...) |
1249 } | 1249 } |
1250 } | 1250 } |
1251 | 1251 |
1252 | 1252 |
1253 void MacroAssembler::InvokeCode(Register code, | 1253 void MacroAssembler::InvokeCode(Register code, |
1254 const ParameterCount& expected, | 1254 const ParameterCount& expected, |
1255 const ParameterCount& actual, | 1255 const ParameterCount& actual, |
1256 InvokeFlag flag, | 1256 InvokeFlag flag, |
1257 const CallWrapper& call_wrapper) { | 1257 const CallWrapper& call_wrapper) { |
1258 // You can't call a function without a valid frame. | 1258 // You can't call a function without a valid frame. |
1259 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 1259 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
1260 | 1260 |
1261 Label done; | 1261 Label done; |
1262 bool definitely_mismatches = false; | 1262 bool definitely_mismatches = false; |
1263 InvokePrologue(expected, actual, Handle<Code>::null(), code, | 1263 InvokePrologue(expected, actual, Handle<Code>::null(), code, |
1264 &done, &definitely_mismatches, flag, | 1264 &done, &definitely_mismatches, flag, |
1265 call_wrapper); | 1265 call_wrapper); |
1266 if (!definitely_mismatches) { | 1266 if (!definitely_mismatches) { |
1267 if (flag == CALL_FUNCTION) { | 1267 if (flag == CALL_FUNCTION) { |
1268 call_wrapper.BeforeCall(CallSize(code)); | 1268 call_wrapper.BeforeCall(CallSize(code)); |
1269 Call(code); | 1269 Call(code); |
1270 call_wrapper.AfterCall(); | 1270 call_wrapper.AfterCall(); |
1271 } else { | 1271 } else { |
1272 ASSERT(flag == JUMP_FUNCTION); | 1272 DCHECK(flag == JUMP_FUNCTION); |
1273 Jump(code); | 1273 Jump(code); |
1274 } | 1274 } |
1275 | 1275 |
1276 // Continue here if InvokePrologue does handle the invocation due to | 1276 // Continue here if InvokePrologue does handle the invocation due to |
1277 // mismatched parameter counts. | 1277 // mismatched parameter counts. |
1278 bind(&done); | 1278 bind(&done); |
1279 } | 1279 } |
1280 } | 1280 } |
1281 | 1281 |
1282 | 1282 |
1283 void MacroAssembler::InvokeFunction(Register fun, | 1283 void MacroAssembler::InvokeFunction(Register fun, |
1284 const ParameterCount& actual, | 1284 const ParameterCount& actual, |
1285 InvokeFlag flag, | 1285 InvokeFlag flag, |
1286 const CallWrapper& call_wrapper) { | 1286 const CallWrapper& call_wrapper) { |
1287 // You can't call a function without a valid frame. | 1287 // You can't call a function without a valid frame. |
1288 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 1288 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
1289 | 1289 |
1290 // Contract with called JS functions requires that function is passed in r1. | 1290 // Contract with called JS functions requires that function is passed in r1. |
1291 ASSERT(fun.is(r1)); | 1291 DCHECK(fun.is(r1)); |
1292 | 1292 |
1293 Register expected_reg = r2; | 1293 Register expected_reg = r2; |
1294 Register code_reg = r3; | 1294 Register code_reg = r3; |
1295 | 1295 |
1296 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 1296 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); |
1297 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 1297 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
1298 ldr(expected_reg, | 1298 ldr(expected_reg, |
1299 FieldMemOperand(code_reg, | 1299 FieldMemOperand(code_reg, |
1300 SharedFunctionInfo::kFormalParameterCountOffset)); | 1300 SharedFunctionInfo::kFormalParameterCountOffset)); |
1301 SmiUntag(expected_reg); | 1301 SmiUntag(expected_reg); |
1302 ldr(code_reg, | 1302 ldr(code_reg, |
1303 FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 1303 FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
1304 | 1304 |
1305 ParameterCount expected(expected_reg); | 1305 ParameterCount expected(expected_reg); |
1306 InvokeCode(code_reg, expected, actual, flag, call_wrapper); | 1306 InvokeCode(code_reg, expected, actual, flag, call_wrapper); |
1307 } | 1307 } |
1308 | 1308 |
1309 | 1309 |
1310 void MacroAssembler::InvokeFunction(Register function, | 1310 void MacroAssembler::InvokeFunction(Register function, |
1311 const ParameterCount& expected, | 1311 const ParameterCount& expected, |
1312 const ParameterCount& actual, | 1312 const ParameterCount& actual, |
1313 InvokeFlag flag, | 1313 InvokeFlag flag, |
1314 const CallWrapper& call_wrapper) { | 1314 const CallWrapper& call_wrapper) { |
1315 // You can't call a function without a valid frame. | 1315 // You can't call a function without a valid frame. |
1316 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 1316 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
1317 | 1317 |
1318 // Contract with called JS functions requires that function is passed in r1. | 1318 // Contract with called JS functions requires that function is passed in r1. |
1319 ASSERT(function.is(r1)); | 1319 DCHECK(function.is(r1)); |
1320 | 1320 |
1321 // Get the function and setup the context. | 1321 // Get the function and setup the context. |
1322 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 1322 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
1323 | 1323 |
1324 // We call indirectly through the code field in the function to | 1324 // We call indirectly through the code field in the function to |
1325 // allow recompilation to take effect without changing any of the | 1325 // allow recompilation to take effect without changing any of the |
1326 // call sites. | 1326 // call sites. |
1327 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 1327 ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
1328 InvokeCode(r3, expected, actual, flag, call_wrapper); | 1328 InvokeCode(r3, expected, actual, flag, call_wrapper); |
1329 } | 1329 } |
(...skipping 25 matching lines...) |
1355 cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 1355 cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
1356 b(lt, fail); | 1356 b(lt, fail); |
1357 cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); | 1357 cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
1358 b(gt, fail); | 1358 b(gt, fail); |
1359 } | 1359 } |
1360 | 1360 |
1361 | 1361 |
1362 void MacroAssembler::IsObjectJSStringType(Register object, | 1362 void MacroAssembler::IsObjectJSStringType(Register object, |
1363 Register scratch, | 1363 Register scratch, |
1364 Label* fail) { | 1364 Label* fail) { |
1365 ASSERT(kNotStringTag != 0); | 1365 DCHECK(kNotStringTag != 0); |
1366 | 1366 |
1367 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 1367 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
1368 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 1368 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
1369 tst(scratch, Operand(kIsNotStringMask)); | 1369 tst(scratch, Operand(kIsNotStringMask)); |
1370 b(ne, fail); | 1370 b(ne, fail); |
1371 } | 1371 } |
1372 | 1372 |
1373 | 1373 |
1374 void MacroAssembler::IsObjectNameType(Register object, | 1374 void MacroAssembler::IsObjectNameType(Register object, |
1375 Register scratch, | 1375 Register scratch, |
1376 Label* fail) { | 1376 Label* fail) { |
1377 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 1377 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
1378 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 1378 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
1379 cmp(scratch, Operand(LAST_NAME_TYPE)); | 1379 cmp(scratch, Operand(LAST_NAME_TYPE)); |
1380 b(hi, fail); | 1380 b(hi, fail); |
1381 } | 1381 } |
1382 | 1382 |
1383 | 1383 |
1384 void MacroAssembler::DebugBreak() { | 1384 void MacroAssembler::DebugBreak() { |
1385 mov(r0, Operand::Zero()); | 1385 mov(r0, Operand::Zero()); |
1386 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); | 1386 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); |
1387 CEntryStub ces(isolate(), 1); | 1387 CEntryStub ces(isolate(), 1); |
1388 ASSERT(AllowThisStubCall(&ces)); | 1388 DCHECK(AllowThisStubCall(&ces)); |
1389 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); | 1389 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); |
1390 } | 1390 } |
1391 | 1391 |
1392 | 1392 |
1393 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, | 1393 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, |
1394 int handler_index) { | 1394 int handler_index) { |
1395 // Adjust this code if not the case. | 1395 // Adjust this code if not the case. |
1396 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 1396 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
1397 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | 1397 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
1398 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 1398 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
(...skipping 127 matching lines...) |
1526 | 1526 |
1527 JumpToHandlerEntry(); | 1527 JumpToHandlerEntry(); |
1528 } | 1528 } |
1529 | 1529 |
1530 | 1530 |
1531 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 1531 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
1532 Register scratch, | 1532 Register scratch, |
1533 Label* miss) { | 1533 Label* miss) { |
1534 Label same_contexts; | 1534 Label same_contexts; |
1535 | 1535 |
1536 ASSERT(!holder_reg.is(scratch)); | 1536 DCHECK(!holder_reg.is(scratch)); |
1537 ASSERT(!holder_reg.is(ip)); | 1537 DCHECK(!holder_reg.is(ip)); |
1538 ASSERT(!scratch.is(ip)); | 1538 DCHECK(!scratch.is(ip)); |
1539 | 1539 |
1540 // Load current lexical context from the stack frame. | 1540 // Load current lexical context from the stack frame. |
1541 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1541 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
1542 // In debug mode, make sure the lexical context is set. | 1542 // In debug mode, make sure the lexical context is set. |
1543 #ifdef DEBUG | 1543 #ifdef DEBUG |
1544 cmp(scratch, Operand::Zero()); | 1544 cmp(scratch, Operand::Zero()); |
1545 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); | 1545 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); |
1546 #endif | 1546 #endif |
1547 | 1547 |
1548 // Load the native context of the current context. | 1548 // Load the native context of the current context. |
(...skipping 127 matching lines...) |
1676 for (int i = 0; i < kNumberDictionaryProbes; i++) { | 1676 for (int i = 0; i < kNumberDictionaryProbes; i++) { |
1677 // Use t2 for index calculations and keep the hash intact in t0. | 1677 // Use t2 for index calculations and keep the hash intact in t0. |
1678 mov(t2, t0); | 1678 mov(t2, t0); |
1679 // Compute the masked index: (hash + i + i * i) & mask. | 1679 // Compute the masked index: (hash + i + i * i) & mask. |
1680 if (i > 0) { | 1680 if (i > 0) { |
1681 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); | 1681 add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); |
1682 } | 1682 } |
1683 and_(t2, t2, Operand(t1)); | 1683 and_(t2, t2, Operand(t1)); |
1684 | 1684 |
1685 // Scale the index by multiplying by the element size. | 1685 // Scale the index by multiplying by the element size. |
1686 ASSERT(SeededNumberDictionary::kEntrySize == 3); | 1686 DCHECK(SeededNumberDictionary::kEntrySize == 3); |
1687 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3 | 1687 add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3 |
1688 | 1688 |
1689 // Check if the key is identical to the name. | 1689 // Check if the key is identical to the name. |
1690 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2)); | 1690 add(t2, elements, Operand(t2, LSL, kPointerSizeLog2)); |
1691 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); | 1691 ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); |
1692 cmp(key, Operand(ip)); | 1692 cmp(key, Operand(ip)); |
1693 if (i != kNumberDictionaryProbes - 1) { | 1693 if (i != kNumberDictionaryProbes - 1) { |
1694 b(eq, &done); | 1694 b(eq, &done); |
1695 } else { | 1695 } else { |
1696 b(ne, miss); | 1696 b(ne, miss); |
(...skipping 15 matching lines...) |
1712 ldr(result, FieldMemOperand(t2, kValueOffset)); | 1712 ldr(result, FieldMemOperand(t2, kValueOffset)); |
1713 } | 1713 } |
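The loop above implements quadratic probing over a power-of-two table: GetProbeOffset(i) is i + i*i, the AND with t1 wraps the index to the capacity mask, and the add-with-LSL pair scales by kEntrySize == 3 without a multiply. The index computation as a sketch:

    #include <cstdint>

    uint32_t ProbeSlot(uint32_t hash, uint32_t mask, int i) {
      uint32_t index = (hash + i + i * i) & mask;  // masked quadratic probe
      return index + (index << 1);                 // index * 3, as add + LSL#1
    }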
1714 | 1714 |
1715 | 1715 |
1716 void MacroAssembler::Allocate(int object_size, | 1716 void MacroAssembler::Allocate(int object_size, |
1717 Register result, | 1717 Register result, |
1718 Register scratch1, | 1718 Register scratch1, |
1719 Register scratch2, | 1719 Register scratch2, |
1720 Label* gc_required, | 1720 Label* gc_required, |
1721 AllocationFlags flags) { | 1721 AllocationFlags flags) { |
1722 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | 1722 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); |
1723 if (!FLAG_inline_new) { | 1723 if (!FLAG_inline_new) { |
1724 if (emit_debug_code()) { | 1724 if (emit_debug_code()) { |
1725 // Trash the registers to simulate an allocation failure. | 1725 // Trash the registers to simulate an allocation failure. |
1726 mov(result, Operand(0x7091)); | 1726 mov(result, Operand(0x7091)); |
1727 mov(scratch1, Operand(0x7191)); | 1727 mov(scratch1, Operand(0x7191)); |
1728 mov(scratch2, Operand(0x7291)); | 1728 mov(scratch2, Operand(0x7291)); |
1729 } | 1729 } |
1730 jmp(gc_required); | 1730 jmp(gc_required); |
1731 return; | 1731 return; |
1732 } | 1732 } |
1733 | 1733 |
1734 ASSERT(!result.is(scratch1)); | 1734 DCHECK(!result.is(scratch1)); |
1735 ASSERT(!result.is(scratch2)); | 1735 DCHECK(!result.is(scratch2)); |
1736 ASSERT(!scratch1.is(scratch2)); | 1736 DCHECK(!scratch1.is(scratch2)); |
1737 ASSERT(!scratch1.is(ip)); | 1737 DCHECK(!scratch1.is(ip)); |
1738 ASSERT(!scratch2.is(ip)); | 1738 DCHECK(!scratch2.is(ip)); |
1739 | 1739 |
1740 // Make object size into bytes. | 1740 // Make object size into bytes. |
1741 if ((flags & SIZE_IN_WORDS) != 0) { | 1741 if ((flags & SIZE_IN_WORDS) != 0) { |
1742 object_size *= kPointerSize; | 1742 object_size *= kPointerSize; |
1743 } | 1743 } |
1744 ASSERT_EQ(0, object_size & kObjectAlignmentMask); | 1744 DCHECK_EQ(0, object_size & kObjectAlignmentMask); |
1745 | 1745 |
1746 // Check relative positions of allocation top and limit addresses. | 1746 // Check relative positions of allocation top and limit addresses. |
1747 // The values must be adjacent in memory to allow the use of LDM. | 1747 // The values must be adjacent in memory to allow the use of LDM. |
1748 // Also, assert that the registers are numbered such that the values | 1748 // Also, assert that the registers are numbered such that the values |
1749 // are loaded in the correct order. | 1749 // are loaded in the correct order. |
1750 ExternalReference allocation_top = | 1750 ExternalReference allocation_top = |
1751 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 1751 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
1752 ExternalReference allocation_limit = | 1752 ExternalReference allocation_limit = |
1753 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1753 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
1754 | 1754 |
1755 intptr_t top = | 1755 intptr_t top = |
1756 reinterpret_cast<intptr_t>(allocation_top.address()); | 1756 reinterpret_cast<intptr_t>(allocation_top.address()); |
1757 intptr_t limit = | 1757 intptr_t limit = |
1758 reinterpret_cast<intptr_t>(allocation_limit.address()); | 1758 reinterpret_cast<intptr_t>(allocation_limit.address()); |
1759 ASSERT((limit - top) == kPointerSize); | 1759 DCHECK((limit - top) == kPointerSize); |
1760 ASSERT(result.code() < ip.code()); | 1760 DCHECK(result.code() < ip.code()); |
1761 | 1761 |
1762 // Set up allocation top address register. | 1762 // Set up allocation top address register. |
1763 Register topaddr = scratch1; | 1763 Register topaddr = scratch1; |
1764 mov(topaddr, Operand(allocation_top)); | 1764 mov(topaddr, Operand(allocation_top)); |
1765 | 1765 |
1766 // This code stores a temporary value in ip. This is OK, as the code below | 1766 // This code stores a temporary value in ip. This is OK, as the code below |
1767 // does not need ip for implicit literal generation. | 1767 // does not need ip for implicit literal generation. |
1768 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1768 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
1769 // Load allocation top into result and allocation limit into ip. | 1769 // Load allocation top into result and allocation limit into ip. |
1770 ldm(ia, topaddr, result.bit() | ip.bit()); | 1770 ldm(ia, topaddr, result.bit() | ip.bit()); |
1771 } else { | 1771 } else { |
1772 if (emit_debug_code()) { | 1772 if (emit_debug_code()) { |
1773 // Assert that result actually contains top on entry. ip is used | 1773 // Assert that result actually contains top on entry. ip is used |
1774 // immediately below so this use of ip does not cause a difference in | 1774 // immediately below so this use of ip does not cause a difference in |
1775 // register content between debug and release mode. | 1775 // register content between debug and release mode. |
1776 ldr(ip, MemOperand(topaddr)); | 1776 ldr(ip, MemOperand(topaddr)); |
1777 cmp(result, ip); | 1777 cmp(result, ip); |
1778 Check(eq, kUnexpectedAllocationTop); | 1778 Check(eq, kUnexpectedAllocationTop); |
1779 } | 1779 } |
1780 // Load allocation limit into ip. Result already contains allocation top. | 1780 // Load allocation limit into ip. Result already contains allocation top. |
1781 ldr(ip, MemOperand(topaddr, limit - top)); | 1781 ldr(ip, MemOperand(topaddr, limit - top)); |
1782 } | 1782 } |
1783 | 1783 |
1784 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1784 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1785 // Align the next allocation. Storing the filler map without checking top is | 1785 // Align the next allocation. Storing the filler map without checking top is |
1786 // safe in new-space because the limit of the heap is aligned there. | 1786 // safe in new-space because the limit of the heap is aligned there. |
1787 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1787 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
1788 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1788 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
1789 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); | 1789 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); |
1790 Label aligned; | 1790 Label aligned; |
1791 b(eq, &aligned); | 1791 b(eq, &aligned); |
1792 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 1792 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
1793 cmp(result, Operand(ip)); | 1793 cmp(result, Operand(ip)); |
1794 b(hs, gc_required); | 1794 b(hs, gc_required); |
1795 } | 1795 } |
1796 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); | 1796 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); |
1797 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); | 1797 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); |
1798 bind(&aligned); | 1798 bind(&aligned); |
1799 } | 1799 } |
1800 | 1800 |
1801 // Calculate new top and bail out if new space is exhausted. Use result | 1801 // Calculate new top and bail out if new space is exhausted. Use result |
1802 // to calculate the new top. We must preserve the ip register at this | 1802 // to calculate the new top. We must preserve the ip register at this |
1803 // point, so we cannot just use add(). | 1803 // point, so we cannot just use add(). |
1804 ASSERT(object_size > 0); | 1804 DCHECK(object_size > 0); |
1805 Register source = result; | 1805 Register source = result; |
1806 Condition cond = al; | 1806 Condition cond = al; |
1807 int shift = 0; | 1807 int shift = 0; |
1808 while (object_size != 0) { | 1808 while (object_size != 0) { |
1809 if (((object_size >> shift) & 0x03) == 0) { | 1809 if (((object_size >> shift) & 0x03) == 0) { |
1810 shift += 2; | 1810 shift += 2; |
1811 } else { | 1811 } else { |
1812 int bits = object_size & (0xff << shift); | 1812 int bits = object_size & (0xff << shift); |
1813 object_size -= bits; | 1813 object_size -= bits; |
1814 shift += 8; | 1814 shift += 8; |
1815 Operand bits_operand(bits); | 1815 Operand bits_operand(bits); |
1816 ASSERT(bits_operand.instructions_required(this) == 1); | 1816 DCHECK(bits_operand.instructions_required(this) == 1); |
1817 add(scratch2, source, bits_operand, SetCC, cond); | 1817 add(scratch2, source, bits_operand, SetCC, cond); |
1818 source = scratch2; | 1818 source = scratch2; |
1819 cond = cc; | 1819 cond = cc; |
1820 } | 1820 } |
1821 } | 1821 } |
1822 b(cs, gc_required); | 1822 b(cs, gc_required); |
1823 cmp(scratch2, Operand(ip)); | 1823 cmp(scratch2, Operand(ip)); |
1824 b(hi, gc_required); | 1824 b(hi, gc_required); |
1825 str(scratch2, MemOperand(topaddr)); | 1825 str(scratch2, MemOperand(topaddr)); |
1826 | 1826 |
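The while-loop above is worth a second look: it decomposes the constant object_size into 8-bit chunks at even bit positions, each of which fits a single ARM data-processing immediate, and emits one conditional add per chunk. The first add uses SetCC and each later add is predicated on carry-clear, so once the running sum overflows, the remaining adds are skipped and b(cs, gc_required) bails out. A sketch of just the decomposition:

    // Sketch only: split a positive constant into ARM-encodable immediates,
    // mirroring the Allocate() loop above.
    void DecomposeObjectSize(int object_size) {
      int shift = 0;
      while (object_size != 0) {
        if (((object_size >> shift) & 0x03) == 0) {
          shift += 2;                                // skip empty 2-bit slices
        } else {
          int bits = object_size & (0xff << shift);  // next 8-bit chunk
          object_size -= bits;
          shift += 8;
          // emit: add scratch2, source, #bits  (exactly one instruction)
        }
      }
    }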
(...skipping 16 matching lines...) |
1843 mov(result, Operand(0x7091)); | 1843 mov(result, Operand(0x7091)); |
1844 mov(scratch1, Operand(0x7191)); | 1844 mov(scratch1, Operand(0x7191)); |
1845 mov(scratch2, Operand(0x7291)); | 1845 mov(scratch2, Operand(0x7291)); |
1846 } | 1846 } |
1847 jmp(gc_required); | 1847 jmp(gc_required); |
1848 return; | 1848 return; |
1849 } | 1849 } |
1850 | 1850 |
1851 // Assert that the register arguments are different and that none of | 1851 // Assert that the register arguments are different and that none of |
1852 // them are ip. ip is used explicitly in the code generated below. | 1852 // them are ip. ip is used explicitly in the code generated below. |
1853 ASSERT(!result.is(scratch1)); | 1853 DCHECK(!result.is(scratch1)); |
1854 ASSERT(!result.is(scratch2)); | 1854 DCHECK(!result.is(scratch2)); |
1855 ASSERT(!scratch1.is(scratch2)); | 1855 DCHECK(!scratch1.is(scratch2)); |
1856 ASSERT(!object_size.is(ip)); | 1856 DCHECK(!object_size.is(ip)); |
1857 ASSERT(!result.is(ip)); | 1857 DCHECK(!result.is(ip)); |
1858 ASSERT(!scratch1.is(ip)); | 1858 DCHECK(!scratch1.is(ip)); |
1859 ASSERT(!scratch2.is(ip)); | 1859 DCHECK(!scratch2.is(ip)); |
1860 | 1860 |
1861 // Check relative positions of allocation top and limit addresses. | 1861 // Check relative positions of allocation top and limit addresses. |
1862 // The values must be adjacent in memory to allow the use of LDM. | 1862 // The values must be adjacent in memory to allow the use of LDM. |
1863 // Also, assert that the registers are numbered such that the values | 1863 // Also, assert that the registers are numbered such that the values |
1864 // are loaded in the correct order. | 1864 // are loaded in the correct order. |
1865 ExternalReference allocation_top = | 1865 ExternalReference allocation_top = |
1866 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 1866 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
1867 ExternalReference allocation_limit = | 1867 ExternalReference allocation_limit = |
1868 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 1868 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
1869 intptr_t top = | 1869 intptr_t top = |
1870 reinterpret_cast<intptr_t>(allocation_top.address()); | 1870 reinterpret_cast<intptr_t>(allocation_top.address()); |
1871 intptr_t limit = | 1871 intptr_t limit = |
1872 reinterpret_cast<intptr_t>(allocation_limit.address()); | 1872 reinterpret_cast<intptr_t>(allocation_limit.address()); |
1873 ASSERT((limit - top) == kPointerSize); | 1873 DCHECK((limit - top) == kPointerSize); |
1874 ASSERT(result.code() < ip.code()); | 1874 DCHECK(result.code() < ip.code()); |
1875 | 1875 |
1876 // Set up allocation top address. | 1876 // Set up allocation top address. |
1877 Register topaddr = scratch1; | 1877 Register topaddr = scratch1; |
1878 mov(topaddr, Operand(allocation_top)); | 1878 mov(topaddr, Operand(allocation_top)); |
1879 | 1879 |
1880 // This code stores a temporary value in ip. This is OK, as the code below | 1880 // This code stores a temporary value in ip. This is OK, as the code below |
1881 // does not need ip for implicit literal generation. | 1881 // does not need ip for implicit literal generation. |
1882 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1882 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
1883 // Load allocation top into result and allocation limit into ip. | 1883 // Load allocation top into result and allocation limit into ip. |
1884 ldm(ia, topaddr, result.bit() | ip.bit()); | 1884 ldm(ia, topaddr, result.bit() | ip.bit()); |
1885 } else { | 1885 } else { |
1886 if (emit_debug_code()) { | 1886 if (emit_debug_code()) { |
1887 // Assert that result actually contains top on entry. ip is used | 1887 // Assert that result actually contains top on entry. ip is used |
1888 // immediately below so this use of ip does not cause a difference in | 1888 // immediately below so this use of ip does not cause a difference in |
1889 // register content between debug and release mode. | 1889 // register content between debug and release mode. |
1890 ldr(ip, MemOperand(topaddr)); | 1890 ldr(ip, MemOperand(topaddr)); |
1891 cmp(result, ip); | 1891 cmp(result, ip); |
1892 Check(eq, kUnexpectedAllocationTop); | 1892 Check(eq, kUnexpectedAllocationTop); |
1893 } | 1893 } |
1894 // Load allocation limit into ip. Result already contains allocation top. | 1894 // Load allocation limit into ip. Result already contains allocation top. |
1895 ldr(ip, MemOperand(topaddr, limit - top)); | 1895 ldr(ip, MemOperand(topaddr, limit - top)); |
1896 } | 1896 } |
1897 | 1897 |
1898 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1898 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1899 // Align the next allocation. Storing the filler map without checking top is | 1899 // Align the next allocation. Storing the filler map without checking top is |
1900 // safe in new-space because the limit of the heap is aligned there. | 1900 // safe in new-space because the limit of the heap is aligned there. |
1901 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 1901 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
1902 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1902 DCHECK(kPointerAlignment * 2 == kDoubleAlignment); |
1903 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); | 1903 and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); |
1904 Label aligned; | 1904 Label aligned; |
1905 b(eq, &aligned); | 1905 b(eq, &aligned); |
1906 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 1906 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
1907 cmp(result, Operand(ip)); | 1907 cmp(result, Operand(ip)); |
1908 b(hs, gc_required); | 1908 b(hs, gc_required); |
1909 } | 1909 } |
1910 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); | 1910 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); |
1911 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); | 1911 str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); |
1912 bind(&aligned); | 1912 bind(&aligned); |
(...skipping 46 matching lines...) |
1959 | 1959 |
1960 | 1960 |
1961 void MacroAssembler::AllocateTwoByteString(Register result, | 1961 void MacroAssembler::AllocateTwoByteString(Register result, |
1962 Register length, | 1962 Register length, |
1963 Register scratch1, | 1963 Register scratch1, |
1964 Register scratch2, | 1964 Register scratch2, |
1965 Register scratch3, | 1965 Register scratch3, |
1966 Label* gc_required) { | 1966 Label* gc_required) { |
1967 // Calculate the number of bytes needed for the characters in the string while | 1967 // Calculate the number of bytes needed for the characters in the string while |
1968 // observing object alignment. | 1968 // observing object alignment. |
1969 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1969 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
1970 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars. | 1970 mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars. |
1971 add(scratch1, scratch1, | 1971 add(scratch1, scratch1, |
1972 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); | 1972 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); |
1973 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); | 1973 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); |
1974 | 1974 |
1975 // Allocate two-byte string in new space. | 1975 // Allocate two-byte string in new space. |
1976 Allocate(scratch1, | 1976 Allocate(scratch1, |
1977 result, | 1977 result, |
1978 scratch2, | 1978 scratch2, |
1979 scratch3, | 1979 scratch3, |
(...skipping 10 matching lines...) |
1990 | 1990 |
1991 | 1991 |
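Both string allocators (AllocateTwoByteString above, AllocateAsciiString below) round the payload up to the object alignment with the usual mask trick: add the header size plus kObjectAlignmentMask, then clear the low bits. A sketch with illustrative header and alignment values:

    #include <cstddef>
    // Sketch only; the 12-byte header and 8-byte alignment are placeholders.
    size_t RoundedStringSize(size_t payload_bytes) {
      const size_t kHeader = 12;   // SeqString header size (example value)
      const size_t kMask = 7;      // kObjectAlignmentMask (example value)
      return (payload_bytes + kHeader + kMask) & ~kMask;
    }
    // e.g. payload 5 -> (5 + 12 + 7) & ~7 == 24 bytes allocated.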
1992 void MacroAssembler::AllocateAsciiString(Register result, | 1992 void MacroAssembler::AllocateAsciiString(Register result, |
1993 Register length, | 1993 Register length, |
1994 Register scratch1, | 1994 Register scratch1, |
1995 Register scratch2, | 1995 Register scratch2, |
1996 Register scratch3, | 1996 Register scratch3, |
1997 Label* gc_required) { | 1997 Label* gc_required) { |
1998 // Calculate the number of bytes needed for the characters in the string while | 1998 // Calculate the number of bytes needed for the characters in the string while |
1999 // observing object alignment. | 1999 // observing object alignment. |
2000 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 2000 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
2001 ASSERT(kCharSize == 1); | 2001 DCHECK(kCharSize == 1); |
2002 add(scratch1, length, | 2002 add(scratch1, length, |
2003 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); | 2003 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); |
2004 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); | 2004 and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); |
2005 | 2005 |
2006 // Allocate ASCII string in new space. | 2006 // Allocate ASCII string in new space. |
2007 Allocate(scratch1, | 2007 Allocate(scratch1, |
2008 result, | 2008 result, |
2009 scratch2, | 2009 scratch2, |
2010 scratch3, | 2010 scratch3, |
2011 gc_required, | 2011 gc_required, |
(...skipping 110 matching lines...) |
2122 // will never need ip). | 2122 // will never need ip). |
2123 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); | 2123 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); |
2124 STATIC_ASSERT(LAST_TYPE < 256); | 2124 STATIC_ASSERT(LAST_TYPE < 256); |
2125 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 2125 ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
2126 cmp(type_reg, Operand(type)); | 2126 cmp(type_reg, Operand(type)); |
2127 } | 2127 } |
2128 | 2128 |
2129 | 2129 |
2130 void MacroAssembler::CompareRoot(Register obj, | 2130 void MacroAssembler::CompareRoot(Register obj, |
2131 Heap::RootListIndex index) { | 2131 Heap::RootListIndex index) { |
2132 ASSERT(!obj.is(ip)); | 2132 DCHECK(!obj.is(ip)); |
2133 LoadRoot(ip, index); | 2133 LoadRoot(ip, index); |
2134 cmp(obj, ip); | 2134 cmp(obj, ip); |
2135 } | 2135 } |
2136 | 2136 |
2137 | 2137 |
2138 void MacroAssembler::CheckFastElements(Register map, | 2138 void MacroAssembler::CheckFastElements(Register map, |
2139 Register scratch, | 2139 Register scratch, |
2140 Label* fail) { | 2140 Label* fail) { |
2141 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 2141 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
2142 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 2142 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
(...skipping 193 matching lines...) |
2336 } | 2336 } |
2337 | 2337 |
2338 // All done. | 2338 // All done. |
2339 bind(&done); | 2339 bind(&done); |
2340 } | 2340 } |
2341 | 2341 |
2342 | 2342 |
2343 void MacroAssembler::CallStub(CodeStub* stub, | 2343 void MacroAssembler::CallStub(CodeStub* stub, |
2344 TypeFeedbackId ast_id, | 2344 TypeFeedbackId ast_id, |
2345 Condition cond) { | 2345 Condition cond) { |
2346 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. | 2346 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. |
2347 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); | 2347 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); |
2348 } | 2348 } |
2349 | 2349 |
2350 | 2350 |
2351 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { | 2351 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { |
2352 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); | 2352 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); |
2353 } | 2353 } |
2354 | 2354 |
2355 | 2355 |
2356 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { | 2356 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { |
(...skipping 10 matching lines...) |
2367 ExternalReference next_address = | 2367 ExternalReference next_address = |
2368 ExternalReference::handle_scope_next_address(isolate()); | 2368 ExternalReference::handle_scope_next_address(isolate()); |
2369 const int kNextOffset = 0; | 2369 const int kNextOffset = 0; |
2370 const int kLimitOffset = AddressOffset( | 2370 const int kLimitOffset = AddressOffset( |
2371 ExternalReference::handle_scope_limit_address(isolate()), | 2371 ExternalReference::handle_scope_limit_address(isolate()), |
2372 next_address); | 2372 next_address); |
2373 const int kLevelOffset = AddressOffset( | 2373 const int kLevelOffset = AddressOffset( |
2374 ExternalReference::handle_scope_level_address(isolate()), | 2374 ExternalReference::handle_scope_level_address(isolate()), |
2375 next_address); | 2375 next_address); |
2376 | 2376 |
2377 ASSERT(function_address.is(r1) || function_address.is(r2)); | 2377 DCHECK(function_address.is(r1) || function_address.is(r2)); |
2378 | 2378 |
2379 Label profiler_disabled; | 2379 Label profiler_disabled; |
2380 Label end_profiler_check; | 2380 Label end_profiler_check; |
2381 mov(r9, Operand(ExternalReference::is_profiling_address(isolate()))); | 2381 mov(r9, Operand(ExternalReference::is_profiling_address(isolate()))); |
2382 ldrb(r9, MemOperand(r9, 0)); | 2382 ldrb(r9, MemOperand(r9, 0)); |
2383 cmp(r9, Operand(0)); | 2383 cmp(r9, Operand(0)); |
2384 b(eq, &profiler_disabled); | 2384 b(eq, &profiler_disabled); |
2385 | 2385 |
2386 // Additional parameter is the address of the actual callback. | 2386 // Additional parameter is the address of the actual callback. |
2387 mov(r3, Operand(thunk_ref)); | 2387 mov(r3, Operand(thunk_ref)); |
(...skipping 101 matching lines...) |
2489 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | 2489 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { |
2490 return has_frame_ || !stub->SometimesSetsUpAFrame(); | 2490 return has_frame_ || !stub->SometimesSetsUpAFrame(); |
2491 } | 2491 } |
2492 | 2492 |
2493 | 2493 |
2494 void MacroAssembler::IndexFromHash(Register hash, Register index) { | 2494 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
2495 // If the hash field contains an array index, pick it out. The assert checks | 2495 // If the hash field contains an array index, pick it out. The assert checks |
2496 // that the constants for the maximum number of digits for an array index | 2496 // that the constants for the maximum number of digits for an array index |
2497 // cached in the hash field and the number of bits reserved for it do not | 2497 // cached in the hash field and the number of bits reserved for it do not |
2498 // conflict. | 2498 // conflict. |
2499 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | 2499 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < |
2500 (1 << String::kArrayIndexValueBits)); | 2500 (1 << String::kArrayIndexValueBits)); |
2501 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); | 2501 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); |
2502 } | 2502 } |
2503 | 2503 |
2504 | 2504 |
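DecodeFieldToSmi<String::ArrayIndexValueBits> extracts the cached array-index bit field from the hash and leaves the result smi-tagged in one step. A sketch of the intended effect, assuming the standard one-bit, zero-valued smi tag; the shift and width parameters stand in for the BitField's:

    #include <cstdint>
    // Sketch only: extract a bit field, then smi-tag the result.
    int32_t DecodeFieldToSmi(uint32_t hash, int shift, int width) {
      uint32_t field = (hash >> shift) & ((1u << width) - 1);
      return static_cast<int32_t>(field << 1);  // kSmiTag == 0, kSmiTagSize == 1
    }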
2505 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) { | 2505 void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) { |
2506 if (CpuFeatures::IsSupported(VFP3)) { | 2506 if (CpuFeatures::IsSupported(VFP3)) { |
2507 vmov(value.low(), smi); | 2507 vmov(value.low(), smi); |
2508 vcvt_f64_s32(value, 1); | 2508 vcvt_f64_s32(value, 1); |
2509 } else { | 2509 } else { |
2510 SmiUntag(ip, smi); | 2510 SmiUntag(ip, smi); |
2511 vmov(value.low(), ip); | 2511 vmov(value.low(), ip); |
2512 vcvt_f64_s32(value, value.low()); | 2512 vcvt_f64_s32(value, value.low()); |
2513 } | 2513 } |
2514 } | 2514 } |
2515 | 2515 |
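The VFP3 path in SmiToDouble is a fixed-point trick: vcvt_f64_s32(value, 1) converts with one fractional bit, so the tagged smi (payload << 1, since the tag bit is zero) comes out divided by two, i.e. already untagged. Without VFP3 the code untags with SmiUntag first and does a plain conversion. In equation form, assuming the one-bit smi tag:

    // tagged = payload << 1                 (smi tag bit is 0)
    // vcvt with 1 fractional bit:
    //   result = (double)tagged / 2^1 = (double)payload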
2516 | 2516 |
2517 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, | 2517 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, |
2518 LowDwVfpRegister double_scratch) { | 2518 LowDwVfpRegister double_scratch) { |
2519 ASSERT(!double_input.is(double_scratch)); | 2519 DCHECK(!double_input.is(double_scratch)); |
2520 vcvt_s32_f64(double_scratch.low(), double_input); | 2520 vcvt_s32_f64(double_scratch.low(), double_input); |
2521 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2521 vcvt_f64_s32(double_scratch, double_scratch.low()); |
2522 VFPCompareAndSetFlags(double_input, double_scratch); | 2522 VFPCompareAndSetFlags(double_input, double_scratch); |
2523 } | 2523 } |
2524 | 2524 |
2525 | 2525 |
2526 void MacroAssembler::TryDoubleToInt32Exact(Register result, | 2526 void MacroAssembler::TryDoubleToInt32Exact(Register result, |
2527 DwVfpRegister double_input, | 2527 DwVfpRegister double_input, |
2528 LowDwVfpRegister double_scratch) { | 2528 LowDwVfpRegister double_scratch) { |
2529 ASSERT(!double_input.is(double_scratch)); | 2529 DCHECK(!double_input.is(double_scratch)); |
2530 vcvt_s32_f64(double_scratch.low(), double_input); | 2530 vcvt_s32_f64(double_scratch.low(), double_input); |
2531 vmov(result, double_scratch.low()); | 2531 vmov(result, double_scratch.low()); |
2532 vcvt_f64_s32(double_scratch, double_scratch.low()); | 2532 vcvt_f64_s32(double_scratch, double_scratch.low()); |
2533 VFPCompareAndSetFlags(double_input, double_scratch); | 2533 VFPCompareAndSetFlags(double_input, double_scratch); |
2534 } | 2534 } |
2535 | 2535 |
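TestDoubleIsInt32 and TryDoubleToInt32Exact share one idiom: truncate the double to int32, convert back, and compare; the VFP flags then say whether the round trip was lossless. A hedged C++ sketch that ignores the -0 and out-of-range corner cases the flag-based version distinguishes:

    #include <cmath>
    #include <cstdint>
    // Sketch only. vcvt_s32_f64 rounds toward zero, as assumed here; casting
    // an out-of-range double to int32_t is undefined in C++, so treat this
    // purely as an illustration of the round-trip comparison.
    bool DoubleIsInt32(double input, int32_t* out) {
      int32_t truncated = static_cast<int32_t>(input);   // vcvt_s32_f64
      *out = truncated;
      double back = static_cast<double>(truncated);      // vcvt_f64_s32
      return !std::isnan(input) && back == input;        // VFP compare
    }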
2536 | 2536 |
2537 void MacroAssembler::TryInt32Floor(Register result, | 2537 void MacroAssembler::TryInt32Floor(Register result, |
2538 DwVfpRegister double_input, | 2538 DwVfpRegister double_input, |
2539 Register input_high, | 2539 Register input_high, |
2540 LowDwVfpRegister double_scratch, | 2540 LowDwVfpRegister double_scratch, |
2541 Label* done, | 2541 Label* done, |
2542 Label* exact) { | 2542 Label* exact) { |
2543 ASSERT(!result.is(input_high)); | 2543 DCHECK(!result.is(input_high)); |
2544 ASSERT(!double_input.is(double_scratch)); | 2544 DCHECK(!double_input.is(double_scratch)); |
2545 Label negative, exception; | 2545 Label negative, exception; |
2546 | 2546 |
2547 VmovHigh(input_high, double_input); | 2547 VmovHigh(input_high, double_input); |
2548 | 2548 |
2549 // Test for NaN and infinities. | 2549 // Test for NaN and infinities. |
2550 Sbfx(result, input_high, | 2550 Sbfx(result, input_high, |
2551 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 2551 HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
2552 cmp(result, Operand(-1)); | 2552 cmp(result, Operand(-1)); |
2553 b(eq, &exception); | 2553 b(eq, &exception); |
2554 // Test for values that can be exactly represented as a | 2554 // Test for values that can be exactly represented as a |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2612 pop(lr); | 2612 pop(lr); |
2613 | 2613 |
2614 bind(&done); | 2614 bind(&done); |
2615 } | 2615 } |
2616 | 2616 |
2617 | 2617 |
2618 void MacroAssembler::TruncateHeapNumberToI(Register result, | 2618 void MacroAssembler::TruncateHeapNumberToI(Register result, |
2619 Register object) { | 2619 Register object) { |
2620 Label done; | 2620 Label done; |
2621 LowDwVfpRegister double_scratch = kScratchDoubleReg; | 2621 LowDwVfpRegister double_scratch = kScratchDoubleReg; |
2622 ASSERT(!result.is(object)); | 2622 DCHECK(!result.is(object)); |
2623 | 2623 |
2624 vldr(double_scratch, | 2624 vldr(double_scratch, |
2625 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); | 2625 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); |
2626 TryInlineTruncateDoubleToI(result, double_scratch, &done); | 2626 TryInlineTruncateDoubleToI(result, double_scratch, &done); |
2627 | 2627 |
2628 // If we fell through, the inline version didn't succeed; call the stub instead. | 2628 // If we fell through, the inline version didn't succeed; call the stub instead. |
2629 push(lr); | 2629 push(lr); |
2630 DoubleToIStub stub(isolate(), | 2630 DoubleToIStub stub(isolate(), |
2631 object, | 2631 object, |
2632 result, | 2632 result, |
2633 HeapNumber::kValueOffset - kHeapObjectTag, | 2633 HeapNumber::kValueOffset - kHeapObjectTag, |
2634 true, | 2634 true, |
2635 true); | 2635 true); |
2636 CallStub(&stub); | 2636 CallStub(&stub); |
2637 pop(lr); | 2637 pop(lr); |
2638 | 2638 |
2639 bind(&done); | 2639 bind(&done); |
2640 } | 2640 } |
2641 | 2641 |
2642 | 2642 |
2643 void MacroAssembler::TruncateNumberToI(Register object, | 2643 void MacroAssembler::TruncateNumberToI(Register object, |
2644 Register result, | 2644 Register result, |
2645 Register heap_number_map, | 2645 Register heap_number_map, |
2646 Register scratch1, | 2646 Register scratch1, |
2647 Label* not_number) { | 2647 Label* not_number) { |
2648 Label done; | 2648 Label done; |
2649 ASSERT(!result.is(object)); | 2649 DCHECK(!result.is(object)); |
2650 | 2650 |
2651 UntagAndJumpIfSmi(result, object, &done); | 2651 UntagAndJumpIfSmi(result, object, &done); |
2652 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 2652 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
2653 TruncateHeapNumberToI(result, object); | 2653 TruncateHeapNumberToI(result, object); |
2654 | 2654 |
2655 bind(&done); | 2655 bind(&done); |
2656 } | 2656 } |
2657 | 2657 |
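TruncateNumberToI handles the smi case inline (UntagAndJumpIfSmi) and only takes the heap-number path for boxed doubles; TruncateHeapNumberToI in turn tries an inline truncation and falls back to DoubleToIStub. The net effect is, on my reading, ECMAScript ToInt32: truncate toward zero, then reduce modulo 2^32. A sketch under that assumption:

    #include <cmath>
    #include <cstdint>
    // Sketch of the assumed ToInt32 semantics; NaN and infinities map to 0.
    int32_t ToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;
      double t = std::trunc(d);                 // round toward zero
      double m = std::fmod(t, 4294967296.0);    // in (-2^32, 2^32)
      if (m < 0) m += 4294967296.0;             // now in [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }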
2658 | 2658 |
2659 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 2659 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
(...skipping 63 matching lines...) |
2723 int result_size) { | 2723 int result_size) { |
2724 TailCallExternalReference(ExternalReference(fid, isolate()), | 2724 TailCallExternalReference(ExternalReference(fid, isolate()), |
2725 num_arguments, | 2725 num_arguments, |
2726 result_size); | 2726 result_size); |
2727 } | 2727 } |
2728 | 2728 |
2729 | 2729 |
2730 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { | 2730 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { |
2731 #if defined(__thumb__) | 2731 #if defined(__thumb__) |
2732 // Thumb mode builtin. | 2732 // Thumb mode builtin. |
2733 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); | 2733 DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); |
2734 #endif | 2734 #endif |
2735 mov(r1, Operand(builtin)); | 2735 mov(r1, Operand(builtin)); |
2736 CEntryStub stub(isolate(), 1); | 2736 CEntryStub stub(isolate(), 1); |
2737 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 2737 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
2738 } | 2738 } |
2739 | 2739 |
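The __thumb__ check above relies on the ARM interworking convention: bit 0 of a branch target selects the instruction set, so a Thumb-mode builtin must have an odd address. Trivially:

    #include <cstdint>
    // Sketch: the interworking convention assumed by the DCHECK above.
    bool IsThumbTarget(const void* addr) {
      return (reinterpret_cast<intptr_t>(addr) & 1) != 0;  // bit 0 => Thumb
    }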
2740 | 2740 |
2741 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | 2741 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
2742 InvokeFlag flag, | 2742 InvokeFlag flag, |
2743 const CallWrapper& call_wrapper) { | 2743 const CallWrapper& call_wrapper) { |
2744 // You can't call a builtin without a valid frame. | 2744 // You can't call a builtin without a valid frame. |
2745 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 2745 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
2746 | 2746 |
2747 GetBuiltinEntry(r2, id); | 2747 GetBuiltinEntry(r2, id); |
2748 if (flag == CALL_FUNCTION) { | 2748 if (flag == CALL_FUNCTION) { |
2749 call_wrapper.BeforeCall(CallSize(r2)); | 2749 call_wrapper.BeforeCall(CallSize(r2)); |
2750 Call(r2); | 2750 Call(r2); |
2751 call_wrapper.AfterCall(); | 2751 call_wrapper.AfterCall(); |
2752 } else { | 2752 } else { |
2753 ASSERT(flag == JUMP_FUNCTION); | 2753 DCHECK(flag == JUMP_FUNCTION); |
2754 Jump(r2); | 2754 Jump(r2); |
2755 } | 2755 } |
2756 } | 2756 } |
2757 | 2757 |
2758 | 2758 |
2759 void MacroAssembler::GetBuiltinFunction(Register target, | 2759 void MacroAssembler::GetBuiltinFunction(Register target, |
2760 Builtins::JavaScript id) { | 2760 Builtins::JavaScript id) { |
2761 // Load the builtins object into target register. | 2761 // Load the builtins object into target register. |
2762 ldr(target, | 2762 ldr(target, |
2763 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2763 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2764 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); | 2764 ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); |
2765 // Load the JavaScript builtin function from the builtins object. | 2765 // Load the JavaScript builtin function from the builtins object. |
2766 ldr(target, FieldMemOperand(target, | 2766 ldr(target, FieldMemOperand(target, |
2767 JSBuiltinsObject::OffsetOfFunctionWithId(id))); | 2767 JSBuiltinsObject::OffsetOfFunctionWithId(id))); |
2768 } | 2768 } |
2769 | 2769 |
2770 | 2770 |
2771 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { | 2771 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
2772 ASSERT(!target.is(r1)); | 2772 DCHECK(!target.is(r1)); |
2773 GetBuiltinFunction(r1, id); | 2773 GetBuiltinFunction(r1, id); |
2774 // Load the code entry point from the builtins object. | 2774 // Load the code entry point from the builtins object. |
2775 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 2775 ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
2776 } | 2776 } |
2777 | 2777 |
2778 | 2778 |
2779 void MacroAssembler::SetCounter(StatsCounter* counter, int value, | 2779 void MacroAssembler::SetCounter(StatsCounter* counter, int value, |
2780 Register scratch1, Register scratch2) { | 2780 Register scratch1, Register scratch2) { |
2781 if (FLAG_native_code_counters && counter->Enabled()) { | 2781 if (FLAG_native_code_counters && counter->Enabled()) { |
2782 mov(scratch1, Operand(value)); | 2782 mov(scratch1, Operand(value)); |
2783 mov(scratch2, Operand(ExternalReference(counter))); | 2783 mov(scratch2, Operand(ExternalReference(counter))); |
2784 str(scratch1, MemOperand(scratch2)); | 2784 str(scratch1, MemOperand(scratch2)); |
2785 } | 2785 } |
2786 } | 2786 } |
2787 | 2787 |
2788 | 2788 |
2789 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, | 2789 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, |
2790 Register scratch1, Register scratch2) { | 2790 Register scratch1, Register scratch2) { |
2791 ASSERT(value > 0); | 2791 DCHECK(value > 0); |
2792 if (FLAG_native_code_counters && counter->Enabled()) { | 2792 if (FLAG_native_code_counters && counter->Enabled()) { |
2793 mov(scratch2, Operand(ExternalReference(counter))); | 2793 mov(scratch2, Operand(ExternalReference(counter))); |
2794 ldr(scratch1, MemOperand(scratch2)); | 2794 ldr(scratch1, MemOperand(scratch2)); |
2795 add(scratch1, scratch1, Operand(value)); | 2795 add(scratch1, scratch1, Operand(value)); |
2796 str(scratch1, MemOperand(scratch2)); | 2796 str(scratch1, MemOperand(scratch2)); |
2797 } | 2797 } |
2798 } | 2798 } |
2799 | 2799 |
2800 | 2800 |
2801 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, | 2801 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
2802 Register scratch1, Register scratch2) { | 2802 Register scratch1, Register scratch2) { |
2803 ASSERT(value > 0); | 2803 DCHECK(value > 0); |
2804 if (FLAG_native_code_counters && counter->Enabled()) { | 2804 if (FLAG_native_code_counters && counter->Enabled()) { |
2805 mov(scratch2, Operand(ExternalReference(counter))); | 2805 mov(scratch2, Operand(ExternalReference(counter))); |
2806 ldr(scratch1, MemOperand(scratch2)); | 2806 ldr(scratch1, MemOperand(scratch2)); |
2807 sub(scratch1, scratch1, Operand(value)); | 2807 sub(scratch1, scratch1, Operand(value)); |
2808 str(scratch1, MemOperand(scratch2)); | 2808 str(scratch1, MemOperand(scratch2)); |
2809 } | 2809 } |
2810 } | 2810 } |
2811 | 2811 |
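SetCounter, IncrementCounter, and DecrementCounter are each a plain load-modify-store on the cell the counter's ExternalReference resolves to; note there is no atomicity, so these native-code counters are best-effort statistics. Roughly:

    // Sketch of IncrementCounter's effect, with 'cell' standing in for the
    // address behind ExternalReference(counter).
    void IncrementCounter(int* cell, int value) {
      *cell += value;  // ldr / add / str; guarded by FLAG_native_code_counters
    }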
2812 | 2812 |
2813 void MacroAssembler::Assert(Condition cond, BailoutReason reason) { | 2813 void MacroAssembler::Assert(Condition cond, BailoutReason reason) { |
2814 if (emit_debug_code()) | 2814 if (emit_debug_code()) |
2815 Check(cond, reason); | 2815 Check(cond, reason); |
2816 } | 2816 } |
2817 | 2817 |
2818 | 2818 |
2819 void MacroAssembler::AssertFastElements(Register elements) { | 2819 void MacroAssembler::AssertFastElements(Register elements) { |
2820 if (emit_debug_code()) { | 2820 if (emit_debug_code()) { |
2821 ASSERT(!elements.is(ip)); | 2821 DCHECK(!elements.is(ip)); |
2822 Label ok; | 2822 Label ok; |
2823 push(elements); | 2823 push(elements); |
2824 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); | 2824 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); |
2825 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 2825 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
2826 cmp(elements, ip); | 2826 cmp(elements, ip); |
2827 b(eq, &ok); | 2827 b(eq, &ok); |
2828 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); | 2828 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); |
2829 cmp(elements, ip); | 2829 cmp(elements, ip); |
2830 b(eq, &ok); | 2830 b(eq, &ok); |
2831 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); | 2831 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); |
(...skipping 43 matching lines...) |
2875 } else { | 2875 } else { |
2876 CallRuntime(Runtime::kAbort, 1); | 2876 CallRuntime(Runtime::kAbort, 1); |
2877 } | 2877 } |
2878 // will not return here | 2878 // will not return here |
2879 if (is_const_pool_blocked()) { | 2879 if (is_const_pool_blocked()) { |
2880 // If the calling code cares about the exact number of | 2880 // If the calling code cares about the exact number of |
2881 // instructions generated, we insert padding here to keep the size | 2881 // instructions generated, we insert padding here to keep the size |
2882 // of the Abort macro constant. | 2882 // of the Abort macro constant. |
2883 static const int kExpectedAbortInstructions = 7; | 2883 static const int kExpectedAbortInstructions = 7; |
2884 int abort_instructions = InstructionsGeneratedSince(&abort_start); | 2884 int abort_instructions = InstructionsGeneratedSince(&abort_start); |
2885 ASSERT(abort_instructions <= kExpectedAbortInstructions); | 2885 DCHECK(abort_instructions <= kExpectedAbortInstructions); |
2886 while (abort_instructions++ < kExpectedAbortInstructions) { | 2886 while (abort_instructions++ < kExpectedAbortInstructions) { |
2887 nop(); | 2887 nop(); |
2888 } | 2888 } |
2889 } | 2889 } |
2890 } | 2890 } |
2891 | 2891 |
2892 | 2892 |
2893 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 2893 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
2894 if (context_chain_length > 0) { | 2894 if (context_chain_length > 0) { |
2895 // Move up the chain of contexts to the context containing the slot. | 2895 // Move up the chain of contexts to the context containing the slot. |
(...skipping 586 matching lines...) |
3482 int num_double_arguments, | 3482 int num_double_arguments, |
3483 Register scratch) { | 3483 Register scratch) { |
3484 int frame_alignment = ActivationFrameAlignment(); | 3484 int frame_alignment = ActivationFrameAlignment(); |
3485 int stack_passed_arguments = CalculateStackPassedWords( | 3485 int stack_passed_arguments = CalculateStackPassedWords( |
3486 num_reg_arguments, num_double_arguments); | 3486 num_reg_arguments, num_double_arguments); |
3487 if (frame_alignment > kPointerSize) { | 3487 if (frame_alignment > kPointerSize) { |
3488 // Make stack end at alignment and make room for num_arguments - 4 words | 3488 // Make stack end at alignment and make room for num_arguments - 4 words |
3489 // and the original value of sp. | 3489 // and the original value of sp. |
3490 mov(scratch, sp); | 3490 mov(scratch, sp); |
3491 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); | 3491 sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); |
3492 ASSERT(IsPowerOf2(frame_alignment)); | 3492 DCHECK(IsPowerOf2(frame_alignment)); |
3493 and_(sp, sp, Operand(-frame_alignment)); | 3493 and_(sp, sp, Operand(-frame_alignment)); |
3494 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3494 str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
3495 } else { | 3495 } else { |
3496 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); | 3496 sub(sp, sp, Operand(stack_passed_arguments * kPointerSize)); |
3497 } | 3497 } |
3498 } | 3498 } |
3499 | 3499 |
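When the ABI frame alignment exceeds a word, PrepareCallCFunction reserves room for the stack-passed words plus one extra slot, rounds sp down to the (power-of-two) alignment, and stashes the old sp in the extra slot so the call epilogue can restore it. In C-like terms, with kPointerSize == 4:

    #include <cstdint>
    // Sketch only; the real code does this with mov/sub/and_/str on sp.
    uintptr_t PrepareStack(uintptr_t sp, int stack_passed_words,
                           uintptr_t alignment /* power of two */) {
      uintptr_t old_sp = sp;
      sp -= (stack_passed_words + 1) * 4;  // args plus the saved-sp slot
      sp &= ~(alignment - 1);              // and_(sp, sp, -frame_alignment)
      *reinterpret_cast<uintptr_t*>(sp + stack_passed_words * 4) = old_sp;
      return sp;
    }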
3500 | 3500 |
3501 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3501 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
3502 Register scratch) { | 3502 Register scratch) { |
3503 PrepareCallCFunction(num_reg_arguments, 0, scratch); | 3503 PrepareCallCFunction(num_reg_arguments, 0, scratch); |
3504 } | 3504 } |
3505 | 3505 |
3506 | 3506 |
3507 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) { | 3507 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) { |
3508 ASSERT(src.is(d0)); | 3508 DCHECK(src.is(d0)); |
3509 if (!use_eabi_hardfloat()) { | 3509 if (!use_eabi_hardfloat()) { |
3510 vmov(r0, r1, src); | 3510 vmov(r0, r1, src); |
3511 } | 3511 } |
3512 } | 3512 } |
3513 | 3513 |
3514 | 3514 |
3515 // On ARM this is just a synonym to make the purpose clear. | 3515 // On ARM this is just a synonym to make the purpose clear. |
3516 void MacroAssembler::MovToFloatResult(DwVfpRegister src) { | 3516 void MacroAssembler::MovToFloatResult(DwVfpRegister src) { |
3517 MovToFloatParameter(src); | 3517 MovToFloatParameter(src); |
3518 } | 3518 } |
3519 | 3519 |
3520 | 3520 |
3521 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, | 3521 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, |
3522 DwVfpRegister src2) { | 3522 DwVfpRegister src2) { |
3523 ASSERT(src1.is(d0)); | 3523 DCHECK(src1.is(d0)); |
3524 ASSERT(src2.is(d1)); | 3524 DCHECK(src2.is(d1)); |
3525 if (!use_eabi_hardfloat()) { | 3525 if (!use_eabi_hardfloat()) { |
3526 vmov(r0, r1, src1); | 3526 vmov(r0, r1, src1); |
3527 vmov(r2, r3, src2); | 3527 vmov(r2, r3, src2); |
3528 } | 3528 } |
3529 } | 3529 } |
3530 | 3530 |
3531 | 3531 |
3532 void MacroAssembler::CallCFunction(ExternalReference function, | 3532 void MacroAssembler::CallCFunction(ExternalReference function, |
3533 int num_reg_arguments, | 3533 int num_reg_arguments, |
3534 int num_double_arguments) { | 3534 int num_double_arguments) { |
(...skipping 17 matching lines...) |
3552 | 3552 |
3553 void MacroAssembler::CallCFunction(Register function, | 3553 void MacroAssembler::CallCFunction(Register function, |
3554 int num_arguments) { | 3554 int num_arguments) { |
3555 CallCFunction(function, num_arguments, 0); | 3555 CallCFunction(function, num_arguments, 0); |
3556 } | 3556 } |
3557 | 3557 |
3558 | 3558 |
3559 void MacroAssembler::CallCFunctionHelper(Register function, | 3559 void MacroAssembler::CallCFunctionHelper(Register function, |
3560 int num_reg_arguments, | 3560 int num_reg_arguments, |
3561 int num_double_arguments) { | 3561 int num_double_arguments) { |
3562 ASSERT(has_frame()); | 3562 DCHECK(has_frame()); |
3563 // Make sure that the stack is aligned before calling a C function unless | 3563 // Make sure that the stack is aligned before calling a C function unless |
3564 // running in the simulator. The simulator has its own alignment check which | 3564 // running in the simulator. The simulator has its own alignment check which |
3565 // provides more information. | 3565 // provides more information. |
3566 #if V8_HOST_ARCH_ARM | 3566 #if V8_HOST_ARCH_ARM |
3567 if (emit_debug_code()) { | 3567 if (emit_debug_code()) { |
3568 int frame_alignment = base::OS::ActivationFrameAlignment(); | 3568 int frame_alignment = base::OS::ActivationFrameAlignment(); |
3569 int frame_alignment_mask = frame_alignment - 1; | 3569 int frame_alignment_mask = frame_alignment - 1; |
3570 if (frame_alignment > kPointerSize) { | 3570 if (frame_alignment > kPointerSize) { |
3571 ASSERT(IsPowerOf2(frame_alignment)); | 3571 DCHECK(IsPowerOf2(frame_alignment)); |
3572 Label alignment_as_expected; | 3572 Label alignment_as_expected; |
3573 tst(sp, Operand(frame_alignment_mask)); | 3573 tst(sp, Operand(frame_alignment_mask)); |
3574 b(eq, &alignment_as_expected); | 3574 b(eq, &alignment_as_expected); |
3575 // Don't use Check here, as it will call Runtime_Abort possibly | 3575 // Don't use Check here, as it will call Runtime_Abort possibly |
3576 // re-entering here. | 3576 // re-entering here. |
3577 stop("Unexpected alignment"); | 3577 stop("Unexpected alignment"); |
3578 bind(&alignment_as_expected); | 3578 bind(&alignment_as_expected); |
3579 } | 3579 } |
3580 } | 3580 } |
3581 #endif | 3581 #endif |
(...skipping 104 matching lines...) |
3686 b(ne, if_deprecated); | 3686 b(ne, if_deprecated); |
3687 } | 3687 } |
3688 } | 3688 } |
3689 | 3689 |
3690 | 3690 |
3691 void MacroAssembler::JumpIfBlack(Register object, | 3691 void MacroAssembler::JumpIfBlack(Register object, |
3692 Register scratch0, | 3692 Register scratch0, |
3693 Register scratch1, | 3693 Register scratch1, |
3694 Label* on_black) { | 3694 Label* on_black) { |
3695 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. | 3695 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. |
3696 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | 3696 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
3697 } | 3697 } |
3698 | 3698 |
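JumpIfBlack is just HasColor with the bit pair (1, 0). The marking bitmap keeps two bits per object: the first (lower) bit is tested against mask_scratch and the second against the mask shifted left, with the patterns given by the Marking DCHECKs here and in EnsureNotWhite below (white 00, black 10, grey 11). A sketch of the test, ignoring the word-boundary case the assembly handles explicitly:

    #include <cstdint>
    // Sketch only: check a 2-bit color against (first_bit, second_bit).
    bool HasColor(uint32_t cell, uint32_t mask, int first_bit, int second_bit) {
      bool first  = (cell & mask) != 0;         // tst(ip, mask_scratch)
      bool second = (cell & (mask << 1)) != 0;  // mask doubled by the add
      return first == (first_bit == 1) && second == (second_bit == 1);
    }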
3699 | 3699 |
3700 void MacroAssembler::HasColor(Register object, | 3700 void MacroAssembler::HasColor(Register object, |
3701 Register bitmap_scratch, | 3701 Register bitmap_scratch, |
3702 Register mask_scratch, | 3702 Register mask_scratch, |
3703 Label* has_color, | 3703 Label* has_color, |
3704 int first_bit, | 3704 int first_bit, |
3705 int second_bit) { | 3705 int second_bit) { |
3706 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); | 3706 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); |
3707 | 3707 |
3708 GetMarkBits(object, bitmap_scratch, mask_scratch); | 3708 GetMarkBits(object, bitmap_scratch, mask_scratch); |
3709 | 3709 |
3710 Label other_color, word_boundary; | 3710 Label other_color, word_boundary; |
3711 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3711 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3712 tst(ip, Operand(mask_scratch)); | 3712 tst(ip, Operand(mask_scratch)); |
3713 b(first_bit == 1 ? eq : ne, &other_color); | 3713 b(first_bit == 1 ? eq : ne, &other_color); |
3714 // Shift left 1 by adding. | 3714 // Shift left 1 by adding. |
3715 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); | 3715 add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); |
3716 b(eq, &word_boundary); | 3716 b(eq, &word_boundary); |
(...skipping 12 matching lines...) |
3729 // Detect some, but not all, common pointer-free objects. This is used by the | 3729 // Detect some, but not all, common pointer-free objects. This is used by the |
3730 // incremental write barrier which doesn't care about oddballs (they are always | 3730 // incremental write barrier which doesn't care about oddballs (they are always |
3731 // marked black immediately so this code is not hit). | 3731 // marked black immediately so this code is not hit). |
3732 void MacroAssembler::JumpIfDataObject(Register value, | 3732 void MacroAssembler::JumpIfDataObject(Register value, |
3733 Register scratch, | 3733 Register scratch, |
3734 Label* not_data_object) { | 3734 Label* not_data_object) { |
3735 Label is_data_object; | 3735 Label is_data_object; |
3736 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); | 3736 ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); |
3737 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); | 3737 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); |
3738 b(eq, &is_data_object); | 3738 b(eq, &is_data_object); |
3739 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 3739 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
3740 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 3740 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
3741 // If it's a string and it's not a cons string then it's an object containing | 3741 // If it's a string and it's not a cons string then it's an object containing |
3742 // no GC pointers. | 3742 // no GC pointers. |
3743 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 3743 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
3744 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); | 3744 tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); |
3745 b(ne, not_data_object); | 3745 b(ne, not_data_object); |
3746 bind(&is_data_object); | 3746 bind(&is_data_object); |
3747 } | 3747 } |
3748 | 3748 |
3749 | 3749 |
3750 void MacroAssembler::GetMarkBits(Register addr_reg, | 3750 void MacroAssembler::GetMarkBits(Register addr_reg, |
3751 Register bitmap_reg, | 3751 Register bitmap_reg, |
3752 Register mask_reg) { | 3752 Register mask_reg) { |
3753 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); | 3753 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); |
3754 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); | 3754 and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); |
3755 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); | 3755 Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); |
3756 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | 3756 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
3757 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); | 3757 Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); |
3758 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); | 3758 add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); |
3759 mov(ip, Operand(1)); | 3759 mov(ip, Operand(1)); |
3760 mov(mask_reg, Operand(ip, LSL, mask_reg)); | 3760 mov(mask_reg, Operand(ip, LSL, mask_reg)); |
3761 } | 3761 } |
3762 | 3762 |
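GetMarkBits turns an object address into a (cell address, bit mask) pair inside the page's mark bitmap: clear the page-offset bits to get the page base, take the pointer-aligned low bits as the bit index within a 32-bit cell, and the bits above those as the cell index. A hedged sketch of the address arithmetic (the bitmap's MemoryChunk::kHeaderSize offset, applied at load time by the callers, is ignored here):

    #include <cstdint>
    // Sketch only; kPointerSizeLog2 == 2 on ARM.
    void GetMarkBits(uintptr_t addr, uintptr_t page_mask, int bits_per_cell_log2,
                     uintptr_t* cell_addr, uint32_t* mask) {
      uintptr_t page = addr & ~page_mask;                              // page base
      uint32_t bit = (addr >> 2) & ((1u << bits_per_cell_log2) - 1);   // Ubfx #1
      uintptr_t cell = (addr & page_mask) >> (2 + bits_per_cell_log2); // Ubfx #2
      *cell_addr = page + cell * 4;  // add(bitmap, bitmap, Operand(ip, LSL, 2))
      *mask = 1u << bit;             // mov(mask_reg, Operand(1 << mask_reg))
    }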
3763 | 3763 |
3764 void MacroAssembler::EnsureNotWhite( | 3764 void MacroAssembler::EnsureNotWhite( |
3765 Register value, | 3765 Register value, |
3766 Register bitmap_scratch, | 3766 Register bitmap_scratch, |
3767 Register mask_scratch, | 3767 Register mask_scratch, |
3768 Register load_scratch, | 3768 Register load_scratch, |
3769 Label* value_is_white_and_not_data) { | 3769 Label* value_is_white_and_not_data) { |
3770 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); | 3770 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); |
3771 GetMarkBits(value, bitmap_scratch, mask_scratch); | 3771 GetMarkBits(value, bitmap_scratch, mask_scratch); |
3772 | 3772 |
3773 // If the value is black or grey we don't need to do anything. | 3773 // If the value is black or grey we don't need to do anything. |
3774 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); | 3774 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
3775 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | 3775 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
3776 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); | 3776 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
3777 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | 3777 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
3778 | 3778 |
3779 Label done; | 3779 Label done; |
3780 | 3780 |
3781 // Since both black and grey have a 1 in the first position and white does | 3781 // Since both black and grey have a 1 in the first position and white does |
3782 // not have a 1 there we only need to check one bit. | 3782 // not have a 1 there we only need to check one bit. |
3783 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3783 ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3784 tst(mask_scratch, load_scratch); | 3784 tst(mask_scratch, load_scratch); |
3785 b(ne, &done); | 3785 b(ne, &done); |
3786 | 3786 |
3787 if (emit_debug_code()) { | 3787 if (emit_debug_code()) { |
(...skipping 12 matching lines...) |
3800 Register length = load_scratch; // Holds length of object after testing type. | 3800 Register length = load_scratch; // Holds length of object after testing type. |
3801 Label is_data_object; | 3801 Label is_data_object; |
3802 | 3802 |
3803 // Check for heap-number | 3803 // Check for heap-number |
3804 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); | 3804 ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
3805 CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 3805 CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
3806 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq); | 3806 mov(length, Operand(HeapNumber::kSize), LeaveCC, eq); |
3807 b(eq, &is_data_object); | 3807 b(eq, &is_data_object); |
3808 | 3808 |
3809 // Check for strings. | 3809 // Check for strings. |
3810 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 3810 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
3811 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 3811 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
3812 // If it's a string and it's not a cons string then it's an object containing | 3812 // If it's a string and it's not a cons string then it's an object containing |
3813 // no GC pointers. | 3813 // no GC pointers. |
3814 Register instance_type = load_scratch; | 3814 Register instance_type = load_scratch; |
3815 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 3815 ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
3816 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); | 3816 tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); |
3817 b(ne, value_is_white_and_not_data); | 3817 b(ne, value_is_white_and_not_data); |
3818 // It's a non-indirect (non-cons and non-slice) string. | 3818 // It's a non-indirect (non-cons and non-slice) string. |
3819 // If it's external, the length is just ExternalString::kSize. | 3819 // If it's external, the length is just ExternalString::kSize. |
3820 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). | 3820 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). |
3821 // External strings are the only ones with the kExternalStringTag bit | 3821 // External strings are the only ones with the kExternalStringTag bit |
3822 // set. | 3822 // set. |
3823 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); | 3823 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); |
3824 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); | 3824 DCHECK_EQ(0, kConsStringTag & kExternalStringTag); |
3825 tst(instance_type, Operand(kExternalStringTag)); | 3825 tst(instance_type, Operand(kExternalStringTag)); |
3826 mov(length, Operand(ExternalString::kSize), LeaveCC, ne); | 3826 mov(length, Operand(ExternalString::kSize), LeaveCC, ne); |
3827 b(ne, &is_data_object); | 3827 b(ne, &is_data_object); |
3828 | 3828 |
3829 // Sequential string, either ASCII or UC16. | 3829 // Sequential string, either ASCII or UC16. |
3830 // For ASCII (char-size of 1) we shift the smi tag away to get the length. | 3830 // For ASCII (char-size of 1) we shift the smi tag away to get the length. |
3831 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby | 3831 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby |
3832 // getting the length multiplied by 2. | 3832 // getting the length multiplied by 2. |
3833 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); | 3833 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); |
3834 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | 3834 DCHECK(kSmiTag == 0 && kSmiTagSize == 1); |
3835 ldr(ip, FieldMemOperand(value, String::kLengthOffset)); | 3835 ldr(ip, FieldMemOperand(value, String::kLengthOffset)); |
3836 tst(instance_type, Operand(kStringEncodingMask)); | 3836 tst(instance_type, Operand(kStringEncodingMask)); |
3837 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); | 3837 mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); |
3838 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); | 3838 add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); |
3839 and_(length, length, Operand(~kObjectAlignmentMask)); | 3839 and_(length, length, Operand(~kObjectAlignmentMask)); |
3840 | 3840 |
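The smi trick deserves spelling out: the length field holds the string length as a smi, i.e. shifted left by one (kSmiTag == 0, kSmiTagSize == 1). For one-byte strings the LSR #1 removes the tag, giving the byte count; for two-byte strings the tag is left in place, which is exactly length * 2. A minimal plain-C++ sketch of the resulting size computation, with assumed stand-in constants:

```cpp
#include <cstdint>

const int32_t kSeqStringHeaderSize = 12;  // stand-in for SeqString::kHeaderSize
const int32_t kObjectAlignmentMask = 3;   // assumed word alignment minus one

// smi_length is the raw length field, i.e. (string length << 1).
int32_t SequentialStringSize(int32_t smi_length, bool is_one_byte) {
  // One-byte: shift the smi tag away (1 byte per char).
  // Two-byte: keep the tag, doubling the length for free (2 bytes per char).
  int32_t data_bytes = is_one_byte ? (smi_length >> 1) : smi_length;
  // Round header + data up to the next object-alignment boundary, as the
  // add/and_ pair above does.
  return (kSeqStringHeaderSize + data_bytes + kObjectAlignmentMask) &
         ~kObjectAlignmentMask;
}
```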
3841 bind(&is_data_object); | 3841 bind(&is_data_object); |
3842 // Value is a data object, and it is white. Mark it black. Since we know | 3842 // Value is a data object, and it is white. Mark it black. Since we know |
3843 // that the object is white we can make it black by flipping one bit. | 3843 // that the object is white we can make it black by flipping one bit. |
3844 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3844 ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
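The flip itself happens in the elided lines that follow, but the idea stated in the comment is simple: the object is known to be white, so its first mark bit in the loaded cell is zero, and OR-ing the mark-bit mask in sets that bit, turning white into black in one operation. A one-line sketch:

```cpp
#include <cstdint>

// cell is the loaded bitmap word; mark_bit_mask selects the object's first
// mark bit, which is known to be 0 (white) here, so OR makes it black.
uint32_t MarkBlack(uint32_t cell, uint32_t mark_bit_mask) {
  return cell | mark_bit_mask;
}
```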
(...skipping 142 matching lines...) |
3987 UNREACHABLE(); | 3987 UNREACHABLE(); |
3988 return no_reg; | 3988 return no_reg; |
3989 } | 3989 } |
3990 | 3990 |
3991 | 3991 |
3992 void MacroAssembler::JumpIfDictionaryInPrototypeChain( | 3992 void MacroAssembler::JumpIfDictionaryInPrototypeChain( |
3993 Register object, | 3993 Register object, |
3994 Register scratch0, | 3994 Register scratch0, |
3995 Register scratch1, | 3995 Register scratch1, |
3996 Label* found) { | 3996 Label* found) { |
3997 ASSERT(!scratch1.is(scratch0)); | 3997 DCHECK(!scratch1.is(scratch0)); |
3998 Factory* factory = isolate()->factory(); | 3998 Factory* factory = isolate()->factory(); |
3999 Register current = scratch0; | 3999 Register current = scratch0; |
4000 Label loop_again; | 4000 Label loop_again; |
4001 | 4001 |
4002 // current (scratch0) starts the walk at the object itself. | 4002 // current (scratch0) starts the walk at the object itself. |
4003 mov(current, object); | 4003 mov(current, object); |
4004 | 4004 |
4005 // Loop based on the map going up the prototype chain. | 4005 // Loop based on the map going up the prototype chain. |
4006 bind(&loop_again); | 4006 bind(&loop_again); |
4007 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); | 4007 ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); |
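The dictionary-map test itself sits in the elided lines below, but the loop shape is straightforward: follow maps up the prototype chain and branch to `found` at the first dictionary-mode map. A hypothetical C++ model (the struct types are illustrative stand-ins, not V8's):

```cpp
struct Map {
  Map* prototype_map;      // map of the prototype object; nullptr at chain end
  bool is_dictionary_map;  // true for slow-mode (dictionary) property storage
};
struct HeapObject { Map* map; };

// Returning true stands in for the branch to the `found` label.
bool PrototypeChainHasDictionary(const HeapObject* object) {
  for (const Map* m = object->map; m != nullptr; m = m->prototype_map) {
    if (m->is_dictionary_map) return true;
  }
  return false;
}
```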
(...skipping 39 matching lines...) |
4047 CodePatcher::CodePatcher(byte* address, | 4047 CodePatcher::CodePatcher(byte* address, |
4048 int instructions, | 4048 int instructions, |
4049 FlushICache flush_cache) | 4049 FlushICache flush_cache) |
4050 : address_(address), | 4050 : address_(address), |
4051 size_(instructions * Assembler::kInstrSize), | 4051 size_(instructions * Assembler::kInstrSize), |
4052 masm_(NULL, address, size_ + Assembler::kGap), | 4052 masm_(NULL, address, size_ + Assembler::kGap), |
4053 flush_cache_(flush_cache) { | 4053 flush_cache_(flush_cache) { |
4054 // Create a new macro assembler pointing to the address of the code to patch. | 4054 // Create a new macro assembler pointing to the address of the code to patch. |
4055 // The size is adjusted with kGap in order for the assembler to generate size | 4055 // The size is adjusted with kGap in order for the assembler to generate size |
4056 // bytes of instructions without failing with buffer size constraints. | 4056 // bytes of instructions without failing with buffer size constraints. |
4057 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 4057 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
4058 } | 4058 } |
4059 | 4059 |
4060 | 4060 |
4061 CodePatcher::~CodePatcher() { | 4061 CodePatcher::~CodePatcher() { |
4062 // Indicate that code has changed. | 4062 // Indicate that code has changed. |
4063 if (flush_cache_ == FLUSH) { | 4063 if (flush_cache_ == FLUSH) { |
4064 CpuFeatures::FlushICache(address_, size_); | 4064 CpuFeatures::FlushICache(address_, size_); |
4065 } | 4065 } |
4066 | 4066 |
4067 // Check that the code was patched as expected. | 4067 // Check that the code was patched as expected. |
4068 ASSERT(masm_.pc_ == address_ + size_); | 4068 DCHECK(masm_.pc_ == address_ + size_); |
4069 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 4069 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
4070 } | 4070 } |
4071 | 4071 |
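A hypothetical usage sketch of the constructor/destructor pair above (assuming, per the destructor's comparison, that the FlushICache enum is reachable as CodePatcher::FLUSH; the helper name and instruction words are illustrative):

```cpp
// Overwrite two instruction words at patch_site; the destructor flushes the
// icache and checks exactly the promised number of bytes was emitted.
void PatchTwoInstructions(byte* patch_site, Instr first, Instr second) {
  CodePatcher patcher(patch_site, 2, CodePatcher::FLUSH);
  patcher.Emit(first);   // written at patch_site
  patcher.Emit(second);  // written at patch_site + Assembler::kInstrSize
}  // ~CodePatcher: FlushICache(patch_site, 2 * Assembler::kInstrSize)
```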
4072 | 4072 |
4073 void CodePatcher::Emit(Instr instr) { | 4073 void CodePatcher::Emit(Instr instr) { |
4074 masm()->emit(instr); | 4074 masm()->emit(instr); |
4075 } | 4075 } |
4076 | 4076 |
4077 | 4077 |
4078 void CodePatcher::Emit(Address addr) { | 4078 void CodePatcher::Emit(Address addr) { |
4079 masm()->emit(reinterpret_cast<Instr>(addr)); | 4079 masm()->emit(reinterpret_cast<Instr>(addr)); |
4080 } | 4080 } |
4081 | 4081 |
4082 | 4082 |
4083 void CodePatcher::EmitCondition(Condition cond) { | 4083 void CodePatcher::EmitCondition(Condition cond) { |
4084 Instr instr = Assembler::instr_at(masm_.pc_); | 4084 Instr instr = Assembler::instr_at(masm_.pc_); |
4085 instr = (instr & ~kCondMask) | cond; | 4085 instr = (instr & ~kCondMask) | cond; |
4086 masm_.emit(instr); | 4086 masm_.emit(instr); |
4087 } | 4087 } |
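EmitCondition works because on ARM the condition code occupies bits 31:28 of every instruction, and V8's Condition enum values are already shifted into that position (eq == 0 << 28, ne == 1 << 28, and so on), so a mask-and-or rewrites just that field. A standalone model:

```cpp
#include <cstdint>

const uint32_t kCondMask = 0xF0000000u;  // condition field, bits 31:28

// cond must already be shifted into bits 31:28, as V8's Condition values are.
uint32_t WithCondition(uint32_t instr, uint32_t cond) {
  return (instr & ~kCondMask) | cond;
}
```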
4088 | 4088 |
4089 | 4089 |
4090 void MacroAssembler::TruncatingDiv(Register result, | 4090 void MacroAssembler::TruncatingDiv(Register result, |
4091 Register dividend, | 4091 Register dividend, |
4092 int32_t divisor) { | 4092 int32_t divisor) { |
4093 ASSERT(!dividend.is(result)); | 4093 DCHECK(!dividend.is(result)); |
4094 ASSERT(!dividend.is(ip)); | 4094 DCHECK(!dividend.is(ip)); |
4095 ASSERT(!result.is(ip)); | 4095 DCHECK(!result.is(ip)); |
4096 MultiplierAndShift ms(divisor); | 4096 MultiplierAndShift ms(divisor); |
4097 mov(ip, Operand(ms.multiplier())); | 4097 mov(ip, Operand(ms.multiplier())); |
4098 smull(ip, result, dividend, ip); | 4098 smull(ip, result, dividend, ip); |
4099 if (divisor > 0 && ms.multiplier() < 0) { | 4099 if (divisor > 0 && ms.multiplier() < 0) { |
4100 add(result, result, Operand(dividend)); | 4100 add(result, result, Operand(dividend)); |
4101 } | 4101 } |
4102 if (divisor < 0 && ms.multiplier() > 0) { | 4102 if (divisor < 0 && ms.multiplier() > 0) { |
4103 sub(result, result, Operand(dividend)); | 4103 sub(result, result, Operand(dividend)); |
4104 } | 4104 } |
4105 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); | 4105 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); |
4106 add(result, result, Operand(dividend, LSR, 31)); | 4106 add(result, result, Operand(dividend, LSR, 31)); |
4107 } | 4107 } |
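TruncatingDiv replaces division by a compile-time constant with a multiply-high and shift (the classic "magic number" scheme, Hacker's Delight ch. 10). A plain-C++ model of the exact sequence emitted above; it takes the multiplier/shift pair as parameters since MultiplierAndShift is defined elsewhere:

```cpp
#include <cstdint>

int32_t TruncatingDivModel(int32_t dividend, int32_t multiplier, int shift,
                           int32_t divisor) {
  // smull keeps the high 32 bits of the 64-bit signed product.
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * multiplier) >> 32);
  // Undo the wraparound when the multiplier's sign disagrees with the divisor's.
  if (divisor > 0 && multiplier < 0) result += dividend;
  if (divisor < 0 && multiplier > 0) result -= dividend;
  if (shift > 0) result >>= shift;  // ASR by ms.shift()
  // LSR #31 of the dividend: add 1 for negative dividends, correcting the
  // floor-biased intermediate into a truncated (round-toward-zero) quotient.
  result += static_cast<uint32_t>(dividend) >> 31;
  return result;
}
```

For example, divisor 3 has the well-known pair (multiplier 0x55555556, shift 0): TruncatingDivModel(7, 0x55555556, 0, 3) yields 2 and TruncatingDivModel(-7, 0x55555556, 0, 3) yields -2, matching C++'s truncating division.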
4108 | 4108 |
4109 | 4109 |
4110 } } // namespace v8::internal | 4110 } } // namespace v8::internal |
4111 | 4111 |
4112 #endif // V8_TARGET_ARCH_ARM | 4112 #endif // V8_TARGET_ARCH_ARM |