OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/arm/lithium-codegen-arm.h" | 7 #include "src/arm/lithium-codegen-arm.h" |
8 #include "src/arm/lithium-gap-resolver-arm.h" | 8 #include "src/arm/lithium-gap-resolver-arm.h" |
9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
10 #include "src/hydrogen-osr.h" | 10 #include "src/hydrogen-osr.h" |
(...skipping 23 matching lines...) Expand all Loading... |
34 LCodeGen* codegen_; | 34 LCodeGen* codegen_; |
35 LPointerMap* pointers_; | 35 LPointerMap* pointers_; |
36 Safepoint::DeoptMode deopt_mode_; | 36 Safepoint::DeoptMode deopt_mode_; |
37 }; | 37 }; |
38 | 38 |
39 | 39 |
40 #define __ masm()-> | 40 #define __ masm()-> |
41 | 41 |
42 bool LCodeGen::GenerateCode() { | 42 bool LCodeGen::GenerateCode() { |
43 LPhase phase("Z_Code generation", chunk()); | 43 LPhase phase("Z_Code generation", chunk()); |
44 ASSERT(is_unused()); | 44 DCHECK(is_unused()); |
45 status_ = GENERATING; | 45 status_ = GENERATING; |
46 | 46 |
47 // Open a frame scope to indicate that there is a frame on the stack. The | 47 // Open a frame scope to indicate that there is a frame on the stack. The |
48 // NONE indicates that the scope shouldn't actually generate code to set up | 48 // NONE indicates that the scope shouldn't actually generate code to set up |
49 // the frame (that is done in GeneratePrologue). | 49 // the frame (that is done in GeneratePrologue). |
50 FrameScope frame_scope(masm_, StackFrame::NONE); | 50 FrameScope frame_scope(masm_, StackFrame::NONE); |
51 | 51 |
52 return GeneratePrologue() && | 52 return GeneratePrologue() && |
53 GenerateBody() && | 53 GenerateBody() && |
54 GenerateDeferredCode() && | 54 GenerateDeferredCode() && |
55 GenerateDeoptJumpTable() && | 55 GenerateDeoptJumpTable() && |
56 GenerateSafepointTable(); | 56 GenerateSafepointTable(); |
57 } | 57 } |
58 | 58 |
59 | 59 |
60 void LCodeGen::FinishCode(Handle<Code> code) { | 60 void LCodeGen::FinishCode(Handle<Code> code) { |
61 ASSERT(is_done()); | 61 DCHECK(is_done()); |
62 code->set_stack_slots(GetStackSlotCount()); | 62 code->set_stack_slots(GetStackSlotCount()); |
63 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); | 63 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
64 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); | 64 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); |
65 PopulateDeoptimizationData(code); | 65 PopulateDeoptimizationData(code); |
66 } | 66 } |
67 | 67 |
68 | 68 |
69 void LCodeGen::SaveCallerDoubles() { | 69 void LCodeGen::SaveCallerDoubles() { |
70 ASSERT(info()->saves_caller_doubles()); | 70 DCHECK(info()->saves_caller_doubles()); |
71 ASSERT(NeedsEagerFrame()); | 71 DCHECK(NeedsEagerFrame()); |
72 Comment(";;; Save clobbered callee double registers"); | 72 Comment(";;; Save clobbered callee double registers"); |
73 int count = 0; | 73 int count = 0; |
74 BitVector* doubles = chunk()->allocated_double_registers(); | 74 BitVector* doubles = chunk()->allocated_double_registers(); |
75 BitVector::Iterator save_iterator(doubles); | 75 BitVector::Iterator save_iterator(doubles); |
76 while (!save_iterator.Done()) { | 76 while (!save_iterator.Done()) { |
77 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), | 77 __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), |
78 MemOperand(sp, count * kDoubleSize)); | 78 MemOperand(sp, count * kDoubleSize)); |
79 save_iterator.Advance(); | 79 save_iterator.Advance(); |
80 count++; | 80 count++; |
81 } | 81 } |
82 } | 82 } |
83 | 83 |
84 | 84 |
85 void LCodeGen::RestoreCallerDoubles() { | 85 void LCodeGen::RestoreCallerDoubles() { |
86 ASSERT(info()->saves_caller_doubles()); | 86 DCHECK(info()->saves_caller_doubles()); |
87 ASSERT(NeedsEagerFrame()); | 87 DCHECK(NeedsEagerFrame()); |
88 Comment(";;; Restore clobbered callee double registers"); | 88 Comment(";;; Restore clobbered callee double registers"); |
89 BitVector* doubles = chunk()->allocated_double_registers(); | 89 BitVector* doubles = chunk()->allocated_double_registers(); |
90 BitVector::Iterator save_iterator(doubles); | 90 BitVector::Iterator save_iterator(doubles); |
91 int count = 0; | 91 int count = 0; |
92 while (!save_iterator.Done()) { | 92 while (!save_iterator.Done()) { |
93 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), | 93 __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), |
94 MemOperand(sp, count * kDoubleSize)); | 94 MemOperand(sp, count * kDoubleSize)); |
95 save_iterator.Advance(); | 95 save_iterator.Advance(); |
96 count++; | 96 count++; |
97 } | 97 } |
98 } | 98 } |
99 | 99 |
100 | 100 |
101 bool LCodeGen::GeneratePrologue() { | 101 bool LCodeGen::GeneratePrologue() { |
102 ASSERT(is_generating()); | 102 DCHECK(is_generating()); |
103 | 103 |
104 if (info()->IsOptimizing()) { | 104 if (info()->IsOptimizing()) { |
105 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 105 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
106 | 106 |
107 #ifdef DEBUG | 107 #ifdef DEBUG |
108 if (strlen(FLAG_stop_at) > 0 && | 108 if (strlen(FLAG_stop_at) > 0 && |
109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { | 109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { |
110 __ stop("stop_at"); | 110 __ stop("stop_at"); |
111 } | 111 } |
112 #endif | 112 #endif |
(...skipping 127 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
240 void LCodeGen::GenerateOsrPrologue() { | 240 void LCodeGen::GenerateOsrPrologue() { |
241 // Generate the OSR entry prologue at the first unknown OSR value, or if there | 241 // Generate the OSR entry prologue at the first unknown OSR value, or if there |
242 // are none, at the OSR entrypoint instruction. | 242 // are none, at the OSR entrypoint instruction. |
243 if (osr_pc_offset_ >= 0) return; | 243 if (osr_pc_offset_ >= 0) return; |
244 | 244 |
245 osr_pc_offset_ = masm()->pc_offset(); | 245 osr_pc_offset_ = masm()->pc_offset(); |
246 | 246 |
247 // Adjust the frame size, subsuming the unoptimized frame into the | 247 // Adjust the frame size, subsuming the unoptimized frame into the |
248 // optimized frame. | 248 // optimized frame. |
249 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); | 249 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); |
250 ASSERT(slots >= 0); | 250 DCHECK(slots >= 0); |
251 __ sub(sp, sp, Operand(slots * kPointerSize)); | 251 __ sub(sp, sp, Operand(slots * kPointerSize)); |
252 } | 252 } |
253 | 253 |
254 | 254 |
255 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { | 255 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { |
256 if (instr->IsCall()) { | 256 if (instr->IsCall()) { |
257 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 257 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
258 } | 258 } |
259 if (!instr->IsLazyBailout() && !instr->IsGap()) { | 259 if (!instr->IsLazyBailout() && !instr->IsGap()) { |
260 safepoints_.BumpLastLazySafepointIndex(); | 260 safepoints_.BumpLastLazySafepointIndex(); |
261 } | 261 } |
262 } | 262 } |
263 | 263 |
264 | 264 |
265 bool LCodeGen::GenerateDeferredCode() { | 265 bool LCodeGen::GenerateDeferredCode() { |
266 ASSERT(is_generating()); | 266 DCHECK(is_generating()); |
267 if (deferred_.length() > 0) { | 267 if (deferred_.length() > 0) { |
268 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 268 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
269 LDeferredCode* code = deferred_[i]; | 269 LDeferredCode* code = deferred_[i]; |
270 | 270 |
271 HValue* value = | 271 HValue* value = |
272 instructions_->at(code->instruction_index())->hydrogen_value(); | 272 instructions_->at(code->instruction_index())->hydrogen_value(); |
273 RecordAndWritePosition( | 273 RecordAndWritePosition( |
274 chunk()->graph()->SourcePositionToScriptPosition(value->position())); | 274 chunk()->graph()->SourcePositionToScriptPosition(value->position())); |
275 | 275 |
276 Comment(";;; <@%d,#%d> " | 276 Comment(";;; <@%d,#%d> " |
277 "-------------------- Deferred %s --------------------", | 277 "-------------------- Deferred %s --------------------", |
278 code->instruction_index(), | 278 code->instruction_index(), |
279 code->instr()->hydrogen_value()->id(), | 279 code->instr()->hydrogen_value()->id(), |
280 code->instr()->Mnemonic()); | 280 code->instr()->Mnemonic()); |
281 __ bind(code->entry()); | 281 __ bind(code->entry()); |
282 if (NeedsDeferredFrame()) { | 282 if (NeedsDeferredFrame()) { |
283 Comment(";;; Build frame"); | 283 Comment(";;; Build frame"); |
284 ASSERT(!frame_is_built_); | 284 DCHECK(!frame_is_built_); |
285 ASSERT(info()->IsStub()); | 285 DCHECK(info()->IsStub()); |
286 frame_is_built_ = true; | 286 frame_is_built_ = true; |
287 __ PushFixedFrame(); | 287 __ PushFixedFrame(); |
288 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); | 288 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
289 __ push(scratch0()); | 289 __ push(scratch0()); |
290 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 290 __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
291 Comment(";;; Deferred code"); | 291 Comment(";;; Deferred code"); |
292 } | 292 } |
293 code->Generate(); | 293 code->Generate(); |
294 if (NeedsDeferredFrame()) { | 294 if (NeedsDeferredFrame()) { |
295 Comment(";;; Destroy frame"); | 295 Comment(";;; Destroy frame"); |
296 ASSERT(frame_is_built_); | 296 DCHECK(frame_is_built_); |
297 __ pop(ip); | 297 __ pop(ip); |
298 __ PopFixedFrame(); | 298 __ PopFixedFrame(); |
299 frame_is_built_ = false; | 299 frame_is_built_ = false; |
300 } | 300 } |
301 __ jmp(code->exit()); | 301 __ jmp(code->exit()); |
302 } | 302 } |
303 } | 303 } |
304 | 304 |
305 // Force constant pool emission at the end of the deferred code to make | 305 // Force constant pool emission at the end of the deferred code to make |
306 // sure that no constant pools are emitted after. | 306 // sure that no constant pools are emitted after. |
(...skipping 22 matching lines...) Expand all Loading... |
329 Comment(";;; -------------------- Jump table --------------------"); | 329 Comment(";;; -------------------- Jump table --------------------"); |
330 Address base = deopt_jump_table_[0].address; | 330 Address base = deopt_jump_table_[0].address; |
331 | 331 |
332 Register entry_offset = scratch0(); | 332 Register entry_offset = scratch0(); |
333 | 333 |
334 int length = deopt_jump_table_.length(); | 334 int length = deopt_jump_table_.length(); |
335 for (int i = 0; i < length; i++) { | 335 for (int i = 0; i < length; i++) { |
336 __ bind(&deopt_jump_table_[i].label); | 336 __ bind(&deopt_jump_table_[i].label); |
337 | 337 |
338 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; | 338 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; |
339 ASSERT(type == deopt_jump_table_[0].bailout_type); | 339 DCHECK(type == deopt_jump_table_[0].bailout_type); |
340 Address entry = deopt_jump_table_[i].address; | 340 Address entry = deopt_jump_table_[i].address; |
341 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 341 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
342 ASSERT(id != Deoptimizer::kNotDeoptimizationEntry); | 342 DCHECK(id != Deoptimizer::kNotDeoptimizationEntry); |
343 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); | 343 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); |
344 | 344 |
345 // Second-level deopt table entries are contiguous and small, so instead | 345 // Second-level deopt table entries are contiguous and small, so instead |
346 // of loading the full, absolute address of each one, load an immediate | 346 // of loading the full, absolute address of each one, load an immediate |
347 // offset which will be added to the base address later. | 347 // offset which will be added to the base address later. |
348 __ mov(entry_offset, Operand(entry - base)); | 348 __ mov(entry_offset, Operand(entry - base)); |
349 | 349 |
350 if (deopt_jump_table_[i].needs_frame) { | 350 if (deopt_jump_table_[i].needs_frame) { |
351 ASSERT(!info()->saves_caller_doubles()); | 351 DCHECK(!info()->saves_caller_doubles()); |
352 if (needs_frame.is_bound()) { | 352 if (needs_frame.is_bound()) { |
353 __ b(&needs_frame); | 353 __ b(&needs_frame); |
354 } else { | 354 } else { |
355 __ bind(&needs_frame); | 355 __ bind(&needs_frame); |
356 Comment(";;; call deopt with frame"); | 356 Comment(";;; call deopt with frame"); |
357 __ PushFixedFrame(); | 357 __ PushFixedFrame(); |
358 // This variant of deopt can only be used with stubs. Since we don't | 358 // This variant of deopt can only be used with stubs. Since we don't |
359 // have a function pointer to install in the stack frame that we're | 359 // have a function pointer to install in the stack frame that we're |
360 // building, install a special marker there instead. | 360 // building, install a special marker there instead. |
361 ASSERT(info()->IsStub()); | 361 DCHECK(info()->IsStub()); |
362 __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB))); | 362 __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB))); |
363 __ push(ip); | 363 __ push(ip); |
364 __ add(fp, sp, | 364 __ add(fp, sp, |
365 Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 365 Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
366 __ bind(&call_deopt_entry); | 366 __ bind(&call_deopt_entry); |
367 // Add the base address to the offset previously loaded in | 367 // Add the base address to the offset previously loaded in |
368 // entry_offset. | 368 // entry_offset. |
369 __ add(entry_offset, entry_offset, | 369 __ add(entry_offset, entry_offset, |
370 Operand(ExternalReference::ForDeoptEntry(base))); | 370 Operand(ExternalReference::ForDeoptEntry(base))); |
371 __ blx(entry_offset); | 371 __ blx(entry_offset); |
372 } | 372 } |
373 | 373 |
374 masm()->CheckConstPool(false, false); | 374 masm()->CheckConstPool(false, false); |
375 } else { | 375 } else { |
376 // The last entry can fall through into `call_deopt_entry`, avoiding a | 376 // The last entry can fall through into `call_deopt_entry`, avoiding a |
377 // branch. | 377 // branch. |
378 bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound(); | 378 bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound(); |
379 | 379 |
380 if (need_branch) __ b(&call_deopt_entry); | 380 if (need_branch) __ b(&call_deopt_entry); |
381 | 381 |
382 masm()->CheckConstPool(false, !need_branch); | 382 masm()->CheckConstPool(false, !need_branch); |
383 } | 383 } |
384 } | 384 } |
385 | 385 |
386 if (!call_deopt_entry.is_bound()) { | 386 if (!call_deopt_entry.is_bound()) { |
387 Comment(";;; call deopt"); | 387 Comment(";;; call deopt"); |
388 __ bind(&call_deopt_entry); | 388 __ bind(&call_deopt_entry); |
389 | 389 |
390 if (info()->saves_caller_doubles()) { | 390 if (info()->saves_caller_doubles()) { |
391 ASSERT(info()->IsStub()); | 391 DCHECK(info()->IsStub()); |
392 RestoreCallerDoubles(); | 392 RestoreCallerDoubles(); |
393 } | 393 } |
394 | 394 |
395 // Add the base address to the offset previously loaded in entry_offset. | 395 // Add the base address to the offset previously loaded in entry_offset. |
396 __ add(entry_offset, entry_offset, | 396 __ add(entry_offset, entry_offset, |
397 Operand(ExternalReference::ForDeoptEntry(base))); | 397 Operand(ExternalReference::ForDeoptEntry(base))); |
398 __ blx(entry_offset); | 398 __ blx(entry_offset); |
399 } | 399 } |
400 } | 400 } |
401 | 401 |
402 // Force constant pool emission at the end of the deopt jump table to make | 402 // Force constant pool emission at the end of the deopt jump table to make |
403 // sure that no constant pools are emitted after. | 403 // sure that no constant pools are emitted after. |
404 masm()->CheckConstPool(true, false); | 404 masm()->CheckConstPool(true, false); |
405 | 405 |
406 // The deoptimization jump table is the last part of the instruction | 406 // The deoptimization jump table is the last part of the instruction |
407 // sequence. Mark the generated code as done unless we bailed out. | 407 // sequence. Mark the generated code as done unless we bailed out. |
408 if (!is_aborted()) status_ = DONE; | 408 if (!is_aborted()) status_ = DONE; |
409 return !is_aborted(); | 409 return !is_aborted(); |
410 } | 410 } |
411 | 411 |
412 | 412 |
413 bool LCodeGen::GenerateSafepointTable() { | 413 bool LCodeGen::GenerateSafepointTable() { |
414 ASSERT(is_done()); | 414 DCHECK(is_done()); |
415 safepoints_.Emit(masm(), GetStackSlotCount()); | 415 safepoints_.Emit(masm(), GetStackSlotCount()); |
416 return !is_aborted(); | 416 return !is_aborted(); |
417 } | 417 } |
418 | 418 |
419 | 419 |
420 Register LCodeGen::ToRegister(int index) const { | 420 Register LCodeGen::ToRegister(int index) const { |
421 return Register::FromAllocationIndex(index); | 421 return Register::FromAllocationIndex(index); |
422 } | 422 } |
423 | 423 |
424 | 424 |
425 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { | 425 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { |
426 return DwVfpRegister::FromAllocationIndex(index); | 426 return DwVfpRegister::FromAllocationIndex(index); |
427 } | 427 } |
428 | 428 |
429 | 429 |
430 Register LCodeGen::ToRegister(LOperand* op) const { | 430 Register LCodeGen::ToRegister(LOperand* op) const { |
431 ASSERT(op->IsRegister()); | 431 DCHECK(op->IsRegister()); |
432 return ToRegister(op->index()); | 432 return ToRegister(op->index()); |
433 } | 433 } |
434 | 434 |
435 | 435 |
436 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { | 436 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { |
437 if (op->IsRegister()) { | 437 if (op->IsRegister()) { |
438 return ToRegister(op->index()); | 438 return ToRegister(op->index()); |
439 } else if (op->IsConstantOperand()) { | 439 } else if (op->IsConstantOperand()) { |
440 LConstantOperand* const_op = LConstantOperand::cast(op); | 440 LConstantOperand* const_op = LConstantOperand::cast(op); |
441 HConstant* constant = chunk_->LookupConstant(const_op); | 441 HConstant* constant = chunk_->LookupConstant(const_op); |
442 Handle<Object> literal = constant->handle(isolate()); | 442 Handle<Object> literal = constant->handle(isolate()); |
443 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 443 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
444 if (r.IsInteger32()) { | 444 if (r.IsInteger32()) { |
445 ASSERT(literal->IsNumber()); | 445 DCHECK(literal->IsNumber()); |
446 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); | 446 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); |
447 } else if (r.IsDouble()) { | 447 } else if (r.IsDouble()) { |
448 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); | 448 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); |
449 } else { | 449 } else { |
450 ASSERT(r.IsSmiOrTagged()); | 450 DCHECK(r.IsSmiOrTagged()); |
451 __ Move(scratch, literal); | 451 __ Move(scratch, literal); |
452 } | 452 } |
453 return scratch; | 453 return scratch; |
454 } else if (op->IsStackSlot()) { | 454 } else if (op->IsStackSlot()) { |
455 __ ldr(scratch, ToMemOperand(op)); | 455 __ ldr(scratch, ToMemOperand(op)); |
456 return scratch; | 456 return scratch; |
457 } | 457 } |
458 UNREACHABLE(); | 458 UNREACHABLE(); |
459 return scratch; | 459 return scratch; |
460 } | 460 } |
461 | 461 |
462 | 462 |
463 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 463 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
464 ASSERT(op->IsDoubleRegister()); | 464 DCHECK(op->IsDoubleRegister()); |
465 return ToDoubleRegister(op->index()); | 465 return ToDoubleRegister(op->index()); |
466 } | 466 } |
467 | 467 |
468 | 468 |
469 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, | 469 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, |
470 SwVfpRegister flt_scratch, | 470 SwVfpRegister flt_scratch, |
471 DwVfpRegister dbl_scratch) { | 471 DwVfpRegister dbl_scratch) { |
472 if (op->IsDoubleRegister()) { | 472 if (op->IsDoubleRegister()) { |
473 return ToDoubleRegister(op->index()); | 473 return ToDoubleRegister(op->index()); |
474 } else if (op->IsConstantOperand()) { | 474 } else if (op->IsConstantOperand()) { |
475 LConstantOperand* const_op = LConstantOperand::cast(op); | 475 LConstantOperand* const_op = LConstantOperand::cast(op); |
476 HConstant* constant = chunk_->LookupConstant(const_op); | 476 HConstant* constant = chunk_->LookupConstant(const_op); |
477 Handle<Object> literal = constant->handle(isolate()); | 477 Handle<Object> literal = constant->handle(isolate()); |
478 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 478 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
479 if (r.IsInteger32()) { | 479 if (r.IsInteger32()) { |
480 ASSERT(literal->IsNumber()); | 480 DCHECK(literal->IsNumber()); |
481 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); | 481 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); |
482 __ vmov(flt_scratch, ip); | 482 __ vmov(flt_scratch, ip); |
483 __ vcvt_f64_s32(dbl_scratch, flt_scratch); | 483 __ vcvt_f64_s32(dbl_scratch, flt_scratch); |
484 return dbl_scratch; | 484 return dbl_scratch; |
485 } else if (r.IsDouble()) { | 485 } else if (r.IsDouble()) { |
486 Abort(kUnsupportedDoubleImmediate); | 486 Abort(kUnsupportedDoubleImmediate); |
487 } else if (r.IsTagged()) { | 487 } else if (r.IsTagged()) { |
488 Abort(kUnsupportedTaggedImmediate); | 488 Abort(kUnsupportedTaggedImmediate); |
489 } | 489 } |
490 } else if (op->IsStackSlot()) { | 490 } else if (op->IsStackSlot()) { |
491 // TODO(regis): Why is vldr not taking a MemOperand? | 491 // TODO(regis): Why is vldr not taking a MemOperand? |
492 // __ vldr(dbl_scratch, ToMemOperand(op)); | 492 // __ vldr(dbl_scratch, ToMemOperand(op)); |
493 MemOperand mem_op = ToMemOperand(op); | 493 MemOperand mem_op = ToMemOperand(op); |
494 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset()); | 494 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset()); |
495 return dbl_scratch; | 495 return dbl_scratch; |
496 } | 496 } |
497 UNREACHABLE(); | 497 UNREACHABLE(); |
498 return dbl_scratch; | 498 return dbl_scratch; |
499 } | 499 } |
500 | 500 |
501 | 501 |
502 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { | 502 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { |
503 HConstant* constant = chunk_->LookupConstant(op); | 503 HConstant* constant = chunk_->LookupConstant(op); |
504 ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); | 504 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); |
505 return constant->handle(isolate()); | 505 return constant->handle(isolate()); |
506 } | 506 } |
507 | 507 |
508 | 508 |
509 bool LCodeGen::IsInteger32(LConstantOperand* op) const { | 509 bool LCodeGen::IsInteger32(LConstantOperand* op) const { |
510 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); | 510 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); |
511 } | 511 } |
512 | 512 |
513 | 513 |
514 bool LCodeGen::IsSmi(LConstantOperand* op) const { | 514 bool LCodeGen::IsSmi(LConstantOperand* op) const { |
515 return chunk_->LookupLiteralRepresentation(op).IsSmi(); | 515 return chunk_->LookupLiteralRepresentation(op).IsSmi(); |
516 } | 516 } |
517 | 517 |
518 | 518 |
519 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { | 519 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { |
520 return ToRepresentation(op, Representation::Integer32()); | 520 return ToRepresentation(op, Representation::Integer32()); |
521 } | 521 } |
522 | 522 |
523 | 523 |
524 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, | 524 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, |
525 const Representation& r) const { | 525 const Representation& r) const { |
526 HConstant* constant = chunk_->LookupConstant(op); | 526 HConstant* constant = chunk_->LookupConstant(op); |
527 int32_t value = constant->Integer32Value(); | 527 int32_t value = constant->Integer32Value(); |
528 if (r.IsInteger32()) return value; | 528 if (r.IsInteger32()) return value; |
529 ASSERT(r.IsSmiOrTagged()); | 529 DCHECK(r.IsSmiOrTagged()); |
530 return reinterpret_cast<int32_t>(Smi::FromInt(value)); | 530 return reinterpret_cast<int32_t>(Smi::FromInt(value)); |
531 } | 531 } |
532 | 532 |
533 | 533 |
534 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { | 534 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { |
535 HConstant* constant = chunk_->LookupConstant(op); | 535 HConstant* constant = chunk_->LookupConstant(op); |
536 return Smi::FromInt(constant->Integer32Value()); | 536 return Smi::FromInt(constant->Integer32Value()); |
537 } | 537 } |
538 | 538 |
539 | 539 |
540 double LCodeGen::ToDouble(LConstantOperand* op) const { | 540 double LCodeGen::ToDouble(LConstantOperand* op) const { |
541 HConstant* constant = chunk_->LookupConstant(op); | 541 HConstant* constant = chunk_->LookupConstant(op); |
542 ASSERT(constant->HasDoubleValue()); | 542 DCHECK(constant->HasDoubleValue()); |
543 return constant->DoubleValue(); | 543 return constant->DoubleValue(); |
544 } | 544 } |
545 | 545 |
546 | 546 |
547 Operand LCodeGen::ToOperand(LOperand* op) { | 547 Operand LCodeGen::ToOperand(LOperand* op) { |
548 if (op->IsConstantOperand()) { | 548 if (op->IsConstantOperand()) { |
549 LConstantOperand* const_op = LConstantOperand::cast(op); | 549 LConstantOperand* const_op = LConstantOperand::cast(op); |
550 HConstant* constant = chunk()->LookupConstant(const_op); | 550 HConstant* constant = chunk()->LookupConstant(const_op); |
551 Representation r = chunk_->LookupLiteralRepresentation(const_op); | 551 Representation r = chunk_->LookupLiteralRepresentation(const_op); |
552 if (r.IsSmi()) { | 552 if (r.IsSmi()) { |
553 ASSERT(constant->HasSmiValue()); | 553 DCHECK(constant->HasSmiValue()); |
554 return Operand(Smi::FromInt(constant->Integer32Value())); | 554 return Operand(Smi::FromInt(constant->Integer32Value())); |
555 } else if (r.IsInteger32()) { | 555 } else if (r.IsInteger32()) { |
556 ASSERT(constant->HasInteger32Value()); | 556 DCHECK(constant->HasInteger32Value()); |
557 return Operand(constant->Integer32Value()); | 557 return Operand(constant->Integer32Value()); |
558 } else if (r.IsDouble()) { | 558 } else if (r.IsDouble()) { |
559 Abort(kToOperandUnsupportedDoubleImmediate); | 559 Abort(kToOperandUnsupportedDoubleImmediate); |
560 } | 560 } |
561 ASSERT(r.IsTagged()); | 561 DCHECK(r.IsTagged()); |
562 return Operand(constant->handle(isolate())); | 562 return Operand(constant->handle(isolate())); |
563 } else if (op->IsRegister()) { | 563 } else if (op->IsRegister()) { |
564 return Operand(ToRegister(op)); | 564 return Operand(ToRegister(op)); |
565 } else if (op->IsDoubleRegister()) { | 565 } else if (op->IsDoubleRegister()) { |
566 Abort(kToOperandIsDoubleRegisterUnimplemented); | 566 Abort(kToOperandIsDoubleRegisterUnimplemented); |
567 return Operand::Zero(); | 567 return Operand::Zero(); |
568 } | 568 } |
569 // Stack slots not implemented, use ToMemOperand instead. | 569 // Stack slots not implemented, use ToMemOperand instead. |
570 UNREACHABLE(); | 570 UNREACHABLE(); |
571 return Operand::Zero(); | 571 return Operand::Zero(); |
572 } | 572 } |
573 | 573 |
574 | 574 |
575 static int ArgumentsOffsetWithoutFrame(int index) { | 575 static int ArgumentsOffsetWithoutFrame(int index) { |
576 ASSERT(index < 0); | 576 DCHECK(index < 0); |
577 return -(index + 1) * kPointerSize; | 577 return -(index + 1) * kPointerSize; |
578 } | 578 } |
579 | 579 |
580 | 580 |
581 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { | 581 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { |
582 ASSERT(!op->IsRegister()); | 582 DCHECK(!op->IsRegister()); |
583 ASSERT(!op->IsDoubleRegister()); | 583 DCHECK(!op->IsDoubleRegister()); |
584 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); | 584 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); |
585 if (NeedsEagerFrame()) { | 585 if (NeedsEagerFrame()) { |
586 return MemOperand(fp, StackSlotOffset(op->index())); | 586 return MemOperand(fp, StackSlotOffset(op->index())); |
587 } else { | 587 } else { |
588 // Retrieve parameter without eager stack-frame relative to the | 588 // Retrieve parameter without eager stack-frame relative to the |
589 // stack-pointer. | 589 // stack-pointer. |
590 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); | 590 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); |
591 } | 591 } |
592 } | 592 } |
593 | 593 |
594 | 594 |
595 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { | 595 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { |
596 ASSERT(op->IsDoubleStackSlot()); | 596 DCHECK(op->IsDoubleStackSlot()); |
597 if (NeedsEagerFrame()) { | 597 if (NeedsEagerFrame()) { |
598 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); | 598 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); |
599 } else { | 599 } else { |
600 // Retrieve parameter without eager stack-frame relative to the | 600 // Retrieve parameter without eager stack-frame relative to the |
601 // stack-pointer. | 601 // stack-pointer. |
602 return MemOperand( | 602 return MemOperand( |
603 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); | 603 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); |
604 } | 604 } |
605 } | 605 } |
606 | 606 |
(...skipping 15 matching lines...) Expand all Loading... |
622 : Translation::kSelfLiteralId; | 622 : Translation::kSelfLiteralId; |
623 | 623 |
624 switch (environment->frame_type()) { | 624 switch (environment->frame_type()) { |
625 case JS_FUNCTION: | 625 case JS_FUNCTION: |
626 translation->BeginJSFrame(environment->ast_id(), closure_id, height); | 626 translation->BeginJSFrame(environment->ast_id(), closure_id, height); |
627 break; | 627 break; |
628 case JS_CONSTRUCT: | 628 case JS_CONSTRUCT: |
629 translation->BeginConstructStubFrame(closure_id, translation_size); | 629 translation->BeginConstructStubFrame(closure_id, translation_size); |
630 break; | 630 break; |
631 case JS_GETTER: | 631 case JS_GETTER: |
632 ASSERT(translation_size == 1); | 632 DCHECK(translation_size == 1); |
633 ASSERT(height == 0); | 633 DCHECK(height == 0); |
634 translation->BeginGetterStubFrame(closure_id); | 634 translation->BeginGetterStubFrame(closure_id); |
635 break; | 635 break; |
636 case JS_SETTER: | 636 case JS_SETTER: |
637 ASSERT(translation_size == 2); | 637 DCHECK(translation_size == 2); |
638 ASSERT(height == 0); | 638 DCHECK(height == 0); |
639 translation->BeginSetterStubFrame(closure_id); | 639 translation->BeginSetterStubFrame(closure_id); |
640 break; | 640 break; |
641 case STUB: | 641 case STUB: |
642 translation->BeginCompiledStubFrame(); | 642 translation->BeginCompiledStubFrame(); |
643 break; | 643 break; |
644 case ARGUMENTS_ADAPTOR: | 644 case ARGUMENTS_ADAPTOR: |
645 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); | 645 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); |
646 break; | 646 break; |
647 } | 647 } |
648 | 648 |
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
745 TargetAddressStorageMode storage_mode) { | 745 TargetAddressStorageMode storage_mode) { |
746 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode); | 746 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode); |
747 } | 747 } |
748 | 748 |
749 | 749 |
// Emits a call to |code| and records the safepoint needed for lazy
// deoptimization. For BINARY_OP_IC / COMPARE_IC targets it additionally
// emits a trailing nop, which the patcher inspects to decide whether
// inlined smi code is present at the call site.
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               TargetAddressStorageMode storage_mode) {
  DCHECK(instr != NULL);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}
769 | 769 |
770 | 770 |
// Emits a runtime call for |instr| and records a simple safepoint so the
// call site can lazily deoptimize. |save_doubles| controls whether the
// runtime stub preserves FP registers across the call.
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
781 | 781 |
782 | 782 |
783 void LCodeGen::LoadContextFromDeferred(LOperand* context) { | 783 void LCodeGen::LoadContextFromDeferred(LOperand* context) { |
784 if (context->IsRegister()) { | 784 if (context->IsRegister()) { |
785 __ Move(cp, ToRegister(context)); | 785 __ Move(cp, ToRegister(context)); |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
840 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 840 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
841 deoptimizations_.Add(environment, zone()); | 841 deoptimizations_.Add(environment, zone()); |
842 } | 842 } |
843 } | 843 } |
844 | 844 |
845 | 845 |
846 void LCodeGen::DeoptimizeIf(Condition condition, | 846 void LCodeGen::DeoptimizeIf(Condition condition, |
847 LEnvironment* environment, | 847 LEnvironment* environment, |
848 Deoptimizer::BailoutType bailout_type) { | 848 Deoptimizer::BailoutType bailout_type) { |
849 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 849 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
850 ASSERT(environment->HasBeenRegistered()); | 850 DCHECK(environment->HasBeenRegistered()); |
851 int id = environment->deoptimization_index(); | 851 int id = environment->deoptimization_index(); |
852 ASSERT(info()->IsOptimizing() || info()->IsStub()); | 852 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
853 Address entry = | 853 Address entry = |
854 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 854 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
855 if (entry == NULL) { | 855 if (entry == NULL) { |
856 Abort(kBailoutWasNotPrepared); | 856 Abort(kBailoutWasNotPrepared); |
857 return; | 857 return; |
858 } | 858 } |
859 | 859 |
860 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { | 860 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { |
861 Register scratch = scratch0(); | 861 Register scratch = scratch0(); |
862 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); | 862 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); |
(...skipping 26 matching lines...) Expand all Loading... |
889 if (condition != al) { | 889 if (condition != al) { |
890 condition = ne; | 890 condition = ne; |
891 __ cmp(scratch, Operand::Zero()); | 891 __ cmp(scratch, Operand::Zero()); |
892 } | 892 } |
893 } | 893 } |
894 | 894 |
895 if (info()->ShouldTrapOnDeopt()) { | 895 if (info()->ShouldTrapOnDeopt()) { |
896 __ stop("trap_on_deopt", condition); | 896 __ stop("trap_on_deopt", condition); |
897 } | 897 } |
898 | 898 |
899 ASSERT(info()->IsStub() || frame_is_built_); | 899 DCHECK(info()->IsStub() || frame_is_built_); |
900 // Go through jump table if we need to handle condition, build frame, or | 900 // Go through jump table if we need to handle condition, build frame, or |
901 // restore caller doubles. | 901 // restore caller doubles. |
902 if (condition == al && frame_is_built_ && | 902 if (condition == al && frame_is_built_ && |
903 !info()->saves_caller_doubles()) { | 903 !info()->saves_caller_doubles()) { |
904 __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 904 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
905 } else { | 905 } else { |
906 // We often have several deopts to the same entry, reuse the last | 906 // We often have several deopts to the same entry, reuse the last |
907 // jump entry if this is the case. | 907 // jump entry if this is the case. |
908 if (deopt_jump_table_.is_empty() || | 908 if (deopt_jump_table_.is_empty() || |
909 (deopt_jump_table_.last().address != entry) || | 909 (deopt_jump_table_.last().address != entry) || |
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
976 int result = deoptimization_literals_.length(); | 976 int result = deoptimization_literals_.length(); |
977 for (int i = 0; i < deoptimization_literals_.length(); ++i) { | 977 for (int i = 0; i < deoptimization_literals_.length(); ++i) { |
978 if (deoptimization_literals_[i].is_identical_to(literal)) return i; | 978 if (deoptimization_literals_[i].is_identical_to(literal)) return i; |
979 } | 979 } |
980 deoptimization_literals_.Add(literal, zone()); | 980 deoptimization_literals_.Add(literal, zone()); |
981 return result; | 981 return result; |
982 } | 982 } |
983 | 983 |
984 | 984 |
985 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { | 985 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { |
986 ASSERT(deoptimization_literals_.length() == 0); | 986 DCHECK(deoptimization_literals_.length() == 0); |
987 | 987 |
988 const ZoneList<Handle<JSFunction> >* inlined_closures = | 988 const ZoneList<Handle<JSFunction> >* inlined_closures = |
989 chunk()->inlined_closures(); | 989 chunk()->inlined_closures(); |
990 | 990 |
991 for (int i = 0, length = inlined_closures->length(); | 991 for (int i = 0, length = inlined_closures->length(); |
992 i < length; | 992 i < length; |
993 i++) { | 993 i++) { |
994 DefineDeoptimizationLiteral(inlined_closures->at(i)); | 994 DefineDeoptimizationLiteral(inlined_closures->at(i)); |
995 } | 995 } |
996 | 996 |
997 inlined_function_count_ = deoptimization_literals_.length(); | 997 inlined_function_count_ = deoptimization_literals_.length(); |
998 } | 998 } |
999 | 999 |
1000 | 1000 |
1001 void LCodeGen::RecordSafepointWithLazyDeopt( | 1001 void LCodeGen::RecordSafepointWithLazyDeopt( |
1002 LInstruction* instr, SafepointMode safepoint_mode) { | 1002 LInstruction* instr, SafepointMode safepoint_mode) { |
1003 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { | 1003 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { |
1004 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); | 1004 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); |
1005 } else { | 1005 } else { |
1006 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 1006 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
1007 RecordSafepointWithRegisters( | 1007 RecordSafepointWithRegisters( |
1008 instr->pointer_map(), 0, Safepoint::kLazyDeopt); | 1008 instr->pointer_map(), 0, Safepoint::kLazyDeopt); |
1009 } | 1009 } |
1010 } | 1010 } |
1011 | 1011 |
1012 | 1012 |
1013 void LCodeGen::RecordSafepoint( | 1013 void LCodeGen::RecordSafepoint( |
1014 LPointerMap* pointers, | 1014 LPointerMap* pointers, |
1015 Safepoint::Kind kind, | 1015 Safepoint::Kind kind, |
1016 int arguments, | 1016 int arguments, |
1017 Safepoint::DeoptMode deopt_mode) { | 1017 Safepoint::DeoptMode deopt_mode) { |
1018 ASSERT(expected_safepoint_kind_ == kind); | 1018 DCHECK(expected_safepoint_kind_ == kind); |
1019 | 1019 |
1020 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); | 1020 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); |
1021 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), | 1021 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), |
1022 kind, arguments, deopt_mode); | 1022 kind, arguments, deopt_mode); |
1023 for (int i = 0; i < operands->length(); i++) { | 1023 for (int i = 0; i < operands->length(); i++) { |
1024 LOperand* pointer = operands->at(i); | 1024 LOperand* pointer = operands->at(i); |
1025 if (pointer->IsStackSlot()) { | 1025 if (pointer->IsStackSlot()) { |
1026 safepoint.DefinePointerSlot(pointer->index(), zone()); | 1026 safepoint.DefinePointerSlot(pointer->index(), zone()); |
1027 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { | 1027 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { |
1028 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); | 1028 safepoint.DefinePointerRegister(ToRegister(pointer), zone()); |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1101 DoGap(instr); | 1101 DoGap(instr); |
1102 } | 1102 } |
1103 | 1103 |
1104 | 1104 |
// Parameters need no code: their values are already in place when the
// optimized frame is entered, so this instruction is a no-op at codegen
// time.
void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
1108 | 1108 |
1109 | 1109 |
1110 void LCodeGen::DoCallStub(LCallStub* instr) { | 1110 void LCodeGen::DoCallStub(LCallStub* instr) { |
1111 ASSERT(ToRegister(instr->context()).is(cp)); | 1111 DCHECK(ToRegister(instr->context()).is(cp)); |
1112 ASSERT(ToRegister(instr->result()).is(r0)); | 1112 DCHECK(ToRegister(instr->result()).is(r0)); |
1113 switch (instr->hydrogen()->major_key()) { | 1113 switch (instr->hydrogen()->major_key()) { |
1114 case CodeStub::RegExpExec: { | 1114 case CodeStub::RegExpExec: { |
1115 RegExpExecStub stub(isolate()); | 1115 RegExpExecStub stub(isolate()); |
1116 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 1116 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
1117 break; | 1117 break; |
1118 } | 1118 } |
1119 case CodeStub::SubString: { | 1119 case CodeStub::SubString: { |
1120 SubStringStub stub(isolate()); | 1120 SubStringStub stub(isolate()); |
1121 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 1121 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
1122 break; | 1122 break; |
(...skipping 10 matching lines...) Expand all Loading... |
1133 | 1133 |
1134 | 1134 |
// An unknown OSR value marks the point where on-stack-replacement entry
// state is set up; all the work happens in the OSR prologue.
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}
1138 | 1138 |
1139 | 1139 |
1140 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { | 1140 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { |
1141 Register dividend = ToRegister(instr->dividend()); | 1141 Register dividend = ToRegister(instr->dividend()); |
1142 int32_t divisor = instr->divisor(); | 1142 int32_t divisor = instr->divisor(); |
1143 ASSERT(dividend.is(ToRegister(instr->result()))); | 1143 DCHECK(dividend.is(ToRegister(instr->result()))); |
1144 | 1144 |
1145 // Theoretically, a variation of the branch-free code for integer division by | 1145 // Theoretically, a variation of the branch-free code for integer division by |
1146 // a power of 2 (calculating the remainder via an additional multiplication | 1146 // a power of 2 (calculating the remainder via an additional multiplication |
1147 // (which gets simplified to an 'and') and subtraction) should be faster, and | 1147 // (which gets simplified to an 'and') and subtraction) should be faster, and |
1148 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to | 1148 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to |
1149 // indicate that positive dividends are heavily favored, so the branching | 1149 // indicate that positive dividends are heavily favored, so the branching |
1150 // version performs better. | 1150 // version performs better. |
1151 HMod* hmod = instr->hydrogen(); | 1151 HMod* hmod = instr->hydrogen(); |
1152 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1152 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
1153 Label dividend_is_not_negative, done; | 1153 Label dividend_is_not_negative, done; |
(...skipping 13 matching lines...) Expand all Loading... |
1167 __ bind(÷nd_is_not_negative); | 1167 __ bind(÷nd_is_not_negative); |
1168 __ and_(dividend, dividend, Operand(mask)); | 1168 __ and_(dividend, dividend, Operand(mask)); |
1169 __ bind(&done); | 1169 __ bind(&done); |
1170 } | 1170 } |
1171 | 1171 |
1172 | 1172 |
1173 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1173 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
1174 Register dividend = ToRegister(instr->dividend()); | 1174 Register dividend = ToRegister(instr->dividend()); |
1175 int32_t divisor = instr->divisor(); | 1175 int32_t divisor = instr->divisor(); |
1176 Register result = ToRegister(instr->result()); | 1176 Register result = ToRegister(instr->result()); |
1177 ASSERT(!dividend.is(result)); | 1177 DCHECK(!dividend.is(result)); |
1178 | 1178 |
1179 if (divisor == 0) { | 1179 if (divisor == 0) { |
1180 DeoptimizeIf(al, instr->environment()); | 1180 DeoptimizeIf(al, instr->environment()); |
1181 return; | 1181 return; |
1182 } | 1182 } |
1183 | 1183 |
1184 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1184 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1185 __ mov(ip, Operand(Abs(divisor))); | 1185 __ mov(ip, Operand(Abs(divisor))); |
1186 __ smull(result, ip, result, ip); | 1186 __ smull(result, ip, result, ip); |
1187 __ sub(result, dividend, result, SetCC); | 1187 __ sub(result, dividend, result, SetCC); |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1247 DeoptimizeIf(lt, instr->environment()); | 1247 DeoptimizeIf(lt, instr->environment()); |
1248 } | 1248 } |
1249 __ bind(&done); | 1249 __ bind(&done); |
1250 | 1250 |
1251 } else { | 1251 } else { |
1252 // General case, without any SDIV support. | 1252 // General case, without any SDIV support. |
1253 Register left_reg = ToRegister(instr->left()); | 1253 Register left_reg = ToRegister(instr->left()); |
1254 Register right_reg = ToRegister(instr->right()); | 1254 Register right_reg = ToRegister(instr->right()); |
1255 Register result_reg = ToRegister(instr->result()); | 1255 Register result_reg = ToRegister(instr->result()); |
1256 Register scratch = scratch0(); | 1256 Register scratch = scratch0(); |
1257 ASSERT(!scratch.is(left_reg)); | 1257 DCHECK(!scratch.is(left_reg)); |
1258 ASSERT(!scratch.is(right_reg)); | 1258 DCHECK(!scratch.is(right_reg)); |
1259 ASSERT(!scratch.is(result_reg)); | 1259 DCHECK(!scratch.is(result_reg)); |
1260 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); | 1260 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); |
1261 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); | 1261 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); |
1262 ASSERT(!divisor.is(dividend)); | 1262 DCHECK(!divisor.is(dividend)); |
1263 LowDwVfpRegister quotient = double_scratch0(); | 1263 LowDwVfpRegister quotient = double_scratch0(); |
1264 ASSERT(!quotient.is(dividend)); | 1264 DCHECK(!quotient.is(dividend)); |
1265 ASSERT(!quotient.is(divisor)); | 1265 DCHECK(!quotient.is(divisor)); |
1266 | 1266 |
1267 Label done; | 1267 Label done; |
1268 // Check for x % 0, we have to deopt in this case because we can't return a | 1268 // Check for x % 0, we have to deopt in this case because we can't return a |
1269 // NaN. | 1269 // NaN. |
1270 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1270 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
1271 __ cmp(right_reg, Operand::Zero()); | 1271 __ cmp(right_reg, Operand::Zero()); |
1272 DeoptimizeIf(eq, instr->environment()); | 1272 DeoptimizeIf(eq, instr->environment()); |
1273 } | 1273 } |
1274 | 1274 |
1275 __ Move(result_reg, left_reg); | 1275 __ Move(result_reg, left_reg); |
(...skipping 27 matching lines...) Expand all Loading... |
1303 } | 1303 } |
1304 __ bind(&done); | 1304 __ bind(&done); |
1305 } | 1305 } |
1306 } | 1306 } |
1307 | 1307 |
1308 | 1308 |
1309 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1309 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
1310 Register dividend = ToRegister(instr->dividend()); | 1310 Register dividend = ToRegister(instr->dividend()); |
1311 int32_t divisor = instr->divisor(); | 1311 int32_t divisor = instr->divisor(); |
1312 Register result = ToRegister(instr->result()); | 1312 Register result = ToRegister(instr->result()); |
1313 ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor))); | 1313 DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor))); |
1314 ASSERT(!result.is(dividend)); | 1314 DCHECK(!result.is(dividend)); |
1315 | 1315 |
1316 // Check for (0 / -x) that will produce negative zero. | 1316 // Check for (0 / -x) that will produce negative zero. |
1317 HDiv* hdiv = instr->hydrogen(); | 1317 HDiv* hdiv = instr->hydrogen(); |
1318 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1318 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1319 __ cmp(dividend, Operand::Zero()); | 1319 __ cmp(dividend, Operand::Zero()); |
1320 DeoptimizeIf(eq, instr->environment()); | 1320 DeoptimizeIf(eq, instr->environment()); |
1321 } | 1321 } |
1322 // Check for (kMinInt / -1). | 1322 // Check for (kMinInt / -1). |
1323 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1323 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
1324 __ cmp(dividend, Operand(kMinInt)); | 1324 __ cmp(dividend, Operand(kMinInt)); |
(...skipping 22 matching lines...) Expand all Loading... |
1347 } | 1347 } |
1348 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); | 1348 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); |
1349 if (divisor < 0) __ rsb(result, result, Operand(0)); | 1349 if (divisor < 0) __ rsb(result, result, Operand(0)); |
1350 } | 1350 } |
1351 | 1351 |
1352 | 1352 |
1353 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1353 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
1354 Register dividend = ToRegister(instr->dividend()); | 1354 Register dividend = ToRegister(instr->dividend()); |
1355 int32_t divisor = instr->divisor(); | 1355 int32_t divisor = instr->divisor(); |
1356 Register result = ToRegister(instr->result()); | 1356 Register result = ToRegister(instr->result()); |
1357 ASSERT(!dividend.is(result)); | 1357 DCHECK(!dividend.is(result)); |
1358 | 1358 |
1359 if (divisor == 0) { | 1359 if (divisor == 0) { |
1360 DeoptimizeIf(al, instr->environment()); | 1360 DeoptimizeIf(al, instr->environment()); |
1361 return; | 1361 return; |
1362 } | 1362 } |
1363 | 1363 |
1364 // Check for (0 / -x) that will produce negative zero. | 1364 // Check for (0 / -x) that will produce negative zero. |
1365 HDiv* hdiv = instr->hydrogen(); | 1365 HDiv* hdiv = instr->hydrogen(); |
1366 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1366 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1367 __ cmp(dividend, Operand::Zero()); | 1367 __ cmp(dividend, Operand::Zero()); |
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1441 } | 1441 } |
1442 } | 1442 } |
1443 | 1443 |
1444 | 1444 |
// Fused double multiply-add: emits a single VFP vmla, accumulating
// multiplier * multiplicand into the addend register.
void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DwVfpRegister addend = ToDoubleRegister(instr->addend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place: the register allocator must have placed the
  // result in the same register as the addend.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ vmla(addend, multiplier, multiplicand);
}
1455 | 1455 |
1456 | 1456 |
// Fused double multiply-subtract: emits a single VFP vmls, subtracting
// multiplier * multiplicand from the minuend register.
void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place: the register allocator must have placed the
  // result in the same register as the minuend.
  DCHECK(minuend.is(ToDoubleRegister(instr->result())));

  __ vmls(minuend, multiplier, multiplicand);
}
1467 | 1467 |
1468 | 1468 |
1469 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 1469 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
1470 Register dividend = ToRegister(instr->dividend()); | 1470 Register dividend = ToRegister(instr->dividend()); |
1471 Register result = ToRegister(instr->result()); | 1471 Register result = ToRegister(instr->result()); |
1472 int32_t divisor = instr->divisor(); | 1472 int32_t divisor = instr->divisor(); |
1473 | 1473 |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1507 | 1507 |
1508 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); | 1508 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); |
1509 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); | 1509 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); |
1510 } | 1510 } |
1511 | 1511 |
1512 | 1512 |
1513 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1513 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
1514 Register dividend = ToRegister(instr->dividend()); | 1514 Register dividend = ToRegister(instr->dividend()); |
1515 int32_t divisor = instr->divisor(); | 1515 int32_t divisor = instr->divisor(); |
1516 Register result = ToRegister(instr->result()); | 1516 Register result = ToRegister(instr->result()); |
1517 ASSERT(!dividend.is(result)); | 1517 DCHECK(!dividend.is(result)); |
1518 | 1518 |
1519 if (divisor == 0) { | 1519 if (divisor == 0) { |
1520 DeoptimizeIf(al, instr->environment()); | 1520 DeoptimizeIf(al, instr->environment()); |
1521 return; | 1521 return; |
1522 } | 1522 } |
1523 | 1523 |
1524 // Check for (0 / -x) that will produce negative zero. | 1524 // Check for (0 / -x) that will produce negative zero. |
1525 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1525 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
1526 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1526 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1527 __ cmp(dividend, Operand::Zero()); | 1527 __ cmp(dividend, Operand::Zero()); |
1528 DeoptimizeIf(eq, instr->environment()); | 1528 DeoptimizeIf(eq, instr->environment()); |
1529 } | 1529 } |
1530 | 1530 |
1531 // Easy case: We need no dynamic check for the dividend and the flooring | 1531 // Easy case: We need no dynamic check for the dividend and the flooring |
1532 // division is the same as the truncating division. | 1532 // division is the same as the truncating division. |
1533 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1533 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
1534 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1534 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
1535 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1535 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1536 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1536 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
1537 return; | 1537 return; |
1538 } | 1538 } |
1539 | 1539 |
1540 // In the general case we may need to adjust before and after the truncating | 1540 // In the general case we may need to adjust before and after the truncating |
1541 // division to get a flooring division. | 1541 // division to get a flooring division. |
1542 Register temp = ToRegister(instr->temp()); | 1542 Register temp = ToRegister(instr->temp()); |
1543 ASSERT(!temp.is(dividend) && !temp.is(result)); | 1543 DCHECK(!temp.is(dividend) && !temp.is(result)); |
1544 Label needs_adjustment, done; | 1544 Label needs_adjustment, done; |
1545 __ cmp(dividend, Operand::Zero()); | 1545 __ cmp(dividend, Operand::Zero()); |
1546 __ b(divisor > 0 ? lt : gt, &needs_adjustment); | 1546 __ b(divisor > 0 ? lt : gt, &needs_adjustment); |
1547 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1547 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1548 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1548 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
1549 __ jmp(&done); | 1549 __ jmp(&done); |
1550 __ bind(&needs_adjustment); | 1550 __ bind(&needs_adjustment); |
1551 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); | 1551 __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); |
1552 __ TruncatingDiv(result, temp, Abs(divisor)); | 1552 __ TruncatingDiv(result, temp, Abs(divisor)); |
1553 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1553 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
(...skipping 129 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1683 // Correct the sign of the result is the constant is negative. | 1683 // Correct the sign of the result is the constant is negative. |
1684 if (constant < 0) __ rsb(result, result, Operand::Zero()); | 1684 if (constant < 0) __ rsb(result, result, Operand::Zero()); |
1685 } else { | 1685 } else { |
1686 // Generate standard code. | 1686 // Generate standard code. |
1687 __ mov(ip, Operand(constant)); | 1687 __ mov(ip, Operand(constant)); |
1688 __ mul(result, left, ip); | 1688 __ mul(result, left, ip); |
1689 } | 1689 } |
1690 } | 1690 } |
1691 | 1691 |
1692 } else { | 1692 } else { |
1693 ASSERT(right_op->IsRegister()); | 1693 DCHECK(right_op->IsRegister()); |
1694 Register right = ToRegister(right_op); | 1694 Register right = ToRegister(right_op); |
1695 | 1695 |
1696 if (overflow) { | 1696 if (overflow) { |
1697 Register scratch = scratch0(); | 1697 Register scratch = scratch0(); |
1698 // scratch:result = left * right. | 1698 // scratch:result = left * right. |
1699 if (instr->hydrogen()->representation().IsSmi()) { | 1699 if (instr->hydrogen()->representation().IsSmi()) { |
1700 __ SmiUntag(result, left); | 1700 __ SmiUntag(result, left); |
1701 __ smull(result, scratch, result, right); | 1701 __ smull(result, scratch, result, right); |
1702 } else { | 1702 } else { |
1703 __ smull(result, scratch, left, right); | 1703 __ smull(result, scratch, left, right); |
(...skipping 18 matching lines...) Expand all Loading... |
1722 DeoptimizeIf(eq, instr->environment()); | 1722 DeoptimizeIf(eq, instr->environment()); |
1723 __ bind(&done); | 1723 __ bind(&done); |
1724 } | 1724 } |
1725 } | 1725 } |
1726 } | 1726 } |
1727 | 1727 |
1728 | 1728 |
1729 void LCodeGen::DoBitI(LBitI* instr) { | 1729 void LCodeGen::DoBitI(LBitI* instr) { |
1730 LOperand* left_op = instr->left(); | 1730 LOperand* left_op = instr->left(); |
1731 LOperand* right_op = instr->right(); | 1731 LOperand* right_op = instr->right(); |
1732 ASSERT(left_op->IsRegister()); | 1732 DCHECK(left_op->IsRegister()); |
1733 Register left = ToRegister(left_op); | 1733 Register left = ToRegister(left_op); |
1734 Register result = ToRegister(instr->result()); | 1734 Register result = ToRegister(instr->result()); |
1735 Operand right(no_reg); | 1735 Operand right(no_reg); |
1736 | 1736 |
1737 if (right_op->IsStackSlot()) { | 1737 if (right_op->IsStackSlot()) { |
1738 right = Operand(EmitLoadRegister(right_op, ip)); | 1738 right = Operand(EmitLoadRegister(right_op, ip)); |
1739 } else { | 1739 } else { |
1740 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand()); | 1740 DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); |
1741 right = ToOperand(right_op); | 1741 right = ToOperand(right_op); |
1742 } | 1742 } |
1743 | 1743 |
1744 switch (instr->op()) { | 1744 switch (instr->op()) { |
1745 case Token::BIT_AND: | 1745 case Token::BIT_AND: |
1746 __ and_(result, left, right); | 1746 __ and_(result, left, right); |
1747 break; | 1747 break; |
1748 case Token::BIT_OR: | 1748 case Token::BIT_OR: |
1749 __ orr(result, left, right); | 1749 __ orr(result, left, right); |
1750 break; | 1750 break; |
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1854 LOperand* left = instr->left(); | 1854 LOperand* left = instr->left(); |
1855 LOperand* right = instr->right(); | 1855 LOperand* right = instr->right(); |
1856 LOperand* result = instr->result(); | 1856 LOperand* result = instr->result(); |
1857 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1857 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1858 SBit set_cond = can_overflow ? SetCC : LeaveCC; | 1858 SBit set_cond = can_overflow ? SetCC : LeaveCC; |
1859 | 1859 |
1860 if (right->IsStackSlot()) { | 1860 if (right->IsStackSlot()) { |
1861 Register right_reg = EmitLoadRegister(right, ip); | 1861 Register right_reg = EmitLoadRegister(right, ip); |
1862 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1862 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
1863 } else { | 1863 } else { |
1864 ASSERT(right->IsRegister() || right->IsConstantOperand()); | 1864 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
1865 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1865 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
1866 } | 1866 } |
1867 | 1867 |
1868 if (can_overflow) { | 1868 if (can_overflow) { |
1869 DeoptimizeIf(vs, instr->environment()); | 1869 DeoptimizeIf(vs, instr->environment()); |
1870 } | 1870 } |
1871 } | 1871 } |
1872 | 1872 |
1873 | 1873 |
1874 void LCodeGen::DoRSubI(LRSubI* instr) { | 1874 void LCodeGen::DoRSubI(LRSubI* instr) { |
1875 LOperand* left = instr->left(); | 1875 LOperand* left = instr->left(); |
1876 LOperand* right = instr->right(); | 1876 LOperand* right = instr->right(); |
1877 LOperand* result = instr->result(); | 1877 LOperand* result = instr->result(); |
1878 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1878 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1879 SBit set_cond = can_overflow ? SetCC : LeaveCC; | 1879 SBit set_cond = can_overflow ? SetCC : LeaveCC; |
1880 | 1880 |
1881 if (right->IsStackSlot()) { | 1881 if (right->IsStackSlot()) { |
1882 Register right_reg = EmitLoadRegister(right, ip); | 1882 Register right_reg = EmitLoadRegister(right, ip); |
1883 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1883 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
1884 } else { | 1884 } else { |
1885 ASSERT(right->IsRegister() || right->IsConstantOperand()); | 1885 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
1886 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1886 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
1887 } | 1887 } |
1888 | 1888 |
1889 if (can_overflow) { | 1889 if (can_overflow) { |
1890 DeoptimizeIf(vs, instr->environment()); | 1890 DeoptimizeIf(vs, instr->environment()); |
1891 } | 1891 } |
1892 } | 1892 } |
1893 | 1893 |
1894 | 1894 |
1895 void LCodeGen::DoConstantI(LConstantI* instr) { | 1895 void LCodeGen::DoConstantI(LConstantI* instr) { |
1896 __ mov(ToRegister(instr->result()), Operand(instr->value())); | 1896 __ mov(ToRegister(instr->result()), Operand(instr->value())); |
1897 } | 1897 } |
1898 | 1898 |
1899 | 1899 |
1900 void LCodeGen::DoConstantS(LConstantS* instr) { | 1900 void LCodeGen::DoConstantS(LConstantS* instr) { |
1901 __ mov(ToRegister(instr->result()), Operand(instr->value())); | 1901 __ mov(ToRegister(instr->result()), Operand(instr->value())); |
1902 } | 1902 } |
1903 | 1903 |
1904 | 1904 |
1905 void LCodeGen::DoConstantD(LConstantD* instr) { | 1905 void LCodeGen::DoConstantD(LConstantD* instr) { |
1906 ASSERT(instr->result()->IsDoubleRegister()); | 1906 DCHECK(instr->result()->IsDoubleRegister()); |
1907 DwVfpRegister result = ToDoubleRegister(instr->result()); | 1907 DwVfpRegister result = ToDoubleRegister(instr->result()); |
1908 double v = instr->value(); | 1908 double v = instr->value(); |
1909 __ Vmov(result, v, scratch0()); | 1909 __ Vmov(result, v, scratch0()); |
1910 } | 1910 } |
1911 | 1911 |
1912 | 1912 |
1913 void LCodeGen::DoConstantE(LConstantE* instr) { | 1913 void LCodeGen::DoConstantE(LConstantE* instr) { |
1914 __ mov(ToRegister(instr->result()), Operand(instr->value())); | 1914 __ mov(ToRegister(instr->result()), Operand(instr->value())); |
1915 } | 1915 } |
1916 | 1916 |
(...skipping 11 matching lines...) Expand all Loading... |
1928 __ EnumLength(result, map); | 1928 __ EnumLength(result, map); |
1929 } | 1929 } |
1930 | 1930 |
1931 | 1931 |
1932 void LCodeGen::DoDateField(LDateField* instr) { | 1932 void LCodeGen::DoDateField(LDateField* instr) { |
1933 Register object = ToRegister(instr->date()); | 1933 Register object = ToRegister(instr->date()); |
1934 Register result = ToRegister(instr->result()); | 1934 Register result = ToRegister(instr->result()); |
1935 Register scratch = ToRegister(instr->temp()); | 1935 Register scratch = ToRegister(instr->temp()); |
1936 Smi* index = instr->index(); | 1936 Smi* index = instr->index(); |
1937 Label runtime, done; | 1937 Label runtime, done; |
1938 ASSERT(object.is(result)); | 1938 DCHECK(object.is(result)); |
1939 ASSERT(object.is(r0)); | 1939 DCHECK(object.is(r0)); |
1940 ASSERT(!scratch.is(scratch0())); | 1940 DCHECK(!scratch.is(scratch0())); |
1941 ASSERT(!scratch.is(object)); | 1941 DCHECK(!scratch.is(object)); |
1942 | 1942 |
1943 __ SmiTst(object); | 1943 __ SmiTst(object); |
1944 DeoptimizeIf(eq, instr->environment()); | 1944 DeoptimizeIf(eq, instr->environment()); |
1945 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); | 1945 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); |
1946 DeoptimizeIf(ne, instr->environment()); | 1946 DeoptimizeIf(ne, instr->environment()); |
1947 | 1947 |
1948 if (index->value() == 0) { | 1948 if (index->value() == 0) { |
1949 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1949 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
1950 } else { | 1950 } else { |
1951 if (index->value() < JSDate::kFirstUncachedField) { | 1951 if (index->value() < JSDate::kFirstUncachedField) { |
(...skipping 21 matching lines...) Expand all Loading... |
1973 String::Encoding encoding) { | 1973 String::Encoding encoding) { |
1974 if (index->IsConstantOperand()) { | 1974 if (index->IsConstantOperand()) { |
1975 int offset = ToInteger32(LConstantOperand::cast(index)); | 1975 int offset = ToInteger32(LConstantOperand::cast(index)); |
1976 if (encoding == String::TWO_BYTE_ENCODING) { | 1976 if (encoding == String::TWO_BYTE_ENCODING) { |
1977 offset *= kUC16Size; | 1977 offset *= kUC16Size; |
1978 } | 1978 } |
1979 STATIC_ASSERT(kCharSize == 1); | 1979 STATIC_ASSERT(kCharSize == 1); |
1980 return FieldMemOperand(string, SeqString::kHeaderSize + offset); | 1980 return FieldMemOperand(string, SeqString::kHeaderSize + offset); |
1981 } | 1981 } |
1982 Register scratch = scratch0(); | 1982 Register scratch = scratch0(); |
1983 ASSERT(!scratch.is(string)); | 1983 DCHECK(!scratch.is(string)); |
1984 ASSERT(!scratch.is(ToRegister(index))); | 1984 DCHECK(!scratch.is(ToRegister(index))); |
1985 if (encoding == String::ONE_BYTE_ENCODING) { | 1985 if (encoding == String::ONE_BYTE_ENCODING) { |
1986 __ add(scratch, string, Operand(ToRegister(index))); | 1986 __ add(scratch, string, Operand(ToRegister(index))); |
1987 } else { | 1987 } else { |
1988 STATIC_ASSERT(kUC16Size == 2); | 1988 STATIC_ASSERT(kUC16Size == 2); |
1989 __ add(scratch, string, Operand(ToRegister(index), LSL, 1)); | 1989 __ add(scratch, string, Operand(ToRegister(index), LSL, 1)); |
1990 } | 1990 } |
1991 return FieldMemOperand(scratch, SeqString::kHeaderSize); | 1991 return FieldMemOperand(scratch, SeqString::kHeaderSize); |
1992 } | 1992 } |
1993 | 1993 |
1994 | 1994 |
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2048 LOperand* left = instr->left(); | 2048 LOperand* left = instr->left(); |
2049 LOperand* right = instr->right(); | 2049 LOperand* right = instr->right(); |
2050 LOperand* result = instr->result(); | 2050 LOperand* result = instr->result(); |
2051 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 2051 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
2052 SBit set_cond = can_overflow ? SetCC : LeaveCC; | 2052 SBit set_cond = can_overflow ? SetCC : LeaveCC; |
2053 | 2053 |
2054 if (right->IsStackSlot()) { | 2054 if (right->IsStackSlot()) { |
2055 Register right_reg = EmitLoadRegister(right, ip); | 2055 Register right_reg = EmitLoadRegister(right, ip); |
2056 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 2056 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
2057 } else { | 2057 } else { |
2058 ASSERT(right->IsRegister() || right->IsConstantOperand()); | 2058 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
2059 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 2059 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
2060 } | 2060 } |
2061 | 2061 |
2062 if (can_overflow) { | 2062 if (can_overflow) { |
2063 DeoptimizeIf(vs, instr->environment()); | 2063 DeoptimizeIf(vs, instr->environment()); |
2064 } | 2064 } |
2065 } | 2065 } |
2066 | 2066 |
2067 | 2067 |
2068 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 2068 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
2069 LOperand* left = instr->left(); | 2069 LOperand* left = instr->left(); |
2070 LOperand* right = instr->right(); | 2070 LOperand* right = instr->right(); |
2071 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 2071 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
2072 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 2072 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
2073 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 2073 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; |
2074 Register left_reg = ToRegister(left); | 2074 Register left_reg = ToRegister(left); |
2075 Operand right_op = (right->IsRegister() || right->IsConstantOperand()) | 2075 Operand right_op = (right->IsRegister() || right->IsConstantOperand()) |
2076 ? ToOperand(right) | 2076 ? ToOperand(right) |
2077 : Operand(EmitLoadRegister(right, ip)); | 2077 : Operand(EmitLoadRegister(right, ip)); |
2078 Register result_reg = ToRegister(instr->result()); | 2078 Register result_reg = ToRegister(instr->result()); |
2079 __ cmp(left_reg, right_op); | 2079 __ cmp(left_reg, right_op); |
2080 __ Move(result_reg, left_reg, condition); | 2080 __ Move(result_reg, left_reg, condition); |
2081 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); | 2081 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); |
2082 } else { | 2082 } else { |
2083 ASSERT(instr->hydrogen()->representation().IsDouble()); | 2083 DCHECK(instr->hydrogen()->representation().IsDouble()); |
2084 DwVfpRegister left_reg = ToDoubleRegister(left); | 2084 DwVfpRegister left_reg = ToDoubleRegister(left); |
2085 DwVfpRegister right_reg = ToDoubleRegister(right); | 2085 DwVfpRegister right_reg = ToDoubleRegister(right); |
2086 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); | 2086 DwVfpRegister result_reg = ToDoubleRegister(instr->result()); |
2087 Label result_is_nan, return_left, return_right, check_zero, done; | 2087 Label result_is_nan, return_left, return_right, check_zero, done; |
2088 __ VFPCompareAndSetFlags(left_reg, right_reg); | 2088 __ VFPCompareAndSetFlags(left_reg, right_reg); |
2089 if (operation == HMathMinMax::kMathMin) { | 2089 if (operation == HMathMinMax::kMathMin) { |
2090 __ b(mi, &return_left); | 2090 __ b(mi, &return_left); |
2091 __ b(gt, &return_right); | 2091 __ b(gt, &return_right); |
2092 } else { | 2092 } else { |
2093 __ b(mi, &return_right); | 2093 __ b(mi, &return_right); |
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2160 break; | 2160 break; |
2161 } | 2161 } |
2162 default: | 2162 default: |
2163 UNREACHABLE(); | 2163 UNREACHABLE(); |
2164 break; | 2164 break; |
2165 } | 2165 } |
2166 } | 2166 } |
2167 | 2167 |
2168 | 2168 |
2169 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 2169 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
2170 ASSERT(ToRegister(instr->context()).is(cp)); | 2170 DCHECK(ToRegister(instr->context()).is(cp)); |
2171 ASSERT(ToRegister(instr->left()).is(r1)); | 2171 DCHECK(ToRegister(instr->left()).is(r1)); |
2172 ASSERT(ToRegister(instr->right()).is(r0)); | 2172 DCHECK(ToRegister(instr->right()).is(r0)); |
2173 ASSERT(ToRegister(instr->result()).is(r0)); | 2173 DCHECK(ToRegister(instr->result()).is(r0)); |
2174 | 2174 |
2175 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); | 2175 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); |
2176 // Block literal pool emission to ensure nop indicating no inlined smi code | 2176 // Block literal pool emission to ensure nop indicating no inlined smi code |
2177 // is in the correct position. | 2177 // is in the correct position. |
2178 Assembler::BlockConstPoolScope block_const_pool(masm()); | 2178 Assembler::BlockConstPoolScope block_const_pool(masm()); |
2179 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 2179 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
2180 } | 2180 } |
2181 | 2181 |
2182 | 2182 |
2183 template<class InstrType> | 2183 template<class InstrType> |
(...skipping 24 matching lines...) Expand all Loading... |
2208 | 2208 |
2209 | 2209 |
2210 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { | 2210 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { |
2211 __ stop("LBreak"); | 2211 __ stop("LBreak"); |
2212 } | 2212 } |
2213 | 2213 |
2214 | 2214 |
2215 void LCodeGen::DoBranch(LBranch* instr) { | 2215 void LCodeGen::DoBranch(LBranch* instr) { |
2216 Representation r = instr->hydrogen()->value()->representation(); | 2216 Representation r = instr->hydrogen()->value()->representation(); |
2217 if (r.IsInteger32() || r.IsSmi()) { | 2217 if (r.IsInteger32() || r.IsSmi()) { |
2218 ASSERT(!info()->IsStub()); | 2218 DCHECK(!info()->IsStub()); |
2219 Register reg = ToRegister(instr->value()); | 2219 Register reg = ToRegister(instr->value()); |
2220 __ cmp(reg, Operand::Zero()); | 2220 __ cmp(reg, Operand::Zero()); |
2221 EmitBranch(instr, ne); | 2221 EmitBranch(instr, ne); |
2222 } else if (r.IsDouble()) { | 2222 } else if (r.IsDouble()) { |
2223 ASSERT(!info()->IsStub()); | 2223 DCHECK(!info()->IsStub()); |
2224 DwVfpRegister reg = ToDoubleRegister(instr->value()); | 2224 DwVfpRegister reg = ToDoubleRegister(instr->value()); |
2225 // Test the double value. Zero and NaN are false. | 2225 // Test the double value. Zero and NaN are false. |
2226 __ VFPCompareAndSetFlags(reg, 0.0); | 2226 __ VFPCompareAndSetFlags(reg, 0.0); |
2227 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false) | 2227 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false) |
2228 EmitBranch(instr, ne); | 2228 EmitBranch(instr, ne); |
2229 } else { | 2229 } else { |
2230 ASSERT(r.IsTagged()); | 2230 DCHECK(r.IsTagged()); |
2231 Register reg = ToRegister(instr->value()); | 2231 Register reg = ToRegister(instr->value()); |
2232 HType type = instr->hydrogen()->value()->type(); | 2232 HType type = instr->hydrogen()->value()->type(); |
2233 if (type.IsBoolean()) { | 2233 if (type.IsBoolean()) { |
2234 ASSERT(!info()->IsStub()); | 2234 DCHECK(!info()->IsStub()); |
2235 __ CompareRoot(reg, Heap::kTrueValueRootIndex); | 2235 __ CompareRoot(reg, Heap::kTrueValueRootIndex); |
2236 EmitBranch(instr, eq); | 2236 EmitBranch(instr, eq); |
2237 } else if (type.IsSmi()) { | 2237 } else if (type.IsSmi()) { |
2238 ASSERT(!info()->IsStub()); | 2238 DCHECK(!info()->IsStub()); |
2239 __ cmp(reg, Operand::Zero()); | 2239 __ cmp(reg, Operand::Zero()); |
2240 EmitBranch(instr, ne); | 2240 EmitBranch(instr, ne); |
2241 } else if (type.IsJSArray()) { | 2241 } else if (type.IsJSArray()) { |
2242 ASSERT(!info()->IsStub()); | 2242 DCHECK(!info()->IsStub()); |
2243 EmitBranch(instr, al); | 2243 EmitBranch(instr, al); |
2244 } else if (type.IsHeapNumber()) { | 2244 } else if (type.IsHeapNumber()) { |
2245 ASSERT(!info()->IsStub()); | 2245 DCHECK(!info()->IsStub()); |
2246 DwVfpRegister dbl_scratch = double_scratch0(); | 2246 DwVfpRegister dbl_scratch = double_scratch0(); |
2247 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | 2247 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
2248 // Test the double value. Zero and NaN are false. | 2248 // Test the double value. Zero and NaN are false. |
2249 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 2249 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); |
2250 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN) | 2250 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN) |
2251 EmitBranch(instr, ne); | 2251 EmitBranch(instr, ne); |
2252 } else if (type.IsString()) { | 2252 } else if (type.IsString()) { |
2253 ASSERT(!info()->IsStub()); | 2253 DCHECK(!info()->IsStub()); |
2254 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); | 2254 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); |
2255 __ cmp(ip, Operand::Zero()); | 2255 __ cmp(ip, Operand::Zero()); |
2256 EmitBranch(instr, ne); | 2256 EmitBranch(instr, ne); |
2257 } else { | 2257 } else { |
2258 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | 2258 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
2259 // Avoid deopts in the case where we've never executed this path before. | 2259 // Avoid deopts in the case where we've never executed this path before. |
2260 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); | 2260 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); |
2261 | 2261 |
2262 if (expected.Contains(ToBooleanStub::UNDEFINED)) { | 2262 if (expected.Contains(ToBooleanStub::UNDEFINED)) { |
2263 // undefined -> false. | 2263 // undefined -> false. |
(...skipping 201 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2465 | 2465 |
2466 Register scratch = scratch0(); | 2466 Register scratch = scratch0(); |
2467 __ VmovHigh(scratch, input_reg); | 2467 __ VmovHigh(scratch, input_reg); |
2468 __ cmp(scratch, Operand(kHoleNanUpper32)); | 2468 __ cmp(scratch, Operand(kHoleNanUpper32)); |
2469 EmitBranch(instr, eq); | 2469 EmitBranch(instr, eq); |
2470 } | 2470 } |
2471 | 2471 |
2472 | 2472 |
2473 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { | 2473 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { |
2474 Representation rep = instr->hydrogen()->value()->representation(); | 2474 Representation rep = instr->hydrogen()->value()->representation(); |
2475 ASSERT(!rep.IsInteger32()); | 2475 DCHECK(!rep.IsInteger32()); |
2476 Register scratch = ToRegister(instr->temp()); | 2476 Register scratch = ToRegister(instr->temp()); |
2477 | 2477 |
2478 if (rep.IsDouble()) { | 2478 if (rep.IsDouble()) { |
2479 DwVfpRegister value = ToDoubleRegister(instr->value()); | 2479 DwVfpRegister value = ToDoubleRegister(instr->value()); |
2480 __ VFPCompareAndSetFlags(value, 0.0); | 2480 __ VFPCompareAndSetFlags(value, 0.0); |
2481 EmitFalseBranch(instr, ne); | 2481 EmitFalseBranch(instr, ne); |
2482 __ VmovHigh(scratch, value); | 2482 __ VmovHigh(scratch, value); |
2483 __ cmp(scratch, Operand(0x80000000)); | 2483 __ cmp(scratch, Operand(0x80000000)); |
2484 } else { | 2484 } else { |
2485 Register value = ToRegister(instr->value()); | 2485 Register value = ToRegister(instr->value()); |
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2598 case Token::GTE: | 2598 case Token::GTE: |
2599 return ge; | 2599 return ge; |
2600 default: | 2600 default: |
2601 UNREACHABLE(); | 2601 UNREACHABLE(); |
2602 return kNoCondition; | 2602 return kNoCondition; |
2603 } | 2603 } |
2604 } | 2604 } |
2605 | 2605 |
2606 | 2606 |
2607 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { | 2607 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { |
2608 ASSERT(ToRegister(instr->context()).is(cp)); | 2608 DCHECK(ToRegister(instr->context()).is(cp)); |
2609 Token::Value op = instr->op(); | 2609 Token::Value op = instr->op(); |
2610 | 2610 |
2611 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); | 2611 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
2612 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 2612 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
2613 // This instruction also signals no smi code inlined. | 2613 // This instruction also signals no smi code inlined. |
2614 __ cmp(r0, Operand::Zero()); | 2614 __ cmp(r0, Operand::Zero()); |
2615 | 2615 |
2616 Condition condition = ComputeCompareCondition(op); | 2616 Condition condition = ComputeCompareCondition(op); |
2617 | 2617 |
2618 EmitBranch(instr, condition); | 2618 EmitBranch(instr, condition); |
2619 } | 2619 } |
2620 | 2620 |
2621 | 2621 |
2622 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { | 2622 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { |
2623 InstanceType from = instr->from(); | 2623 InstanceType from = instr->from(); |
2624 InstanceType to = instr->to(); | 2624 InstanceType to = instr->to(); |
2625 if (from == FIRST_TYPE) return to; | 2625 if (from == FIRST_TYPE) return to; |
2626 ASSERT(from == to || to == LAST_TYPE); | 2626 DCHECK(from == to || to == LAST_TYPE); |
2627 return from; | 2627 return from; |
2628 } | 2628 } |
2629 | 2629 |
2630 | 2630 |
2631 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { | 2631 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { |
2632 InstanceType from = instr->from(); | 2632 InstanceType from = instr->from(); |
2633 InstanceType to = instr->to(); | 2633 InstanceType to = instr->to(); |
2634 if (from == to) return eq; | 2634 if (from == to) return eq; |
2635 if (to == LAST_TYPE) return hs; | 2635 if (to == LAST_TYPE) return hs; |
2636 if (from == FIRST_TYPE) return ls; | 2636 if (from == FIRST_TYPE) return ls; |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2676 | 2676 |
2677 | 2677 |
2678 // Branches to a label or falls through with the answer in flags. Trashes | 2678 // Branches to a label or falls through with the answer in flags. Trashes |
2679 // the temp registers, but not the input. | 2679 // the temp registers, but not the input. |
2680 void LCodeGen::EmitClassOfTest(Label* is_true, | 2680 void LCodeGen::EmitClassOfTest(Label* is_true, |
2681 Label* is_false, | 2681 Label* is_false, |
2682 Handle<String>class_name, | 2682 Handle<String>class_name, |
2683 Register input, | 2683 Register input, |
2684 Register temp, | 2684 Register temp, |
2685 Register temp2) { | 2685 Register temp2) { |
2686 ASSERT(!input.is(temp)); | 2686 DCHECK(!input.is(temp)); |
2687 ASSERT(!input.is(temp2)); | 2687 DCHECK(!input.is(temp2)); |
2688 ASSERT(!temp.is(temp2)); | 2688 DCHECK(!temp.is(temp2)); |
2689 | 2689 |
2690 __ JumpIfSmi(input, is_false); | 2690 __ JumpIfSmi(input, is_false); |
2691 | 2691 |
2692 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { | 2692 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { |
2693 // Assuming the following assertions, we can use the same compares to test | 2693 // Assuming the following assertions, we can use the same compares to test |
2694 // for both being a function type and being in the object type range. | 2694 // for both being a function type and being in the object type range. |
2695 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); | 2695 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
2696 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == | 2696 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == |
2697 FIRST_SPEC_OBJECT_TYPE + 1); | 2697 FIRST_SPEC_OBJECT_TYPE + 1); |
2698 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == | 2698 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2759 Register reg = ToRegister(instr->value()); | 2759 Register reg = ToRegister(instr->value()); |
2760 Register temp = ToRegister(instr->temp()); | 2760 Register temp = ToRegister(instr->temp()); |
2761 | 2761 |
2762 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2762 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); |
2763 __ cmp(temp, Operand(instr->map())); | 2763 __ cmp(temp, Operand(instr->map())); |
2764 EmitBranch(instr, eq); | 2764 EmitBranch(instr, eq); |
2765 } | 2765 } |
2766 | 2766 |
2767 | 2767 |
2768 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { | 2768 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { |
2769 ASSERT(ToRegister(instr->context()).is(cp)); | 2769 DCHECK(ToRegister(instr->context()).is(cp)); |
2770 ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0. | 2770 DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0. |
2771 ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. | 2771 DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1. |
2772 | 2772 |
2773 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); | 2773 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); |
2774 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 2774 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
2775 | 2775 |
2776 __ cmp(r0, Operand::Zero()); | 2776 __ cmp(r0, Operand::Zero()); |
2777 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); | 2777 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); |
2778 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); | 2778 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); |
2779 } | 2779 } |
2780 | 2780 |
2781 | 2781 |
(...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2914 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 2914 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
2915 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); | 2915 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); |
2916 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 2916 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
2917 // Put the result value (r0) into the result register slot and | 2917 // Put the result value (r0) into the result register slot and |
2918 // restore all registers. | 2918 // restore all registers. |
2919 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result())); | 2919 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result())); |
2920 } | 2920 } |
2921 | 2921 |
2922 | 2922 |
2923 void LCodeGen::DoCmpT(LCmpT* instr) { | 2923 void LCodeGen::DoCmpT(LCmpT* instr) { |
2924 ASSERT(ToRegister(instr->context()).is(cp)); | 2924 DCHECK(ToRegister(instr->context()).is(cp)); |
2925 Token::Value op = instr->op(); | 2925 Token::Value op = instr->op(); |
2926 | 2926 |
2927 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); | 2927 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
2928 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 2928 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
2929 // This instruction also signals no smi code inlined. | 2929 // This instruction also signals no smi code inlined. |
2930 __ cmp(r0, Operand::Zero()); | 2930 __ cmp(r0, Operand::Zero()); |
2931 | 2931 |
2932 Condition condition = ComputeCompareCondition(op); | 2932 Condition condition = ComputeCompareCondition(op); |
2933 __ LoadRoot(ToRegister(instr->result()), | 2933 __ LoadRoot(ToRegister(instr->result()), |
2934 Heap::kTrueValueRootIndex, | 2934 Heap::kTrueValueRootIndex, |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2983 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); | 2983 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); |
2984 if (instr->hydrogen()->RequiresHoleCheck()) { | 2984 if (instr->hydrogen()->RequiresHoleCheck()) { |
2985 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2985 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
2986 __ cmp(result, ip); | 2986 __ cmp(result, ip); |
2987 DeoptimizeIf(eq, instr->environment()); | 2987 DeoptimizeIf(eq, instr->environment()); |
2988 } | 2988 } |
2989 } | 2989 } |
2990 | 2990 |
2991 | 2991 |
2992 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { | 2992 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { |
2993 ASSERT(ToRegister(instr->context()).is(cp)); | 2993 DCHECK(ToRegister(instr->context()).is(cp)); |
2994 ASSERT(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); | 2994 DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); |
2995 ASSERT(ToRegister(instr->result()).is(r0)); | 2995 DCHECK(ToRegister(instr->result()).is(r0)); |
2996 | 2996 |
2997 __ mov(LoadIC::NameRegister(), Operand(instr->name())); | 2997 __ mov(LoadIC::NameRegister(), Operand(instr->name())); |
2998 if (FLAG_vector_ics) { | 2998 if (FLAG_vector_ics) { |
2999 Register vector = ToRegister(instr->temp_vector()); | 2999 Register vector = ToRegister(instr->temp_vector()); |
3000 ASSERT(vector.is(LoadIC::VectorRegister())); | 3000 DCHECK(vector.is(LoadIC::VectorRegister())); |
3001 __ Move(vector, instr->hydrogen()->feedback_vector()); | 3001 __ Move(vector, instr->hydrogen()->feedback_vector()); |
3002 // No need to allocate this register. | 3002 // No need to allocate this register. |
3003 ASSERT(LoadIC::SlotRegister().is(r0)); | 3003 DCHECK(LoadIC::SlotRegister().is(r0)); |
3004 __ mov(LoadIC::SlotRegister(), | 3004 __ mov(LoadIC::SlotRegister(), |
3005 Operand(Smi::FromInt(instr->hydrogen()->slot()))); | 3005 Operand(Smi::FromInt(instr->hydrogen()->slot()))); |
3006 } | 3006 } |
3007 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; | 3007 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; |
3008 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); | 3008 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); |
3009 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 3009 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
3010 } | 3010 } |
3011 | 3011 |
3012 | 3012 |
3013 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { | 3013 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { |
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3111 if (!access.IsInobject()) { | 3111 if (!access.IsInobject()) { |
3112 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 3112 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
3113 object = result; | 3113 object = result; |
3114 } | 3114 } |
3115 MemOperand operand = FieldMemOperand(object, offset); | 3115 MemOperand operand = FieldMemOperand(object, offset); |
3116 __ Load(result, operand, access.representation()); | 3116 __ Load(result, operand, access.representation()); |
3117 } | 3117 } |
3118 | 3118 |
3119 | 3119 |
3120 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { | 3120 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { |
3121 ASSERT(ToRegister(instr->context()).is(cp)); | 3121 DCHECK(ToRegister(instr->context()).is(cp)); |
3122 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); | 3122 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); |
3123 ASSERT(ToRegister(instr->result()).is(r0)); | 3123 DCHECK(ToRegister(instr->result()).is(r0)); |
3124 | 3124 |
3125 // Name is always in r2. | 3125 // Name is always in r2. |
3126 __ mov(LoadIC::NameRegister(), Operand(instr->name())); | 3126 __ mov(LoadIC::NameRegister(), Operand(instr->name())); |
3127 if (FLAG_vector_ics) { | 3127 if (FLAG_vector_ics) { |
3128 Register vector = ToRegister(instr->temp_vector()); | 3128 Register vector = ToRegister(instr->temp_vector()); |
3129 ASSERT(vector.is(LoadIC::VectorRegister())); | 3129 DCHECK(vector.is(LoadIC::VectorRegister())); |
3130 __ Move(vector, instr->hydrogen()->feedback_vector()); | 3130 __ Move(vector, instr->hydrogen()->feedback_vector()); |
3131 // No need to allocate this register. | 3131 // No need to allocate this register. |
3132 ASSERT(LoadIC::SlotRegister().is(r0)); | 3132 DCHECK(LoadIC::SlotRegister().is(r0)); |
3133 __ mov(LoadIC::SlotRegister(), | 3133 __ mov(LoadIC::SlotRegister(), |
3134 Operand(Smi::FromInt(instr->hydrogen()->slot()))); | 3134 Operand(Smi::FromInt(instr->hydrogen()->slot()))); |
3135 } | 3135 } |
3136 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); | 3136 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); |
3137 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); | 3137 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); |
3138 } | 3138 } |
3139 | 3139 |
3140 | 3140 |
3141 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { | 3141 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
3142 Register scratch = scratch0(); | 3142 Register scratch = scratch0(); |
(...skipping 249 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3392 int shift_size, | 3392 int shift_size, |
3393 int base_offset) { | 3393 int base_offset) { |
3394 if (key_is_constant) { | 3394 if (key_is_constant) { |
3395 return MemOperand(base, (constant_key << element_size) + base_offset); | 3395 return MemOperand(base, (constant_key << element_size) + base_offset); |
3396 } | 3396 } |
3397 | 3397 |
3398 if (base_offset == 0) { | 3398 if (base_offset == 0) { |
3399 if (shift_size >= 0) { | 3399 if (shift_size >= 0) { |
3400 return MemOperand(base, key, LSL, shift_size); | 3400 return MemOperand(base, key, LSL, shift_size); |
3401 } else { | 3401 } else { |
3402 ASSERT_EQ(-1, shift_size); | 3402 DCHECK_EQ(-1, shift_size); |
3403 return MemOperand(base, key, LSR, 1); | 3403 return MemOperand(base, key, LSR, 1); |
3404 } | 3404 } |
3405 } | 3405 } |
3406 | 3406 |
3407 if (shift_size >= 0) { | 3407 if (shift_size >= 0) { |
3408 __ add(scratch0(), base, Operand(key, LSL, shift_size)); | 3408 __ add(scratch0(), base, Operand(key, LSL, shift_size)); |
3409 return MemOperand(scratch0(), base_offset); | 3409 return MemOperand(scratch0(), base_offset); |
3410 } else { | 3410 } else { |
3411 ASSERT_EQ(-1, shift_size); | 3411 DCHECK_EQ(-1, shift_size); |
3412 __ add(scratch0(), base, Operand(key, ASR, 1)); | 3412 __ add(scratch0(), base, Operand(key, ASR, 1)); |
3413 return MemOperand(scratch0(), base_offset); | 3413 return MemOperand(scratch0(), base_offset); |
3414 } | 3414 } |
3415 } | 3415 } |
3416 | 3416 |
3417 | 3417 |
// Emits a generic keyed load (obj[key]) via the KeyedLoadIC stub.
// Inputs are pinned by the lithium register allocator to the IC's
// calling convention: context in cp, receiver and key in the IC's
// dedicated registers; the result is produced in r0 by the stub.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));

  if (FLAG_vector_ics) {
    // With vector ICs the feedback vector and slot index are passed in
    // registers as well, so the IC can record type feedback.
    Register vector = ToRegister(instr->temp_vector());
    DCHECK(vector.is(LoadIC::VectorRegister()));
    __ Move(vector, instr->hydrogen()->feedback_vector());
    // No need to allocate this register.
    DCHECK(LoadIC::SlotRegister().is(r0));
    __ mov(LoadIC::SlotRegister(),
           Operand(Smi::FromInt(instr->hydrogen()->slot())));
  }

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
3436 | 3436 |
3437 | 3437 |
3438 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { | 3438 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { |
(...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3537 } | 3537 } |
3538 } | 3538 } |
3539 | 3539 |
3540 | 3540 |
// Implements Function.prototype.apply with an arguments object: copies the
// arguments from the (possibly adaptor) frame onto the stack and invokes
// the target function.  Deoptimizes if there are too many arguments.
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DCHECK(receiver.is(r0));  // Used for parameter count.
  DCHECK(function.is(r1));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(r0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  // Bail out to the full runtime path (via deopt) rather than pushing an
  // unbounded number of arguments onto the stack.
  __ cmp(length, Operand(kArgumentsLimit));
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ mov(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ add(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.  Arguments are pushed from index `length` down to 1, i.e. in
  // reverse, which yields the expected stack layout for the callee.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ cmp(length, Operand::Zero());
  __ b(eq, &invoke);
  __ bind(&loop);
  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
  __ push(scratch);
  __ sub(length, length, Operand(1), SetCC);
  __ b(ne, &loop);

  __ bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is r0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
3586 | 3586 |
3587 | 3587 |
(...skipping 19 matching lines...) Expand all Loading... |
3607 } | 3607 } |
3608 | 3608 |
3609 | 3609 |
3610 void LCodeGen::DoContext(LContext* instr) { | 3610 void LCodeGen::DoContext(LContext* instr) { |
3611 // If there is a non-return use, the context must be moved to a register. | 3611 // If there is a non-return use, the context must be moved to a register. |
3612 Register result = ToRegister(instr->result()); | 3612 Register result = ToRegister(instr->result()); |
3613 if (info()->IsOptimizing()) { | 3613 if (info()->IsOptimizing()) { |
3614 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 3614 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
3615 } else { | 3615 } else { |
3616 // If there is no frame, the context must be in cp. | 3616 // If there is no frame, the context must be in cp. |
3617 ASSERT(result.is(cp)); | 3617 DCHECK(result.is(cp)); |
3618 } | 3618 } |
3619 } | 3619 } |
3620 | 3620 |
3621 | 3621 |
// Declares global variables/functions by calling Runtime::kDeclareGlobals.
// Pushes the three runtime arguments in order: context, the declaration
// pairs fixed array, and the declaration flags as a smi.
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  __ push(cp);  // The context is the first argument.
  __ Move(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ push(scratch0());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
3631 | 3631 |
3632 | 3632 |
3633 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, | 3633 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3665 } else { | 3665 } else { |
3666 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | 3666 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
3667 ParameterCount count(arity); | 3667 ParameterCount count(arity); |
3668 ParameterCount expected(formal_parameter_count); | 3668 ParameterCount expected(formal_parameter_count); |
3669 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); | 3669 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); |
3670 } | 3670 } |
3671 } | 3671 } |
3672 | 3672 |
3673 | 3673 |
3674 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | 3674 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
3675 ASSERT(instr->context() != NULL); | 3675 DCHECK(instr->context() != NULL); |
3676 ASSERT(ToRegister(instr->context()).is(cp)); | 3676 DCHECK(ToRegister(instr->context()).is(cp)); |
3677 Register input = ToRegister(instr->value()); | 3677 Register input = ToRegister(instr->value()); |
3678 Register result = ToRegister(instr->result()); | 3678 Register result = ToRegister(instr->result()); |
3679 Register scratch = scratch0(); | 3679 Register scratch = scratch0(); |
3680 | 3680 |
3681 // Deoptimize if not a heap number. | 3681 // Deoptimize if not a heap number. |
3682 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3682 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
3683 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3683 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
3684 __ cmp(scratch, Operand(ip)); | 3684 __ cmp(scratch, Operand(ip)); |
3685 DeoptimizeIf(ne, instr->environment()); | 3685 DeoptimizeIf(ne, instr->environment()); |
3686 | 3686 |
(...skipping 195 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3882 __ vadd(result, input, kDoubleRegZero); | 3882 __ vadd(result, input, kDoubleRegZero); |
3883 __ vsqrt(result, result); | 3883 __ vsqrt(result, result); |
3884 __ bind(&done); | 3884 __ bind(&done); |
3885 } | 3885 } |
3886 | 3886 |
3887 | 3887 |
// Implements Math.pow by dispatching to the MathPowStub variant that
// matches the exponent's representation (smi, tagged, int32 or double).
// Marked as a call, so registers are free; inputs/outputs are pinned:
// base in d0, exponent in d1 (double) or r2 (tagged/int), result in d2.
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(r2));
  DCHECK(ToDoubleRegister(instr->left()).is(d0));
  DCHECK(ToDoubleRegister(instr->result()).is(d2));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    // A tagged exponent must be a smi or a heap number; deoptimize on
    // anything else before entering the stub.
    Label no_deopt;
    __ JumpIfSmi(r2, &no_deopt);
    __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r6, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
3921 | 3921 |
3922 | 3922 |
3923 void LCodeGen::DoMathExp(LMathExp* instr) { | 3923 void LCodeGen::DoMathExp(LMathExp* instr) { |
3924 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3924 DwVfpRegister input = ToDoubleRegister(instr->value()); |
3925 DwVfpRegister result = ToDoubleRegister(instr->result()); | 3925 DwVfpRegister result = ToDoubleRegister(instr->result()); |
3926 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); | 3926 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); |
(...skipping 17 matching lines...) Expand all Loading... |
3944 | 3944 |
3945 | 3945 |
3946 void LCodeGen::DoMathClz32(LMathClz32* instr) { | 3946 void LCodeGen::DoMathClz32(LMathClz32* instr) { |
3947 Register input = ToRegister(instr->value()); | 3947 Register input = ToRegister(instr->value()); |
3948 Register result = ToRegister(instr->result()); | 3948 Register result = ToRegister(instr->result()); |
3949 __ clz(result, input); | 3949 __ clz(result, input); |
3950 } | 3950 } |
3951 | 3951 |
3952 | 3952 |
// Invokes a JS function held in r1.  If hydrogen resolved the callee
// statically, CallKnownFunction can skip the generic invoke path;
// otherwise fall back to the generic InvokeFunction sequence with a
// lazy-deopt safepoint.
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r1));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      R1_CONTAINS_TARGET);
  }
}
3972 | 3972 |
3973 | 3973 |
// Calls a code object described by an interface descriptor.  The target is
// either a constant code handle (direct CODE_TARGET call) or a register
// holding a code object (call through its instruction start).  The
// SafepointGenerator brackets the call for lazy deoptimization.
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(r0));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    PlatformInterfaceDescriptor* call_descriptor =
        instr->descriptor()->platform_specific_descriptor();
    __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
            call_descriptor->storage_mode());
  } else {
    DCHECK(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    // Make sure we don't emit any additional entries in the constant pool
    // before the call to ensure that the CallCodeSize() calculated the correct
    // number of instructions for the constant pool load.
    {
      ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
      // Skip the Code object header to reach the first instruction.
      __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
    }
    __ Call(target);
  }
  generator.AfterCall();
}
4003 | 4003 |
4004 | 4004 |
// Direct call to a JS function in r1: optionally materializes the argument
// count in r0, switches to the callee's context, and jumps through the
// function's code entry.  Records a safepoint for lazy deoptimization.
void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(r0, Operand(instr->arity()));
  }

  // Change context.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Load the code entry address
  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  __ Call(ip);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
4022 | 4022 |
4023 | 4023 |
4024 void LCodeGen::DoCallFunction(LCallFunction* instr) { | 4024 void LCodeGen::DoCallFunction(LCallFunction* instr) { |
4025 ASSERT(ToRegister(instr->context()).is(cp)); | 4025 DCHECK(ToRegister(instr->context()).is(cp)); |
4026 ASSERT(ToRegister(instr->function()).is(r1)); | 4026 DCHECK(ToRegister(instr->function()).is(r1)); |
4027 ASSERT(ToRegister(instr->result()).is(r0)); | 4027 DCHECK(ToRegister(instr->result()).is(r0)); |
4028 | 4028 |
4029 int arity = instr->arity(); | 4029 int arity = instr->arity(); |
4030 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); | 4030 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); |
4031 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4031 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4032 } | 4032 } |
4033 | 4033 |
4034 | 4034 |
// Constructor call (`new f(...)`) via CallConstructStub.
// Convention: context in cp, constructor in r1, arity in r0, result in r0.
void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  // No cell in r2 for construct type feedback in optimized code
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
4046 | 4046 |
4047 | 4047 |
4048 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { | 4048 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { |
4049 ASSERT(ToRegister(instr->context()).is(cp)); | 4049 DCHECK(ToRegister(instr->context()).is(cp)); |
4050 ASSERT(ToRegister(instr->constructor()).is(r1)); | 4050 DCHECK(ToRegister(instr->constructor()).is(r1)); |
4051 ASSERT(ToRegister(instr->result()).is(r0)); | 4051 DCHECK(ToRegister(instr->result()).is(r0)); |
4052 | 4052 |
4053 __ mov(r0, Operand(instr->arity())); | 4053 __ mov(r0, Operand(instr->arity())); |
4054 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 4054 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
4055 ElementsKind kind = instr->hydrogen()->elements_kind(); | 4055 ElementsKind kind = instr->hydrogen()->elements_kind(); |
4056 AllocationSiteOverrideMode override_mode = | 4056 AllocationSiteOverrideMode override_mode = |
4057 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) | 4057 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) |
4058 ? DISABLE_ALLOCATION_SITES | 4058 ? DISABLE_ALLOCATION_SITES |
4059 : DONT_OVERRIDE; | 4059 : DONT_OVERRIDE; |
4060 | 4060 |
4061 if (instr->arity() == 0) { | 4061 if (instr->arity() == 0) { |
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4127 | 4127 |
4128 if (access.IsExternalMemory()) { | 4128 if (access.IsExternalMemory()) { |
4129 Register value = ToRegister(instr->value()); | 4129 Register value = ToRegister(instr->value()); |
4130 MemOperand operand = MemOperand(object, offset); | 4130 MemOperand operand = MemOperand(object, offset); |
4131 __ Store(value, operand, representation); | 4131 __ Store(value, operand, representation); |
4132 return; | 4132 return; |
4133 } | 4133 } |
4134 | 4134 |
4135 __ AssertNotSmi(object); | 4135 __ AssertNotSmi(object); |
4136 | 4136 |
4137 ASSERT(!representation.IsSmi() || | 4137 DCHECK(!representation.IsSmi() || |
4138 !instr->value()->IsConstantOperand() || | 4138 !instr->value()->IsConstantOperand() || |
4139 IsSmi(LConstantOperand::cast(instr->value()))); | 4139 IsSmi(LConstantOperand::cast(instr->value()))); |
4140 if (representation.IsDouble()) { | 4140 if (representation.IsDouble()) { |
4141 ASSERT(access.IsInobject()); | 4141 DCHECK(access.IsInobject()); |
4142 ASSERT(!instr->hydrogen()->has_transition()); | 4142 DCHECK(!instr->hydrogen()->has_transition()); |
4143 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | 4143 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); |
4144 DwVfpRegister value = ToDoubleRegister(instr->value()); | 4144 DwVfpRegister value = ToDoubleRegister(instr->value()); |
4145 __ vstr(value, FieldMemOperand(object, offset)); | 4145 __ vstr(value, FieldMemOperand(object, offset)); |
4146 return; | 4146 return; |
4147 } | 4147 } |
4148 | 4148 |
4149 if (instr->hydrogen()->has_transition()) { | 4149 if (instr->hydrogen()->has_transition()) { |
4150 Handle<Map> transition = instr->hydrogen()->transition_map(); | 4150 Handle<Map> transition = instr->hydrogen()->transition_map(); |
4151 AddDeprecationDependency(transition); | 4151 AddDeprecationDependency(transition); |
4152 __ mov(scratch, Operand(transition)); | 4152 __ mov(scratch, Operand(transition)); |
4153 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 4153 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4194 kSaveFPRegs, | 4194 kSaveFPRegs, |
4195 EMIT_REMEMBERED_SET, | 4195 EMIT_REMEMBERED_SET, |
4196 instr->hydrogen()->SmiCheckForWriteBarrier(), | 4196 instr->hydrogen()->SmiCheckForWriteBarrier(), |
4197 instr->hydrogen()->PointersToHereCheckForValue()); | 4197 instr->hydrogen()->PointersToHereCheckForValue()); |
4198 } | 4198 } |
4199 } | 4199 } |
4200 } | 4200 } |
4201 | 4201 |
4202 | 4202 |
// Generic named store (obj.name = value) via the StoreIC stub.  Receiver
// and value are pinned to the IC's registers; the property name is
// materialized into the IC's name register before the call.
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));

  __ mov(StoreIC::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
4212 | 4212 |
4213 | 4213 |
4214 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { | 4214 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { |
4215 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; | 4215 Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; |
4216 if (instr->index()->IsConstantOperand()) { | 4216 if (instr->index()->IsConstantOperand()) { |
(...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4367 Register value = ToRegister(instr->value()); | 4367 Register value = ToRegister(instr->value()); |
4368 Register elements = ToRegister(instr->elements()); | 4368 Register elements = ToRegister(instr->elements()); |
4369 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) | 4369 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) |
4370 : no_reg; | 4370 : no_reg; |
4371 Register scratch = scratch0(); | 4371 Register scratch = scratch0(); |
4372 Register store_base = scratch; | 4372 Register store_base = scratch; |
4373 int offset = instr->base_offset(); | 4373 int offset = instr->base_offset(); |
4374 | 4374 |
4375 // Do the store. | 4375 // Do the store. |
4376 if (instr->key()->IsConstantOperand()) { | 4376 if (instr->key()->IsConstantOperand()) { |
4377 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | 4377 DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); |
4378 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); | 4378 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); |
4379 offset += ToInteger32(const_operand) * kPointerSize; | 4379 offset += ToInteger32(const_operand) * kPointerSize; |
4380 store_base = elements; | 4380 store_base = elements; |
4381 } else { | 4381 } else { |
4382 // Even though the HLoadKeyed instruction forces the input | 4382 // Even though the HLoadKeyed instruction forces the input |
4383 // representation for the key to be an integer, the input gets replaced | 4383 // representation for the key to be an integer, the input gets replaced |
4384 // during bound check elimination with the index argument to the bounds | 4384 // during bound check elimination with the index argument to the bounds |
4385 // check, which can be tagged, so that case must be handled here, too. | 4385 // check, which can be tagged, so that case must be handled here, too. |
4386 if (instr->hydrogen()->key()->representation().IsSmi()) { | 4386 if (instr->hydrogen()->key()->representation().IsSmi()) { |
4387 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); | 4387 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); |
(...skipping 27 matching lines...) Expand all Loading... |
4415 DoStoreKeyedExternalArray(instr); | 4415 DoStoreKeyedExternalArray(instr); |
4416 } else if (instr->hydrogen()->value()->representation().IsDouble()) { | 4416 } else if (instr->hydrogen()->value()->representation().IsDouble()) { |
4417 DoStoreKeyedFixedDoubleArray(instr); | 4417 DoStoreKeyedFixedDoubleArray(instr); |
4418 } else { | 4418 } else { |
4419 DoStoreKeyedFixedArray(instr); | 4419 DoStoreKeyedFixedArray(instr); |
4420 } | 4420 } |
4421 } | 4421 } |
4422 | 4422 |
4423 | 4423 |
// Generic keyed store (obj[key] = value) via the KeyedStoreIC stub.
// All operands are pinned to the IC's registers; the stub variant is
// chosen by the instruction's language mode (strict vs. sloppy).
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));

  Handle<Code> ic = instr->strict_mode() == STRICT
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
4435 | 4435 |
4436 | 4436 |
4437 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { | 4437 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
4438 Register object_reg = ToRegister(instr->object()); | 4438 Register object_reg = ToRegister(instr->object()); |
(...skipping 13 matching lines...) Expand all Loading... |
4452 Register new_map_reg = ToRegister(instr->new_map_temp()); | 4452 Register new_map_reg = ToRegister(instr->new_map_temp()); |
4453 __ mov(new_map_reg, Operand(to_map)); | 4453 __ mov(new_map_reg, Operand(to_map)); |
4454 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); | 4454 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); |
4455 // Write barrier. | 4455 // Write barrier. |
4456 __ RecordWriteForMap(object_reg, | 4456 __ RecordWriteForMap(object_reg, |
4457 new_map_reg, | 4457 new_map_reg, |
4458 scratch, | 4458 scratch, |
4459 GetLinkRegisterState(), | 4459 GetLinkRegisterState(), |
4460 kDontSaveFPRegs); | 4460 kDontSaveFPRegs); |
4461 } else { | 4461 } else { |
4462 ASSERT(ToRegister(instr->context()).is(cp)); | 4462 DCHECK(ToRegister(instr->context()).is(cp)); |
4463 ASSERT(object_reg.is(r0)); | 4463 DCHECK(object_reg.is(r0)); |
4464 PushSafepointRegistersScope scope(this); | 4464 PushSafepointRegistersScope scope(this); |
4465 __ Move(r1, to_map); | 4465 __ Move(r1, to_map); |
4466 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; | 4466 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; |
4467 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); | 4467 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); |
4468 __ CallStub(&stub); | 4468 __ CallStub(&stub); |
4469 RecordSafepointWithRegisters( | 4469 RecordSafepointWithRegisters( |
4470 instr->pointer_map(), 0, Safepoint::kLazyDeopt); | 4470 instr->pointer_map(), 0, Safepoint::kLazyDeopt); |
4471 } | 4471 } |
4472 __ bind(¬_applicable); | 4472 __ bind(¬_applicable); |
4473 } | 4473 } |
4474 | 4474 |
4475 | 4475 |
4476 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4476 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
4477 Register object = ToRegister(instr->object()); | 4477 Register object = ToRegister(instr->object()); |
4478 Register temp = ToRegister(instr->temp()); | 4478 Register temp = ToRegister(instr->temp()); |
4479 Label no_memento_found; | 4479 Label no_memento_found; |
4480 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4480 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
4481 DeoptimizeIf(eq, instr->environment()); | 4481 DeoptimizeIf(eq, instr->environment()); |
4482 __ bind(&no_memento_found); | 4482 __ bind(&no_memento_found); |
4483 } | 4483 } |
4484 | 4484 |
4485 | 4485 |
4486 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4486 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
4487 ASSERT(ToRegister(instr->context()).is(cp)); | 4487 DCHECK(ToRegister(instr->context()).is(cp)); |
4488 ASSERT(ToRegister(instr->left()).is(r1)); | 4488 DCHECK(ToRegister(instr->left()).is(r1)); |
4489 ASSERT(ToRegister(instr->right()).is(r0)); | 4489 DCHECK(ToRegister(instr->right()).is(r0)); |
4490 StringAddStub stub(isolate(), | 4490 StringAddStub stub(isolate(), |
4491 instr->hydrogen()->flags(), | 4491 instr->hydrogen()->flags(), |
4492 instr->hydrogen()->pretenure_flag()); | 4492 instr->hydrogen()->pretenure_flag()); |
4493 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4493 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4494 } | 4494 } |
4495 | 4495 |
4496 | 4496 |
4497 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { | 4497 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
4498 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { | 4498 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { |
4499 public: | 4499 public: |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4559 codegen()->DoDeferredStringCharFromCode(instr_); | 4559 codegen()->DoDeferredStringCharFromCode(instr_); |
4560 } | 4560 } |
4561 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4561 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4562 private: | 4562 private: |
4563 LStringCharFromCode* instr_; | 4563 LStringCharFromCode* instr_; |
4564 }; | 4564 }; |
4565 | 4565 |
4566 DeferredStringCharFromCode* deferred = | 4566 DeferredStringCharFromCode* deferred = |
4567 new(zone()) DeferredStringCharFromCode(this, instr); | 4567 new(zone()) DeferredStringCharFromCode(this, instr); |
4568 | 4568 |
4569 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); | 4569 DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); |
4570 Register char_code = ToRegister(instr->char_code()); | 4570 Register char_code = ToRegister(instr->char_code()); |
4571 Register result = ToRegister(instr->result()); | 4571 Register result = ToRegister(instr->result()); |
4572 ASSERT(!char_code.is(result)); | 4572 DCHECK(!char_code.is(result)); |
4573 | 4573 |
4574 __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); | 4574 __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); |
4575 __ b(hi, deferred->entry()); | 4575 __ b(hi, deferred->entry()); |
4576 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); | 4576 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); |
4577 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); | 4577 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); |
4578 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); | 4578 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); |
4579 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 4579 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
4580 __ cmp(result, ip); | 4580 __ cmp(result, ip); |
4581 __ b(eq, deferred->entry()); | 4581 __ b(eq, deferred->entry()); |
4582 __ bind(deferred->exit()); | 4582 __ bind(deferred->exit()); |
(...skipping 12 matching lines...) Expand all Loading... |
4595 PushSafepointRegistersScope scope(this); | 4595 PushSafepointRegistersScope scope(this); |
4596 __ SmiTag(char_code); | 4596 __ SmiTag(char_code); |
4597 __ push(char_code); | 4597 __ push(char_code); |
4598 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); | 4598 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); |
4599 __ StoreToSafepointRegisterSlot(r0, result); | 4599 __ StoreToSafepointRegisterSlot(r0, result); |
4600 } | 4600 } |
4601 | 4601 |
4602 | 4602 |
4603 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4603 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
4604 LOperand* input = instr->value(); | 4604 LOperand* input = instr->value(); |
4605 ASSERT(input->IsRegister() || input->IsStackSlot()); | 4605 DCHECK(input->IsRegister() || input->IsStackSlot()); |
4606 LOperand* output = instr->result(); | 4606 LOperand* output = instr->result(); |
4607 ASSERT(output->IsDoubleRegister()); | 4607 DCHECK(output->IsDoubleRegister()); |
4608 SwVfpRegister single_scratch = double_scratch0().low(); | 4608 SwVfpRegister single_scratch = double_scratch0().low(); |
4609 if (input->IsStackSlot()) { | 4609 if (input->IsStackSlot()) { |
4610 Register scratch = scratch0(); | 4610 Register scratch = scratch0(); |
4611 __ ldr(scratch, ToMemOperand(input)); | 4611 __ ldr(scratch, ToMemOperand(input)); |
4612 __ vmov(single_scratch, scratch); | 4612 __ vmov(single_scratch, scratch); |
4613 } else { | 4613 } else { |
4614 __ vmov(single_scratch, ToRegister(input)); | 4614 __ vmov(single_scratch, ToRegister(input)); |
4615 } | 4615 } |
4616 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); | 4616 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); |
4617 } | 4617 } |
(...skipping 221 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4839 | 4839 |
4840 | 4840 |
4841 void LCodeGen::EmitNumberUntagD(Register input_reg, | 4841 void LCodeGen::EmitNumberUntagD(Register input_reg, |
4842 DwVfpRegister result_reg, | 4842 DwVfpRegister result_reg, |
4843 bool can_convert_undefined_to_nan, | 4843 bool can_convert_undefined_to_nan, |
4844 bool deoptimize_on_minus_zero, | 4844 bool deoptimize_on_minus_zero, |
4845 LEnvironment* env, | 4845 LEnvironment* env, |
4846 NumberUntagDMode mode) { | 4846 NumberUntagDMode mode) { |
4847 Register scratch = scratch0(); | 4847 Register scratch = scratch0(); |
4848 SwVfpRegister flt_scratch = double_scratch0().low(); | 4848 SwVfpRegister flt_scratch = double_scratch0().low(); |
4849 ASSERT(!result_reg.is(double_scratch0())); | 4849 DCHECK(!result_reg.is(double_scratch0())); |
4850 Label convert, load_smi, done; | 4850 Label convert, load_smi, done; |
4851 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4851 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
4852 // Smi check. | 4852 // Smi check. |
4853 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4853 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
4854 // Heap number map check. | 4854 // Heap number map check. |
4855 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4855 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
4856 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4856 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
4857 __ cmp(scratch, Operand(ip)); | 4857 __ cmp(scratch, Operand(ip)); |
4858 if (can_convert_undefined_to_nan) { | 4858 if (can_convert_undefined_to_nan) { |
4859 __ b(ne, &convert); | 4859 __ b(ne, &convert); |
(...skipping 16 matching lines...) Expand all Loading... |
4876 // Convert undefined (and hole) to NaN. | 4876 // Convert undefined (and hole) to NaN. |
4877 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 4877 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
4878 __ cmp(input_reg, Operand(ip)); | 4878 __ cmp(input_reg, Operand(ip)); |
4879 DeoptimizeIf(ne, env); | 4879 DeoptimizeIf(ne, env); |
4880 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4880 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
4881 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); | 4881 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); |
4882 __ jmp(&done); | 4882 __ jmp(&done); |
4883 } | 4883 } |
4884 } else { | 4884 } else { |
4885 __ SmiUntag(scratch, input_reg); | 4885 __ SmiUntag(scratch, input_reg); |
4886 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); | 4886 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
4887 } | 4887 } |
4888 // Smi to double register conversion | 4888 // Smi to double register conversion |
4889 __ bind(&load_smi); | 4889 __ bind(&load_smi); |
4890 // scratch: untagged value of input_reg | 4890 // scratch: untagged value of input_reg |
4891 __ vmov(flt_scratch, scratch); | 4891 __ vmov(flt_scratch, scratch); |
4892 __ vcvt_f64_s32(result_reg, flt_scratch); | 4892 __ vcvt_f64_s32(result_reg, flt_scratch); |
4893 __ bind(&done); | 4893 __ bind(&done); |
4894 } | 4894 } |
4895 | 4895 |
4896 | 4896 |
4897 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { | 4897 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { |
4898 Register input_reg = ToRegister(instr->value()); | 4898 Register input_reg = ToRegister(instr->value()); |
4899 Register scratch1 = scratch0(); | 4899 Register scratch1 = scratch0(); |
4900 Register scratch2 = ToRegister(instr->temp()); | 4900 Register scratch2 = ToRegister(instr->temp()); |
4901 LowDwVfpRegister double_scratch = double_scratch0(); | 4901 LowDwVfpRegister double_scratch = double_scratch0(); |
4902 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2()); | 4902 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2()); |
4903 | 4903 |
4904 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); | 4904 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); |
4905 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); | 4905 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); |
4906 | 4906 |
4907 Label done; | 4907 Label done; |
4908 | 4908 |
4909 // The input was optimistically untagged; revert it. | 4909 // The input was optimistically untagged; revert it. |
4910 // The carry flag is set when we reach this deferred code as we just executed | 4910 // The carry flag is set when we reach this deferred code as we just executed |
4911 // SmiUntag(heap_object, SetCC) | 4911 // SmiUntag(heap_object, SetCC) |
4912 STATIC_ASSERT(kHeapObjectTag == 1); | 4912 STATIC_ASSERT(kHeapObjectTag == 1); |
4913 __ adc(scratch2, input_reg, Operand(input_reg)); | 4913 __ adc(scratch2, input_reg, Operand(input_reg)); |
4914 | 4914 |
4915 // Heap number map check. | 4915 // Heap number map check. |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4975 : LDeferredCode(codegen), instr_(instr) { } | 4975 : LDeferredCode(codegen), instr_(instr) { } |
4976 virtual void Generate() V8_OVERRIDE { | 4976 virtual void Generate() V8_OVERRIDE { |
4977 codegen()->DoDeferredTaggedToI(instr_); | 4977 codegen()->DoDeferredTaggedToI(instr_); |
4978 } | 4978 } |
4979 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4979 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
4980 private: | 4980 private: |
4981 LTaggedToI* instr_; | 4981 LTaggedToI* instr_; |
4982 }; | 4982 }; |
4983 | 4983 |
4984 LOperand* input = instr->value(); | 4984 LOperand* input = instr->value(); |
4985 ASSERT(input->IsRegister()); | 4985 DCHECK(input->IsRegister()); |
4986 ASSERT(input->Equals(instr->result())); | 4986 DCHECK(input->Equals(instr->result())); |
4987 | 4987 |
4988 Register input_reg = ToRegister(input); | 4988 Register input_reg = ToRegister(input); |
4989 | 4989 |
4990 if (instr->hydrogen()->value()->representation().IsSmi()) { | 4990 if (instr->hydrogen()->value()->representation().IsSmi()) { |
4991 __ SmiUntag(input_reg); | 4991 __ SmiUntag(input_reg); |
4992 } else { | 4992 } else { |
4993 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); | 4993 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); |
4994 | 4994 |
4995 // Optimistically untag the input. | 4995 // Optimistically untag the input. |
4996 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4996 // If the input is a HeapObject, SmiUntag will set the carry flag. |
4997 __ SmiUntag(input_reg, SetCC); | 4997 __ SmiUntag(input_reg, SetCC); |
4998 // Branch to deferred code if the input was tagged. | 4998 // Branch to deferred code if the input was tagged. |
4999 // The deferred code will take care of restoring the tag. | 4999 // The deferred code will take care of restoring the tag. |
5000 __ b(cs, deferred->entry()); | 5000 __ b(cs, deferred->entry()); |
5001 __ bind(deferred->exit()); | 5001 __ bind(deferred->exit()); |
5002 } | 5002 } |
5003 } | 5003 } |
5004 | 5004 |
5005 | 5005 |
5006 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 5006 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
5007 LOperand* input = instr->value(); | 5007 LOperand* input = instr->value(); |
5008 ASSERT(input->IsRegister()); | 5008 DCHECK(input->IsRegister()); |
5009 LOperand* result = instr->result(); | 5009 LOperand* result = instr->result(); |
5010 ASSERT(result->IsDoubleRegister()); | 5010 DCHECK(result->IsDoubleRegister()); |
5011 | 5011 |
5012 Register input_reg = ToRegister(input); | 5012 Register input_reg = ToRegister(input); |
5013 DwVfpRegister result_reg = ToDoubleRegister(result); | 5013 DwVfpRegister result_reg = ToDoubleRegister(result); |
5014 | 5014 |
5015 HValue* value = instr->hydrogen()->value(); | 5015 HValue* value = instr->hydrogen()->value(); |
5016 NumberUntagDMode mode = value->representation().IsSmi() | 5016 NumberUntagDMode mode = value->representation().IsSmi() |
5017 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; | 5017 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
5018 | 5018 |
5019 EmitNumberUntagD(input_reg, result_reg, | 5019 EmitNumberUntagD(input_reg, result_reg, |
5020 instr->hydrogen()->can_convert_undefined_to_nan(), | 5020 instr->hydrogen()->can_convert_undefined_to_nan(), |
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5116 __ cmp(scratch, Operand(last)); | 5116 __ cmp(scratch, Operand(last)); |
5117 DeoptimizeIf(hi, instr->environment()); | 5117 DeoptimizeIf(hi, instr->environment()); |
5118 } | 5118 } |
5119 } | 5119 } |
5120 } else { | 5120 } else { |
5121 uint8_t mask; | 5121 uint8_t mask; |
5122 uint8_t tag; | 5122 uint8_t tag; |
5123 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5123 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
5124 | 5124 |
5125 if (IsPowerOf2(mask)) { | 5125 if (IsPowerOf2(mask)) { |
5126 ASSERT(tag == 0 || IsPowerOf2(tag)); | 5126 DCHECK(tag == 0 || IsPowerOf2(tag)); |
5127 __ tst(scratch, Operand(mask)); | 5127 __ tst(scratch, Operand(mask)); |
5128 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment()); | 5128 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment()); |
5129 } else { | 5129 } else { |
5130 __ and_(scratch, scratch, Operand(mask)); | 5130 __ and_(scratch, scratch, Operand(mask)); |
5131 __ cmp(scratch, Operand(tag)); | 5131 __ cmp(scratch, Operand(tag)); |
5132 DeoptimizeIf(ne, instr->environment()); | 5132 DeoptimizeIf(ne, instr->environment()); |
5133 } | 5133 } |
5134 } | 5134 } |
5135 } | 5135 } |
5136 | 5136 |
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5189 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); | 5189 const UniqueSet<Map>* maps = instr->hydrogen()->maps(); |
5190 for (int i = 0; i < maps->size(); ++i) { | 5190 for (int i = 0; i < maps->size(); ++i) { |
5191 AddStabilityDependency(maps->at(i).handle()); | 5191 AddStabilityDependency(maps->at(i).handle()); |
5192 } | 5192 } |
5193 return; | 5193 return; |
5194 } | 5194 } |
5195 | 5195 |
5196 Register map_reg = scratch0(); | 5196 Register map_reg = scratch0(); |
5197 | 5197 |
5198 LOperand* input = instr->value(); | 5198 LOperand* input = instr->value(); |
5199 ASSERT(input->IsRegister()); | 5199 DCHECK(input->IsRegister()); |
5200 Register reg = ToRegister(input); | 5200 Register reg = ToRegister(input); |
5201 | 5201 |
5202 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); | 5202 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
5203 | 5203 |
5204 DeferredCheckMaps* deferred = NULL; | 5204 DeferredCheckMaps* deferred = NULL; |
5205 if (instr->hydrogen()->HasMigrationTarget()) { | 5205 if (instr->hydrogen()->HasMigrationTarget()) { |
5206 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); | 5206 deferred = new(zone()) DeferredCheckMaps(this, instr, reg); |
5207 __ bind(deferred->check_maps()); | 5207 __ bind(deferred->check_maps()); |
5208 } | 5208 } |
5209 | 5209 |
(...skipping 106 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5316 Register result = ToRegister(instr->result()); | 5316 Register result = ToRegister(instr->result()); |
5317 Register scratch = ToRegister(instr->temp1()); | 5317 Register scratch = ToRegister(instr->temp1()); |
5318 Register scratch2 = ToRegister(instr->temp2()); | 5318 Register scratch2 = ToRegister(instr->temp2()); |
5319 | 5319 |
5320 // Allocate memory for the object. | 5320 // Allocate memory for the object. |
5321 AllocationFlags flags = TAG_OBJECT; | 5321 AllocationFlags flags = TAG_OBJECT; |
5322 if (instr->hydrogen()->MustAllocateDoubleAligned()) { | 5322 if (instr->hydrogen()->MustAllocateDoubleAligned()) { |
5323 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); | 5323 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); |
5324 } | 5324 } |
5325 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { | 5325 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
5326 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); | 5326 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); |
5327 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | 5327 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); |
5328 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); | 5328 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); |
5329 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { | 5329 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { |
5330 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | 5330 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); |
5331 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); | 5331 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); |
5332 } | 5332 } |
5333 | 5333 |
5334 if (instr->size()->IsConstantOperand()) { | 5334 if (instr->size()->IsConstantOperand()) { |
5335 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | 5335 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
5336 if (size <= Page::kMaxRegularHeapObjectSize) { | 5336 if (size <= Page::kMaxRegularHeapObjectSize) { |
5337 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); | 5337 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); |
5338 } else { | 5338 } else { |
5339 __ jmp(deferred->entry()); | 5339 __ jmp(deferred->entry()); |
5340 } | 5340 } |
(...skipping 26 matching lines...) Expand all Loading... |
5367 Register result = ToRegister(instr->result()); | 5367 Register result = ToRegister(instr->result()); |
5368 | 5368 |
5369 // TODO(3095996): Get rid of this. For now, we need to make the | 5369 // TODO(3095996): Get rid of this. For now, we need to make the |
5370 // result register contain a valid pointer because it is already | 5370 // result register contain a valid pointer because it is already |
5371 // contained in the register pointer map. | 5371 // contained in the register pointer map. |
5372 __ mov(result, Operand(Smi::FromInt(0))); | 5372 __ mov(result, Operand(Smi::FromInt(0))); |
5373 | 5373 |
5374 PushSafepointRegistersScope scope(this); | 5374 PushSafepointRegistersScope scope(this); |
5375 if (instr->size()->IsRegister()) { | 5375 if (instr->size()->IsRegister()) { |
5376 Register size = ToRegister(instr->size()); | 5376 Register size = ToRegister(instr->size()); |
5377 ASSERT(!size.is(result)); | 5377 DCHECK(!size.is(result)); |
5378 __ SmiTag(size); | 5378 __ SmiTag(size); |
5379 __ push(size); | 5379 __ push(size); |
5380 } else { | 5380 } else { |
5381 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | 5381 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
5382 if (size >= 0 && size <= Smi::kMaxValue) { | 5382 if (size >= 0 && size <= Smi::kMaxValue) { |
5383 __ Push(Smi::FromInt(size)); | 5383 __ Push(Smi::FromInt(size)); |
5384 } else { | 5384 } else { |
5385 // We should never get here at runtime => abort | 5385 // We should never get here at runtime => abort |
5386 __ stop("invalid allocation size"); | 5386 __ stop("invalid allocation size"); |
5387 return; | 5387 return; |
5388 } | 5388 } |
5389 } | 5389 } |
5390 | 5390 |
5391 int flags = AllocateDoubleAlignFlag::encode( | 5391 int flags = AllocateDoubleAlignFlag::encode( |
5392 instr->hydrogen()->MustAllocateDoubleAligned()); | 5392 instr->hydrogen()->MustAllocateDoubleAligned()); |
5393 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { | 5393 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
5394 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); | 5394 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation()); |
5395 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | 5395 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); |
5396 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); | 5396 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); |
5397 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { | 5397 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { |
5398 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | 5398 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); |
5399 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); | 5399 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); |
5400 } else { | 5400 } else { |
5401 flags = AllocateTargetSpace::update(flags, NEW_SPACE); | 5401 flags = AllocateTargetSpace::update(flags, NEW_SPACE); |
5402 } | 5402 } |
5403 __ Push(Smi::FromInt(flags)); | 5403 __ Push(Smi::FromInt(flags)); |
5404 | 5404 |
5405 CallRuntimeFromDeferred( | 5405 CallRuntimeFromDeferred( |
5406 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); | 5406 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); |
5407 __ StoreToSafepointRegisterSlot(r0, result); | 5407 __ StoreToSafepointRegisterSlot(r0, result); |
5408 } | 5408 } |
5409 | 5409 |
5410 | 5410 |
// Converts the object in r0 to fast-properties mode by pushing it and
// calling the %ToFastProperties runtime function. The lithium instruction
// fixes the input to r0 (checked below).
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}
5416 | 5416 |
5417 | 5417 |
5418 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { | 5418 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { |
5419 ASSERT(ToRegister(instr->context()).is(cp)); | 5419 DCHECK(ToRegister(instr->context()).is(cp)); |
5420 Label materialized; | 5420 Label materialized; |
5421 // Registers will be used as follows: | 5421 // Registers will be used as follows: |
5422 // r6 = literals array. | 5422 // r6 = literals array. |
5423 // r1 = regexp literal. | 5423 // r1 = regexp literal. |
5424 // r0 = regexp literal clone. | 5424 // r0 = regexp literal clone. |
5425 // r2-5 are used as temporaries. | 5425 // r2-5 are used as temporaries. |
5426 int literal_offset = | 5426 int literal_offset = |
5427 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); | 5427 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); |
5428 __ Move(r6, instr->hydrogen()->literals()); | 5428 __ Move(r6, instr->hydrogen()->literals()); |
5429 __ ldr(r1, FieldMemOperand(r6, literal_offset)); | 5429 __ ldr(r1, FieldMemOperand(r6, literal_offset)); |
(...skipping 23 matching lines...) Expand all Loading... |
5453 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); | 5453 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); |
5454 __ pop(r1); | 5454 __ pop(r1); |
5455 | 5455 |
5456 __ bind(&allocated); | 5456 __ bind(&allocated); |
5457 // Copy the content into the newly allocated memory. | 5457 // Copy the content into the newly allocated memory. |
5458 __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize); | 5458 __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize); |
5459 } | 5459 } |
5460 | 5460 |
5461 | 5461 |
5462 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { | 5462 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
5463 ASSERT(ToRegister(instr->context()).is(cp)); | 5463 DCHECK(ToRegister(instr->context()).is(cp)); |
5464 // Use the fast case closure allocation code that allocates in new | 5464 // Use the fast case closure allocation code that allocates in new |
5465 // space for nested functions that don't need literals cloning. | 5465 // space for nested functions that don't need literals cloning. |
5466 bool pretenure = instr->hydrogen()->pretenure(); | 5466 bool pretenure = instr->hydrogen()->pretenure(); |
5467 if (!pretenure && instr->hydrogen()->has_no_literals()) { | 5467 if (!pretenure && instr->hydrogen()->has_no_literals()) { |
5468 FastNewClosureStub stub(isolate(), | 5468 FastNewClosureStub stub(isolate(), |
5469 instr->hydrogen()->strict_mode(), | 5469 instr->hydrogen()->strict_mode(), |
5470 instr->hydrogen()->is_generator()); | 5470 instr->hydrogen()->is_generator()); |
5471 __ mov(r2, Operand(instr->hydrogen()->shared_info())); | 5471 __ mov(r2, Operand(instr->hydrogen()->shared_info())); |
5472 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 5472 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
5473 } else { | 5473 } else { |
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5576 | 5576 |
5577 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { | 5577 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { |
5578 Register temp1 = ToRegister(instr->temp()); | 5578 Register temp1 = ToRegister(instr->temp()); |
5579 | 5579 |
5580 EmitIsConstructCall(temp1, scratch0()); | 5580 EmitIsConstructCall(temp1, scratch0()); |
5581 EmitBranch(instr, eq); | 5581 EmitBranch(instr, eq); |
5582 } | 5582 } |
5583 | 5583 |
5584 | 5584 |
// Sets the eq condition flag iff the calling frame is a construct frame.
// Walks one frame up via the saved fp, transparently skipping an arguments
// adaptor frame if one is interposed, then compares the frame marker slot
// against StackFrame::CONSTRUCT. Clobbers both temps; callers branch on eq.
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  // The ldr below is predicated on eq, so it only replaces temp1 with the
  // adaptor's caller fp when the context slot held the ADAPTOR marker.
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
5599 | 5599 |
5600 | 5600 |
5601 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { | 5601 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
5602 if (!info()->IsStub()) { | 5602 if (!info()->IsStub()) { |
5603 // Ensure that we have enough space after the previous lazy-bailout | 5603 // Ensure that we have enough space after the previous lazy-bailout |
5604 // instruction for patching the code here. | 5604 // instruction for patching the code here. |
5605 int current_pc = masm()->pc_offset(); | 5605 int current_pc = masm()->pc_offset(); |
5606 if (current_pc < last_lazy_deopt_pc_ + space_needed) { | 5606 if (current_pc < last_lazy_deopt_pc_ + space_needed) { |
5607 // Block literal pool emission for duration of padding. | 5607 // Block literal pool emission for duration of padding. |
5608 Assembler::BlockConstPoolScope block_const_pool(masm()); | 5608 Assembler::BlockConstPoolScope block_const_pool(masm()); |
5609 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; | 5609 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; |
5610 ASSERT_EQ(0, padding_size % Assembler::kInstrSize); | 5610 DCHECK_EQ(0, padding_size % Assembler::kInstrSize); |
5611 while (padding_size > 0) { | 5611 while (padding_size > 0) { |
5612 __ nop(); | 5612 __ nop(); |
5613 padding_size -= Assembler::kInstrSize; | 5613 padding_size -= Assembler::kInstrSize; |
5614 } | 5614 } |
5615 } | 5615 } |
5616 } | 5616 } |
5617 last_lazy_deopt_pc_ = masm()->pc_offset(); | 5617 last_lazy_deopt_pc_ = masm()->pc_offset(); |
5618 } | 5618 } |
5619 | 5619 |
5620 | 5620 |
5621 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | 5621 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
5622 last_lazy_deopt_pc_ = masm()->pc_offset(); | 5622 last_lazy_deopt_pc_ = masm()->pc_offset(); |
5623 ASSERT(instr->HasEnvironment()); | 5623 DCHECK(instr->HasEnvironment()); |
5624 LEnvironment* env = instr->environment(); | 5624 LEnvironment* env = instr->environment(); |
5625 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5625 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
5626 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 5626 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
5627 } | 5627 } |
5628 | 5628 |
5629 | 5629 |
5630 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | 5630 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
5631 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | 5631 Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
5632 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | 5632 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the |
5633 // needed return address), even though the implementation of LAZY and EAGER is | 5633 // needed return address), even though the implementation of LAZY and EAGER is |
(...skipping 17 matching lines...) Expand all Loading... |
5651 // Nothing to see here, move on! | 5651 // Nothing to see here, move on! |
5652 } | 5652 } |
5653 | 5653 |
5654 | 5654 |
5655 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { | 5655 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { |
5656 PushSafepointRegistersScope scope(this); | 5656 PushSafepointRegistersScope scope(this); |
5657 LoadContextFromDeferred(instr->context()); | 5657 LoadContextFromDeferred(instr->context()); |
5658 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); | 5658 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); |
5659 RecordSafepointWithLazyDeopt( | 5659 RecordSafepointWithLazyDeopt( |
5660 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 5660 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
5661 ASSERT(instr->HasEnvironment()); | 5661 DCHECK(instr->HasEnvironment()); |
5662 LEnvironment* env = instr->environment(); | 5662 LEnvironment* env = instr->environment(); |
5663 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 5663 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
5664 } | 5664 } |
5665 | 5665 |
5666 | 5666 |
5667 void LCodeGen::DoStackCheck(LStackCheck* instr) { | 5667 void LCodeGen::DoStackCheck(LStackCheck* instr) { |
5668 class DeferredStackCheck V8_FINAL : public LDeferredCode { | 5668 class DeferredStackCheck V8_FINAL : public LDeferredCode { |
5669 public: | 5669 public: |
5670 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) | 5670 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) |
5671 : LDeferredCode(codegen), instr_(instr) { } | 5671 : LDeferredCode(codegen), instr_(instr) { } |
5672 virtual void Generate() V8_OVERRIDE { | 5672 virtual void Generate() V8_OVERRIDE { |
5673 codegen()->DoDeferredStackCheck(instr_); | 5673 codegen()->DoDeferredStackCheck(instr_); |
5674 } | 5674 } |
5675 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 5675 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
5676 private: | 5676 private: |
5677 LStackCheck* instr_; | 5677 LStackCheck* instr_; |
5678 }; | 5678 }; |
5679 | 5679 |
5680 ASSERT(instr->HasEnvironment()); | 5680 DCHECK(instr->HasEnvironment()); |
5681 LEnvironment* env = instr->environment(); | 5681 LEnvironment* env = instr->environment(); |
5682 // There is no LLazyBailout instruction for stack-checks. We have to | 5682 // There is no LLazyBailout instruction for stack-checks. We have to |
5683 // prepare for lazy deoptimization explicitly here. | 5683 // prepare for lazy deoptimization explicitly here. |
5684 if (instr->hydrogen()->is_function_entry()) { | 5684 if (instr->hydrogen()->is_function_entry()) { |
5685 // Perform stack overflow check. | 5685 // Perform stack overflow check. |
5686 Label done; | 5686 Label done; |
5687 __ LoadRoot(ip, Heap::kStackLimitRootIndex); | 5687 __ LoadRoot(ip, Heap::kStackLimitRootIndex); |
5688 __ cmp(sp, Operand(ip)); | 5688 __ cmp(sp, Operand(ip)); |
5689 __ b(hs, &done); | 5689 __ b(hs, &done); |
5690 Handle<Code> stack_check = isolate()->builtins()->StackCheck(); | 5690 Handle<Code> stack_check = isolate()->builtins()->StackCheck(); |
5691 PredictableCodeSizeScope predictable(masm(), | 5691 PredictableCodeSizeScope predictable(masm(), |
5692 CallCodeSize(stack_check, RelocInfo::CODE_TARGET)); | 5692 CallCodeSize(stack_check, RelocInfo::CODE_TARGET)); |
5693 ASSERT(instr->context()->IsRegister()); | 5693 DCHECK(instr->context()->IsRegister()); |
5694 ASSERT(ToRegister(instr->context()).is(cp)); | 5694 DCHECK(ToRegister(instr->context()).is(cp)); |
5695 CallCode(stack_check, RelocInfo::CODE_TARGET, instr); | 5695 CallCode(stack_check, RelocInfo::CODE_TARGET, instr); |
5696 __ bind(&done); | 5696 __ bind(&done); |
5697 } else { | 5697 } else { |
5698 ASSERT(instr->hydrogen()->is_backwards_branch()); | 5698 DCHECK(instr->hydrogen()->is_backwards_branch()); |
5699 // Perform stack overflow check if this goto needs it before jumping. | 5699 // Perform stack overflow check if this goto needs it before jumping. |
5700 DeferredStackCheck* deferred_stack_check = | 5700 DeferredStackCheck* deferred_stack_check = |
5701 new(zone()) DeferredStackCheck(this, instr); | 5701 new(zone()) DeferredStackCheck(this, instr); |
5702 __ LoadRoot(ip, Heap::kStackLimitRootIndex); | 5702 __ LoadRoot(ip, Heap::kStackLimitRootIndex); |
5703 __ cmp(sp, Operand(ip)); | 5703 __ cmp(sp, Operand(ip)); |
5704 __ b(lo, deferred_stack_check->entry()); | 5704 __ b(lo, deferred_stack_check->entry()); |
5705 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | 5705 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
5706 __ bind(instr->done_label()); | 5706 __ bind(instr->done_label()); |
5707 deferred_stack_check->SetExit(instr->done_label()); | 5707 deferred_stack_check->SetExit(instr->done_label()); |
5708 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5708 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
5709 // Don't record a deoptimization index for the safepoint here. | 5709 // Don't record a deoptimization index for the safepoint here. |
5710 // This will be done explicitly when emitting call and the safepoint in | 5710 // This will be done explicitly when emitting call and the safepoint in |
5711 // the deferred code. | 5711 // the deferred code. |
5712 } | 5712 } |
5713 } | 5713 } |
5714 | 5714 |
5715 | 5715 |
5716 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { | 5716 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { |
5717 // This is a pseudo-instruction that ensures that the environment here is | 5717 // This is a pseudo-instruction that ensures that the environment here is |
5718 // properly registered for deoptimization and records the assembler's PC | 5718 // properly registered for deoptimization and records the assembler's PC |
5719 // offset. | 5719 // offset. |
5720 LEnvironment* environment = instr->environment(); | 5720 LEnvironment* environment = instr->environment(); |
5721 | 5721 |
5722 // If the environment were already registered, we would have no way of | 5722 // If the environment were already registered, we would have no way of |
5723 // backpatching it with the spill slot operands. | 5723 // backpatching it with the spill slot operands. |
5724 ASSERT(!environment->HasBeenRegistered()); | 5724 DCHECK(!environment->HasBeenRegistered()); |
5725 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5725 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
5726 | 5726 |
5727 GenerateOsrPrologue(); | 5727 GenerateOsrPrologue(); |
5728 } | 5728 } |
5729 | 5729 |
5730 | 5730 |
5731 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5731 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
5732 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 5732 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
5733 __ cmp(r0, ip); | 5733 __ cmp(r0, ip); |
5734 DeoptimizeIf(eq, instr->environment()); | 5734 DeoptimizeIf(eq, instr->environment()); |
(...skipping 147 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5882 __ Push(scope_info); | 5882 __ Push(scope_info); |
5883 __ push(ToRegister(instr->function())); | 5883 __ push(ToRegister(instr->function())); |
5884 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5884 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
5885 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5885 RecordSafepoint(Safepoint::kNoLazyDeopt); |
5886 } | 5886 } |
5887 | 5887 |
5888 | 5888 |
5889 #undef __ | 5889 #undef __ |
5890 | 5890 |
5891 } } // namespace v8::internal | 5891 } } // namespace v8::internal |
OLD | NEW |