Chromium Code Reviews

Side by Side Diff: src/arm64/lithium-codegen-arm64.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes Created 6 years, 4 months ago
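Note on the change itself: ASSERT* and DCHECK* are the same debug-only check macros; this CL is a mechanical rename bringing V8's spelling in line with Chromium's. A minimal before/after sketch (hedged: the real macro definitions live in V8's checks/logging headers and carry more machinery, e.g. message formatting):

    // Before this CL (checked in debug builds, compiled out in release):
    ASSERT(instr != NULL);
    // After this CL (identical semantics, Chromium-style name):
    DCHECK(instr != NULL);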
OLD | NEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/arm64/lithium-codegen-arm64.h" 7 #include "src/arm64/lithium-codegen-arm64.h"
8 #include "src/arm64/lithium-gap-resolver-arm64.h" 8 #include "src/arm64/lithium-gap-resolver-arm64.h"
9 #include "src/code-stubs.h" 9 #include "src/code-stubs.h"
10 #include "src/hydrogen-osr.h" 10 #include "src/hydrogen-osr.h"
(...skipping 220 matching lines...)
231 : Translation::kSelfLiteralId; 231 : Translation::kSelfLiteralId;
232 232
233 switch (environment->frame_type()) { 233 switch (environment->frame_type()) {
234 case JS_FUNCTION: 234 case JS_FUNCTION:
235 translation->BeginJSFrame(environment->ast_id(), closure_id, height); 235 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
236 break; 236 break;
237 case JS_CONSTRUCT: 237 case JS_CONSTRUCT:
238 translation->BeginConstructStubFrame(closure_id, translation_size); 238 translation->BeginConstructStubFrame(closure_id, translation_size);
239 break; 239 break;
240 case JS_GETTER: 240 case JS_GETTER:
241 ASSERT(translation_size == 1); 241 DCHECK(translation_size == 1);
242 ASSERT(height == 0); 242 DCHECK(height == 0);
243 translation->BeginGetterStubFrame(closure_id); 243 translation->BeginGetterStubFrame(closure_id);
244 break; 244 break;
245 case JS_SETTER: 245 case JS_SETTER:
246 ASSERT(translation_size == 2); 246 DCHECK(translation_size == 2);
247 ASSERT(height == 0); 247 DCHECK(height == 0);
248 translation->BeginSetterStubFrame(closure_id); 248 translation->BeginSetterStubFrame(closure_id);
249 break; 249 break;
250 case STUB: 250 case STUB:
251 translation->BeginCompiledStubFrame(); 251 translation->BeginCompiledStubFrame();
252 break; 252 break;
253 case ARGUMENTS_ADAPTOR: 253 case ARGUMENTS_ADAPTOR:
254 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); 254 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
255 break; 255 break;
256 default: 256 default:
257 UNREACHABLE(); 257 UNREACHABLE();
(...skipping 121 matching lines...)
379 RelocInfo::Mode mode, 379 RelocInfo::Mode mode,
380 LInstruction* instr) { 380 LInstruction* instr) {
381 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); 381 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
382 } 382 }
383 383
384 384
385 void LCodeGen::CallCodeGeneric(Handle<Code> code, 385 void LCodeGen::CallCodeGeneric(Handle<Code> code,
386 RelocInfo::Mode mode, 386 RelocInfo::Mode mode,
387 LInstruction* instr, 387 LInstruction* instr,
388 SafepointMode safepoint_mode) { 388 SafepointMode safepoint_mode) {
389 ASSERT(instr != NULL); 389 DCHECK(instr != NULL);
390 390
391 Assembler::BlockPoolsScope scope(masm_); 391 Assembler::BlockPoolsScope scope(masm_);
392 __ Call(code, mode); 392 __ Call(code, mode);
393 RecordSafepointWithLazyDeopt(instr, safepoint_mode); 393 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
394 394
395 if ((code->kind() == Code::BINARY_OP_IC) || 395 if ((code->kind() == Code::BINARY_OP_IC) ||
396 (code->kind() == Code::COMPARE_IC)) { 396 (code->kind() == Code::COMPARE_IC)) {
397 // Signal that we don't inline smi code before these stubs in the 397 // Signal that we don't inline smi code before these stubs in the
398 // optimizing code generator. 398 // optimizing code generator.
399 InlineSmiCheckInfo::EmitNotInlined(masm()); 399 InlineSmiCheckInfo::EmitNotInlined(masm());
400 } 400 }
401 } 401 }
402 402
403 403
404 void LCodeGen::DoCallFunction(LCallFunction* instr) { 404 void LCodeGen::DoCallFunction(LCallFunction* instr) {
405 ASSERT(ToRegister(instr->context()).is(cp)); 405 DCHECK(ToRegister(instr->context()).is(cp));
406 ASSERT(ToRegister(instr->function()).Is(x1)); 406 DCHECK(ToRegister(instr->function()).Is(x1));
407 ASSERT(ToRegister(instr->result()).Is(x0)); 407 DCHECK(ToRegister(instr->result()).Is(x0));
408 408
409 int arity = instr->arity(); 409 int arity = instr->arity();
410 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); 410 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
411 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 411 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
412 after_push_argument_ = false; 412 after_push_argument_ = false;
413 } 413 }
414 414
415 415
416 void LCodeGen::DoCallNew(LCallNew* instr) { 416 void LCodeGen::DoCallNew(LCallNew* instr) {
417 ASSERT(ToRegister(instr->context()).is(cp)); 417 DCHECK(ToRegister(instr->context()).is(cp));
418 ASSERT(instr->IsMarkedAsCall()); 418 DCHECK(instr->IsMarkedAsCall());
419 ASSERT(ToRegister(instr->constructor()).is(x1)); 419 DCHECK(ToRegister(instr->constructor()).is(x1));
420 420
421 __ Mov(x0, instr->arity()); 421 __ Mov(x0, instr->arity());
422 // No cell in x2 for construct type feedback in optimized code. 422 // No cell in x2 for construct type feedback in optimized code.
423 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex); 423 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
424 424
425 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); 425 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
426 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 426 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
427 after_push_argument_ = false; 427 after_push_argument_ = false;
428 428
429 ASSERT(ToRegister(instr->result()).is(x0)); 429 DCHECK(ToRegister(instr->result()).is(x0));
430 } 430 }
431 431
432 432
433 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 433 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
434 ASSERT(instr->IsMarkedAsCall()); 434 DCHECK(instr->IsMarkedAsCall());
435 ASSERT(ToRegister(instr->context()).is(cp)); 435 DCHECK(ToRegister(instr->context()).is(cp));
436 ASSERT(ToRegister(instr->constructor()).is(x1)); 436 DCHECK(ToRegister(instr->constructor()).is(x1));
437 437
438 __ Mov(x0, Operand(instr->arity())); 438 __ Mov(x0, Operand(instr->arity()));
439 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex); 439 __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
440 440
441 ElementsKind kind = instr->hydrogen()->elements_kind(); 441 ElementsKind kind = instr->hydrogen()->elements_kind();
442 AllocationSiteOverrideMode override_mode = 442 AllocationSiteOverrideMode override_mode =
443 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 443 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
444 ? DISABLE_ALLOCATION_SITES 444 ? DISABLE_ALLOCATION_SITES
445 : DONT_OVERRIDE; 445 : DONT_OVERRIDE;
446 446
(...skipping 20 matching lines...)
467 467
468 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); 468 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
469 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 469 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
470 __ Bind(&done); 470 __ Bind(&done);
471 } else { 471 } else {
472 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); 472 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
473 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 473 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
474 } 474 }
475 after_push_argument_ = false; 475 after_push_argument_ = false;
476 476
477 ASSERT(ToRegister(instr->result()).is(x0)); 477 DCHECK(ToRegister(instr->result()).is(x0));
478 } 478 }
479 479
480 480
481 void LCodeGen::CallRuntime(const Runtime::Function* function, 481 void LCodeGen::CallRuntime(const Runtime::Function* function,
482 int num_arguments, 482 int num_arguments,
483 LInstruction* instr, 483 LInstruction* instr,
484 SaveFPRegsMode save_doubles) { 484 SaveFPRegsMode save_doubles) {
485 ASSERT(instr != NULL); 485 DCHECK(instr != NULL);
486 486
487 __ CallRuntime(function, num_arguments, save_doubles); 487 __ CallRuntime(function, num_arguments, save_doubles);
488 488
489 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 489 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
490 } 490 }
491 491
492 492
493 void LCodeGen::LoadContextFromDeferred(LOperand* context) { 493 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
494 if (context->IsRegister()) { 494 if (context->IsRegister()) {
495 __ Mov(cp, ToRegister(context)); 495 __ Mov(cp, ToRegister(context));
(...skipping 26 matching lines...)
522 masm()->positions_recorder()->RecordPosition(position); 522 masm()->positions_recorder()->RecordPosition(position);
523 masm()->positions_recorder()->WriteRecordedPositions(); 523 masm()->positions_recorder()->WriteRecordedPositions();
524 } 524 }
525 525
526 526
527 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, 527 void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
528 SafepointMode safepoint_mode) { 528 SafepointMode safepoint_mode) {
529 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { 529 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
530 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); 530 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
531 } else { 531 } else {
532 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 532 DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
533 RecordSafepointWithRegisters( 533 RecordSafepointWithRegisters(
534 instr->pointer_map(), 0, Safepoint::kLazyDeopt); 534 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
535 } 535 }
536 } 536 }
537 537
538 538
539 void LCodeGen::RecordSafepoint(LPointerMap* pointers, 539 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
540 Safepoint::Kind kind, 540 Safepoint::Kind kind,
541 int arguments, 541 int arguments,
542 Safepoint::DeoptMode deopt_mode) { 542 Safepoint::DeoptMode deopt_mode) {
543 ASSERT(expected_safepoint_kind_ == kind); 543 DCHECK(expected_safepoint_kind_ == kind);
544 544
545 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); 545 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
546 Safepoint safepoint = safepoints_.DefineSafepoint( 546 Safepoint safepoint = safepoints_.DefineSafepoint(
547 masm(), kind, arguments, deopt_mode); 547 masm(), kind, arguments, deopt_mode);
548 548
549 for (int i = 0; i < operands->length(); i++) { 549 for (int i = 0; i < operands->length(); i++) {
550 LOperand* pointer = operands->at(i); 550 LOperand* pointer = operands->at(i);
551 if (pointer->IsStackSlot()) { 551 if (pointer->IsStackSlot()) {
552 safepoint.DefinePointerSlot(pointer->index(), zone()); 552 safepoint.DefinePointerSlot(pointer->index(), zone());
553 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { 553 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
(...skipping 21 matching lines...)
575 575
576 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, 576 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
577 int arguments, 577 int arguments,
578 Safepoint::DeoptMode deopt_mode) { 578 Safepoint::DeoptMode deopt_mode) {
579 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode); 579 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
580 } 580 }
581 581
582 582
583 bool LCodeGen::GenerateCode() { 583 bool LCodeGen::GenerateCode() {
584 LPhase phase("Z_Code generation", chunk()); 584 LPhase phase("Z_Code generation", chunk());
585 ASSERT(is_unused()); 585 DCHECK(is_unused());
586 status_ = GENERATING; 586 status_ = GENERATING;
587 587
588 // Open a frame scope to indicate that there is a frame on the stack. The 588 // Open a frame scope to indicate that there is a frame on the stack. The
589 // NONE indicates that the scope shouldn't actually generate code to set up 589 // NONE indicates that the scope shouldn't actually generate code to set up
590 // the frame (that is done in GeneratePrologue). 590 // the frame (that is done in GeneratePrologue).
591 FrameScope frame_scope(masm_, StackFrame::NONE); 591 FrameScope frame_scope(masm_, StackFrame::NONE);
592 592
593 return GeneratePrologue() && 593 return GeneratePrologue() &&
594 GenerateBody() && 594 GenerateBody() &&
595 GenerateDeferredCode() && 595 GenerateDeferredCode() &&
596 GenerateDeoptJumpTable() && 596 GenerateDeoptJumpTable() &&
597 GenerateSafepointTable(); 597 GenerateSafepointTable();
598 } 598 }
599 599
600 600
601 void LCodeGen::SaveCallerDoubles() { 601 void LCodeGen::SaveCallerDoubles() {
602 ASSERT(info()->saves_caller_doubles()); 602 DCHECK(info()->saves_caller_doubles());
603 ASSERT(NeedsEagerFrame()); 603 DCHECK(NeedsEagerFrame());
604 Comment(";;; Save clobbered callee double registers"); 604 Comment(";;; Save clobbered callee double registers");
605 BitVector* doubles = chunk()->allocated_double_registers(); 605 BitVector* doubles = chunk()->allocated_double_registers();
606 BitVector::Iterator iterator(doubles); 606 BitVector::Iterator iterator(doubles);
607 int count = 0; 607 int count = 0;
608 while (!iterator.Done()) { 608 while (!iterator.Done()) {
609 // TODO(all): Is this supposed to save just the callee-saved doubles? It 609 // TODO(all): Is this supposed to save just the callee-saved doubles? It
610 // looks like it's saving all of them. 610 // looks like it's saving all of them.
611 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current()); 611 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
612 __ Poke(value, count * kDoubleSize); 612 __ Poke(value, count * kDoubleSize);
613 iterator.Advance(); 613 iterator.Advance();
614 count++; 614 count++;
615 } 615 }
616 } 616 }
617 617
618 618
619 void LCodeGen::RestoreCallerDoubles() { 619 void LCodeGen::RestoreCallerDoubles() {
620 ASSERT(info()->saves_caller_doubles()); 620 DCHECK(info()->saves_caller_doubles());
621 ASSERT(NeedsEagerFrame()); 621 DCHECK(NeedsEagerFrame());
622 Comment(";;; Restore clobbered callee double registers"); 622 Comment(";;; Restore clobbered callee double registers");
623 BitVector* doubles = chunk()->allocated_double_registers(); 623 BitVector* doubles = chunk()->allocated_double_registers();
624 BitVector::Iterator iterator(doubles); 624 BitVector::Iterator iterator(doubles);
625 int count = 0; 625 int count = 0;
626 while (!iterator.Done()) { 626 while (!iterator.Done()) {
627 // TODO(all): Is this supposed to restore just the callee-saved doubles? It 627 // TODO(all): Is this supposed to restore just the callee-saved doubles? It
628 // looks like it's restoring all of them. 628 // looks like it's restoring all of them.
629 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current()); 629 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
630 __ Peek(value, count * kDoubleSize); 630 __ Peek(value, count * kDoubleSize);
631 iterator.Advance(); 631 iterator.Advance();
632 count++; 632 count++;
633 } 633 }
634 } 634 }
635 635
636 636
637 bool LCodeGen::GeneratePrologue() { 637 bool LCodeGen::GeneratePrologue() {
638 ASSERT(is_generating()); 638 DCHECK(is_generating());
639 639
640 if (info()->IsOptimizing()) { 640 if (info()->IsOptimizing()) {
641 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 641 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
642 642
643 // TODO(all): Add support for stop_t FLAG in DEBUG mode. 643 // TODO(all): Add support for stop_t FLAG in DEBUG mode.
644 644
645 // Sloppy mode functions and builtins need to replace the receiver with the 645 // Sloppy mode functions and builtins need to replace the receiver with the
646 // global proxy when called as functions (without an explicit receiver 646 // global proxy when called as functions (without an explicit receiver
647 // object). 647 // object).
648 if (info_->this_has_uses() && 648 if (info_->this_has_uses() &&
649 info_->strict_mode() == SLOPPY && 649 info_->strict_mode() == SLOPPY &&
650 !info_->is_native()) { 650 !info_->is_native()) {
651 Label ok; 651 Label ok;
652 int receiver_offset = info_->scope()->num_parameters() * kXRegSize; 652 int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
653 __ Peek(x10, receiver_offset); 653 __ Peek(x10, receiver_offset);
654 __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok); 654 __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
655 655
656 __ Ldr(x10, GlobalObjectMemOperand()); 656 __ Ldr(x10, GlobalObjectMemOperand());
657 __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset)); 657 __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
658 __ Poke(x10, receiver_offset); 658 __ Poke(x10, receiver_offset);
659 659
660 __ Bind(&ok); 660 __ Bind(&ok);
661 } 661 }
662 } 662 }
663 663
664 ASSERT(__ StackPointer().Is(jssp)); 664 DCHECK(__ StackPointer().Is(jssp));
665 info()->set_prologue_offset(masm_->pc_offset()); 665 info()->set_prologue_offset(masm_->pc_offset());
666 if (NeedsEagerFrame()) { 666 if (NeedsEagerFrame()) {
667 if (info()->IsStub()) { 667 if (info()->IsStub()) {
668 __ StubPrologue(); 668 __ StubPrologue();
669 } else { 669 } else {
670 __ Prologue(info()->IsCodePreAgingActive()); 670 __ Prologue(info()->IsCodePreAgingActive());
671 } 671 }
672 frame_is_built_ = true; 672 frame_is_built_ = true;
673 info_->AddNoFrameRange(0, masm_->pc_offset()); 673 info_->AddNoFrameRange(0, masm_->pc_offset());
674 } 674 }
(...skipping 72 matching lines...)
747 void LCodeGen::GenerateOsrPrologue() { 747 void LCodeGen::GenerateOsrPrologue() {
748 // Generate the OSR entry prologue at the first unknown OSR value, or if there 748 // Generate the OSR entry prologue at the first unknown OSR value, or if there
749 // are none, at the OSR entrypoint instruction. 749 // are none, at the OSR entrypoint instruction.
750 if (osr_pc_offset_ >= 0) return; 750 if (osr_pc_offset_ >= 0) return;
751 751
752 osr_pc_offset_ = masm()->pc_offset(); 752 osr_pc_offset_ = masm()->pc_offset();
753 753
754 // Adjust the frame size, subsuming the unoptimized frame into the 754 // Adjust the frame size, subsuming the unoptimized frame into the
755 // optimized frame. 755 // optimized frame.
756 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); 756 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
757 ASSERT(slots >= 0); 757 DCHECK(slots >= 0);
758 __ Claim(slots); 758 __ Claim(slots);
759 } 759 }
760 760
761 761
762 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { 762 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
763 if (instr->IsCall()) { 763 if (instr->IsCall()) {
764 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 764 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
765 } 765 }
766 if (!instr->IsLazyBailout() && !instr->IsGap()) { 766 if (!instr->IsLazyBailout() && !instr->IsGap()) {
767 safepoints_.BumpLastLazySafepointIndex(); 767 safepoints_.BumpLastLazySafepointIndex();
768 } 768 }
769 } 769 }
770 770
771 771
772 bool LCodeGen::GenerateDeferredCode() { 772 bool LCodeGen::GenerateDeferredCode() {
773 ASSERT(is_generating()); 773 DCHECK(is_generating());
774 if (deferred_.length() > 0) { 774 if (deferred_.length() > 0) {
775 for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) { 775 for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
776 LDeferredCode* code = deferred_[i]; 776 LDeferredCode* code = deferred_[i];
777 777
778 HValue* value = 778 HValue* value =
779 instructions_->at(code->instruction_index())->hydrogen_value(); 779 instructions_->at(code->instruction_index())->hydrogen_value();
780 RecordAndWritePosition( 780 RecordAndWritePosition(
781 chunk()->graph()->SourcePositionToScriptPosition(value->position())); 781 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
782 782
783 Comment(";;; <@%d,#%d> " 783 Comment(";;; <@%d,#%d> "
784 "-------------------- Deferred %s --------------------", 784 "-------------------- Deferred %s --------------------",
785 code->instruction_index(), 785 code->instruction_index(),
786 code->instr()->hydrogen_value()->id(), 786 code->instr()->hydrogen_value()->id(),
787 code->instr()->Mnemonic()); 787 code->instr()->Mnemonic());
788 788
789 __ Bind(code->entry()); 789 __ Bind(code->entry());
790 790
791 if (NeedsDeferredFrame()) { 791 if (NeedsDeferredFrame()) {
792 Comment(";;; Build frame"); 792 Comment(";;; Build frame");
793 ASSERT(!frame_is_built_); 793 DCHECK(!frame_is_built_);
794 ASSERT(info()->IsStub()); 794 DCHECK(info()->IsStub());
795 frame_is_built_ = true; 795 frame_is_built_ = true;
796 __ Push(lr, fp, cp); 796 __ Push(lr, fp, cp);
797 __ Mov(fp, Smi::FromInt(StackFrame::STUB)); 797 __ Mov(fp, Smi::FromInt(StackFrame::STUB));
798 __ Push(fp); 798 __ Push(fp);
799 __ Add(fp, __ StackPointer(), 799 __ Add(fp, __ StackPointer(),
800 StandardFrameConstants::kFixedFrameSizeFromFp); 800 StandardFrameConstants::kFixedFrameSizeFromFp);
801 Comment(";;; Deferred code"); 801 Comment(";;; Deferred code");
802 } 802 }
803 803
804 code->Generate(); 804 code->Generate();
805 805
806 if (NeedsDeferredFrame()) { 806 if (NeedsDeferredFrame()) {
807 Comment(";;; Destroy frame"); 807 Comment(";;; Destroy frame");
808 ASSERT(frame_is_built_); 808 DCHECK(frame_is_built_);
809 __ Pop(xzr, cp, fp, lr); 809 __ Pop(xzr, cp, fp, lr);
810 frame_is_built_ = false; 810 frame_is_built_ = false;
811 } 811 }
812 812
813 __ B(code->exit()); 813 __ B(code->exit());
814 } 814 }
815 } 815 }
816 816
817 // Force constant pool emission at the end of the deferred code to make 817 // Force constant pool emission at the end of the deferred code to make
818 // sure that no constant pools are emitted after deferred code because 818 // sure that no constant pools are emitted after deferred code because
(...skipping 31 matching lines...)
850 // Second-level deopt table entries are contiguous and small, so instead 850 // Second-level deopt table entries are contiguous and small, so instead
851 // of loading the full, absolute address of each one, load the base 851 // of loading the full, absolute address of each one, load the base
852 // address and add an immediate offset. 852 // address and add an immediate offset.
853 __ Mov(entry_offset, entry - base); 853 __ Mov(entry_offset, entry - base);
854 854
855 // The last entry can fall through into `call_deopt_entry`, avoiding a 855 // The last entry can fall through into `call_deopt_entry`, avoiding a
856 // branch. 856 // branch.
857 bool last_entry = (i + 1) == length; 857 bool last_entry = (i + 1) == length;
858 858
859 if (deopt_jump_table_[i]->needs_frame) { 859 if (deopt_jump_table_[i]->needs_frame) {
860 ASSERT(!info()->saves_caller_doubles()); 860 DCHECK(!info()->saves_caller_doubles());
861 if (!needs_frame.is_bound()) { 861 if (!needs_frame.is_bound()) {
862 // This variant of deopt can only be used with stubs. Since we don't 862 // This variant of deopt can only be used with stubs. Since we don't
863 // have a function pointer to install in the stack frame that we're 863 // have a function pointer to install in the stack frame that we're
864 // building, install a special marker there instead. 864 // building, install a special marker there instead.
865 ASSERT(info()->IsStub()); 865 DCHECK(info()->IsStub());
866 866
867 UseScratchRegisterScope temps(masm()); 867 UseScratchRegisterScope temps(masm());
868 Register stub_marker = temps.AcquireX(); 868 Register stub_marker = temps.AcquireX();
869 __ Bind(&needs_frame); 869 __ Bind(&needs_frame);
870 __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB)); 870 __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
871 __ Push(lr, fp, cp, stub_marker); 871 __ Push(lr, fp, cp, stub_marker);
872 __ Add(fp, __ StackPointer(), 2 * kPointerSize); 872 __ Add(fp, __ StackPointer(), 2 * kPointerSize);
873 if (!last_entry) __ B(&call_deopt_entry); 873 if (!last_entry) __ B(&call_deopt_entry);
874 } else { 874 } else {
875 // Reuse the existing needs_frame code. 875 // Reuse the existing needs_frame code.
876 __ B(&needs_frame); 876 __ B(&needs_frame);
877 } 877 }
878 } else if (info()->saves_caller_doubles()) { 878 } else if (info()->saves_caller_doubles()) {
879 ASSERT(info()->IsStub()); 879 DCHECK(info()->IsStub());
880 if (!restore_caller_doubles.is_bound()) { 880 if (!restore_caller_doubles.is_bound()) {
881 __ Bind(&restore_caller_doubles); 881 __ Bind(&restore_caller_doubles);
882 RestoreCallerDoubles(); 882 RestoreCallerDoubles();
883 if (!last_entry) __ B(&call_deopt_entry); 883 if (!last_entry) __ B(&call_deopt_entry);
884 } else { 884 } else {
885 // Reuse the existing restore_caller_doubles code. 885 // Reuse the existing restore_caller_doubles code.
886 __ B(&restore_caller_doubles); 886 __ B(&restore_caller_doubles);
887 } 887 }
888 } else { 888 } else {
889 // There is nothing special to do, so just continue to the second-level 889 // There is nothing special to do, so just continue to the second-level
(...skipping 18 matching lines...)
908 masm()->CheckConstPool(true, false); 908 masm()->CheckConstPool(true, false);
909 909
910 // The deoptimization jump table is the last part of the instruction 910 // The deoptimization jump table is the last part of the instruction
911 // sequence. Mark the generated code as done unless we bailed out. 911 // sequence. Mark the generated code as done unless we bailed out.
912 if (!is_aborted()) status_ = DONE; 912 if (!is_aborted()) status_ = DONE;
913 return !is_aborted(); 913 return !is_aborted();
914 } 914 }
915 915
916 916
917 bool LCodeGen::GenerateSafepointTable() { 917 bool LCodeGen::GenerateSafepointTable() {
918 ASSERT(is_done()); 918 DCHECK(is_done());
919 // We do not know how much data will be emitted for the safepoint table, so 919 // We do not know how much data will be emitted for the safepoint table, so
920 // force emission of the veneer pool. 920 // force emission of the veneer pool.
921 masm()->CheckVeneerPool(true, true); 921 masm()->CheckVeneerPool(true, true);
922 safepoints_.Emit(masm(), GetStackSlotCount()); 922 safepoints_.Emit(masm(), GetStackSlotCount());
923 return !is_aborted(); 923 return !is_aborted();
924 } 924 }
925 925
926 926
927 void LCodeGen::FinishCode(Handle<Code> code) { 927 void LCodeGen::FinishCode(Handle<Code> code) {
928 ASSERT(is_done()); 928 DCHECK(is_done());
929 code->set_stack_slots(GetStackSlotCount()); 929 code->set_stack_slots(GetStackSlotCount());
930 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); 930 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
931 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code); 931 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
932 PopulateDeoptimizationData(code); 932 PopulateDeoptimizationData(code);
933 } 933 }
934 934
935 935
936 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 936 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
937 int length = deoptimizations_.length(); 937 int length = deoptimizations_.length();
938 if (length == 0) return; 938 if (length == 0) return;
(...skipping 34 matching lines...)
973 data->SetArgumentsStackHeight(i, 973 data->SetArgumentsStackHeight(i,
974 Smi::FromInt(env->arguments_stack_height())); 974 Smi::FromInt(env->arguments_stack_height()));
975 data->SetPc(i, Smi::FromInt(env->pc_offset())); 975 data->SetPc(i, Smi::FromInt(env->pc_offset()));
976 } 976 }
977 977
978 code->set_deoptimization_data(*data); 978 code->set_deoptimization_data(*data);
979 } 979 }
980 980
981 981
982 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { 982 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
983 ASSERT(deoptimization_literals_.length() == 0); 983 DCHECK(deoptimization_literals_.length() == 0);
984 984
985 const ZoneList<Handle<JSFunction> >* inlined_closures = 985 const ZoneList<Handle<JSFunction> >* inlined_closures =
986 chunk()->inlined_closures(); 986 chunk()->inlined_closures();
987 987
988 for (int i = 0, length = inlined_closures->length(); i < length; i++) { 988 for (int i = 0, length = inlined_closures->length(); i < length; i++) {
989 DefineDeoptimizationLiteral(inlined_closures->at(i)); 989 DefineDeoptimizationLiteral(inlined_closures->at(i));
990 } 990 }
991 991
992 inlined_function_count_ = deoptimization_literals_.length(); 992 inlined_function_count_ = deoptimization_literals_.length();
993 } 993 }
994 994
995 995
996 void LCodeGen::DeoptimizeBranch( 996 void LCodeGen::DeoptimizeBranch(
997 LEnvironment* environment, 997 LEnvironment* environment,
998 BranchType branch_type, Register reg, int bit, 998 BranchType branch_type, Register reg, int bit,
999 Deoptimizer::BailoutType* override_bailout_type) { 999 Deoptimizer::BailoutType* override_bailout_type) {
1000 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 1000 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
1001 Deoptimizer::BailoutType bailout_type = 1001 Deoptimizer::BailoutType bailout_type =
1002 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; 1002 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
1003 1003
1004 if (override_bailout_type != NULL) { 1004 if (override_bailout_type != NULL) {
1005 bailout_type = *override_bailout_type; 1005 bailout_type = *override_bailout_type;
1006 } 1006 }
1007 1007
1008 ASSERT(environment->HasBeenRegistered()); 1008 DCHECK(environment->HasBeenRegistered());
1009 ASSERT(info()->IsOptimizing() || info()->IsStub()); 1009 DCHECK(info()->IsOptimizing() || info()->IsStub());
1010 int id = environment->deoptimization_index(); 1010 int id = environment->deoptimization_index();
1011 Address entry = 1011 Address entry =
1012 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 1012 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
1013 1013
1014 if (entry == NULL) { 1014 if (entry == NULL) {
1015 Abort(kBailoutWasNotPrepared); 1015 Abort(kBailoutWasNotPrepared);
1016 } 1016 }
1017 1017
1018 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { 1018 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
1019 Label not_zero; 1019 Label not_zero;
1020 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); 1020 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
1021 1021
1022 __ Push(x0, x1, x2); 1022 __ Push(x0, x1, x2);
1023 __ Mrs(x2, NZCV); 1023 __ Mrs(x2, NZCV);
1024 __ Mov(x0, count); 1024 __ Mov(x0, count);
1025 __ Ldr(w1, MemOperand(x0)); 1025 __ Ldr(w1, MemOperand(x0));
1026 __ Subs(x1, x1, 1); 1026 __ Subs(x1, x1, 1);
1027 __ B(gt, &not_zero); 1027 __ B(gt, &not_zero);
1028 __ Mov(w1, FLAG_deopt_every_n_times); 1028 __ Mov(w1, FLAG_deopt_every_n_times);
1029 __ Str(w1, MemOperand(x0)); 1029 __ Str(w1, MemOperand(x0));
1030 __ Pop(x2, x1, x0); 1030 __ Pop(x2, x1, x0);
1031 ASSERT(frame_is_built_); 1031 DCHECK(frame_is_built_);
1032 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 1032 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1033 __ Unreachable(); 1033 __ Unreachable();
1034 1034
1035 __ Bind(&not_zero); 1035 __ Bind(&not_zero);
1036 __ Str(w1, MemOperand(x0)); 1036 __ Str(w1, MemOperand(x0));
1037 __ Msr(NZCV, x2); 1037 __ Msr(NZCV, x2);
1038 __ Pop(x2, x1, x0); 1038 __ Pop(x2, x1, x0);
1039 } 1039 }
1040 1040
1041 if (info()->ShouldTrapOnDeopt()) { 1041 if (info()->ShouldTrapOnDeopt()) {
1042 Label dont_trap; 1042 Label dont_trap;
1043 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); 1043 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
1044 __ Debug("trap_on_deopt", __LINE__, BREAK); 1044 __ Debug("trap_on_deopt", __LINE__, BREAK);
1045 __ Bind(&dont_trap); 1045 __ Bind(&dont_trap);
1046 } 1046 }
1047 1047
1048 ASSERT(info()->IsStub() || frame_is_built_); 1048 DCHECK(info()->IsStub() || frame_is_built_);
1049 // Go through jump table if we need to build frame, or restore caller doubles. 1049 // Go through jump table if we need to build frame, or restore caller doubles.
1050 if (branch_type == always && 1050 if (branch_type == always &&
1051 frame_is_built_ && !info()->saves_caller_doubles()) { 1051 frame_is_built_ && !info()->saves_caller_doubles()) {
1052 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 1052 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
1053 } else { 1053 } else {
1054 // We often have several deopts to the same entry, reuse the last 1054 // We often have several deopts to the same entry, reuse the last
1055 // jump entry if this is the case. 1055 // jump entry if this is the case.
1056 if (deopt_jump_table_.is_empty() || 1056 if (deopt_jump_table_.is_empty() ||
1057 (deopt_jump_table_.last()->address != entry) || 1057 (deopt_jump_table_.last()->address != entry) ||
1058 (deopt_jump_table_.last()->bailout_type != bailout_type) || 1058 (deopt_jump_table_.last()->bailout_type != bailout_type) ||
(...skipping 86 matching lines...)
1145 1145
1146 1146
1147 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { 1147 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
1148 if (!info()->IsStub()) { 1148 if (!info()->IsStub()) {
1149 // Ensure that we have enough space after the previous lazy-bailout 1149 // Ensure that we have enough space after the previous lazy-bailout
1150 // instruction for patching the code here. 1150 // instruction for patching the code here.
1151 intptr_t current_pc = masm()->pc_offset(); 1151 intptr_t current_pc = masm()->pc_offset();
1152 1152
1153 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { 1153 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
1154 ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; 1154 ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1155 ASSERT((padding_size % kInstructionSize) == 0); 1155 DCHECK((padding_size % kInstructionSize) == 0);
1156 InstructionAccurateScope instruction_accurate( 1156 InstructionAccurateScope instruction_accurate(
1157 masm(), padding_size / kInstructionSize); 1157 masm(), padding_size / kInstructionSize);
1158 1158
1159 while (padding_size > 0) { 1159 while (padding_size > 0) {
1160 __ nop(); 1160 __ nop();
1161 padding_size -= kInstructionSize; 1161 padding_size -= kInstructionSize;
1162 } 1162 }
1163 } 1163 }
1164 } 1164 }
1165 last_lazy_deopt_pc_ = masm()->pc_offset(); 1165 last_lazy_deopt_pc_ = masm()->pc_offset();
1166 } 1166 }
1167 1167
1168 1168
1169 Register LCodeGen::ToRegister(LOperand* op) const { 1169 Register LCodeGen::ToRegister(LOperand* op) const {
1170 // TODO(all): support zero register results, as ToRegister32. 1170 // TODO(all): support zero register results, as ToRegister32.
1171 ASSERT((op != NULL) && op->IsRegister()); 1171 DCHECK((op != NULL) && op->IsRegister());
1172 return Register::FromAllocationIndex(op->index()); 1172 return Register::FromAllocationIndex(op->index());
1173 } 1173 }
1174 1174
1175 1175
1176 Register LCodeGen::ToRegister32(LOperand* op) const { 1176 Register LCodeGen::ToRegister32(LOperand* op) const {
1177 ASSERT(op != NULL); 1177 DCHECK(op != NULL);
1178 if (op->IsConstantOperand()) { 1178 if (op->IsConstantOperand()) {
1179 // If this is a constant operand, the result must be the zero register. 1179 // If this is a constant operand, the result must be the zero register.
1180 ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0); 1180 DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
1181 return wzr; 1181 return wzr;
1182 } else { 1182 } else {
1183 return ToRegister(op).W(); 1183 return ToRegister(op).W();
1184 } 1184 }
1185 } 1185 }
1186 1186
1187 1187
1188 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { 1188 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
1189 HConstant* constant = chunk_->LookupConstant(op); 1189 HConstant* constant = chunk_->LookupConstant(op);
1190 return Smi::FromInt(constant->Integer32Value()); 1190 return Smi::FromInt(constant->Integer32Value());
1191 } 1191 }
1192 1192
1193 1193
1194 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 1194 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
1195 ASSERT((op != NULL) && op->IsDoubleRegister()); 1195 DCHECK((op != NULL) && op->IsDoubleRegister());
1196 return DoubleRegister::FromAllocationIndex(op->index()); 1196 return DoubleRegister::FromAllocationIndex(op->index());
1197 } 1197 }
1198 1198
1199 1199
1200 Operand LCodeGen::ToOperand(LOperand* op) { 1200 Operand LCodeGen::ToOperand(LOperand* op) {
1201 ASSERT(op != NULL); 1201 DCHECK(op != NULL);
1202 if (op->IsConstantOperand()) { 1202 if (op->IsConstantOperand()) {
1203 LConstantOperand* const_op = LConstantOperand::cast(op); 1203 LConstantOperand* const_op = LConstantOperand::cast(op);
1204 HConstant* constant = chunk()->LookupConstant(const_op); 1204 HConstant* constant = chunk()->LookupConstant(const_op);
1205 Representation r = chunk_->LookupLiteralRepresentation(const_op); 1205 Representation r = chunk_->LookupLiteralRepresentation(const_op);
1206 if (r.IsSmi()) { 1206 if (r.IsSmi()) {
1207 ASSERT(constant->HasSmiValue()); 1207 DCHECK(constant->HasSmiValue());
1208 return Operand(Smi::FromInt(constant->Integer32Value())); 1208 return Operand(Smi::FromInt(constant->Integer32Value()));
1209 } else if (r.IsInteger32()) { 1209 } else if (r.IsInteger32()) {
1210 ASSERT(constant->HasInteger32Value()); 1210 DCHECK(constant->HasInteger32Value());
1211 return Operand(constant->Integer32Value()); 1211 return Operand(constant->Integer32Value());
1212 } else if (r.IsDouble()) { 1212 } else if (r.IsDouble()) {
1213 Abort(kToOperandUnsupportedDoubleImmediate); 1213 Abort(kToOperandUnsupportedDoubleImmediate);
1214 } 1214 }
1215 ASSERT(r.IsTagged()); 1215 DCHECK(r.IsTagged());
1216 return Operand(constant->handle(isolate())); 1216 return Operand(constant->handle(isolate()));
1217 } else if (op->IsRegister()) { 1217 } else if (op->IsRegister()) {
1218 return Operand(ToRegister(op)); 1218 return Operand(ToRegister(op));
1219 } else if (op->IsDoubleRegister()) { 1219 } else if (op->IsDoubleRegister()) {
1220 Abort(kToOperandIsDoubleRegisterUnimplemented); 1220 Abort(kToOperandIsDoubleRegisterUnimplemented);
1221 return Operand(0); 1221 return Operand(0);
1222 } 1222 }
1223 // Stack slots not implemented, use ToMemOperand instead. 1223 // Stack slots not implemented, use ToMemOperand instead.
1224 UNREACHABLE(); 1224 UNREACHABLE();
1225 return Operand(0); 1225 return Operand(0);
1226 } 1226 }
1227 1227
1228 1228
1229 Operand LCodeGen::ToOperand32I(LOperand* op) { 1229 Operand LCodeGen::ToOperand32I(LOperand* op) {
1230 return ToOperand32(op, SIGNED_INT32); 1230 return ToOperand32(op, SIGNED_INT32);
1231 } 1231 }
1232 1232
1233 1233
1234 Operand LCodeGen::ToOperand32U(LOperand* op) { 1234 Operand LCodeGen::ToOperand32U(LOperand* op) {
1235 return ToOperand32(op, UNSIGNED_INT32); 1235 return ToOperand32(op, UNSIGNED_INT32);
1236 } 1236 }
1237 1237
1238 1238
1239 Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) { 1239 Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
1240 ASSERT(op != NULL); 1240 DCHECK(op != NULL);
1241 if (op->IsRegister()) { 1241 if (op->IsRegister()) {
1242 return Operand(ToRegister32(op)); 1242 return Operand(ToRegister32(op));
1243 } else if (op->IsConstantOperand()) { 1243 } else if (op->IsConstantOperand()) {
1244 LConstantOperand* const_op = LConstantOperand::cast(op); 1244 LConstantOperand* const_op = LConstantOperand::cast(op);
1245 HConstant* constant = chunk()->LookupConstant(const_op); 1245 HConstant* constant = chunk()->LookupConstant(const_op);
1246 Representation r = chunk_->LookupLiteralRepresentation(const_op); 1246 Representation r = chunk_->LookupLiteralRepresentation(const_op);
1247 if (r.IsInteger32()) { 1247 if (r.IsInteger32()) {
1248 ASSERT(constant->HasInteger32Value()); 1248 DCHECK(constant->HasInteger32Value());
1249 return (signedness == SIGNED_INT32) 1249 return (signedness == SIGNED_INT32)
1250 ? Operand(constant->Integer32Value()) 1250 ? Operand(constant->Integer32Value())
1251 : Operand(static_cast<uint32_t>(constant->Integer32Value())); 1251 : Operand(static_cast<uint32_t>(constant->Integer32Value()));
1252 } else { 1252 } else {
1253 // Other constants not implemented. 1253 // Other constants not implemented.
1254 Abort(kToOperand32UnsupportedImmediate); 1254 Abort(kToOperand32UnsupportedImmediate);
1255 } 1255 }
1256 } 1256 }
1257 // Other cases are not implemented. 1257 // Other cases are not implemented.
1258 UNREACHABLE(); 1258 UNREACHABLE();
1259 return Operand(0); 1259 return Operand(0);
1260 } 1260 }
1261 1261
1262 1262
1263 static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) { 1263 static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
1264 ASSERT(index < 0); 1264 DCHECK(index < 0);
1265 return -(index + 1) * kPointerSize; 1265 return -(index + 1) * kPointerSize;
1266 } 1266 }
1267 1267
1268 1268
1269 MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const { 1269 MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
1270 ASSERT(op != NULL); 1270 DCHECK(op != NULL);
1271 ASSERT(!op->IsRegister()); 1271 DCHECK(!op->IsRegister());
1272 ASSERT(!op->IsDoubleRegister()); 1272 DCHECK(!op->IsDoubleRegister());
1273 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); 1273 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
1274 if (NeedsEagerFrame()) { 1274 if (NeedsEagerFrame()) {
1275 int fp_offset = StackSlotOffset(op->index()); 1275 int fp_offset = StackSlotOffset(op->index());
1276 if (op->index() >= 0) { 1276 if (op->index() >= 0) {
1277 // Loads and stores have a bigger reach in positive offset than negative. 1277 // Loads and stores have a bigger reach in positive offset than negative.
1278 // When the load or the store can't be done in one instruction via fp 1278 // When the load or the store can't be done in one instruction via fp
1279 // (too big negative offset), we try to access via jssp (positive offset). 1279 // (too big negative offset), we try to access via jssp (positive offset).
1280 // We can reference a stack slot from jssp only if jssp references the end 1280 // We can reference a stack slot from jssp only if jssp references the end
1281 // of the stack slots. It's not the case when: 1281 // of the stack slots. It's not the case when:
1282 // - stack_mode != kCanUseStackPointer: this is the case when a deferred 1282 // - stack_mode != kCanUseStackPointer: this is the case when a deferred
1283 // code saved the registers. 1283 // code saved the registers.
(...skipping 18 matching lines...)
1302 // Retrieve parameter without eager stack-frame relative to the 1302 // Retrieve parameter without eager stack-frame relative to the
1303 // stack-pointer. 1303 // stack-pointer.
1304 return MemOperand(masm()->StackPointer(), 1304 return MemOperand(masm()->StackPointer(),
1305 ArgumentsOffsetWithoutFrame(op->index())); 1305 ArgumentsOffsetWithoutFrame(op->index()));
1306 } 1306 }
1307 } 1307 }
1308 1308
1309 1309
1310 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { 1310 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
1311 HConstant* constant = chunk_->LookupConstant(op); 1311 HConstant* constant = chunk_->LookupConstant(op);
1312 ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); 1312 DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
1313 return constant->handle(isolate()); 1313 return constant->handle(isolate());
1314 } 1314 }
1315 1315
1316 1316
1317 template<class LI> 1317 template<class LI>
1318 Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info, 1318 Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info,
1319 IntegerSignedness signedness) { 1319 IntegerSignedness signedness) {
1320 if (shift_info->shift() == NO_SHIFT) { 1320 if (shift_info->shift() == NO_SHIFT) {
1321 return (signedness == SIGNED_INT32) ? ToOperand32I(right) 1321 return (signedness == SIGNED_INT32) ? ToOperand32I(right)
1322 : ToOperand32U(right); 1322 : ToOperand32U(right);
(...skipping 17 matching lines...)
1340 1340
1341 1341
1342 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { 1342 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
1343 HConstant* constant = chunk_->LookupConstant(op); 1343 HConstant* constant = chunk_->LookupConstant(op);
1344 return constant->Integer32Value(); 1344 return constant->Integer32Value();
1345 } 1345 }
1346 1346
1347 1347
1348 double LCodeGen::ToDouble(LConstantOperand* op) const { 1348 double LCodeGen::ToDouble(LConstantOperand* op) const {
1349 HConstant* constant = chunk_->LookupConstant(op); 1349 HConstant* constant = chunk_->LookupConstant(op);
1350 ASSERT(constant->HasDoubleValue()); 1350 DCHECK(constant->HasDoubleValue());
1351 return constant->DoubleValue(); 1351 return constant->DoubleValue();
1352 } 1352 }
1353 1353
1354 1354
1355 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { 1355 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1356 Condition cond = nv; 1356 Condition cond = nv;
1357 switch (op) { 1357 switch (op) {
1358 case Token::EQ: 1358 case Token::EQ:
1359 case Token::EQ_STRICT: 1359 case Token::EQ_STRICT:
1360 cond = eq; 1360 cond = eq;
(...skipping 39 matching lines...)
1400 branch.Emit(chunk_->GetAssemblyLabel(left_block)); 1400 branch.Emit(chunk_->GetAssemblyLabel(left_block));
1401 } else { 1401 } else {
1402 branch.Emit(chunk_->GetAssemblyLabel(left_block)); 1402 branch.Emit(chunk_->GetAssemblyLabel(left_block));
1403 __ B(chunk_->GetAssemblyLabel(right_block)); 1403 __ B(chunk_->GetAssemblyLabel(right_block));
1404 } 1404 }
1405 } 1405 }
1406 1406
1407 1407
1408 template<class InstrType> 1408 template<class InstrType>
1409 void LCodeGen::EmitBranch(InstrType instr, Condition condition) { 1409 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
1410 ASSERT((condition != al) && (condition != nv)); 1410 DCHECK((condition != al) && (condition != nv));
1411 BranchOnCondition branch(this, condition); 1411 BranchOnCondition branch(this, condition);
1412 EmitBranchGeneric(instr, branch); 1412 EmitBranchGeneric(instr, branch);
1413 } 1413 }
1414 1414
1415 1415
1416 template<class InstrType> 1416 template<class InstrType>
1417 void LCodeGen::EmitCompareAndBranch(InstrType instr, 1417 void LCodeGen::EmitCompareAndBranch(InstrType instr,
1418 Condition condition, 1418 Condition condition,
1419 const Register& lhs, 1419 const Register& lhs,
1420 const Operand& rhs) { 1420 const Operand& rhs) {
1421 ASSERT((condition != al) && (condition != nv)); 1421 DCHECK((condition != al) && (condition != nv));
1422 CompareAndBranch branch(this, condition, lhs, rhs); 1422 CompareAndBranch branch(this, condition, lhs, rhs);
1423 EmitBranchGeneric(instr, branch); 1423 EmitBranchGeneric(instr, branch);
1424 } 1424 }
1425 1425
1426 1426
1427 template<class InstrType> 1427 template<class InstrType>
1428 void LCodeGen::EmitTestAndBranch(InstrType instr, 1428 void LCodeGen::EmitTestAndBranch(InstrType instr,
1429 Condition condition, 1429 Condition condition,
1430 const Register& value, 1430 const Register& value,
1431 uint64_t mask) { 1431 uint64_t mask) {
1432 ASSERT((condition != al) && (condition != nv)); 1432 DCHECK((condition != al) && (condition != nv));
1433 TestAndBranch branch(this, condition, value, mask); 1433 TestAndBranch branch(this, condition, value, mask);
1434 EmitBranchGeneric(instr, branch); 1434 EmitBranchGeneric(instr, branch);
1435 } 1435 }
1436 1436
1437 1437
1438 template<class InstrType> 1438 template<class InstrType>
1439 void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr, 1439 void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
1440 const FPRegister& value, 1440 const FPRegister& value,
1441 const FPRegister& scratch) { 1441 const FPRegister& scratch) {
1442 BranchIfNonZeroNumber branch(this, value, scratch); 1442 BranchIfNonZeroNumber branch(this, value, scratch);
(...skipping 66 matching lines...)
1509 } 1509 }
1510 1510
1511 1511
1512 void LCodeGen::DoAddE(LAddE* instr) { 1512 void LCodeGen::DoAddE(LAddE* instr) {
1513 Register result = ToRegister(instr->result()); 1513 Register result = ToRegister(instr->result());
1514 Register left = ToRegister(instr->left()); 1514 Register left = ToRegister(instr->left());
1515 Operand right = (instr->right()->IsConstantOperand()) 1515 Operand right = (instr->right()->IsConstantOperand())
1516 ? ToInteger32(LConstantOperand::cast(instr->right())) 1516 ? ToInteger32(LConstantOperand::cast(instr->right()))
1517 : Operand(ToRegister32(instr->right()), SXTW); 1517 : Operand(ToRegister32(instr->right()), SXTW);
1518 1518
1519 ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)); 1519 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
1520 __ Add(result, left, right); 1520 __ Add(result, left, right);
1521 } 1521 }
1522 1522
1523 1523
1524 void LCodeGen::DoAddI(LAddI* instr) { 1524 void LCodeGen::DoAddI(LAddI* instr) {
1525 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1525 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1526 Register result = ToRegister32(instr->result()); 1526 Register result = ToRegister32(instr->result());
1527 Register left = ToRegister32(instr->left()); 1527 Register left = ToRegister32(instr->left());
1528 Operand right = ToShiftedRightOperand32I(instr->right(), instr); 1528 Operand right = ToShiftedRightOperand32I(instr->right(), instr);
1529 1529
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
1567 Register temp1 = ToRegister(instr->temp1()); 1567 Register temp1 = ToRegister(instr->temp1());
1568 Register temp2 = ToRegister(instr->temp2()); 1568 Register temp2 = ToRegister(instr->temp2());
1569 1569
1570 // Allocate memory for the object. 1570 // Allocate memory for the object.
1571 AllocationFlags flags = TAG_OBJECT; 1571 AllocationFlags flags = TAG_OBJECT;
1572 if (instr->hydrogen()->MustAllocateDoubleAligned()) { 1572 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
1573 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); 1573 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
1574 } 1574 }
1575 1575
1576 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 1576 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1577 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); 1577 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
1578 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); 1578 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
1579 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); 1579 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
1580 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 1580 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1581 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); 1581 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
1582 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); 1582 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
1583 } 1583 }
1584 1584
1585 if (instr->size()->IsConstantOperand()) { 1585 if (instr->size()->IsConstantOperand()) {
1586 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 1586 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1587 if (size <= Page::kMaxRegularHeapObjectSize) { 1587 if (size <= Page::kMaxRegularHeapObjectSize) {
1588 __ Allocate(size, result, temp1, temp2, deferred->entry(), flags); 1588 __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
1589 } else { 1589 } else {
1590 __ B(deferred->entry()); 1590 __ B(deferred->entry());
1591 } 1591 }
(...skipping 14 matching lines...) Expand all
1606 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 1606 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
1607 __ Mov(filler_count, size / kPointerSize); 1607 __ Mov(filler_count, size / kPointerSize);
1608 } else { 1608 } else {
1609 __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2); 1609 __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
1610 } 1610 }
1611 1611
1612 __ Sub(untagged_result, result, kHeapObjectTag); 1612 __ Sub(untagged_result, result, kHeapObjectTag);
1613 __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map())); 1613 __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
1614 __ FillFields(untagged_result, filler_count, filler); 1614 __ FillFields(untagged_result, filler_count, filler);
1615 } else { 1615 } else {
1616 ASSERT(instr->temp3() == NULL); 1616 DCHECK(instr->temp3() == NULL);
1617 } 1617 }
1618 } 1618 }
1619 1619
1620 1620
1621 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { 1621 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
1622 // TODO(3095996): Get rid of this. For now, we need to make the 1622 // TODO(3095996): Get rid of this. For now, we need to make the
1623 // result register contain a valid pointer because it is already 1623 // result register contain a valid pointer because it is already
1624 // contained in the register pointer map. 1624 // contained in the register pointer map.
1625 __ Mov(ToRegister(instr->result()), Smi::FromInt(0)); 1625 __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
1626 1626
1627 PushSafepointRegistersScope scope(this); 1627 PushSafepointRegistersScope scope(this);
1628 // We're in a SafepointRegistersScope so we can use any scratch registers. 1628 // We're in a SafepointRegistersScope so we can use any scratch registers.
1629 Register size = x0; 1629 Register size = x0;
1630 if (instr->size()->IsConstantOperand()) { 1630 if (instr->size()->IsConstantOperand()) {
1631 __ Mov(size, ToSmi(LConstantOperand::cast(instr->size()))); 1631 __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
1632 } else { 1632 } else {
1633 __ SmiTag(size, ToRegister32(instr->size()).X()); 1633 __ SmiTag(size, ToRegister32(instr->size()).X());
1634 } 1634 }
1635 int flags = AllocateDoubleAlignFlag::encode( 1635 int flags = AllocateDoubleAlignFlag::encode(
1636 instr->hydrogen()->MustAllocateDoubleAligned()); 1636 instr->hydrogen()->MustAllocateDoubleAligned());
1637 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { 1637 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
1638 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); 1638 DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
1639 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); 1639 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
1640 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); 1640 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
1641 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { 1641 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
1642 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); 1642 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
1643 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); 1643 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
1644 } else { 1644 } else {
1645 flags = AllocateTargetSpace::update(flags, NEW_SPACE); 1645 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
1646 } 1646 }
1647 __ Mov(x10, Smi::FromInt(flags)); 1647 __ Mov(x10, Smi::FromInt(flags));
1648 __ Push(size, x10); 1648 __ Push(size, x10);
1649 1649
1650 CallRuntimeFromDeferred( 1650 CallRuntimeFromDeferred(
1651 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); 1651 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
1652 __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result())); 1652 __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
1653 } 1653 }
1654 1654
1655 1655
1656 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 1656 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
1657 Register receiver = ToRegister(instr->receiver()); 1657 Register receiver = ToRegister(instr->receiver());
1658 Register function = ToRegister(instr->function()); 1658 Register function = ToRegister(instr->function());
1659 Register length = ToRegister32(instr->length()); 1659 Register length = ToRegister32(instr->length());
1660 1660
1661 Register elements = ToRegister(instr->elements()); 1661 Register elements = ToRegister(instr->elements());
1662 Register scratch = x5; 1662 Register scratch = x5;
1663 ASSERT(receiver.Is(x0)); // Used for parameter count. 1663 DCHECK(receiver.Is(x0)); // Used for parameter count.
1664 ASSERT(function.Is(x1)); // Required by InvokeFunction. 1664 DCHECK(function.Is(x1)); // Required by InvokeFunction.
1665 ASSERT(ToRegister(instr->result()).Is(x0)); 1665 DCHECK(ToRegister(instr->result()).Is(x0));
1666 ASSERT(instr->IsMarkedAsCall()); 1666 DCHECK(instr->IsMarkedAsCall());
1667 1667
1668 // Copy the arguments to this function possibly from the 1668 // Copy the arguments to this function possibly from the
1669 // adaptor frame below it. 1669 // adaptor frame below it.
1670 const uint32_t kArgumentsLimit = 1 * KB; 1670 const uint32_t kArgumentsLimit = 1 * KB;
1671 __ Cmp(length, kArgumentsLimit); 1671 __ Cmp(length, kArgumentsLimit);
1672 DeoptimizeIf(hi, instr->environment()); 1672 DeoptimizeIf(hi, instr->environment());
1673 1673
1674 // Push the receiver and use the register to keep the original 1674 // Push the receiver and use the register to keep the original
1675 // number of arguments. 1675 // number of arguments.
1676 __ Push(receiver); 1676 __ Push(receiver);
1677 Register argc = receiver; 1677 Register argc = receiver;
1678 receiver = NoReg; 1678 receiver = NoReg;
1679 __ Sxtw(argc, length); 1679 __ Sxtw(argc, length);
1680 // The arguments are at a one pointer size offset from elements. 1680 // The arguments are at a one pointer size offset from elements.
1681 __ Add(elements, elements, 1 * kPointerSize); 1681 __ Add(elements, elements, 1 * kPointerSize);
1682 1682
1683 // Loop through the arguments pushing them onto the execution 1683 // Loop through the arguments pushing them onto the execution
1684 // stack. 1684 // stack.
1685 Label invoke, loop; 1685 Label invoke, loop;
1686 // length is a small non-negative integer, due to the test above. 1686 // length is a small non-negative integer, due to the test above.
1687 __ Cbz(length, &invoke); 1687 __ Cbz(length, &invoke);
1688 __ Bind(&loop); 1688 __ Bind(&loop);
1689 __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2)); 1689 __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
1690 __ Push(scratch); 1690 __ Push(scratch);
1691 __ Subs(length, length, 1); 1691 __ Subs(length, length, 1);
1692 __ B(ne, &loop); 1692 __ B(ne, &loop);
1693 1693
1694 __ Bind(&invoke); 1694 __ Bind(&invoke);
1695 ASSERT(instr->HasPointerMap()); 1695 DCHECK(instr->HasPointerMap());
1696 LPointerMap* pointers = instr->pointer_map(); 1696 LPointerMap* pointers = instr->pointer_map();
1697 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); 1697 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
1698 // The number of arguments is stored in argc (receiver) which is x0, as 1698 // The number of arguments is stored in argc (receiver) which is x0, as
1699 // expected by InvokeFunction. 1699 // expected by InvokeFunction.
1700 ParameterCount actual(argc); 1700 ParameterCount actual(argc);
1701 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); 1701 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
1702 } 1702 }
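Reviewer note on the loop above: the receiver is pushed first (its register is then recycled as argc), and arguments are loaded at index length down to 1, so the last argument is pushed first, as a downward-growing stack expects. A minimal standalone model of the push order, plain C++ and illustrative only (ModelApplyPushes is a made-up name, not part of this CL):

#include <vector>

// Models only the ordering: receiver first, then arguments from the
// highest index down, mirroring the Ldr/Push pair in the loop above.
std::vector<int> ModelApplyPushes(int receiver, const std::vector<int>& args) {
  std::vector<int> stack;
  stack.push_back(receiver);
  for (size_t length = args.size(); length > 0; --length) {
    stack.push_back(args[length - 1]);
  }
  return stack;
}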
1703 1703
1704 1704
1705 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 1705 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
1706 // We push some arguments and they will be popped in another block. We can't 1706 // We push some arguments and they will be popped in another block. We can't
1707 // trust that jssp references the end of the stack slots until the end of 1707 // trust that jssp references the end of the stack slots until the end of
1708 // the function. 1708 // the function.
1709 inlined_arguments_ = true; 1709 inlined_arguments_ = true;
1710 Register result = ToRegister(instr->result()); 1710 Register result = ToRegister(instr->result());
1711 1711
1712 if (instr->hydrogen()->from_inlined()) { 1712 if (instr->hydrogen()->from_inlined()) {
1713 // When we are inside an inlined function, the arguments are the last things 1713 // When we are inside an inlined function, the arguments are the last things
1714 // that have been pushed on the stack. Therefore the arguments array can be 1714 // that have been pushed on the stack. Therefore the arguments array can be
1715 // accessed directly from jssp. 1715 // accessed directly from jssp.
1716 // However, in the normal case it is accessed via fp, but there are two words 1716 // However, in the normal case it is accessed via fp, but there are two words
1717 // on the stack between fp and the arguments (the saved lr and fp) and the 1717 // on the stack between fp and the arguments (the saved lr and fp) and the
1718 // LAccessArgumentsAt implementation takes that into account. 1718 // LAccessArgumentsAt implementation takes that into account.
1719 // In the inlined case we need to subtract the size of 2 words from jssp to 1719 // In the inlined case we need to subtract the size of 2 words from jssp to
1720 // get a pointer which will work well with LAccessArgumentsAt. 1720 // get a pointer which will work well with LAccessArgumentsAt.
1721 ASSERT(masm()->StackPointer().Is(jssp)); 1721 DCHECK(masm()->StackPointer().Is(jssp));
1722 __ Sub(result, jssp, 2 * kPointerSize); 1722 __ Sub(result, jssp, 2 * kPointerSize);
1723 } else { 1723 } else {
1724 ASSERT(instr->temp() != NULL); 1724 DCHECK(instr->temp() != NULL);
1725 Register previous_fp = ToRegister(instr->temp()); 1725 Register previous_fp = ToRegister(instr->temp());
1726 1726
1727 __ Ldr(previous_fp, 1727 __ Ldr(previous_fp,
1728 MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 1728 MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1729 __ Ldr(result, 1729 __ Ldr(result,
1730 MemOperand(previous_fp, StandardFrameConstants::kContextOffset)); 1730 MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
1731 __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); 1731 __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1732 __ Csel(result, fp, previous_fp, ne); 1732 __ Csel(result, fp, previous_fp, ne);
1733 } 1733 }
1734 } 1734 }
(...skipping 33 matching lines...)
1768 case Token::MOD: { 1768 case Token::MOD: {
1769 // The ECMA-262 remainder operator is the remainder from a truncating 1769 // The ECMA-262 remainder operator is the remainder from a truncating
1770 // (round-towards-zero) division. Note that this differs from IEEE-754. 1770 // (round-towards-zero) division. Note that this differs from IEEE-754.
1771 // 1771 //
1772 // TODO(jbramley): See if it's possible to do this inline, rather than by 1772 // TODO(jbramley): See if it's possible to do this inline, rather than by
1773 // calling a helper function. With frintz (to produce the intermediate 1773 // calling a helper function. With frintz (to produce the intermediate
1774 // quotient) and fmsub (to calculate the remainder without loss of 1774 // quotient) and fmsub (to calculate the remainder without loss of
1775 // precision), it should be possible. However, we would need support for 1775 // precision), it should be possible. However, we would need support for
1776 // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't 1776 // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
1777 // support that yet. 1777 // support that yet.
1778 ASSERT(left.Is(d0)); 1778 DCHECK(left.Is(d0));
1779 ASSERT(right.Is(d1)); 1779 DCHECK(right.Is(d1));
1780 __ CallCFunction( 1780 __ CallCFunction(
1781 ExternalReference::mod_two_doubles_operation(isolate()), 1781 ExternalReference::mod_two_doubles_operation(isolate()),
1782 0, 2); 1782 0, 2);
1783 ASSERT(result.Is(d0)); 1783 DCHECK(result.Is(d0));
1784 break; 1784 break;
1785 } 1785 }
1786 default: 1786 default:
1787 UNREACHABLE(); 1787 UNREACHABLE();
1788 break; 1788 break;
1789 } 1789 }
1790 } 1790 }
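A note on the MOD comment above: the runtime helper has to produce the truncating (ECMA-262) remainder, i.e. behave like C's fmod, whereas std::remainder is the IEEE-754 one. A standalone illustration in plain C++ (not part of this CL):

#include <cmath>
#include <cstdio>

int main() {
  // fmod truncates the quotient toward zero (ECMA-262 '%'), while
  // remainder rounds it to nearest (IEEE-754), so the results differ.
  std::printf("%f\n", std::fmod(5.0, 3.0));       // 2.0
  std::printf("%f\n", std::remainder(5.0, 3.0));  // -1.0
  std::printf("%f\n", std::fmod(-5.0, 3.0));      // -2.0: sign follows dividend
  return 0;
}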
1791 1791
1792 1792
1793 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 1793 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1794 ASSERT(ToRegister(instr->context()).is(cp)); 1794 DCHECK(ToRegister(instr->context()).is(cp));
1795 ASSERT(ToRegister(instr->left()).is(x1)); 1795 DCHECK(ToRegister(instr->left()).is(x1));
1796 ASSERT(ToRegister(instr->right()).is(x0)); 1796 DCHECK(ToRegister(instr->right()).is(x0));
1797 ASSERT(ToRegister(instr->result()).is(x0)); 1797 DCHECK(ToRegister(instr->result()).is(x0));
1798 1798
1799 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); 1799 BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
1800 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 1800 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1801 } 1801 }
1802 1802
1803 1803
1804 void LCodeGen::DoBitI(LBitI* instr) { 1804 void LCodeGen::DoBitI(LBitI* instr) {
1805 Register result = ToRegister32(instr->result()); 1805 Register result = ToRegister32(instr->result());
1806 Register left = ToRegister32(instr->left()); 1806 Register left = ToRegister32(instr->left());
1807 Operand right = ToShiftedRightOperand32U(instr->right(), instr); 1807 Operand right = ToShiftedRightOperand32U(instr->right(), instr);
(...skipping 20 matching lines...)
1828 case Token::BIT_XOR: __ Eor(result, left, right); break; 1828 case Token::BIT_XOR: __ Eor(result, left, right); break;
1829 default: 1829 default:
1830 UNREACHABLE(); 1830 UNREACHABLE();
1831 break; 1831 break;
1832 } 1832 }
1833 } 1833 }
1834 1834
1835 1835
1836 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) { 1836 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
1837 Condition cond = instr->hydrogen()->allow_equality() ? hi : hs; 1837 Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
1838 ASSERT(instr->hydrogen()->index()->representation().IsInteger32()); 1838 DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
1839 ASSERT(instr->hydrogen()->length()->representation().IsInteger32()); 1839 DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
1840 if (instr->index()->IsConstantOperand()) { 1840 if (instr->index()->IsConstantOperand()) {
1841 Operand index = ToOperand32I(instr->index()); 1841 Operand index = ToOperand32I(instr->index());
1842 Register length = ToRegister32(instr->length()); 1842 Register length = ToRegister32(instr->length());
1843 __ Cmp(length, index); 1843 __ Cmp(length, index);
1844 cond = CommuteCondition(cond); 1844 cond = CommuteCondition(cond);
1845 } else { 1845 } else {
1846 Register index = ToRegister32(instr->index()); 1846 Register index = ToRegister32(instr->index());
1847 Operand length = ToOperand32I(instr->length()); 1847 Operand length = ToOperand32I(instr->length());
1848 __ Cmp(index, length); 1848 __ Cmp(index, length);
1849 } 1849 }
1850 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 1850 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
1851 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed); 1851 __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
1852 } else { 1852 } else {
1853 DeoptimizeIf(cond, instr->environment()); 1853 DeoptimizeIf(cond, instr->environment());
1854 } 1854 }
1855 } 1855 }
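On the constant-index path above: the immediate must be the right-hand operand of Cmp, so the operands are swapped and CommuteCondition mirrors the condition with them, leaving the predicate unchanged. A tiny standalone sketch of the assumed mapping (plain C++, toy Cond enum, not from this CL):

#include <cassert>

enum Cond { kLo, kLs, kHs, kHi };

// Swapping "a cond b" into "b cond' a" mirrors the condition:
// a < b <=> b > a, a <= b <=> b >= a, and so on.
Cond Commute(Cond cond) {
  switch (cond) {
    case kLo: return kHi;
    case kLs: return kHs;
    case kHs: return kLs;
    case kHi: return kLo;
  }
  return cond;
}

int main() {
  unsigned index = 3, length = 10;
  assert((index >= length) == (length <= index));  // hs commutes to ls
  assert(Commute(kHs) == kLs);
  return 0;
}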
1856 1856
1857 1857
1858 void LCodeGen::DoBranch(LBranch* instr) { 1858 void LCodeGen::DoBranch(LBranch* instr) {
1859 Representation r = instr->hydrogen()->value()->representation(); 1859 Representation r = instr->hydrogen()->value()->representation();
1860 Label* true_label = instr->TrueLabel(chunk_); 1860 Label* true_label = instr->TrueLabel(chunk_);
1861 Label* false_label = instr->FalseLabel(chunk_); 1861 Label* false_label = instr->FalseLabel(chunk_);
1862 1862
1863 if (r.IsInteger32()) { 1863 if (r.IsInteger32()) {
1864 ASSERT(!info()->IsStub()); 1864 DCHECK(!info()->IsStub());
1865 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0); 1865 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
1866 } else if (r.IsSmi()) { 1866 } else if (r.IsSmi()) {
1867 ASSERT(!info()->IsStub()); 1867 DCHECK(!info()->IsStub());
1868 STATIC_ASSERT(kSmiTag == 0); 1868 STATIC_ASSERT(kSmiTag == 0);
1869 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0); 1869 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
1870 } else if (r.IsDouble()) { 1870 } else if (r.IsDouble()) {
1871 DoubleRegister value = ToDoubleRegister(instr->value()); 1871 DoubleRegister value = ToDoubleRegister(instr->value());
1872 // Test the double value. Zero and NaN are false. 1872 // Test the double value. Zero and NaN are false.
1873 EmitBranchIfNonZeroNumber(instr, value, double_scratch()); 1873 EmitBranchIfNonZeroNumber(instr, value, double_scratch());
1874 } else { 1874 } else {
1875 ASSERT(r.IsTagged()); 1875 DCHECK(r.IsTagged());
1876 Register value = ToRegister(instr->value()); 1876 Register value = ToRegister(instr->value());
1877 HType type = instr->hydrogen()->value()->type(); 1877 HType type = instr->hydrogen()->value()->type();
1878 1878
1879 if (type.IsBoolean()) { 1879 if (type.IsBoolean()) {
1880 ASSERT(!info()->IsStub()); 1880 DCHECK(!info()->IsStub());
1881 __ CompareRoot(value, Heap::kTrueValueRootIndex); 1881 __ CompareRoot(value, Heap::kTrueValueRootIndex);
1882 EmitBranch(instr, eq); 1882 EmitBranch(instr, eq);
1883 } else if (type.IsSmi()) { 1883 } else if (type.IsSmi()) {
1884 ASSERT(!info()->IsStub()); 1884 DCHECK(!info()->IsStub());
1885 EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0)); 1885 EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
1886 } else if (type.IsJSArray()) { 1886 } else if (type.IsJSArray()) {
1887 ASSERT(!info()->IsStub()); 1887 DCHECK(!info()->IsStub());
1888 EmitGoto(instr->TrueDestination(chunk())); 1888 EmitGoto(instr->TrueDestination(chunk()));
1889 } else if (type.IsHeapNumber()) { 1889 } else if (type.IsHeapNumber()) {
1890 ASSERT(!info()->IsStub()); 1890 DCHECK(!info()->IsStub());
1891 __ Ldr(double_scratch(), FieldMemOperand(value, 1891 __ Ldr(double_scratch(), FieldMemOperand(value,
1892 HeapNumber::kValueOffset)); 1892 HeapNumber::kValueOffset));
1893 // Test the double value. Zero and NaN are false. 1893 // Test the double value. Zero and NaN are false.
1894 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch()); 1894 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
1895 } else if (type.IsString()) { 1895 } else if (type.IsString()) {
1896 ASSERT(!info()->IsStub()); 1896 DCHECK(!info()->IsStub());
1897 Register temp = ToRegister(instr->temp1()); 1897 Register temp = ToRegister(instr->temp1());
1898 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset)); 1898 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
1899 EmitCompareAndBranch(instr, ne, temp, 0); 1899 EmitCompareAndBranch(instr, ne, temp, 0);
1900 } else { 1900 } else {
1901 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); 1901 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1902 // Avoid deopts in the case where we've never executed this path before. 1902 // Avoid deopts in the case where we've never executed this path before.
1903 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); 1903 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
1904 1904
1905 if (expected.Contains(ToBooleanStub::UNDEFINED)) { 1905 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1906 // undefined -> false. 1906 // undefined -> false.
(...skipping 10 matching lines...)
1917 } 1917 }
1918 1918
1919 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { 1919 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1920 // 'null' -> false. 1920 // 'null' -> false.
1921 __ JumpIfRoot( 1921 __ JumpIfRoot(
1922 value, Heap::kNullValueRootIndex, false_label); 1922 value, Heap::kNullValueRootIndex, false_label);
1923 } 1923 }
1924 1924
1925 if (expected.Contains(ToBooleanStub::SMI)) { 1925 if (expected.Contains(ToBooleanStub::SMI)) {
1926 // Smis: 0 -> false, all other -> true. 1926 // Smis: 0 -> false, all other -> true.
1927 ASSERT(Smi::FromInt(0) == 0); 1927 DCHECK(Smi::FromInt(0) == 0);
1928 __ Cbz(value, false_label); 1928 __ Cbz(value, false_label);
1929 __ JumpIfSmi(value, true_label); 1929 __ JumpIfSmi(value, true_label);
1930 } else if (expected.NeedsMap()) { 1930 } else if (expected.NeedsMap()) {
1931 // If we need a map later and have a smi, deopt. 1931 // If we need a map later and have a smi, deopt.
1932 DeoptimizeIfSmi(value, instr->environment()); 1932 DeoptimizeIfSmi(value, instr->environment());
1933 } 1933 }
1934 1934
1935 Register map = NoReg; 1935 Register map = NoReg;
1936 Register scratch = NoReg; 1936 Register scratch = NoReg;
1937 1937
1938 if (expected.NeedsMap()) { 1938 if (expected.NeedsMap()) {
1939 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); 1939 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
1940 map = ToRegister(instr->temp1()); 1940 map = ToRegister(instr->temp1());
1941 scratch = ToRegister(instr->temp2()); 1941 scratch = ToRegister(instr->temp2());
1942 1942
1943 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); 1943 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
1944 1944
1945 if (expected.CanBeUndetectable()) { 1945 if (expected.CanBeUndetectable()) {
1946 // Undetectable -> false. 1946 // Undetectable -> false.
1947 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); 1947 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
1948 __ TestAndBranchIfAnySet( 1948 __ TestAndBranchIfAnySet(
1949 scratch, 1 << Map::kIsUndetectable, false_label); 1949 scratch, 1 << Map::kIsUndetectable, false_label);
(...skipping 51 matching lines...)
2001 int formal_parameter_count, 2001 int formal_parameter_count,
2002 int arity, 2002 int arity,
2003 LInstruction* instr, 2003 LInstruction* instr,
2004 Register function_reg) { 2004 Register function_reg) {
2005 bool dont_adapt_arguments = 2005 bool dont_adapt_arguments =
2006 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 2006 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
2007 bool can_invoke_directly = 2007 bool can_invoke_directly =
2008 dont_adapt_arguments || formal_parameter_count == arity; 2008 dont_adapt_arguments || formal_parameter_count == arity;
2009 2009
2010 // The function interface relies on the following register assignments. 2010 // The function interface relies on the following register assignments.
2011 ASSERT(function_reg.Is(x1) || function_reg.IsNone()); 2011 DCHECK(function_reg.Is(x1) || function_reg.IsNone());
2012 Register arity_reg = x0; 2012 Register arity_reg = x0;
2013 2013
2014 LPointerMap* pointers = instr->pointer_map(); 2014 LPointerMap* pointers = instr->pointer_map();
2015 2015
2016 // If necessary, load the function object. 2016 // If necessary, load the function object.
2017 if (function_reg.IsNone()) { 2017 if (function_reg.IsNone()) {
2018 function_reg = x1; 2018 function_reg = x1;
2019 __ LoadObject(function_reg, function); 2019 __ LoadObject(function_reg, function);
2020 } 2020 }
2021 2021
(...skipping 24 matching lines...)
2046 } else { 2046 } else {
2047 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 2047 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2048 ParameterCount count(arity); 2048 ParameterCount count(arity);
2049 ParameterCount expected(formal_parameter_count); 2049 ParameterCount expected(formal_parameter_count);
2050 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator); 2050 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
2051 } 2051 }
2052 } 2052 }
2053 2053
2054 2054
2055 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 2055 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
2056 ASSERT(instr->IsMarkedAsCall()); 2056 DCHECK(instr->IsMarkedAsCall());
2057 ASSERT(ToRegister(instr->result()).Is(x0)); 2057 DCHECK(ToRegister(instr->result()).Is(x0));
2058 2058
2059 LPointerMap* pointers = instr->pointer_map(); 2059 LPointerMap* pointers = instr->pointer_map();
2060 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 2060 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2061 2061
2062 if (instr->target()->IsConstantOperand()) { 2062 if (instr->target()->IsConstantOperand()) {
2063 LConstantOperand* target = LConstantOperand::cast(instr->target()); 2063 LConstantOperand* target = LConstantOperand::cast(instr->target());
2064 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 2064 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
2065 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 2065 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
2066 // TODO(all): on ARM we use a call descriptor to specify a storage mode 2066 // TODO(all): on ARM we use a call descriptor to specify a storage mode
2067 // but on ARM64 we only have one storage mode, so it isn't necessary. Check 2067 // but on ARM64 we only have one storage mode, so it isn't necessary. Check
2068 // that this understanding is correct. 2068 // that this understanding is correct.
2069 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None()); 2069 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
2070 } else { 2070 } else {
2071 ASSERT(instr->target()->IsRegister()); 2071 DCHECK(instr->target()->IsRegister());
2072 Register target = ToRegister(instr->target()); 2072 Register target = ToRegister(instr->target());
2073 generator.BeforeCall(__ CallSize(target)); 2073 generator.BeforeCall(__ CallSize(target));
2074 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); 2074 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
2075 __ Call(target); 2075 __ Call(target);
2076 } 2076 }
2077 generator.AfterCall(); 2077 generator.AfterCall();
2078 after_push_argument_ = false; 2078 after_push_argument_ = false;
2079 } 2079 }
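On the Add before the indirect Call above: a tagged HeapObject pointer carries kHeapObjectTag (1) in its low bits, and the first instruction of a Code object sits past its header, hence target + (Code::kHeaderSize - kHeapObjectTag). A back-of-the-envelope model in plain C++ (kCodeHeaderSize below is a made-up stand-in, not V8's real constant):

#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;     // tagged pointer = raw address + 1
constexpr uintptr_t kCodeHeaderSize = 128;  // illustrative value only

// Untag the Code pointer and skip its header to reach the entry point.
uintptr_t EntryFromTaggedCode(uintptr_t tagged_code) {
  return tagged_code + (kCodeHeaderSize - kHeapObjectTag);
}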
2080 2080
2081 2081
2082 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { 2082 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
2083 ASSERT(instr->IsMarkedAsCall()); 2083 DCHECK(instr->IsMarkedAsCall());
2084 ASSERT(ToRegister(instr->function()).is(x1)); 2084 DCHECK(ToRegister(instr->function()).is(x1));
2085 2085
2086 if (instr->hydrogen()->pass_argument_count()) { 2086 if (instr->hydrogen()->pass_argument_count()) {
2087 __ Mov(x0, Operand(instr->arity())); 2087 __ Mov(x0, Operand(instr->arity()));
2088 } 2088 }
2089 2089
2090 // Change context. 2090 // Change context.
2091 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset)); 2091 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2092 2092
2093 // Load the code entry address. 2093 // Load the code entry address.
2094 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset)); 2094 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
2095 __ Call(x10); 2095 __ Call(x10);
2096 2096
2097 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 2097 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2098 after_push_argument_ = false; 2098 after_push_argument_ = false;
2099 } 2099 }
2100 2100
2101 2101
2102 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 2102 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2103 CallRuntime(instr->function(), instr->arity(), instr); 2103 CallRuntime(instr->function(), instr->arity(), instr);
2104 after_push_argument_ = false; 2104 after_push_argument_ = false;
2105 } 2105 }
2106 2106
2107 2107
2108 void LCodeGen::DoCallStub(LCallStub* instr) { 2108 void LCodeGen::DoCallStub(LCallStub* instr) {
2109 ASSERT(ToRegister(instr->context()).is(cp)); 2109 DCHECK(ToRegister(instr->context()).is(cp));
2110 ASSERT(ToRegister(instr->result()).is(x0)); 2110 DCHECK(ToRegister(instr->result()).is(x0));
2111 switch (instr->hydrogen()->major_key()) { 2111 switch (instr->hydrogen()->major_key()) {
2112 case CodeStub::RegExpExec: { 2112 case CodeStub::RegExpExec: {
2113 RegExpExecStub stub(isolate()); 2113 RegExpExecStub stub(isolate());
2114 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2114 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2115 break; 2115 break;
2116 } 2116 }
2117 case CodeStub::SubString: { 2117 case CodeStub::SubString: {
2118 SubStringStub stub(isolate()); 2118 SubStringStub stub(isolate());
2119 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2119 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2120 break; 2120 break;
(...skipping 90 matching lines...)
2211 2211
2212 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 2212 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
2213 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2213 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2214 DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment()); 2214 DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
2215 } 2215 }
2216 } 2216 }
2217 2217
2218 2218
2219 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 2219 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2220 Register value = ToRegister(instr->value()); 2220 Register value = ToRegister(instr->value());
2221 ASSERT(!instr->result() || ToRegister(instr->result()).Is(value)); 2221 DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
2222 DeoptimizeIfNotSmi(value, instr->environment()); 2222 DeoptimizeIfNotSmi(value, instr->environment());
2223 } 2223 }
2224 2224
2225 2225
2226 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 2226 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2227 Register input = ToRegister(instr->value()); 2227 Register input = ToRegister(instr->value());
2228 Register scratch = ToRegister(instr->temp()); 2228 Register scratch = ToRegister(instr->temp());
2229 2229
2230 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 2230 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2231 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 2231 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
(...skipping 14 matching lines...)
2246 // to force a deopt. 2246 // to force a deopt.
2247 __ Ccmp(scratch, last, CFlag, hs); 2247 __ Ccmp(scratch, last, CFlag, hs);
2248 DeoptimizeIf(hi, instr->environment()); 2248 DeoptimizeIf(hi, instr->environment());
2249 } 2249 }
2250 } else { 2250 } else {
2251 uint8_t mask; 2251 uint8_t mask;
2252 uint8_t tag; 2252 uint8_t tag;
2253 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 2253 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
2254 2254
2255 if (IsPowerOf2(mask)) { 2255 if (IsPowerOf2(mask)) {
2256 ASSERT((tag == 0) || (tag == mask)); 2256 DCHECK((tag == 0) || (tag == mask));
2257 if (tag == 0) { 2257 if (tag == 0) {
2258 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment()); 2258 DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
2259 } else { 2259 } else {
2260 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment()); 2260 DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
2261 } 2261 }
2262 } else { 2262 } else {
2263 if (tag == 0) { 2263 if (tag == 0) {
2264 __ Tst(scratch, mask); 2264 __ Tst(scratch, mask);
2265 } else { 2265 } else {
2266 __ And(scratch, scratch, mask); 2266 __ And(scratch, scratch, mask);
(...skipping 135 matching lines...)
2402 // The name in the constructor is internalized because of the way the context 2402 // The name in the constructor is internalized because of the way the context
2403 // is booted. This routine isn't expected to work for random API-created 2403 // is booted. This routine isn't expected to work for random API-created
2404 // classes and it doesn't have to because you can't access it with natives 2404 // classes and it doesn't have to because you can't access it with natives
2405 // syntax. Since both sides are internalized it is sufficient to use an 2405 // syntax. Since both sides are internalized it is sufficient to use an
2406 // identity comparison. 2406 // identity comparison.
2407 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name)); 2407 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
2408 } 2408 }
2409 2409
2410 2410
2411 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) { 2411 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
2412 ASSERT(instr->hydrogen()->representation().IsDouble()); 2412 DCHECK(instr->hydrogen()->representation().IsDouble());
2413 FPRegister object = ToDoubleRegister(instr->object()); 2413 FPRegister object = ToDoubleRegister(instr->object());
2414 Register temp = ToRegister(instr->temp()); 2414 Register temp = ToRegister(instr->temp());
2415 2415
2416 // If we don't have a NaN, we don't have the hole, so branch now to avoid the 2416 // If we don't have a NaN, we don't have the hole, so branch now to avoid the
2417 // (relatively expensive) hole-NaN check. 2417 // (relatively expensive) hole-NaN check.
2418 __ Fcmp(object, object); 2418 __ Fcmp(object, object);
2419 __ B(vc, instr->FalseLabel(chunk_)); 2419 __ B(vc, instr->FalseLabel(chunk_));
2420 2420
2421 // We have a NaN, but is it the hole? 2421 // We have a NaN, but is it the hole?
2422 __ Fmov(temp, object); 2422 __ Fmov(temp, object);
2423 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64); 2423 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
2424 } 2424 }
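On the Fcmp trick above: comparing a register with itself is unordered exactly when it holds a NaN, so the vc (ordered) branch dismisses every non-NaN cheaply, and only then are the raw bits compared against the hole's dedicated NaN pattern. A standalone sketch in plain C++ (kHoleNanBits is a placeholder, not necessarily V8's kHoleNanInt64 value):

#include <cstdint>
#include <cstring>

constexpr uint64_t kHoleNanBits = 0x7FF7FFFFFFF7FFFFull;  // placeholder bits

bool IsHoleNan(double x) {
  if (x == x) return false;  // ordered, so not a NaN, so not the hole
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));  // like Fmov(temp, object)
  return bits == kHoleNanBits;
}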
2425 2425
2426 2426
2427 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) { 2427 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
2428 ASSERT(instr->hydrogen()->representation().IsTagged()); 2428 DCHECK(instr->hydrogen()->representation().IsTagged());
2429 Register object = ToRegister(instr->object()); 2429 Register object = ToRegister(instr->object());
2430 2430
2431 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex); 2431 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
2432 } 2432 }
2433 2433
2434 2434
2435 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2435 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2436 Register value = ToRegister(instr->value()); 2436 Register value = ToRegister(instr->value());
2437 Register map = ToRegister(instr->temp()); 2437 Register map = ToRegister(instr->temp());
2438 2438
2439 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); 2439 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
2440 EmitCompareAndBranch(instr, eq, map, Operand(instr->map())); 2440 EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
2441 } 2441 }
2442 2442
2443 2443
2444 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { 2444 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2445 Representation rep = instr->hydrogen()->value()->representation(); 2445 Representation rep = instr->hydrogen()->value()->representation();
2446 ASSERT(!rep.IsInteger32()); 2446 DCHECK(!rep.IsInteger32());
2447 Register scratch = ToRegister(instr->temp()); 2447 Register scratch = ToRegister(instr->temp());
2448 2448
2449 if (rep.IsDouble()) { 2449 if (rep.IsDouble()) {
2450 __ JumpIfMinusZero(ToDoubleRegister(instr->value()), 2450 __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
2451 instr->TrueLabel(chunk())); 2451 instr->TrueLabel(chunk()));
2452 } else { 2452 } else {
2453 Register value = ToRegister(instr->value()); 2453 Register value = ToRegister(instr->value());
2454 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex, 2454 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2455 instr->FalseLabel(chunk()), DO_SMI_CHECK); 2455 instr->FalseLabel(chunk()), DO_SMI_CHECK);
2456 __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset)); 2456 __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
(...skipping 34 matching lines...)
2491 ToRegister32(left), 2491 ToRegister32(left),
2492 ToOperand32I(right)); 2492 ToOperand32I(right));
2493 } else { 2493 } else {
2494 // Commute the operands and the condition. 2494 // Commute the operands and the condition.
2495 EmitCompareAndBranch(instr, 2495 EmitCompareAndBranch(instr,
2496 CommuteCondition(cond), 2496 CommuteCondition(cond),
2497 ToRegister32(right), 2497 ToRegister32(right),
2498 ToOperand32I(left)); 2498 ToOperand32I(left));
2499 } 2499 }
2500 } else { 2500 } else {
2501 ASSERT(instr->hydrogen_value()->representation().IsSmi()); 2501 DCHECK(instr->hydrogen_value()->representation().IsSmi());
2502 if (right->IsConstantOperand()) { 2502 if (right->IsConstantOperand()) {
2503 int32_t value = ToInteger32(LConstantOperand::cast(right)); 2503 int32_t value = ToInteger32(LConstantOperand::cast(right));
2504 EmitCompareAndBranch(instr, 2504 EmitCompareAndBranch(instr,
2505 cond, 2505 cond,
2506 ToRegister(left), 2506 ToRegister(left),
2507 Operand(Smi::FromInt(value))); 2507 Operand(Smi::FromInt(value)));
2508 } else if (left->IsConstantOperand()) { 2508 } else if (left->IsConstantOperand()) {
2509 // Commute the operands and the condition. 2509 // Commute the operands and the condition.
2510 int32_t value = ToInteger32(LConstantOperand::cast(left)); 2510 int32_t value = ToInteger32(LConstantOperand::cast(left));
2511 EmitCompareAndBranch(instr, 2511 EmitCompareAndBranch(instr,
(...skipping 13 matching lines...)
2525 2525
2526 2526
2527 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 2527 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2528 Register left = ToRegister(instr->left()); 2528 Register left = ToRegister(instr->left());
2529 Register right = ToRegister(instr->right()); 2529 Register right = ToRegister(instr->right());
2530 EmitCompareAndBranch(instr, eq, left, right); 2530 EmitCompareAndBranch(instr, eq, left, right);
2531 } 2531 }
2532 2532
2533 2533
2534 void LCodeGen::DoCmpT(LCmpT* instr) { 2534 void LCodeGen::DoCmpT(LCmpT* instr) {
2535 ASSERT(ToRegister(instr->context()).is(cp)); 2535 DCHECK(ToRegister(instr->context()).is(cp));
2536 Token::Value op = instr->op(); 2536 Token::Value op = instr->op();
2537 Condition cond = TokenToCondition(op, false); 2537 Condition cond = TokenToCondition(op, false);
2538 2538
2539 ASSERT(ToRegister(instr->left()).Is(x1)); 2539 DCHECK(ToRegister(instr->left()).Is(x1));
2540 ASSERT(ToRegister(instr->right()).Is(x0)); 2540 DCHECK(ToRegister(instr->right()).Is(x0));
2541 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); 2541 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2542 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2542 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2543 // Signal that we don't inline smi code before this stub. 2543 // Signal that we don't inline smi code before this stub.
2544 InlineSmiCheckInfo::EmitNotInlined(masm()); 2544 InlineSmiCheckInfo::EmitNotInlined(masm());
2545 2545
2546 // Return true or false depending on CompareIC result. 2546 // Return true or false depending on CompareIC result.
2547 // This instruction is marked as call. We can clobber any register. 2547 // This instruction is marked as call. We can clobber any register.
2548 ASSERT(instr->IsMarkedAsCall()); 2548 DCHECK(instr->IsMarkedAsCall());
2549 __ LoadTrueFalseRoots(x1, x2); 2549 __ LoadTrueFalseRoots(x1, x2);
2550 __ Cmp(x0, 0); 2550 __ Cmp(x0, 0);
2551 __ Csel(ToRegister(instr->result()), x1, x2, cond); 2551 __ Csel(ToRegister(instr->result()), x1, x2, cond);
2552 } 2552 }
2553 2553
2554 2554
2555 void LCodeGen::DoConstantD(LConstantD* instr) { 2555 void LCodeGen::DoConstantD(LConstantD* instr) {
2556 ASSERT(instr->result()->IsDoubleRegister()); 2556 DCHECK(instr->result()->IsDoubleRegister());
2557 DoubleRegister result = ToDoubleRegister(instr->result()); 2557 DoubleRegister result = ToDoubleRegister(instr->result());
2558 if (instr->value() == 0) { 2558 if (instr->value() == 0) {
2559 if (copysign(1.0, instr->value()) == 1.0) { 2559 if (copysign(1.0, instr->value()) == 1.0) {
2560 __ Fmov(result, fp_zero); 2560 __ Fmov(result, fp_zero);
2561 } else { 2561 } else {
2562 __ Fneg(result, fp_zero); 2562 __ Fneg(result, fp_zero);
2563 } 2563 }
2564 } else { 2564 } else {
2565 __ Fmov(result, instr->value()); 2565 __ Fmov(result, instr->value());
2566 } 2566 }
2567 } 2567 }
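On the copysign check above: +0.0 and -0.0 compare equal, so instr->value() == 0 alone cannot choose between the Fmov(fp_zero) and Fneg(fp_zero) paths; copysign recovers the sign bit. A standalone illustration in plain C++ (not part of this CL):

#include <cassert>
#include <cmath>

int main() {
  assert(0.0 == -0.0);                       // equality hides the sign bit
  assert(std::copysign(1.0, 0.0) == 1.0);    // +0.0 -> Fmov path
  assert(std::copysign(1.0, -0.0) == -1.0);  // -0.0 -> Fneg path
  return 0;
}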
2568 2568
2569 2569
2570 void LCodeGen::DoConstantE(LConstantE* instr) { 2570 void LCodeGen::DoConstantE(LConstantE* instr) {
2571 __ Mov(ToRegister(instr->result()), Operand(instr->value())); 2571 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2572 } 2572 }
2573 2573
2574 2574
2575 void LCodeGen::DoConstantI(LConstantI* instr) { 2575 void LCodeGen::DoConstantI(LConstantI* instr) {
2576 ASSERT(is_int32(instr->value())); 2576 DCHECK(is_int32(instr->value()));
2577 // Cast the value here to ensure that the value isn't sign extended by the 2577 // Cast the value here to ensure that the value isn't sign extended by the
2578 // implicit Operand constructor. 2578 // implicit Operand constructor.
2579 __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value())); 2579 __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
2580 } 2580 }
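On the static_cast above: widening a negative int32_t to 64 bits sign-extends, while widening its uint32_t cast zero-extends, and the W-register move wants the zero-extended form. A standalone illustration in plain C++ (not part of this CL):

#include <cassert>
#include <cstdint>

int main() {
  int32_t v = -1;
  uint64_t sign_extended = static_cast<uint64_t>(static_cast<int64_t>(v));
  uint64_t zero_extended = static_cast<uint32_t>(v);  // what the Mov wants
  assert(sign_extended == 0xFFFFFFFFFFFFFFFFull);
  assert(zero_extended == 0x00000000FFFFFFFFull);
  return 0;
}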
2581 2581
2582 2582
2583 void LCodeGen::DoConstantS(LConstantS* instr) { 2583 void LCodeGen::DoConstantS(LConstantS* instr) {
2584 __ Mov(ToRegister(instr->result()), Operand(instr->value())); 2584 __ Mov(ToRegister(instr->result()), Operand(instr->value()));
2585 } 2585 }
2586 2586
2587 2587
2588 void LCodeGen::DoConstantT(LConstantT* instr) { 2588 void LCodeGen::DoConstantT(LConstantT* instr) {
2589 Handle<Object> object = instr->value(isolate()); 2589 Handle<Object> object = instr->value(isolate());
2590 AllowDeferredHandleDereference smi_check; 2590 AllowDeferredHandleDereference smi_check;
2591 __ LoadObject(ToRegister(instr->result()), object); 2591 __ LoadObject(ToRegister(instr->result()), object);
2592 } 2592 }
2593 2593
2594 2594
2595 void LCodeGen::DoContext(LContext* instr) { 2595 void LCodeGen::DoContext(LContext* instr) {
2596 // If there is a non-return use, the context must be moved to a register. 2596 // If there is a non-return use, the context must be moved to a register.
2597 Register result = ToRegister(instr->result()); 2597 Register result = ToRegister(instr->result());
2598 if (info()->IsOptimizing()) { 2598 if (info()->IsOptimizing()) {
2599 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2599 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
2600 } else { 2600 } else {
2601 // If there is no frame, the context must be in cp. 2601 // If there is no frame, the context must be in cp.
2602 ASSERT(result.is(cp)); 2602 DCHECK(result.is(cp));
2603 } 2603 }
2604 } 2604 }
2605 2605
2606 2606
2607 void LCodeGen::DoCheckValue(LCheckValue* instr) { 2607 void LCodeGen::DoCheckValue(LCheckValue* instr) {
2608 Register reg = ToRegister(instr->value()); 2608 Register reg = ToRegister(instr->value());
2609 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 2609 Handle<HeapObject> object = instr->hydrogen()->object().handle();
2610 AllowDeferredHandleDereference smi_check; 2610 AllowDeferredHandleDereference smi_check;
2611 if (isolate()->heap()->InNewSpace(*object)) { 2611 if (isolate()->heap()->InNewSpace(*object)) {
2612 UseScratchRegisterScope temps(masm()); 2612 UseScratchRegisterScope temps(masm());
2613 Register temp = temps.AcquireX(); 2613 Register temp = temps.AcquireX();
2614 Handle<Cell> cell = isolate()->factory()->NewCell(object); 2614 Handle<Cell> cell = isolate()->factory()->NewCell(object);
2615 __ Mov(temp, Operand(Handle<Object>(cell))); 2615 __ Mov(temp, Operand(Handle<Object>(cell)));
2616 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); 2616 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
2617 __ Cmp(reg, temp); 2617 __ Cmp(reg, temp);
2618 } else { 2618 } else {
2619 __ Cmp(reg, Operand(object)); 2619 __ Cmp(reg, Operand(object));
2620 } 2620 }
2621 DeoptimizeIf(ne, instr->environment()); 2621 DeoptimizeIf(ne, instr->environment());
2622 } 2622 }
2623 2623
2624 2624
2625 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 2625 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
2626 last_lazy_deopt_pc_ = masm()->pc_offset(); 2626 last_lazy_deopt_pc_ = masm()->pc_offset();
2627 ASSERT(instr->HasEnvironment()); 2627 DCHECK(instr->HasEnvironment());
2628 LEnvironment* env = instr->environment(); 2628 LEnvironment* env = instr->environment();
2629 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 2629 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
2630 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2630 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2631 } 2631 }
2632 2632
2633 2633
2634 void LCodeGen::DoDateField(LDateField* instr) { 2634 void LCodeGen::DoDateField(LDateField* instr) {
2635 Register object = ToRegister(instr->date()); 2635 Register object = ToRegister(instr->date());
2636 Register result = ToRegister(instr->result()); 2636 Register result = ToRegister(instr->result());
2637 Register temp1 = x10; 2637 Register temp1 = x10;
2638 Register temp2 = x11; 2638 Register temp2 = x11;
2639 Smi* index = instr->index(); 2639 Smi* index = instr->index();
2640 Label runtime, done; 2640 Label runtime, done;
2641 2641
2642 ASSERT(object.is(result) && object.Is(x0)); 2642 DCHECK(object.is(result) && object.Is(x0));
2643 ASSERT(instr->IsMarkedAsCall()); 2643 DCHECK(instr->IsMarkedAsCall());
2644 2644
2645 DeoptimizeIfSmi(object, instr->environment()); 2645 DeoptimizeIfSmi(object, instr->environment());
2646 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); 2646 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
2647 DeoptimizeIf(ne, instr->environment()); 2647 DeoptimizeIf(ne, instr->environment());
2648 2648
2649 if (index->value() == 0) { 2649 if (index->value() == 0) {
2650 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); 2650 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
2651 } else { 2651 } else {
2652 if (index->value() < JSDate::kFirstUncachedField) { 2652 if (index->value() < JSDate::kFirstUncachedField) {
2653 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 2653 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
(...skipping 28 matching lines...)
2682 2682
2683 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); 2683 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
2684 Deoptimize(instr->environment(), &type); 2684 Deoptimize(instr->environment(), &type);
2685 } 2685 }
2686 2686
2687 2687
2688 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 2688 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
2689 Register dividend = ToRegister32(instr->dividend()); 2689 Register dividend = ToRegister32(instr->dividend());
2690 int32_t divisor = instr->divisor(); 2690 int32_t divisor = instr->divisor();
2691 Register result = ToRegister32(instr->result()); 2691 Register result = ToRegister32(instr->result());
2692 ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor))); 2692 DCHECK(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
2693 ASSERT(!result.is(dividend)); 2693 DCHECK(!result.is(dividend));
2694 2694
2695 // Check for (0 / -x) that will produce negative zero. 2695 // Check for (0 / -x) that will produce negative zero.
2696 HDiv* hdiv = instr->hydrogen(); 2696 HDiv* hdiv = instr->hydrogen();
2697 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 2697 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2698 DeoptimizeIfZero(dividend, instr->environment()); 2698 DeoptimizeIfZero(dividend, instr->environment());
2699 } 2699 }
2700 // Check for (kMinInt / -1). 2700 // Check for (kMinInt / -1).
2701 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 2701 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
2702 // Test dividend for kMinInt by subtracting one (cmp) and checking for 2702 // Test dividend for kMinInt by subtracting one (cmp) and checking for
2703 // overflow. 2703 // overflow.
(...skipping 23 matching lines...)
2727 } 2727 }
2728 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); 2728 if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
2729 if (divisor < 0) __ Neg(result, result); 2729 if (divisor < 0) __ Neg(result, result);
2730 } 2730 }
2731 2731
2732 2732
2733 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 2733 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
2734 Register dividend = ToRegister32(instr->dividend()); 2734 Register dividend = ToRegister32(instr->dividend());
2735 int32_t divisor = instr->divisor(); 2735 int32_t divisor = instr->divisor();
2736 Register result = ToRegister32(instr->result()); 2736 Register result = ToRegister32(instr->result());
2737 ASSERT(!AreAliased(dividend, result)); 2737 DCHECK(!AreAliased(dividend, result));
2738 2738
2739 if (divisor == 0) { 2739 if (divisor == 0) {
2740 Deoptimize(instr->environment()); 2740 Deoptimize(instr->environment());
2741 return; 2741 return;
2742 } 2742 }
2743 2743
2744 // Check for (0 / -x) that will produce negative zero. 2744 // Check for (0 / -x) that will produce negative zero.
2745 HDiv* hdiv = instr->hydrogen(); 2745 HDiv* hdiv = instr->hydrogen();
2746 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 2746 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
2747 DeoptimizeIfZero(dividend, instr->environment()); 2747 DeoptimizeIfZero(dividend, instr->environment());
2748 } 2748 }
2749 2749
2750 __ TruncatingDiv(result, dividend, Abs(divisor)); 2750 __ TruncatingDiv(result, dividend, Abs(divisor));
2751 if (divisor < 0) __ Neg(result, result); 2751 if (divisor < 0) __ Neg(result, result);
2752 2752
2753 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 2753 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
2754 Register temp = ToRegister32(instr->temp()); 2754 Register temp = ToRegister32(instr->temp());
2755 ASSERT(!AreAliased(dividend, result, temp)); 2755 DCHECK(!AreAliased(dividend, result, temp));
2756 __ Sxtw(dividend.X(), dividend); 2756 __ Sxtw(dividend.X(), dividend);
2757 __ Mov(temp, divisor); 2757 __ Mov(temp, divisor);
2758 __ Smsubl(temp.X(), result, temp, dividend.X()); 2758 __ Smsubl(temp.X(), result, temp, dividend.X());
2759 DeoptimizeIfNotZero(temp, instr->environment()); 2759 DeoptimizeIfNotZero(temp, instr->environment());
2760 } 2760 }
2761 } 2761 }
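On the Smsubl check above: when some use of the division result is not truncating, the code recomputes dividend - result * divisor; a nonzero remainder means the constant division was inexact and the code deopts. A minimal standalone model in plain C++ (DivideExact is a made-up name, not part of this CL):

#include <cassert>

// Models TruncatingDiv followed by the Smsubl remainder check.
bool DivideExact(int dividend, int divisor, int* result) {
  *result = dividend / divisor;                  // truncating quotient
  int remainder = dividend - *result * divisor;  // Smsubl analogue
  return remainder == 0;                         // nonzero -> deopt
}

int main() {
  int r;
  assert(DivideExact(12, 3, &r) && r == 4);
  assert(!DivideExact(13, 3, &r));  // inexact: optimized code would deopt
  return 0;
}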
2762 2762
2763 2763
2764 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 2764 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
2765 void LCodeGen::DoDivI(LDivI* instr) { 2765 void LCodeGen::DoDivI(LDivI* instr) {
2766 HBinaryOperation* hdiv = instr->hydrogen(); 2766 HBinaryOperation* hdiv = instr->hydrogen();
2767 Register dividend = ToRegister32(instr->dividend()); 2767 Register dividend = ToRegister32(instr->dividend());
2768 Register divisor = ToRegister32(instr->divisor()); 2768 Register divisor = ToRegister32(instr->divisor());
2769 Register result = ToRegister32(instr->result()); 2769 Register result = ToRegister32(instr->result());
2770 2770
2771 // Issue the division first, and then check for any deopt cases whilst the 2771 // Issue the division first, and then check for any deopt cases whilst the
2772 // result is computed. 2772 // result is computed.
2773 __ Sdiv(result, dividend, divisor); 2773 __ Sdiv(result, dividend, divisor);
2774 2774
2775 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 2775 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
2776 ASSERT_EQ(NULL, instr->temp()); 2776 DCHECK_EQ(NULL, instr->temp());
2777 return; 2777 return;
2778 } 2778 }
2779 2779
2780 // Check for x / 0. 2780 // Check for x / 0.
2781 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 2781 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
2782 DeoptimizeIfZero(divisor, instr->environment()); 2782 DeoptimizeIfZero(divisor, instr->environment());
2783 } 2783 }
2784 2784
2785 // Check for (0 / -x) as that will produce negative zero. 2785 // Check for (0 / -x) as that will produce negative zero.
2786 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 2786 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
(...skipping 52 matching lines...)
2839 // Nothing to see here, move on! 2839 // Nothing to see here, move on!
2840 } 2840 }
2841 2841
2842 2842
2843 void LCodeGen::DoDummyUse(LDummyUse* instr) { 2843 void LCodeGen::DoDummyUse(LDummyUse* instr) {
2844 // Nothing to see here, move on! 2844 // Nothing to see here, move on!
2845 } 2845 }
2846 2846
2847 2847
2848 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { 2848 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
2849 ASSERT(ToRegister(instr->context()).is(cp)); 2849 DCHECK(ToRegister(instr->context()).is(cp));
2850 // FunctionLiteral instruction is marked as call, we can trash any register. 2850 // FunctionLiteral instruction is marked as call, we can trash any register.
2851 ASSERT(instr->IsMarkedAsCall()); 2851 DCHECK(instr->IsMarkedAsCall());
2852 2852
2853 // Use the fast case closure allocation code that allocates in new 2853 // Use the fast case closure allocation code that allocates in new
2854 // space for nested functions that don't need literals cloning. 2854 // space for nested functions that don't need literals cloning.
2855 bool pretenure = instr->hydrogen()->pretenure(); 2855 bool pretenure = instr->hydrogen()->pretenure();
2856 if (!pretenure && instr->hydrogen()->has_no_literals()) { 2856 if (!pretenure && instr->hydrogen()->has_no_literals()) {
2857 FastNewClosureStub stub(isolate(), 2857 FastNewClosureStub stub(isolate(),
2858 instr->hydrogen()->strict_mode(), 2858 instr->hydrogen()->strict_mode(),
2859 instr->hydrogen()->is_generator()); 2859 instr->hydrogen()->is_generator());
2860 __ Mov(x2, Operand(instr->hydrogen()->shared_info())); 2860 __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
2861 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2861 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
(...skipping 25 matching lines...)
2887 DeoptimizeIfZero(result, instr->environment()); 2887 DeoptimizeIfZero(result, instr->environment());
2888 2888
2889 __ Bind(&done); 2889 __ Bind(&done);
2890 } 2890 }
2891 2891
2892 2892
2893 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { 2893 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
2894 Register object = ToRegister(instr->object()); 2894 Register object = ToRegister(instr->object());
2895 Register null_value = x5; 2895 Register null_value = x5;
2896 2896
2897 ASSERT(instr->IsMarkedAsCall()); 2897 DCHECK(instr->IsMarkedAsCall());
2898 ASSERT(object.Is(x0)); 2898 DCHECK(object.Is(x0));
2899 2899
2900 DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, 2900 DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
2901 instr->environment()); 2901 instr->environment());
2902 2902
2903 __ LoadRoot(null_value, Heap::kNullValueRootIndex); 2903 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
2904 __ Cmp(object, null_value); 2904 __ Cmp(object, null_value);
2905 DeoptimizeIf(eq, instr->environment()); 2905 DeoptimizeIf(eq, instr->environment());
2906 2906
2907 DeoptimizeIfSmi(object, instr->environment()); 2907 DeoptimizeIfSmi(object, instr->environment());
2908 2908
(...skipping 19 matching lines...)
2928 } 2928 }
2929 2929
2930 2930
2931 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2931 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2932 Register input = ToRegister(instr->value()); 2932 Register input = ToRegister(instr->value());
2933 Register result = ToRegister(instr->result()); 2933 Register result = ToRegister(instr->result());
2934 2934
2935 __ AssertString(input); 2935 __ AssertString(input);
2936 2936
2937 // Assert that we can use a W register load to get the hash. 2937 // Assert that we can use a W register load to get the hash.
2938 ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits); 2938 DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
2939 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset)); 2939 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
2940 __ IndexFromHash(result, result); 2940 __ IndexFromHash(result, result);
2941 } 2941 }
2942 2942
2943 2943
2944 void LCodeGen::EmitGoto(int block) { 2944 void LCodeGen::EmitGoto(int block) {
2945 // Do not emit jump if we are emitting a goto to the next block. 2945 // Do not emit jump if we are emitting a goto to the next block.
2946 if (!IsNextEmittedBlock(block)) { 2946 if (!IsNextEmittedBlock(block)) {
2947 __ B(chunk_->GetAssemblyLabel(LookupDestination(block))); 2947 __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
2948 } 2948 }
2949 } 2949 }
2950 2950
2951 2951
2952 void LCodeGen::DoGoto(LGoto* instr) { 2952 void LCodeGen::DoGoto(LGoto* instr) {
2953 EmitGoto(instr->block_id()); 2953 EmitGoto(instr->block_id());
2954 } 2954 }
2955 2955
2956 2956
2957 void LCodeGen::DoHasCachedArrayIndexAndBranch( 2957 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2958 LHasCachedArrayIndexAndBranch* instr) { 2958 LHasCachedArrayIndexAndBranch* instr) {
2959 Register input = ToRegister(instr->value()); 2959 Register input = ToRegister(instr->value());
2960 Register temp = ToRegister32(instr->temp()); 2960 Register temp = ToRegister32(instr->temp());
2961 2961
2962 // Assert that the cache status bits fit in a W register. 2962 // Assert that the cache status bits fit in a W register.
2963 ASSERT(is_uint32(String::kContainsCachedArrayIndexMask)); 2963 DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
2964 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset)); 2964 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
2965 __ Tst(temp, String::kContainsCachedArrayIndexMask); 2965 __ Tst(temp, String::kContainsCachedArrayIndexMask);
2966 EmitBranch(instr, eq); 2966 EmitBranch(instr, eq);
2967 } 2967 }
2968 2968
2969 2969
2970 // HHasInstanceTypeAndBranch instruction is built with an interval of types 2970 // HHasInstanceTypeAndBranch instruction is built with an interval of types
2971 // to test, but is only used in very restricted ways. The only possible kinds 2971 // to test, but is only used in very restricted ways. The only possible kinds
2972 // of intervals are: 2972 // of intervals are:
2973 // - [ FIRST_TYPE, instr->to() ] 2973 // - [ FIRST_TYPE, instr->to() ]
2974 // - [ instr->form(), LAST_TYPE ] 2974 // - [ instr->form(), LAST_TYPE ]
2975 // - instr->from() == instr->to() 2975 // - instr->from() == instr->to()
2976 // 2976 //
2977 // These kinds of intervals can be checked with only one compare instruction 2977 // These kinds of intervals can be checked with only one compare instruction
2978 // provided the correct value and test condition are used. 2978 // provided the correct value and test condition are used.
2979 // 2979 //
2980 // TestType() will return the value to use in the compare instruction and 2980 // TestType() will return the value to use in the compare instruction and
2981 // BranchCondition() will return the condition to use depending on the kind 2981 // BranchCondition() will return the condition to use depending on the kind
2982 // of interval actually specified in the instruction. 2982 // of interval actually specified in the instruction.
2983 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2983 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2984 InstanceType from = instr->from(); 2984 InstanceType from = instr->from();
2985 InstanceType to = instr->to(); 2985 InstanceType to = instr->to();
2986 if (from == FIRST_TYPE) return to; 2986 if (from == FIRST_TYPE) return to;
2987 ASSERT((from == to) || (to == LAST_TYPE)); 2987 DCHECK((from == to) || (to == LAST_TYPE));
2988 return from; 2988 return from;
2989 } 2989 }
2990 2990
2991 2991
2992 // See comment above TestType function for what this function does. 2992 // See comment above TestType function for what this function does.
2993 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2993 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2994 InstanceType from = instr->from(); 2994 InstanceType from = instr->from();
2995 InstanceType to = instr->to(); 2995 InstanceType to = instr->to();
2996 if (from == to) return eq; 2996 if (from == to) return eq;
2997 if (to == LAST_TYPE) return hs; 2997 if (to == LAST_TYPE) return hs;
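A sketch of the single-compare interval test described in the comment above, with hs/ls read as unsigned >= / <= (the FIRST_TYPE/LAST_TYPE bounds here are illustrative):

```cpp
#include <cstdint>

enum : uint16_t { FIRST_TYPE = 0, LAST_TYPE = 0xFFFF };  // illustrative bounds

bool HasInstanceTypeInRange(uint16_t type, uint16_t from, uint16_t to) {
  if (from == to) return type == from;        // Cmp against from, b.eq
  if (from == FIRST_TYPE) return type <= to;  // Cmp against TestType() == to, b.ls
  if (to == LAST_TYPE) return type >= from;   // Cmp against TestType() == from, b.hs
  // Any other interval would need two compares; the instruction is never
  // built with one.
  return false;
}
```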
(...skipping 20 matching lines...)
3018 Register base = ToRegister(instr->base_object()); 3018 Register base = ToRegister(instr->base_object());
3019 if (instr->offset()->IsConstantOperand()) { 3019 if (instr->offset()->IsConstantOperand()) {
3020 __ Add(result, base, ToOperand32I(instr->offset())); 3020 __ Add(result, base, ToOperand32I(instr->offset()));
3021 } else { 3021 } else {
3022 __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW)); 3022 __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
3023 } 3023 }
3024 } 3024 }
3025 3025
3026 3026
3027 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 3027 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
3028 ASSERT(ToRegister(instr->context()).is(cp)); 3028 DCHECK(ToRegister(instr->context()).is(cp));
3029 // Assert that the arguments are in the registers expected by InstanceofStub. 3029 // Assert that the arguments are in the registers expected by InstanceofStub.
3030 ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left())); 3030 DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
3031 ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right())); 3031 DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));
3032 3032
3033 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); 3033 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
3034 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 3034 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3035 3035
3036 // InstanceofStub returns a result in x0: 3036 // InstanceofStub returns a result in x0:
3037 // 0 => instance 3037 // 0 => instance
3038 // smi 1 => not an instance. 3038 // smi 1 => not an instance.
3039 __ Cmp(x0, 0); 3039 __ Cmp(x0, 0);
3040 __ LoadTrueFalseRoots(x0, x1); 3040 __ LoadTrueFalseRoots(x0, x1);
3041 __ Csel(x0, x0, x1, eq); 3041 __ Csel(x0, x0, x1, eq);
(...skipping 18 matching lines...)
3060 new(zone()) DeferredInstanceOfKnownGlobal(this, instr); 3060 new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
3061 3061
3062 Label map_check, return_false, cache_miss, done; 3062 Label map_check, return_false, cache_miss, done;
3063 Register object = ToRegister(instr->value()); 3063 Register object = ToRegister(instr->value());
3064 Register result = ToRegister(instr->result()); 3064 Register result = ToRegister(instr->result());
3065 // x4 is expected in the associated deferred code and stub. 3065 // x4 is expected in the associated deferred code and stub.
3066 Register map_check_site = x4; 3066 Register map_check_site = x4;
3067 Register map = x5; 3067 Register map = x5;
3068 3068
3069 // This instruction is marked as a call. We can clobber any register. 3069 // This instruction is marked as a call. We can clobber any register.
3070 ASSERT(instr->IsMarkedAsCall()); 3070 DCHECK(instr->IsMarkedAsCall());
3071 3071
3072 // We must take into account that object is in x11. 3072 // We must take into account that object is in x11.
3073 ASSERT(object.Is(x11)); 3073 DCHECK(object.Is(x11));
3074 Register scratch = x10; 3074 Register scratch = x10;
3075 3075
3076 // A Smi is not instance of anything. 3076 // A Smi is not instance of anything.
3077 __ JumpIfSmi(object, &return_false); 3077 __ JumpIfSmi(object, &return_false);
3078 3078
3079 // This is the inlined call site instanceof cache. The two occurrences of the 3079 // This is the inlined call site instanceof cache. The two occurrences of the
3080 // hole value will be patched to the last map/result pair generated by the 3080 // hole value will be patched to the last map/result pair generated by the
3081 // instanceof stub. 3081 // instanceof stub.
3082 __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); 3082 __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3083 { 3083 {
3084 // Below we use Factory::the_hole_value() on purpose instead of loading from 3084 // Below we use Factory::the_hole_value() on purpose instead of loading from
3085 // the root array to force relocation and later be able to patch with a 3085 // the root array to force relocation and later be able to patch with a
3086 // custom value. 3086 // custom value.
3087 InstructionAccurateScope scope(masm(), 5); 3087 InstructionAccurateScope scope(masm(), 5);
3088 __ bind(&map_check); 3088 __ bind(&map_check);
3089 // Will be patched with the cached map. 3089 // Will be patched with the cached map.
3090 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); 3090 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
3091 __ ldr(scratch, Immediate(Handle<Object>(cell))); 3091 __ ldr(scratch, Immediate(Handle<Object>(cell)));
3092 __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset)); 3092 __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
3093 __ cmp(map, scratch); 3093 __ cmp(map, scratch);
3094 __ b(&cache_miss, ne); 3094 __ b(&cache_miss, ne);
3095 // The address of this instruction is computed relative to the map check 3095 // The address of this instruction is computed relative to the map check
3096 // above, so check the size of the code generated. 3096 // above, so check the size of the code generated.
3097 ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4); 3097 DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
3098 // Will be patched with the cached result. 3098 // Will be patched with the cached result.
3099 __ ldr(result, Immediate(factory()->the_hole_value())); 3099 __ ldr(result, Immediate(factory()->the_hole_value()));
3100 } 3100 }
3101 __ B(&done); 3101 __ B(&done);
3102 3102
3103 // The inlined call site cache did not match. 3103 // The inlined call site cache did not match.
3104 // Check null and string before calling the deferred code. 3104 // Check null and string before calling the deferred code.
3105 __ Bind(&cache_miss); 3105 __ Bind(&cache_miss);
3106 // Compute the address of the map check. It must not be clobbered until the 3106 // Compute the address of the map check. It must not be clobbered until the
3107 // InstanceOfStub has used it. 3107 // InstanceOfStub has used it.
(...skipping 14 matching lines...)
3122 __ LoadRoot(result, Heap::kFalseValueRootIndex); 3122 __ LoadRoot(result, Heap::kFalseValueRootIndex);
3123 3123
3124 // Here result is either true or false. 3124 // Here result is either true or false.
3125 __ Bind(deferred->exit()); 3125 __ Bind(deferred->exit());
3126 __ Bind(&done); 3126 __ Bind(&done);
3127 } 3127 }
3128 3128
3129 3129
3130 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { 3130 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
3131 Register result = ToRegister(instr->result()); 3131 Register result = ToRegister(instr->result());
3132 ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0. 3132 DCHECK(result.Is(x0)); // InstanceofStub returns its result in x0.
3133 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; 3133 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
3134 flags = static_cast<InstanceofStub::Flags>( 3134 flags = static_cast<InstanceofStub::Flags>(
3135 flags | InstanceofStub::kArgsInRegisters); 3135 flags | InstanceofStub::kArgsInRegisters);
3136 flags = static_cast<InstanceofStub::Flags>( 3136 flags = static_cast<InstanceofStub::Flags>(
3137 flags | InstanceofStub::kReturnTrueFalseObject); 3137 flags | InstanceofStub::kReturnTrueFalseObject);
3138 flags = static_cast<InstanceofStub::Flags>( 3138 flags = static_cast<InstanceofStub::Flags>(
3139 flags | InstanceofStub::kCallSiteInlineCheck); 3139 flags | InstanceofStub::kCallSiteInlineCheck);
3140 3140
3141 PushSafepointRegistersScope scope(this); 3141 PushSafepointRegistersScope scope(this);
3142 LoadContextFromDeferred(instr->context()); 3142 LoadContextFromDeferred(instr->context());
3143 3143
3144 // Prepare InstanceofStub arguments. 3144 // Prepare InstanceofStub arguments.
3145 ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left())); 3145 DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
3146 __ LoadObject(InstanceofStub::right(), instr->function()); 3146 __ LoadObject(InstanceofStub::right(), instr->function());
3147 3147
3148 InstanceofStub stub(isolate(), flags); 3148 InstanceofStub stub(isolate(), flags);
3149 CallCodeGeneric(stub.GetCode(), 3149 CallCodeGeneric(stub.GetCode(),
3150 RelocInfo::CODE_TARGET, 3150 RelocInfo::CODE_TARGET,
3151 instr, 3151 instr,
3152 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 3152 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
3153 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); 3153 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
3154 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 3154 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
3155 3155
3156 // Put the result value into the result register slot. 3156 // Put the result value into the result register slot.
3157 __ StoreToSafepointRegisterSlot(result, result); 3157 __ StoreToSafepointRegisterSlot(result, result);
3158 } 3158 }
3159 3159
3160 3160
3161 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { 3161 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
3162 DoGap(instr); 3162 DoGap(instr);
3163 } 3163 }
3164 3164
3165 3165
3166 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 3166 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3167 Register value = ToRegister32(instr->value()); 3167 Register value = ToRegister32(instr->value());
3168 DoubleRegister result = ToDoubleRegister(instr->result()); 3168 DoubleRegister result = ToDoubleRegister(instr->result());
3169 __ Scvtf(result, value); 3169 __ Scvtf(result, value);
3170 } 3170 }
3171 3171
3172 3172
3173 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 3173 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3174 ASSERT(ToRegister(instr->context()).is(cp)); 3174 DCHECK(ToRegister(instr->context()).is(cp));
3175 // The function is required to be in x1. 3175 // The function is required to be in x1.
3176 ASSERT(ToRegister(instr->function()).is(x1)); 3176 DCHECK(ToRegister(instr->function()).is(x1));
3177 ASSERT(instr->HasPointerMap()); 3177 DCHECK(instr->HasPointerMap());
3178 3178
3179 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); 3179 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3180 if (known_function.is_null()) { 3180 if (known_function.is_null()) {
3181 LPointerMap* pointers = instr->pointer_map(); 3181 LPointerMap* pointers = instr->pointer_map();
3182 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3182 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3183 ParameterCount count(instr->arity()); 3183 ParameterCount count(instr->arity());
3184 __ InvokeFunction(x1, count, CALL_FUNCTION, generator); 3184 __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
3185 } else { 3185 } else {
3186 CallKnownFunction(known_function, 3186 CallKnownFunction(known_function,
3187 instr->hydrogen()->formal_parameter_count(), 3187 instr->hydrogen()->formal_parameter_count(),
(...skipping 170 matching lines...)
3358 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); 3358 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
3359 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); 3359 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
3360 if (instr->hydrogen()->RequiresHoleCheck()) { 3360 if (instr->hydrogen()->RequiresHoleCheck()) {
3361 DeoptimizeIfRoot( 3361 DeoptimizeIfRoot(
3362 result, Heap::kTheHoleValueRootIndex, instr->environment()); 3362 result, Heap::kTheHoleValueRootIndex, instr->environment());
3363 } 3363 }
3364 } 3364 }
3365 3365
3366 3366
3367 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 3367 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3368 ASSERT(ToRegister(instr->context()).is(cp)); 3368 DCHECK(ToRegister(instr->context()).is(cp));
3369 ASSERT(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); 3369 DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
3370 ASSERT(ToRegister(instr->result()).Is(x0)); 3370 DCHECK(ToRegister(instr->result()).Is(x0));
3371 __ Mov(LoadIC::NameRegister(), Operand(instr->name())); 3371 __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
3372 if (FLAG_vector_ics) { 3372 if (FLAG_vector_ics) {
3373 Register vector = ToRegister(instr->temp_vector()); 3373 Register vector = ToRegister(instr->temp_vector());
3374 ASSERT(vector.is(LoadIC::VectorRegister())); 3374 DCHECK(vector.is(LoadIC::VectorRegister()));
3375 __ Mov(vector, instr->hydrogen()->feedback_vector()); 3375 __ Mov(vector, instr->hydrogen()->feedback_vector());
3376 // No need to allocate this register. 3376 // No need to allocate this register.
3377 ASSERT(LoadIC::SlotRegister().is(x0)); 3377 DCHECK(LoadIC::SlotRegister().is(x0));
3378 __ Mov(LoadIC::SlotRegister(), 3378 __ Mov(LoadIC::SlotRegister(),
3379 Smi::FromInt(instr->hydrogen()->slot())); 3379 Smi::FromInt(instr->hydrogen()->slot()));
3380 } 3380 }
3381 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; 3381 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3382 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); 3382 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
3383 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3383 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3384 } 3384 }
3385 3385
3386 3386
3387 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand( 3387 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
(...skipping 14 matching lines...)
3402 3402
3403 if (key_is_smi) { 3403 if (key_is_smi) {
3404 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift)); 3404 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
3405 return MemOperand(scratch, base_offset); 3405 return MemOperand(scratch, base_offset);
3406 } 3406 }
3407 3407
3408 if (base_offset == 0) { 3408 if (base_offset == 0) {
3409 return MemOperand(base, key, SXTW, element_size_shift); 3409 return MemOperand(base, key, SXTW, element_size_shift);
3410 } 3410 }
3411 3411
3412 ASSERT(!AreAliased(scratch, key)); 3412 DCHECK(!AreAliased(scratch, key));
3413 __ Add(scratch, base, base_offset); 3413 __ Add(scratch, base, base_offset);
3414 return MemOperand(scratch, key, SXTW, element_size_shift); 3414 return MemOperand(scratch, key, SXTW, element_size_shift);
3415 } 3415 }
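The three MemOperand cases above reduce to the following address arithmetic, sketched here under the arm64 tagging scheme assumed throughout this file (smi payload in the upper 32 bits, kSmiShift == 32):

```cpp
#include <cstdint>

// element_size_shift is log2 of the element size in bytes.
uintptr_t KeyedExternalElementAddress(uintptr_t base, int64_t key,
                                      bool key_is_smi,
                                      int element_size_shift,
                                      int64_t base_offset) {
  if (key_is_smi) {
    // Operand::UntagSmiAndScale: an arithmetic shift right by 32 untags,
    // then the payload is scaled by the element size.
    int64_t untagged = key >> 32;
    return base + (untagged << element_size_shift) + base_offset;
  }
  // A non-smi key is a 32-bit value whose top bits may hold garbage, so
  // it is sign-extended (the SXTW in the operands above) before scaling.
  int64_t extended = static_cast<int32_t>(key);
  return base + base_offset + (extended << element_size_shift);
}
```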
3416 3416
3417 3417
3418 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) { 3418 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
3419 Register ext_ptr = ToRegister(instr->elements()); 3419 Register ext_ptr = ToRegister(instr->elements());
3420 Register scratch; 3420 Register scratch;
3421 ElementsKind elements_kind = instr->elements_kind(); 3421 ElementsKind elements_kind = instr->elements_kind();
3422 3422
3423 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 3423 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3424 bool key_is_constant = instr->key()->IsConstantOperand(); 3424 bool key_is_constant = instr->key()->IsConstantOperand();
3425 Register key = no_reg; 3425 Register key = no_reg;
3426 int constant_key = 0; 3426 int constant_key = 0;
3427 if (key_is_constant) { 3427 if (key_is_constant) {
3428 ASSERT(instr->temp() == NULL); 3428 DCHECK(instr->temp() == NULL);
3429 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3429 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3430 if (constant_key & 0xf0000000) { 3430 if (constant_key & 0xf0000000) {
3431 Abort(kArrayIndexConstantValueTooBig); 3431 Abort(kArrayIndexConstantValueTooBig);
3432 } 3432 }
3433 } else { 3433 } else {
3434 scratch = ToRegister(instr->temp()); 3434 scratch = ToRegister(instr->temp());
3435 key = ToRegister(instr->key()); 3435 key = ToRegister(instr->key());
3436 } 3436 }
3437 3437
3438 MemOperand mem_op = 3438 MemOperand mem_op =
(...skipping 76 matching lines...)
3515 STATIC_ASSERT(kSmiTag == 0); 3515 STATIC_ASSERT(kSmiTag == 0);
3516 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3516 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3517 3517
3518 // Even though the HLoad/StoreKeyed instructions force the input 3518 // Even though the HLoad/StoreKeyed instructions force the input
3519 // representation for the key to be an integer, the input gets replaced during 3519 // representation for the key to be an integer, the input gets replaced during
3520 // bounds check elimination with the index argument to the bounds check, which 3520 // bounds check elimination with the index argument to the bounds check, which
3521 // can be tagged, so that case must be handled here, too. 3521 // can be tagged, so that case must be handled here, too.
3522 if (key_is_tagged) { 3522 if (key_is_tagged) {
3523 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift)); 3523 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
3524 if (representation.IsInteger32()) { 3524 if (representation.IsInteger32()) {
3525 ASSERT(elements_kind == FAST_SMI_ELEMENTS); 3525 DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3526 // Read or write only the smi payload in the case of fast smi arrays. 3526 // Read or write only the smi payload in the case of fast smi arrays.
3527 return UntagSmiMemOperand(base, base_offset); 3527 return UntagSmiMemOperand(base, base_offset);
3528 } else { 3528 } else {
3529 return MemOperand(base, base_offset); 3529 return MemOperand(base, base_offset);
3530 } 3530 }
3531 } else { 3531 } else {
3532 // Sign extend key because it could be a 32-bit negative value or contain 3532 // Sign extend key because it could be a 32-bit negative value or contain
3533 // garbage in the top 32-bits. The address computation happens in 64-bit. 3533 // garbage in the top 32-bits. The address computation happens in 64-bit.
3534 ASSERT((element_size_shift >= 0) && (element_size_shift <= 4)); 3534 DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
3535 if (representation.IsInteger32()) { 3535 if (representation.IsInteger32()) {
3536 ASSERT(elements_kind == FAST_SMI_ELEMENTS); 3536 DCHECK(elements_kind == FAST_SMI_ELEMENTS);
3537 // Read or write only the smi payload in the case of fast smi arrays. 3537 // Read or write only the smi payload in the case of fast smi arrays.
3538 __ Add(base, elements, Operand(key, SXTW, element_size_shift)); 3538 __ Add(base, elements, Operand(key, SXTW, element_size_shift));
3539 return UntagSmiMemOperand(base, base_offset); 3539 return UntagSmiMemOperand(base, base_offset);
3540 } else { 3540 } else {
3541 __ Add(base, elements, base_offset); 3541 __ Add(base, elements, base_offset);
3542 return MemOperand(base, key, SXTW, element_size_shift); 3542 return MemOperand(base, key, SXTW, element_size_shift);
3543 } 3543 }
3544 } 3544 }
3545 } 3545 }
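Why UntagSmiMemOperand needs no explicit untag instruction: with the payload in the upper word of the 64-bit slot, on a little-endian target the payload is simply the aligned 32-bit word 4 bytes above the slot's address. A sketch (not V8 code):

```cpp
#include <cstdint>
#include <cstring>

int32_t LoadUntaggedSmi(const int64_t* slot) {
  int32_t payload;
  // Equivalent to Ldr(w_reg, MemOperand(slot_address + 4)).
  std::memcpy(&payload, reinterpret_cast<const char*>(slot) + 4,
              sizeof(payload));
  return payload;
}

// Example: int64_t v = int64_t{7} << 32;  LoadUntaggedSmi(&v) == 7.
```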
3546 3546
3547 3547
3548 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) { 3548 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
3549 Register elements = ToRegister(instr->elements()); 3549 Register elements = ToRegister(instr->elements());
3550 DoubleRegister result = ToDoubleRegister(instr->result()); 3550 DoubleRegister result = ToDoubleRegister(instr->result());
3551 MemOperand mem_op; 3551 MemOperand mem_op;
3552 3552
3553 if (instr->key()->IsConstantOperand()) { 3553 if (instr->key()->IsConstantOperand()) {
3554 ASSERT(instr->hydrogen()->RequiresHoleCheck() || 3554 DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
3555 (instr->temp() == NULL)); 3555 (instr->temp() == NULL));
3556 3556
3557 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3557 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3558 if (constant_key & 0xf0000000) { 3558 if (constant_key & 0xf0000000) {
3559 Abort(kArrayIndexConstantValueTooBig); 3559 Abort(kArrayIndexConstantValueTooBig);
3560 } 3560 }
3561 int offset = instr->base_offset() + constant_key * kDoubleSize; 3561 int offset = instr->base_offset() + constant_key * kDoubleSize;
3562 mem_op = MemOperand(elements, offset); 3562 mem_op = MemOperand(elements, offset);
3563 } else { 3563 } else {
3564 Register load_base = ToRegister(instr->temp()); 3564 Register load_base = ToRegister(instr->temp());
(...skipping 19 matching lines...)
3584 } 3584 }
3585 3585
3586 3586
3587 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { 3587 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
3588 Register elements = ToRegister(instr->elements()); 3588 Register elements = ToRegister(instr->elements());
3589 Register result = ToRegister(instr->result()); 3589 Register result = ToRegister(instr->result());
3590 MemOperand mem_op; 3590 MemOperand mem_op;
3591 3591
3592 Representation representation = instr->hydrogen()->representation(); 3592 Representation representation = instr->hydrogen()->representation();
3593 if (instr->key()->IsConstantOperand()) { 3593 if (instr->key()->IsConstantOperand()) {
3594 ASSERT(instr->temp() == NULL); 3594 DCHECK(instr->temp() == NULL);
3595 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3595 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3596 int offset = instr->base_offset() + 3596 int offset = instr->base_offset() +
3597 ToInteger32(const_operand) * kPointerSize; 3597 ToInteger32(const_operand) * kPointerSize;
3598 if (representation.IsInteger32()) { 3598 if (representation.IsInteger32()) {
3599 ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); 3599 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
3600 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); 3600 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
3601 STATIC_ASSERT(kSmiTag == 0); 3601 STATIC_ASSERT(kSmiTag == 0);
3602 mem_op = UntagSmiMemOperand(elements, offset); 3602 mem_op = UntagSmiMemOperand(elements, offset);
3603 } else { 3603 } else {
3604 mem_op = MemOperand(elements, offset); 3604 mem_op = MemOperand(elements, offset);
3605 } 3605 }
3606 } else { 3606 } else {
3607 Register load_base = ToRegister(instr->temp()); 3607 Register load_base = ToRegister(instr->temp());
3608 Register key = ToRegister(instr->key()); 3608 Register key = ToRegister(instr->key());
3609 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); 3609 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
(...skipping 10 matching lines...)
3620 DeoptimizeIfNotSmi(result, instr->environment()); 3620 DeoptimizeIfNotSmi(result, instr->environment());
3621 } else { 3621 } else {
3622 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, 3622 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
3623 instr->environment()); 3623 instr->environment());
3624 } 3624 }
3625 } 3625 }
3626 } 3626 }
3627 3627
3628 3628
3629 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3629 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3630 ASSERT(ToRegister(instr->context()).is(cp)); 3630 DCHECK(ToRegister(instr->context()).is(cp));
3631 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); 3631 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3632 ASSERT(ToRegister(instr->key()).is(LoadIC::NameRegister())); 3632 DCHECK(ToRegister(instr->key()).is(LoadIC::NameRegister()));
3633 if (FLAG_vector_ics) { 3633 if (FLAG_vector_ics) {
3634 Register vector = ToRegister(instr->temp_vector()); 3634 Register vector = ToRegister(instr->temp_vector());
3635 ASSERT(vector.is(LoadIC::VectorRegister())); 3635 DCHECK(vector.is(LoadIC::VectorRegister()));
3636 __ Mov(vector, instr->hydrogen()->feedback_vector()); 3636 __ Mov(vector, instr->hydrogen()->feedback_vector());
3637 // No need to allocate this register. 3637 // No need to allocate this register.
3638 ASSERT(LoadIC::SlotRegister().is(x0)); 3638 DCHECK(LoadIC::SlotRegister().is(x0));
3639 __ Mov(LoadIC::SlotRegister(), 3639 __ Mov(LoadIC::SlotRegister(),
3640 Smi::FromInt(instr->hydrogen()->slot())); 3640 Smi::FromInt(instr->hydrogen()->slot()));
3641 } 3641 }
3642 3642
3643 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); 3643 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3644 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3644 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3645 3645
3646 ASSERT(ToRegister(instr->result()).Is(x0)); 3646 DCHECK(ToRegister(instr->result()).Is(x0));
3647 } 3647 }
3648 3648
3649 3649
3650 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 3650 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3651 HObjectAccess access = instr->hydrogen()->access(); 3651 HObjectAccess access = instr->hydrogen()->access();
3652 int offset = access.offset(); 3652 int offset = access.offset();
3653 Register object = ToRegister(instr->object()); 3653 Register object = ToRegister(instr->object());
3654 3654
3655 if (access.IsExternalMemory()) { 3655 if (access.IsExternalMemory()) {
3656 Register result = ToRegister(instr->result()); 3656 Register result = ToRegister(instr->result());
(...skipping 24 matching lines...)
3681 STATIC_ASSERT(kSmiTag == 0); 3681 STATIC_ASSERT(kSmiTag == 0);
3682 __ Load(result, UntagSmiFieldMemOperand(source, offset), 3682 __ Load(result, UntagSmiFieldMemOperand(source, offset),
3683 Representation::Integer32()); 3683 Representation::Integer32());
3684 } else { 3684 } else {
3685 __ Load(result, FieldMemOperand(source, offset), access.representation()); 3685 __ Load(result, FieldMemOperand(source, offset), access.representation());
3686 } 3686 }
3687 } 3687 }
3688 3688
3689 3689
3690 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 3690 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3691 ASSERT(ToRegister(instr->context()).is(cp)); 3691 DCHECK(ToRegister(instr->context()).is(cp));
3692 // LoadIC expects name and receiver in registers. 3692 // LoadIC expects name and receiver in registers.
3693 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); 3693 DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3694 __ Mov(LoadIC::NameRegister(), Operand(instr->name())); 3694 __ Mov(LoadIC::NameRegister(), Operand(instr->name()));
3695 if (FLAG_vector_ics) { 3695 if (FLAG_vector_ics) {
3696 Register vector = ToRegister(instr->temp_vector()); 3696 Register vector = ToRegister(instr->temp_vector());
3697 ASSERT(vector.is(LoadIC::VectorRegister())); 3697 DCHECK(vector.is(LoadIC::VectorRegister()));
3698 __ Mov(vector, instr->hydrogen()->feedback_vector()); 3698 __ Mov(vector, instr->hydrogen()->feedback_vector());
3699 // No need to allocate this register. 3699 // No need to allocate this register.
3700 ASSERT(LoadIC::SlotRegister().is(x0)); 3700 DCHECK(LoadIC::SlotRegister().is(x0));
3701 __ Mov(LoadIC::SlotRegister(), 3701 __ Mov(LoadIC::SlotRegister(),
3702 Smi::FromInt(instr->hydrogen()->slot())); 3702 Smi::FromInt(instr->hydrogen()->slot()));
3703 } 3703 }
3704 3704
3705 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); 3705 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3706 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3706 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3707 3707
3708 ASSERT(ToRegister(instr->result()).is(x0)); 3708 DCHECK(ToRegister(instr->result()).is(x0));
3709 } 3709 }
3710 3710
3711 3711
3712 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 3712 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3713 Register result = ToRegister(instr->result()); 3713 Register result = ToRegister(instr->result());
3714 __ LoadRoot(result, instr->index()); 3714 __ LoadRoot(result, instr->index());
3715 } 3715 }
3716 3716
3717 3717
3718 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { 3718 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
(...skipping 25 matching lines...)
3744 Label* allocation_entry) { 3744 Label* allocation_entry) {
3745 // Handle the tricky cases of MathAbsTagged: 3745 // Handle the tricky cases of MathAbsTagged:
3746 // - HeapNumber inputs. 3746 // - HeapNumber inputs.
3747 // - Negative inputs produce a positive result, so a new HeapNumber is 3747 // - Negative inputs produce a positive result, so a new HeapNumber is
3748 // allocated to hold it. 3748 // allocated to hold it.
3749 // - Positive inputs are returned as-is, since there is no need to allocate 3749 // - Positive inputs are returned as-is, since there is no need to allocate
3750 // a new HeapNumber for the result. 3750 // a new HeapNumber for the result.
3751 // - The (smi) input -0x80000000 produces +0x80000000, which does not fit in 3751 // - The (smi) input -0x80000000 produces +0x80000000, which does not fit in
3752 // a smi. In this case, the inline code sets the result and jumps directly 3752 // a smi. In this case, the inline code sets the result and jumps directly
3753 // to the allocation_entry label. 3753 // to the allocation_entry label.
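A worked version of the last bullet, assuming the payload-in-upper-word tagging used on arm64: negating a tagged smi keeps the low-order tag bits zero, so the result is itself a valid smi in every case but one.

```cpp
#include <cstdint>

// The payload -0x80000000 tags to INT64_MIN, the one 64-bit value whose
// negation overflows; the generated Negs + V-flag check routes exactly
// that case to the HeapNumber allocation path.
bool NegateTaggedSmi(int64_t tagged, int64_t* result) {
  if (tagged == INT64_MIN) return false;  // |-0x80000000| is not a smi
  *result = -tagged;                      // still a valid tagged smi
  return true;
}
```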
3754 ASSERT(instr->context() != NULL); 3754 DCHECK(instr->context() != NULL);
3755 ASSERT(ToRegister(instr->context()).is(cp)); 3755 DCHECK(ToRegister(instr->context()).is(cp));
3756 Register input = ToRegister(instr->value()); 3756 Register input = ToRegister(instr->value());
3757 Register temp1 = ToRegister(instr->temp1()); 3757 Register temp1 = ToRegister(instr->temp1());
3758 Register temp2 = ToRegister(instr->temp2()); 3758 Register temp2 = ToRegister(instr->temp2());
3759 Register result_bits = ToRegister(instr->temp3()); 3759 Register result_bits = ToRegister(instr->temp3());
3760 Register result = ToRegister(instr->result()); 3760 Register result = ToRegister(instr->result());
3761 3761
3762 Label runtime_allocation; 3762 Label runtime_allocation;
3763 3763
3764 // Deoptimize if the input is not a HeapNumber. 3764 // Deoptimize if the input is not a HeapNumber.
3765 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); 3765 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
(...skipping 53 matching lines...)
3819 } 3819 }
3820 virtual LInstruction* instr() { return instr_; } 3820 virtual LInstruction* instr() { return instr_; }
3821 Label* allocation_entry() { return &allocation; } 3821 Label* allocation_entry() { return &allocation; }
3822 private: 3822 private:
3823 LMathAbsTagged* instr_; 3823 LMathAbsTagged* instr_;
3824 Label allocation; 3824 Label allocation;
3825 }; 3825 };
3826 3826
3827 // TODO(jbramley): The early-exit mechanism would skip the new frame handling 3827 // TODO(jbramley): The early-exit mechanism would skip the new frame handling
3828 // in GenerateDeferredCode. Tidy this up. 3828 // in GenerateDeferredCode. Tidy this up.
3829 ASSERT(!NeedsDeferredFrame()); 3829 DCHECK(!NeedsDeferredFrame());
3830 3830
3831 DeferredMathAbsTagged* deferred = 3831 DeferredMathAbsTagged* deferred =
3832 new(zone()) DeferredMathAbsTagged(this, instr); 3832 new(zone()) DeferredMathAbsTagged(this, instr);
3833 3833
3834 ASSERT(instr->hydrogen()->value()->representation().IsTagged() || 3834 DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
3835 instr->hydrogen()->value()->representation().IsSmi()); 3835 instr->hydrogen()->value()->representation().IsSmi());
3836 Register input = ToRegister(instr->value()); 3836 Register input = ToRegister(instr->value());
3837 Register result_bits = ToRegister(instr->temp3()); 3837 Register result_bits = ToRegister(instr->temp3());
3838 Register result = ToRegister(instr->result()); 3838 Register result = ToRegister(instr->result());
3839 Label done; 3839 Label done;
3840 3840
3841 // Handle smis inline. 3841 // Handle smis inline.
3842 // We can treat smis as 64-bit integers, since the (low-order) tag bits will 3842 // We can treat smis as 64-bit integers, since the (low-order) tag bits will
3843 // never get set by the negation. This is therefore the same as the Integer32 3843 // never get set by the negation. This is therefore the same as the Integer32
3844 // case in DoMathAbs, except that it operates on 64-bit values. 3844 // case in DoMathAbs, except that it operates on 64-bit values.
(...skipping 98 matching lines...)
3943 3943
3944 __ Asr(result, result, shift); 3944 __ Asr(result, result, shift);
3945 __ Csel(result, result, kMinInt / divisor, vc); 3945 __ Csel(result, result, kMinInt / divisor, vc);
3946 } 3946 }
3947 3947
3948 3948
3949 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 3949 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
3950 Register dividend = ToRegister32(instr->dividend()); 3950 Register dividend = ToRegister32(instr->dividend());
3951 int32_t divisor = instr->divisor(); 3951 int32_t divisor = instr->divisor();
3952 Register result = ToRegister32(instr->result()); 3952 Register result = ToRegister32(instr->result());
3953 ASSERT(!AreAliased(dividend, result)); 3953 DCHECK(!AreAliased(dividend, result));
3954 3954
3955 if (divisor == 0) { 3955 if (divisor == 0) {
3956 Deoptimize(instr->environment()); 3956 Deoptimize(instr->environment());
3957 return; 3957 return;
3958 } 3958 }
3959 3959
3960 // Check for (0 / -x) that will produce negative zero. 3960 // Check for (0 / -x) that will produce negative zero.
3961 HMathFloorOfDiv* hdiv = instr->hydrogen(); 3961 HMathFloorOfDiv* hdiv = instr->hydrogen();
3962 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 3962 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
3963 DeoptimizeIfZero(dividend, instr->environment()); 3963 DeoptimizeIfZero(dividend, instr->environment());
3964 } 3964 }
3965 3965
3966 // Easy case: We need no dynamic check for the dividend and the flooring 3966 // Easy case: We need no dynamic check for the dividend and the flooring
3967 // division is the same as the truncating division. 3967 // division is the same as the truncating division.
3968 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 3968 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
3969 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 3969 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
3970 __ TruncatingDiv(result, dividend, Abs(divisor)); 3970 __ TruncatingDiv(result, dividend, Abs(divisor));
3971 if (divisor < 0) __ Neg(result, result); 3971 if (divisor < 0) __ Neg(result, result);
3972 return; 3972 return;
3973 } 3973 }
3974 3974
3975 // In the general case we may need to adjust before and after the truncating 3975 // In the general case we may need to adjust before and after the truncating
3976 // division to get a flooring division. 3976 // division to get a flooring division.
3977 Register temp = ToRegister32(instr->temp()); 3977 Register temp = ToRegister32(instr->temp());
3978 ASSERT(!AreAliased(temp, dividend, result)); 3978 DCHECK(!AreAliased(temp, dividend, result));
3979 Label needs_adjustment, done; 3979 Label needs_adjustment, done;
3980 __ Cmp(dividend, 0); 3980 __ Cmp(dividend, 0);
3981 __ B(divisor > 0 ? lt : gt, &needs_adjustment); 3981 __ B(divisor > 0 ? lt : gt, &needs_adjustment);
3982 __ TruncatingDiv(result, dividend, Abs(divisor)); 3982 __ TruncatingDiv(result, dividend, Abs(divisor));
3983 if (divisor < 0) __ Neg(result, result); 3983 if (divisor < 0) __ Neg(result, result);
3984 __ B(&done); 3984 __ B(&done);
3985 __ Bind(&needs_adjustment); 3985 __ Bind(&needs_adjustment);
3986 __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); 3986 __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
3987 __ TruncatingDiv(result, temp, Abs(divisor)); 3987 __ TruncatingDiv(result, temp, Abs(divisor));
3988 if (divisor < 0) __ Neg(result, result); 3988 if (divisor < 0) __ Neg(result, result);
(...skipping 42 matching lines...)
4031 // Check if the result needs to be corrected. 4031 // Check if the result needs to be corrected.
4032 __ Msub(remainder, result, divisor, dividend); 4032 __ Msub(remainder, result, divisor, dividend);
4033 __ Cbz(remainder, &done); 4033 __ Cbz(remainder, &done);
4034 __ Sub(result, result, 1); 4034 __ Sub(result, result, 1);
4035 4035
4036 __ Bind(&done); 4036 __ Bind(&done);
4037 } 4037 }
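Both flooring-division handlers recover floor() from the machine's truncating division. A sketch of the identity they use: when dividend and divisor share a sign (or the dividend is zero) truncation already floors; otherwise floor(a / b) == trunc((a + s) / b) - 1 with s = (b > 0 ? 1 : -1).

```cpp
#include <cassert>

int FlooringDiv(int a, int b) {
  assert(b != 0);
  if ((a < 0) == (b < 0) || a == 0) return a / b;  // trunc == floor here
  int s = (b > 0) ? 1 : -1;
  return (a + s) / b - 1;
}

// FlooringDiv(-7, 2) == -4 while -7 / 2 truncates to -3. The exact case
// FlooringDiv(-6, 2) == -3 also comes out right; the non-constant path
// above instead truncates first and corrects with the Msub/Cbz remainder
// check.
```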
4038 4038
4039 4039
4040 void LCodeGen::DoMathLog(LMathLog* instr) { 4040 void LCodeGen::DoMathLog(LMathLog* instr) {
4041 ASSERT(instr->IsMarkedAsCall()); 4041 DCHECK(instr->IsMarkedAsCall());
4042 ASSERT(ToDoubleRegister(instr->value()).is(d0)); 4042 DCHECK(ToDoubleRegister(instr->value()).is(d0));
4043 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 4043 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
4044 0, 1); 4044 0, 1);
4045 ASSERT(ToDoubleRegister(instr->result()).Is(d0)); 4045 DCHECK(ToDoubleRegister(instr->result()).Is(d0));
4046 } 4046 }
4047 4047
4048 4048
4049 void LCodeGen::DoMathClz32(LMathClz32* instr) { 4049 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4050 Register input = ToRegister32(instr->value()); 4050 Register input = ToRegister32(instr->value());
4051 Register result = ToRegister32(instr->result()); 4051 Register result = ToRegister32(instr->result());
4052 __ Clz(result, input); 4052 __ Clz(result, input);
4053 } 4053 }
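Math.clz32 maps directly onto the CLZ instruction. A portable sketch of what the single Clz above computes:

```cpp
#include <cstdint>

uint32_t Clz32(uint32_t x) {
  if (x == 0) return 32;  // CLZ of zero is the operand width
  uint32_t n = 0;
  while ((x & 0x80000000u) == 0) {
    x <<= 1;
    ++n;
  }
  return n;
}

// Clz32(1) == 31 and Clz32(0x80000000u) == 0, matching Math.clz32.
```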
4054 4054
4055 4055
(...skipping 18 matching lines...)
4074 __ Fsqrt(result, double_scratch()); 4074 __ Fsqrt(result, double_scratch());
4075 4075
4076 __ Bind(&done); 4076 __ Bind(&done);
4077 } 4077 }
4078 4078
4079 4079
4080 void LCodeGen::DoPower(LPower* instr) { 4080 void LCodeGen::DoPower(LPower* instr) {
4081 Representation exponent_type = instr->hydrogen()->right()->representation(); 4081 Representation exponent_type = instr->hydrogen()->right()->representation();
4082 // Having marked this as a call, we can use any registers. 4082 // Having marked this as a call, we can use any registers.
4083 // Just make sure that the input/output registers are the expected ones. 4083 // Just make sure that the input/output registers are the expected ones.
4084 ASSERT(!instr->right()->IsDoubleRegister() || 4084 DCHECK(!instr->right()->IsDoubleRegister() ||
4085 ToDoubleRegister(instr->right()).is(d1)); 4085 ToDoubleRegister(instr->right()).is(d1));
4086 ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() || 4086 DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
4087 ToRegister(instr->right()).is(x11)); 4087 ToRegister(instr->right()).is(x11));
4088 ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12)); 4088 DCHECK(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
4089 ASSERT(ToDoubleRegister(instr->left()).is(d0)); 4089 DCHECK(ToDoubleRegister(instr->left()).is(d0));
4090 ASSERT(ToDoubleRegister(instr->result()).is(d0)); 4090 DCHECK(ToDoubleRegister(instr->result()).is(d0));
4091 4091
4092 if (exponent_type.IsSmi()) { 4092 if (exponent_type.IsSmi()) {
4093 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4093 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4094 __ CallStub(&stub); 4094 __ CallStub(&stub);
4095 } else if (exponent_type.IsTagged()) { 4095 } else if (exponent_type.IsTagged()) {
4096 Label no_deopt; 4096 Label no_deopt;
4097 __ JumpIfSmi(x11, &no_deopt); 4097 __ JumpIfSmi(x11, &no_deopt);
4098 __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset)); 4098 __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
4099 DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex, 4099 DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
4100 instr->environment()); 4100 instr->environment());
4101 __ Bind(&no_deopt); 4101 __ Bind(&no_deopt);
4102 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4102 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4103 __ CallStub(&stub); 4103 __ CallStub(&stub);
4104 } else if (exponent_type.IsInteger32()) { 4104 } else if (exponent_type.IsInteger32()) {
4105 // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub 4105 // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
4106 // supports large integer exponents. 4106 // supports large integer exponents.
4107 Register exponent = ToRegister(instr->right()); 4107 Register exponent = ToRegister(instr->right());
4108 __ Sxtw(exponent, exponent); 4108 __ Sxtw(exponent, exponent);
4109 MathPowStub stub(isolate(), MathPowStub::INTEGER); 4109 MathPowStub stub(isolate(), MathPowStub::INTEGER);
4110 __ CallStub(&stub); 4110 __ CallStub(&stub);
4111 } else { 4111 } else {
4112 ASSERT(exponent_type.IsDouble()); 4112 DCHECK(exponent_type.IsDouble());
4113 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 4113 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
4114 __ CallStub(&stub); 4114 __ CallStub(&stub);
4115 } 4115 }
4116 } 4116 }
4117 4117
4118 4118
4119 void LCodeGen::DoMathRoundD(LMathRoundD* instr) { 4119 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
4120 DoubleRegister input = ToDoubleRegister(instr->value()); 4120 DoubleRegister input = ToDoubleRegister(instr->value());
4121 DoubleRegister result = ToDoubleRegister(instr->result()); 4121 DoubleRegister result = ToDoubleRegister(instr->result());
4122 DoubleRegister scratch_d = double_scratch(); 4122 DoubleRegister scratch_d = double_scratch();
4123 4123
4124 ASSERT(!AreAliased(input, result, scratch_d)); 4124 DCHECK(!AreAliased(input, result, scratch_d));
4125 4125
4126 Label done; 4126 Label done;
4127 4127
4128 __ Frinta(result, input); 4128 __ Frinta(result, input);
4129 __ Fcmp(input, 0.0); 4129 __ Fcmp(input, 0.0);
4130 __ Fccmp(result, input, ZFlag, lt); 4130 __ Fccmp(result, input, ZFlag, lt);
4131 // The result is correct if the input was in [-0, +infinity], or was a 4131 // The result is correct if the input was in [-0, +infinity], or was a
4132 // negative integral value. 4132 // negative integral value.
4133 __ B(eq, &done); 4133 __ B(eq, &done);
4134 4134
(...skipping 90 matching lines...)
4225 __ Cmp(left, right); 4225 __ Cmp(left, right);
4226 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); 4226 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4227 } else if (instr->hydrogen()->representation().IsSmi()) { 4227 } else if (instr->hydrogen()->representation().IsSmi()) {
4228 Register result = ToRegister(instr->result()); 4228 Register result = ToRegister(instr->result());
4229 Register left = ToRegister(instr->left()); 4229 Register left = ToRegister(instr->left());
4230 Operand right = ToOperand(instr->right()); 4230 Operand right = ToOperand(instr->right());
4231 4231
4232 __ Cmp(left, right); 4232 __ Cmp(left, right);
4233 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); 4233 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
4234 } else { 4234 } else {
4235 ASSERT(instr->hydrogen()->representation().IsDouble()); 4235 DCHECK(instr->hydrogen()->representation().IsDouble());
4236 DoubleRegister result = ToDoubleRegister(instr->result()); 4236 DoubleRegister result = ToDoubleRegister(instr->result());
4237 DoubleRegister left = ToDoubleRegister(instr->left()); 4237 DoubleRegister left = ToDoubleRegister(instr->left());
4238 DoubleRegister right = ToDoubleRegister(instr->right()); 4238 DoubleRegister right = ToDoubleRegister(instr->right());
4239 4239
4240 if (op == HMathMinMax::kMathMax) { 4240 if (op == HMathMinMax::kMathMax) {
4241 __ Fmax(result, left, right); 4241 __ Fmax(result, left, right);
4242 } else { 4242 } else {
4243 ASSERT(op == HMathMinMax::kMathMin); 4243 DCHECK(op == HMathMinMax::kMathMin);
4244 __ Fmin(result, left, right); 4244 __ Fmin(result, left, right);
4245 } 4245 }
4246 } 4246 }
4247 } 4247 }
4248 4248
4249 4249
4250 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { 4250 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
4251 Register dividend = ToRegister32(instr->dividend()); 4251 Register dividend = ToRegister32(instr->dividend());
4252 int32_t divisor = instr->divisor(); 4252 int32_t divisor = instr->divisor();
4253 ASSERT(dividend.is(ToRegister32(instr->result()))); 4253 DCHECK(dividend.is(ToRegister32(instr->result())));
4254 4254
4255 // Theoretically, a variation of the branch-free code for integer division by 4255 // Theoretically, a variation of the branch-free code for integer division by
4256 // a power of 2 (calculating the remainder via an additional multiplication 4256 // a power of 2 (calculating the remainder via an additional multiplication
4257 // (which gets simplified to an 'and') and subtraction) should be faster, and 4257 // (which gets simplified to an 'and') and subtraction) should be faster, and
4258 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to 4258 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
4259 // indicate that positive dividends are heavily favored, so the branching 4259 // indicate that positive dividends are heavily favored, so the branching
4260 // version performs better. 4260 // version performs better.
4261 HMod* hmod = instr->hydrogen(); 4261 HMod* hmod = instr->hydrogen();
4262 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 4262 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
4263 Label dividend_is_not_negative, done; 4263 Label dividend_is_not_negative, done;
(...skipping 13 matching lines...)
4277 __ And(dividend, dividend, mask); 4277 __ And(dividend, dividend, mask);
4278 __ bind(&done); 4278 __ bind(&done);
4279 } 4279 }
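A sketch of the branching version above: a JavaScript remainder takes the dividend's sign, so the power-of-two mask is applied to the magnitude and the sign restored afterwards.

```cpp
#include <cstdint>

int32_t ModPowerOf2(int32_t dividend, int32_t divisor) {
  uint32_t mask =
      static_cast<uint32_t>(divisor < 0 ? -(divisor + 1) : divisor - 1);
  if (dividend >= 0) {
    return static_cast<int32_t>(static_cast<uint32_t>(dividend) & mask);
  }
  // Negate in unsigned arithmetic so INT32_MIN stays well-defined. A
  // zero result on this path is JavaScript's -0, the case the
  // kBailoutOnMinusZero deopt above guards against.
  uint32_t r = (0u - static_cast<uint32_t>(dividend)) & mask;
  return -static_cast<int32_t>(r);
}

// ModPowerOf2(-7, 4) == -3 and ModPowerOf2(7, -4) == 3, like JS %.
```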
4280 4280
4281 4281
4282 void LCodeGen::DoModByConstI(LModByConstI* instr) { 4282 void LCodeGen::DoModByConstI(LModByConstI* instr) {
4283 Register dividend = ToRegister32(instr->dividend()); 4283 Register dividend = ToRegister32(instr->dividend());
4284 int32_t divisor = instr->divisor(); 4284 int32_t divisor = instr->divisor();
4285 Register result = ToRegister32(instr->result()); 4285 Register result = ToRegister32(instr->result());
4286 Register temp = ToRegister32(instr->temp()); 4286 Register temp = ToRegister32(instr->temp());
4287 ASSERT(!AreAliased(dividend, result, temp)); 4287 DCHECK(!AreAliased(dividend, result, temp));
4288 4288
4289 if (divisor == 0) { 4289 if (divisor == 0) {
4290 Deoptimize(instr->environment()); 4290 Deoptimize(instr->environment());
4291 return; 4291 return;
4292 } 4292 }
4293 4293
4294 __ TruncatingDiv(result, dividend, Abs(divisor)); 4294 __ TruncatingDiv(result, dividend, Abs(divisor));
4295 __ Sxtw(dividend.X(), dividend); 4295 __ Sxtw(dividend.X(), dividend);
4296 __ Mov(temp, Abs(divisor)); 4296 __ Mov(temp, Abs(divisor));
4297 __ Smsubl(result.X(), result, temp, dividend.X()); 4297 __ Smsubl(result.X(), result, temp, dividend.X());
(...skipping 23 matching lines...)
4321 __ Msub(result, result, divisor, dividend); 4321 __ Msub(result, result, divisor, dividend);
4322 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4322 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4323 __ Cbnz(result, &done); 4323 __ Cbnz(result, &done);
4324 DeoptimizeIfNegative(dividend, instr->environment()); 4324 DeoptimizeIfNegative(dividend, instr->environment());
4325 } 4325 }
4326 __ Bind(&done); 4326 __ Bind(&done);
4327 } 4327 }
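DoModByConstI and DoModI share the same remainder reconstruction: divide, then one multiply-subtract (the Smsubl/Msub above). A sketch with a plain division standing in for TruncatingDiv's multiply-high sequence:

```cpp
#include <cstdint>

int32_t ModViaDiv(int32_t a, int32_t b) {  // b != 0 and b != INT32_MIN
  int32_t abs_b = b < 0 ? -b : b;
  int32_t q = a / abs_b;  // TruncatingDiv(result, dividend, Abs(divisor))
  return a - q * abs_b;   // Smsubl: dividend - quotient * |divisor|
}

// ModViaDiv(-7, 3) == -1 and ModViaDiv(7, -3) == 1: the sign of a JS
// remainder follows the dividend, never the divisor, which is why |b|
// suffices.
```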
4328 4328
4329 4329
4330 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { 4330 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
4331 ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32()); 4331 DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
4332 bool is_smi = instr->hydrogen()->representation().IsSmi(); 4332 bool is_smi = instr->hydrogen()->representation().IsSmi();
4333 Register result = 4333 Register result =
4334 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); 4334 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
4335 Register left = 4335 Register left =
4336 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ; 4336 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
4337 int32_t right = ToInteger32(instr->right()); 4337 int32_t right = ToInteger32(instr->right());
4338 ASSERT((right > -kMaxInt) && (right < kMaxInt)); 4338 DCHECK((right > -kMaxInt) && (right < kMaxInt));
4339 4339
4340 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 4340 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
4341 bool bailout_on_minus_zero = 4341 bool bailout_on_minus_zero =
4342 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 4342 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
4343 4343
4344 if (bailout_on_minus_zero) { 4344 if (bailout_on_minus_zero) {
4345 if (right < 0) { 4345 if (right < 0) {
4346 // The result is -0 if right is negative and left is zero. 4346 // The result is -0 if right is negative and left is zero.
4347 DeoptimizeIfZero(left, instr->environment()); 4347 DeoptimizeIfZero(left, instr->environment());
4348 } else if (right == 0) { 4348 } else if (right == 0) {
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
4382 default: 4382 default:
4383 // Multiplication by constant powers of two (and some related values) 4383 // Multiplication by constant powers of two (and some related values)
4384 // can be done efficiently with shifted operands. 4384 // can be done efficiently with shifted operands.
4385 int32_t right_abs = Abs(right); 4385 int32_t right_abs = Abs(right);
4386 4386
4387 if (IsPowerOf2(right_abs)) { 4387 if (IsPowerOf2(right_abs)) {
4388 int right_log2 = WhichPowerOf2(right_abs); 4388 int right_log2 = WhichPowerOf2(right_abs);
4389 4389
4390 if (can_overflow) { 4390 if (can_overflow) {
4391 Register scratch = result; 4391 Register scratch = result;
4392 ASSERT(!AreAliased(scratch, left)); 4392 DCHECK(!AreAliased(scratch, left));
4393 __ Cls(scratch, left); 4393 __ Cls(scratch, left);
4394 __ Cmp(scratch, right_log2); 4394 __ Cmp(scratch, right_log2);
4395 DeoptimizeIf(lt, instr->environment()); 4395 DeoptimizeIf(lt, instr->environment());
4396 } 4396 }
4397 4397
4398 if (right >= 0) { 4398 if (right >= 0) {
4399 // result = left << log2(right) 4399 // result = left << log2(right)
4400 __ Lsl(result, left, right_log2); 4400 __ Lsl(result, left, right_log2);
4401 } else { 4401 } else {
4402 // result = -left << log2(-right) 4402 // result = -left << log2(-right)
4403 if (can_overflow) { 4403 if (can_overflow) {
4404 __ Negs(result, Operand(left, LSL, right_log2)); 4404 __ Negs(result, Operand(left, LSL, right_log2));
4405 DeoptimizeIf(vs, instr->environment()); 4405 DeoptimizeIf(vs, instr->environment());
4406 } else { 4406 } else {
4407 __ Neg(result, Operand(left, LSL, right_log2)); 4407 __ Neg(result, Operand(left, LSL, right_log2));
4408 } 4408 }
4409 } 4409 }
4410 return; 4410 return;
4411 } 4411 }
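Concrete instances of the strength-reduction patterns in this function, covering both the power-of-two branch above and the right ± 1 cases handled just below (a sketch; right is a compile-time constant in the generated code):

```cpp
#include <cstdint>

int32_t MulByConst(int32_t left, int32_t right) {
  switch (right) {
    case 8:  return left << 3;              // Lsl: 8 == 1 << 3
    case -8: return -(left << 3);           // Neg of a shifted operand
    case 9:  return left + (left << 3);     // Add, since 9 == 8 + 1
    case 7:  return -(left - (left << 3));  // Sub then Neg, since 7 == 8 - 1
    default: return left * right;           // general case: Mul
  }
}
```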
4412 4412
4413 4413
4414 // For the following cases, we could perform a conservative overflow check 4414 // For the following cases, we could perform a conservative overflow check
4415 // with CLS as above. However, the few cycles saved are likely not worth 4415 // with CLS as above. However, the few cycles saved are likely not worth
4416 // the risk of deoptimizing more often than required. 4416 // the risk of deoptimizing more often than required.
4417 ASSERT(!can_overflow); 4417 DCHECK(!can_overflow);
4418 4418
4419 if (right >= 0) { 4419 if (right >= 0) {
4420 if (IsPowerOf2(right - 1)) { 4420 if (IsPowerOf2(right - 1)) {
4421 // result = left + left << log2(right - 1) 4421 // result = left + left << log2(right - 1)
4422 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1))); 4422 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
4423 } else if (IsPowerOf2(right + 1)) { 4423 } else if (IsPowerOf2(right + 1)) {
4424 // result = -left + left << log2(right + 1) 4424 // result = -left + left << log2(right + 1)
4425 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1))); 4425 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
4426 __ Neg(result, result); 4426 __ Neg(result, result);
4427 } else { 4427 } else {
(...skipping 77 matching lines...)
4505 // multiply, giving a tagged result. 4505 // multiply, giving a tagged result.
4506 STATIC_ASSERT((kSmiShift % 2) == 0); 4506 STATIC_ASSERT((kSmiShift % 2) == 0);
4507 __ Asr(result, left, kSmiShift / 2); 4507 __ Asr(result, left, kSmiShift / 2);
4508 __ Mul(result, result, result); 4508 __ Mul(result, result, result);
4509 } else if (result.Is(left) && !left.Is(right)) { 4509 } else if (result.Is(left) && !left.Is(right)) {
4510 // Registers result and left alias, right is distinct: untag left into 4510 // Registers result and left alias, right is distinct: untag left into
4511 // result, and then multiply by right, giving a tagged result. 4511 // result, and then multiply by right, giving a tagged result.
4512 __ SmiUntag(result, left); 4512 __ SmiUntag(result, left);
4513 __ Mul(result, result, right); 4513 __ Mul(result, result, right);
4514 } else { 4514 } else {
4515 ASSERT(!left.Is(result)); 4515 DCHECK(!left.Is(result));
4516 // Registers result and right alias, left is distinct, or all registers 4516 // Registers result and right alias, left is distinct, or all registers
4517 // are distinct: untag right into result, and then multiply by left, 4517 // are distinct: untag right into result, and then multiply by left,
4518 // giving a tagged result. 4518 // giving a tagged result.
4519 __ SmiUntag(result, right); 4519 __ SmiUntag(result, right);
4520 __ Mul(result, left, result); 4520 __ Mul(result, left, result);
4521 } 4521 }
4522 } 4522 }
4523 } 4523 }
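Why the half-shift squaring above works (kSmiShift == 32, payload in the upper word, and the STATIC_ASSERT guarantees the shift is even): half-untag once, and the product re-tags itself.

```cpp
#include <cstdint>

//   tagged(a)        == a << 32
//   tagged(a) >> 16  == a << 16
//   (a << 16) * (a << 16) == (a * a) << 32 == tagged(a * a)
int64_t SquareTaggedSmi(int64_t tagged) {
  int64_t half = tagged >> 16;  // Asr(result, left, kSmiShift / 2)
  return half * half;           // already tagged; no re-tag needed
}

// Example: tagged 5 is int64_t{5} << 32; the result is int64_t{25} << 32.
```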
4524 4524
4525 4525
(...skipping 159 matching lines...)
4685 __ Bind(&convert_undefined); 4685 __ Bind(&convert_undefined);
4686 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, 4686 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
4687 instr->environment()); 4687 instr->environment());
4688 4688
4689 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4689 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4690 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4690 __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4691 __ B(&done); 4691 __ B(&done);
4692 } 4692 }
4693 4693
4694 } else { 4694 } else {
4695 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); 4695 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4696 // Fall through to load_smi. 4696 // Fall through to load_smi.
4697 } 4697 }
4698 4698
4699 // Smi to double register conversion. 4699 // Smi to double register conversion.
4700 __ Bind(&load_smi); 4700 __ Bind(&load_smi);
4701 __ SmiUntagToDouble(result, input); 4701 __ SmiUntagToDouble(result, input);
4702 4702
4703 __ Bind(&done); 4703 __ Bind(&done);
4704 } 4704 }
4705 4705
4706 4706
4707 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 4707 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
4708 // This is a pseudo-instruction that ensures that the environment here is 4708 // This is a pseudo-instruction that ensures that the environment here is
4709 // properly registered for deoptimization and records the assembler's PC 4709 // properly registered for deoptimization and records the assembler's PC
4710 // offset. 4710 // offset.
4711 LEnvironment* environment = instr->environment(); 4711 LEnvironment* environment = instr->environment();
4712 4712
4713 // If the environment were already registered, we would have no way of 4713 // If the environment were already registered, we would have no way of
4714 // backpatching it with the spill slot operands. 4714 // backpatching it with the spill slot operands.
4715 ASSERT(!environment->HasBeenRegistered()); 4715 DCHECK(!environment->HasBeenRegistered());
4716 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 4716 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
4717 4717
4718 GenerateOsrPrologue(); 4718 GenerateOsrPrologue();
4719 } 4719 }
4720 4720
4721 4721
4722 void LCodeGen::DoParameter(LParameter* instr) { 4722 void LCodeGen::DoParameter(LParameter* instr) {
4723 // Nothing to do. 4723 // Nothing to do.
4724 } 4724 }
4725 4725
(...skipping 118 matching lines...)
4844 } 4844 }
4845 4845
4846 4846
4847 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 4847 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
4848 String::Encoding encoding = instr->hydrogen()->encoding(); 4848 String::Encoding encoding = instr->hydrogen()->encoding();
4849 Register string = ToRegister(instr->string()); 4849 Register string = ToRegister(instr->string());
4850 Register value = ToRegister(instr->value()); 4850 Register value = ToRegister(instr->value());
4851 Register temp = ToRegister(instr->temp()); 4851 Register temp = ToRegister(instr->temp());
4852 4852
4853 if (FLAG_debug_code) { 4853 if (FLAG_debug_code) {
4854 ASSERT(ToRegister(instr->context()).is(cp)); 4854 DCHECK(ToRegister(instr->context()).is(cp));
4855 Register index = ToRegister(instr->index()); 4855 Register index = ToRegister(instr->index());
4856 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 4856 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
4857 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 4857 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
4858 int encoding_mask = 4858 int encoding_mask =
4859 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 4859 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
4860 ? one_byte_seq_type : two_byte_seq_type; 4860 ? one_byte_seq_type : two_byte_seq_type;
4861 __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp, 4861 __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
4862 encoding_mask); 4862 encoding_mask);
4863 } 4863 }
4864 MemOperand operand = 4864 MemOperand operand =
(...skipping 49 matching lines...)
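A minimal sketch (not V8 code) of how DoSeqStringSetChar's store address depends on the encoding checked above: one-byte strings use one byte per character, two-byte strings two. The header size here is a hypothetical value, not V8's actual layout constant.

#include <cassert>
#include <cstddef>

enum class Encoding { kOneByte, kTwoByte };

size_t CharByteOffset(Encoding encoding, size_t header_size, size_t index) {
  size_t char_size = (encoding == Encoding::kOneByte) ? 1 : 2;
  return header_size + index * char_size;
}

int main() {
  assert(CharByteOffset(Encoding::kOneByte, 16, 3) == 19);
  assert(CharByteOffset(Encoding::kTwoByte, 16, 3) == 22);
  return 0;
}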
4914 Label right_not_zero; 4914 Label right_not_zero;
4915 __ Cbnz(right, &right_not_zero); 4915 __ Cbnz(right, &right_not_zero);
4916 DeoptimizeIfNegative(left, instr->environment()); 4916 DeoptimizeIfNegative(left, instr->environment());
4917 __ Bind(&right_not_zero); 4917 __ Bind(&right_not_zero);
4918 } 4918 }
4919 __ Lsr(result, left, right); 4919 __ Lsr(result, left, right);
4920 break; 4920 break;
4921 default: UNREACHABLE(); 4921 default: UNREACHABLE();
4922 } 4922 }
4923 } else { 4923 } else {
4924 ASSERT(right_op->IsConstantOperand()); 4924 DCHECK(right_op->IsConstantOperand());
4925 int shift_count = JSShiftAmountFromLConstant(right_op); 4925 int shift_count = JSShiftAmountFromLConstant(right_op);
4926 if (shift_count == 0) { 4926 if (shift_count == 0) {
4927 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 4927 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4928 DeoptimizeIfNegative(left, instr->environment()); 4928 DeoptimizeIfNegative(left, instr->environment());
4929 } 4929 }
4930 __ Mov(result, left, kDiscardForSameWReg); 4930 __ Mov(result, left, kDiscardForSameWReg);
4931 } else { 4931 } else {
4932 switch (instr->op()) { 4932 switch (instr->op()) {
4933 case Token::ROR: __ Ror(result, left, shift_count); break; 4933 case Token::ROR: __ Ror(result, left, shift_count); break;
4934 case Token::SAR: __ Asr(result, left, shift_count); break; 4934 case Token::SAR: __ Asr(result, left, shift_count); break;
4935 case Token::SHL: __ Lsl(result, left, shift_count); break; 4935 case Token::SHL: __ Lsl(result, left, shift_count); break;
4936 case Token::SHR: __ Lsr(result, left, shift_count); break; 4936 case Token::SHR: __ Lsr(result, left, shift_count); break;
4937 default: UNREACHABLE(); 4937 default: UNREACHABLE();
4938 } 4938 }
4939 } 4939 }
4940 } 4940 }
4941 } 4941 }
4942 4942
4943 4943
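A minimal sketch (not V8 code) of why the integer shift code above deoptimizes on a negative left operand for Token::SHR with a zero shift count: JavaScript's >>> yields an unsigned 32-bit value, and only results below 2^31 are representable in the untagged int32 form being produced.

#include <cassert>
#include <cstdint>

bool ShrResultFitsInt32(int32_t left, int shift_count) {
  uint32_t result = static_cast<uint32_t>(left) >> (shift_count & 31);
  return result <= static_cast<uint32_t>(INT32_MAX);
}

int main() {
  assert(ShrResultFitsInt32(-1, 1));   // 0x7fffffff: fits
  assert(!ShrResultFitsInt32(-1, 0));  // 0xffffffff: the deopt case
  return 0;
}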
4944 void LCodeGen::DoShiftS(LShiftS* instr) { 4944 void LCodeGen::DoShiftS(LShiftS* instr) {
4945 LOperand* right_op = instr->right(); 4945 LOperand* right_op = instr->right();
4946 Register left = ToRegister(instr->left()); 4946 Register left = ToRegister(instr->left());
4947 Register result = ToRegister(instr->result()); 4947 Register result = ToRegister(instr->result());
4948 4948
4949 // Only ROR by register needs a temp. 4949 // Only ROR by register needs a temp.
4950 ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) || 4950 DCHECK(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
4951 (instr->temp() == NULL)); 4951 (instr->temp() == NULL));
4952 4952
4953 if (right_op->IsRegister()) { 4953 if (right_op->IsRegister()) {
4954 Register right = ToRegister(instr->right()); 4954 Register right = ToRegister(instr->right());
4955 switch (instr->op()) { 4955 switch (instr->op()) {
4956 case Token::ROR: { 4956 case Token::ROR: {
4957 Register temp = ToRegister(instr->temp()); 4957 Register temp = ToRegister(instr->temp());
4958 __ Ubfx(temp, right, kSmiShift, 5); 4958 __ Ubfx(temp, right, kSmiShift, 5);
4959 __ SmiUntag(result, left); 4959 __ SmiUntag(result, left);
4960 __ Ror(result.W(), result.W(), temp.W()); 4960 __ Ror(result.W(), result.W(), temp.W());
(...skipping 16 matching lines...)
4977 DeoptimizeIfNegative(left, instr->environment()); 4977 DeoptimizeIfNegative(left, instr->environment());
4978 __ Bind(&right_not_zero); 4978 __ Bind(&right_not_zero);
4979 } 4979 }
4980 __ Ubfx(result, right, kSmiShift, 5); 4980 __ Ubfx(result, right, kSmiShift, 5);
4981 __ Lsr(result, left, result); 4981 __ Lsr(result, left, result);
4982 __ Bic(result, result, kSmiShiftMask); 4982 __ Bic(result, result, kSmiShiftMask);
4983 break; 4983 break;
4984 default: UNREACHABLE(); 4984 default: UNREACHABLE();
4985 } 4985 }
4986 } else { 4986 } else {
4987 ASSERT(right_op->IsConstantOperand()); 4987 DCHECK(right_op->IsConstantOperand());
4988 int shift_count = JSShiftAmountFromLConstant(right_op); 4988 int shift_count = JSShiftAmountFromLConstant(right_op);
4989 if (shift_count == 0) { 4989 if (shift_count == 0) {
4990 if ((instr->op() == Token::SHR) && instr->can_deopt()) { 4990 if ((instr->op() == Token::SHR) && instr->can_deopt()) {
4991 DeoptimizeIfNegative(left, instr->environment()); 4991 DeoptimizeIfNegative(left, instr->environment());
4992 } 4992 }
4993 __ Mov(result, left); 4993 __ Mov(result, left);
4994 } else { 4994 } else {
4995 switch (instr->op()) { 4995 switch (instr->op()) {
4996 case Token::ROR: 4996 case Token::ROR:
4997 __ SmiUntag(result, left); 4997 __ SmiUntag(result, left);
(...skipping 17 matching lines...)
5015 } 5015 }
5016 } 5016 }
5017 5017
5018 5018
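A minimal sketch (not V8 code) of the Ubfx(temp, right, kSmiShift, 5) pattern in DoShiftS above: it extracts five bits starting at bit 32, i.e. the untagged shift amount reduced modulo 32, matching JavaScript's shift-count semantics in a single instruction.

#include <cassert>
#include <cstdint>

uint32_t SmiShiftAmount(int64_t tagged_right) {
  // The smi value sits in the top 32 bits; keep only its low five bits.
  return static_cast<uint32_t>(static_cast<uint64_t>(tagged_right) >> 32) & 0x1f;
}

int main() {
  int64_t tagged_33 = int64_t{33} << 32;
  assert(SmiShiftAmount(tagged_33) == 1);  // 33 mod 32
  return 0;
}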
5019 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { 5019 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
5020 __ Debug("LDebugBreak", 0, BREAK); 5020 __ Debug("LDebugBreak", 0, BREAK);
5021 } 5021 }
5022 5022
5023 5023
5024 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 5024 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
5025 ASSERT(ToRegister(instr->context()).is(cp)); 5025 DCHECK(ToRegister(instr->context()).is(cp));
5026 Register scratch1 = x5; 5026 Register scratch1 = x5;
5027 Register scratch2 = x6; 5027 Register scratch2 = x6;
5028 ASSERT(instr->IsMarkedAsCall()); 5028 DCHECK(instr->IsMarkedAsCall());
5029 5029
5030 ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals"); 5030 ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
5031 // TODO(all): if Mov could handle objects in new space, it could be used 5031 // TODO(all): if Mov could handle objects in new space, it could be used
5032 // here. 5032 // here.
5033 __ LoadHeapObject(scratch1, instr->hydrogen()->pairs()); 5033 __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
5034 __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags())); 5034 __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
5035 __ Push(cp, scratch1, scratch2); // The context is the first argument. 5035 __ Push(cp, scratch1, scratch2); // The context is the first argument.
5036 CallRuntime(Runtime::kDeclareGlobals, 3, instr); 5036 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
5037 } 5037 }
5038 5038
5039 5039
5040 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { 5040 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5041 PushSafepointRegistersScope scope(this); 5041 PushSafepointRegistersScope scope(this);
5042 LoadContextFromDeferred(instr->context()); 5042 LoadContextFromDeferred(instr->context());
5043 __ CallRuntimeSaveDoubles(Runtime::kStackGuard); 5043 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5044 RecordSafepointWithLazyDeopt( 5044 RecordSafepointWithLazyDeopt(
5045 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 5045 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5046 ASSERT(instr->HasEnvironment()); 5046 DCHECK(instr->HasEnvironment());
5047 LEnvironment* env = instr->environment(); 5047 LEnvironment* env = instr->environment();
5048 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 5048 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5049 } 5049 }
5050 5050
5051 5051
5052 void LCodeGen::DoStackCheck(LStackCheck* instr) { 5052 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5053 class DeferredStackCheck: public LDeferredCode { 5053 class DeferredStackCheck: public LDeferredCode {
5054 public: 5054 public:
5055 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) 5055 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5056 : LDeferredCode(codegen), instr_(instr) { } 5056 : LDeferredCode(codegen), instr_(instr) { }
5057 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } 5057 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5058 virtual LInstruction* instr() { return instr_; } 5058 virtual LInstruction* instr() { return instr_; }
5059 private: 5059 private:
5060 LStackCheck* instr_; 5060 LStackCheck* instr_;
5061 }; 5061 };
5062 5062
5063 ASSERT(instr->HasEnvironment()); 5063 DCHECK(instr->HasEnvironment());
5064 LEnvironment* env = instr->environment(); 5064 LEnvironment* env = instr->environment();
5065 // There is no LLazyBailout instruction for stack-checks. We have to 5065 // There is no LLazyBailout instruction for stack-checks. We have to
5066 // prepare for lazy deoptimization explicitly here. 5066 // prepare for lazy deoptimization explicitly here.
5067 if (instr->hydrogen()->is_function_entry()) { 5067 if (instr->hydrogen()->is_function_entry()) {
5068 // Perform stack overflow check. 5068 // Perform stack overflow check.
5069 Label done; 5069 Label done;
5070 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex); 5070 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5071 __ B(hs, &done); 5071 __ B(hs, &done);
5072 5072
5073 PredictableCodeSizeScope predictable(masm_, 5073 PredictableCodeSizeScope predictable(masm_,
5074 Assembler::kCallSizeWithRelocation); 5074 Assembler::kCallSizeWithRelocation);
5075 ASSERT(instr->context()->IsRegister()); 5075 DCHECK(instr->context()->IsRegister());
5076 ASSERT(ToRegister(instr->context()).is(cp)); 5076 DCHECK(ToRegister(instr->context()).is(cp));
5077 CallCode(isolate()->builtins()->StackCheck(), 5077 CallCode(isolate()->builtins()->StackCheck(),
5078 RelocInfo::CODE_TARGET, 5078 RelocInfo::CODE_TARGET,
5079 instr); 5079 instr);
5080 __ Bind(&done); 5080 __ Bind(&done);
5081 } else { 5081 } else {
5082 ASSERT(instr->hydrogen()->is_backwards_branch()); 5082 DCHECK(instr->hydrogen()->is_backwards_branch());
5083 // Perform stack overflow check if this goto needs it before jumping. 5083 // Perform stack overflow check if this goto needs it before jumping.
5084 DeferredStackCheck* deferred_stack_check = 5084 DeferredStackCheck* deferred_stack_check =
5085 new(zone()) DeferredStackCheck(this, instr); 5085 new(zone()) DeferredStackCheck(this, instr);
5086 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex); 5086 __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
5087 __ B(lo, deferred_stack_check->entry()); 5087 __ B(lo, deferred_stack_check->entry());
5088 5088
5089 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 5089 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5090 __ Bind(instr->done_label()); 5090 __ Bind(instr->done_label());
5091 deferred_stack_check->SetExit(instr->done_label()); 5091 deferred_stack_check->SetExit(instr->done_label());
5092 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5092 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
(...skipping 76 matching lines...)
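A minimal sketch (not V8 code) of the check DoStackCheck emits above: the stack grows downward, so a stack pointer below the limit root signals an overflow (or a pending interrupt, which lowers the limit on purpose), and the slow path calls the stack guard. The runtime stand-in here is illustrative.

#include <cstdint>
#include <cstdio>

void StackGuardRuntime() { std::puts("stack guard"); }  // stands in for Runtime::kStackGuard

void StackCheck(uintptr_t stack_pointer, uintptr_t stack_limit) {
  if (stack_pointer < stack_limit) {  // the B(lo, ...) branch above
    StackGuardRuntime();
  }
}

int main() {
  StackCheck(0x1000, 0x2000);  // below the limit: slow path taken
  return 0;
}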
5169 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) { 5169 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
5170 Register ext_ptr = ToRegister(instr->elements()); 5170 Register ext_ptr = ToRegister(instr->elements());
5171 Register key = no_reg; 5171 Register key = no_reg;
5172 Register scratch; 5172 Register scratch;
5173 ElementsKind elements_kind = instr->elements_kind(); 5173 ElementsKind elements_kind = instr->elements_kind();
5174 5174
5175 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 5175 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
5176 bool key_is_constant = instr->key()->IsConstantOperand(); 5176 bool key_is_constant = instr->key()->IsConstantOperand();
5177 int constant_key = 0; 5177 int constant_key = 0;
5178 if (key_is_constant) { 5178 if (key_is_constant) {
5179 ASSERT(instr->temp() == NULL); 5179 DCHECK(instr->temp() == NULL);
5180 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 5180 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
5181 if (constant_key & 0xf0000000) { 5181 if (constant_key & 0xf0000000) {
5182 Abort(kArrayIndexConstantValueTooBig); 5182 Abort(kArrayIndexConstantValueTooBig);
5183 } 5183 }
5184 } else { 5184 } else {
5185 key = ToRegister(instr->key()); 5185 key = ToRegister(instr->key());
5186 scratch = ToRegister(instr->temp()); 5186 scratch = ToRegister(instr->temp());
5187 } 5187 }
5188 5188
5189 MemOperand dst = 5189 MemOperand dst =
(...skipping 99 matching lines...)
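A minimal sketch (not V8 code) of the constant-key guard in DoStoreKeyedExternal above: any constant key with one of its top four bits set aborts compilation, presumably so the key still fits a 32-bit offset after being scaled by the largest external element size.

#include <cassert>
#include <cstdint>

bool ConstantKeyFits(int32_t key) {
  return (key & 0xf0000000) == 0;  // mirrors the Abort condition above
}

int main() {
  assert(ConstantKeyFits(0x0fffffff));
  assert(!ConstantKeyFits(0x10000000));
  return 0;
}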
5289 scratch = ToRegister(instr->temp()); 5289 scratch = ToRegister(instr->temp());
5290 } 5290 }
5291 5291
5292 Representation representation = instr->hydrogen()->value()->representation(); 5292 Representation representation = instr->hydrogen()->value()->representation();
5293 if (instr->key()->IsConstantOperand()) { 5293 if (instr->key()->IsConstantOperand()) {
5294 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 5294 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
5295 int offset = instr->base_offset() + 5295 int offset = instr->base_offset() +
5296 ToInteger32(const_operand) * kPointerSize; 5296 ToInteger32(const_operand) * kPointerSize;
5297 store_base = elements; 5297 store_base = elements;
5298 if (representation.IsInteger32()) { 5298 if (representation.IsInteger32()) {
5299 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); 5299 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5300 ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); 5300 DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
5301 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); 5301 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
5302 STATIC_ASSERT(kSmiTag == 0); 5302 STATIC_ASSERT(kSmiTag == 0);
5303 mem_op = UntagSmiMemOperand(store_base, offset); 5303 mem_op = UntagSmiMemOperand(store_base, offset);
5304 } else { 5304 } else {
5305 mem_op = MemOperand(store_base, offset); 5305 mem_op = MemOperand(store_base, offset);
5306 } 5306 }
5307 } else { 5307 } else {
5308 store_base = scratch; 5308 store_base = scratch;
5309 key = ToRegister(instr->key()); 5309 key = ToRegister(instr->key());
5310 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); 5310 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
5311 5311
5312 mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged, 5312 mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
5313 instr->hydrogen()->elements_kind(), 5313 instr->hydrogen()->elements_kind(),
5314 representation, instr->base_offset()); 5314 representation, instr->base_offset());
5315 } 5315 }
5316 5316
5317 __ Store(value, mem_op, representation); 5317 __ Store(value, mem_op, representation);
5318 5318
5319 if (instr->hydrogen()->NeedsWriteBarrier()) { 5319 if (instr->hydrogen()->NeedsWriteBarrier()) {
5320 ASSERT(representation.IsTagged()); 5320 DCHECK(representation.IsTagged());
5321 // This assignment may cause element_addr to alias store_base. 5321 // This assignment may cause element_addr to alias store_base.
5322 Register element_addr = scratch; 5322 Register element_addr = scratch;
5323 SmiCheck check_needed = 5323 SmiCheck check_needed =
5324 instr->hydrogen()->value()->type().IsHeapObject() 5324 instr->hydrogen()->value()->type().IsHeapObject()
5325 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 5325 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
5326 // Compute address of modified element and store it into key register. 5326 // Compute address of modified element and store it into key register.
5327 __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand()); 5327 __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
5328 __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(), 5328 __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
5329 kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed, 5329 kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
5330 instr->hydrogen()->PointersToHereCheckForValue()); 5330 instr->hydrogen()->PointersToHereCheckForValue());
5331 } 5331 }
5332 } 5332 }
5333 5333
5334 5334
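A minimal sketch (not V8's barrier) of the RecordWrite call ending the keyed store above: after a tagged pointer is stored into an object, the slot is added to a remembered set when it may now hold an old-to-new reference, so the next scavenge can find it without scanning old space. All names here are illustrative.

#include <cassert>
#include <unordered_set>

struct HeapObject { bool in_new_space; };
struct Slot { HeapObject* value = nullptr; };

std::unordered_set<Slot*> remembered_set;

void StoreWithWriteBarrier(HeapObject* holder, Slot* slot, HeapObject* value) {
  slot->value = value;
  if (!holder->in_new_space && value->in_new_space) {
    remembered_set.insert(slot);  // the EMIT_REMEMBERED_SET case
  }
}

int main() {
  HeapObject old_obj{false}, young_obj{true};
  Slot slot;
  StoreWithWriteBarrier(&old_obj, &slot, &young_obj);
  assert(remembered_set.count(&slot) == 1);
  return 0;
}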
5335 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 5335 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
5336 ASSERT(ToRegister(instr->context()).is(cp)); 5336 DCHECK(ToRegister(instr->context()).is(cp));
5337 ASSERT(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister())); 5337 DCHECK(ToRegister(instr->object()).is(KeyedStoreIC::ReceiverRegister()));
5338 ASSERT(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); 5338 DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister()));
5339 ASSERT(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); 5339 DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister()));
5340 5340
5341 Handle<Code> ic = instr->strict_mode() == STRICT 5341 Handle<Code> ic = instr->strict_mode() == STRICT
5342 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() 5342 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
5343 : isolate()->builtins()->KeyedStoreIC_Initialize(); 5343 : isolate()->builtins()->KeyedStoreIC_Initialize();
5344 CallCode(ic, RelocInfo::CODE_TARGET, instr); 5344 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5345 } 5345 }
5346 5346
5347 5347
5348 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { 5348 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
5349 Representation representation = instr->representation(); 5349 Representation representation = instr->representation();
5350 5350
5351 Register object = ToRegister(instr->object()); 5351 Register object = ToRegister(instr->object());
5352 HObjectAccess access = instr->hydrogen()->access(); 5352 HObjectAccess access = instr->hydrogen()->access();
5353 int offset = access.offset(); 5353 int offset = access.offset();
5354 5354
5355 if (access.IsExternalMemory()) { 5355 if (access.IsExternalMemory()) {
5356 ASSERT(!instr->hydrogen()->has_transition()); 5356 DCHECK(!instr->hydrogen()->has_transition());
5357 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); 5357 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
5358 Register value = ToRegister(instr->value()); 5358 Register value = ToRegister(instr->value());
5359 __ Store(value, MemOperand(object, offset), representation); 5359 __ Store(value, MemOperand(object, offset), representation);
5360 return; 5360 return;
5361 } 5361 }
5362 5362
5363 __ AssertNotSmi(object); 5363 __ AssertNotSmi(object);
5364 5364
5365 if (representation.IsDouble()) { 5365 if (representation.IsDouble()) {
5366 ASSERT(access.IsInobject()); 5366 DCHECK(access.IsInobject());
5367 ASSERT(!instr->hydrogen()->has_transition()); 5367 DCHECK(!instr->hydrogen()->has_transition());
5368 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); 5368 DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
5369 FPRegister value = ToDoubleRegister(instr->value()); 5369 FPRegister value = ToDoubleRegister(instr->value());
5370 __ Str(value, FieldMemOperand(object, offset)); 5370 __ Str(value, FieldMemOperand(object, offset));
5371 return; 5371 return;
5372 } 5372 }
5373 5373
5374 Register value = ToRegister(instr->value()); 5374 Register value = ToRegister(instr->value());
5375 5375
5376 ASSERT(!representation.IsSmi() || 5376 DCHECK(!representation.IsSmi() ||
5377 !instr->value()->IsConstantOperand() || 5377 !instr->value()->IsConstantOperand() ||
5378 IsInteger32Constant(LConstantOperand::cast(instr->value()))); 5378 IsInteger32Constant(LConstantOperand::cast(instr->value())));
5379 5379
5380 if (instr->hydrogen()->has_transition()) { 5380 if (instr->hydrogen()->has_transition()) {
5381 Handle<Map> transition = instr->hydrogen()->transition_map(); 5381 Handle<Map> transition = instr->hydrogen()->transition_map();
5382 AddDeprecationDependency(transition); 5382 AddDeprecationDependency(transition);
5383 // Store the new map value. 5383 // Store the new map value.
5384 Register new_map_value = ToRegister(instr->temp0()); 5384 Register new_map_value = ToRegister(instr->temp0());
5385 __ Mov(new_map_value, Operand(transition)); 5385 __ Mov(new_map_value, Operand(transition));
5386 __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset)); 5386 __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
(...skipping 12 matching lines...)
5399 if (access.IsInobject()) { 5399 if (access.IsInobject()) {
5400 destination = object; 5400 destination = object;
5401 } else { 5401 } else {
5402 Register temp0 = ToRegister(instr->temp0()); 5402 Register temp0 = ToRegister(instr->temp0());
5403 __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset)); 5403 __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
5404 destination = temp0; 5404 destination = temp0;
5405 } 5405 }
5406 5406
5407 if (representation.IsSmi() && 5407 if (representation.IsSmi() &&
5408 instr->hydrogen()->value()->representation().IsInteger32()) { 5408 instr->hydrogen()->value()->representation().IsInteger32()) {
5409 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); 5409 DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
5410 #ifdef DEBUG 5410 #ifdef DEBUG
5411 Register temp0 = ToRegister(instr->temp0()); 5411 Register temp0 = ToRegister(instr->temp0());
5412 __ Ldr(temp0, FieldMemOperand(destination, offset)); 5412 __ Ldr(temp0, FieldMemOperand(destination, offset));
5413 __ AssertSmi(temp0); 5413 __ AssertSmi(temp0);
5414 // If destination aliased temp0, restore it to the address calculated 5414 // If destination aliased temp0, restore it to the address calculated
5415 // earlier. 5415 // earlier.
5416 if (destination.Is(temp0)) { 5416 if (destination.Is(temp0)) {
5417 ASSERT(!access.IsInobject()); 5417 DCHECK(!access.IsInobject());
5418 __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset)); 5418 __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
5419 } 5419 }
5420 #endif 5420 #endif
5421 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits); 5421 STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
5422 STATIC_ASSERT(kSmiTag == 0); 5422 STATIC_ASSERT(kSmiTag == 0);
5423 __ Store(value, UntagSmiFieldMemOperand(destination, offset), 5423 __ Store(value, UntagSmiFieldMemOperand(destination, offset),
5424 Representation::Integer32()); 5424 Representation::Integer32());
5425 } else { 5425 } else {
5426 __ Store(value, FieldMemOperand(destination, offset), representation); 5426 __ Store(value, FieldMemOperand(destination, offset), representation);
5427 } 5427 }
5428 if (instr->hydrogen()->NeedsWriteBarrier()) { 5428 if (instr->hydrogen()->NeedsWriteBarrier()) {
5429 __ RecordWriteField(destination, 5429 __ RecordWriteField(destination,
5430 offset, 5430 offset,
5431 value, // Clobbered. 5431 value, // Clobbered.
5432 ToRegister(instr->temp1()), // Clobbered. 5432 ToRegister(instr->temp1()), // Clobbered.
5433 GetLinkRegisterState(), 5433 GetLinkRegisterState(),
5434 kSaveFPRegs, 5434 kSaveFPRegs,
5435 EMIT_REMEMBERED_SET, 5435 EMIT_REMEMBERED_SET,
5436 instr->hydrogen()->SmiCheckForWriteBarrier(), 5436 instr->hydrogen()->SmiCheckForWriteBarrier(),
5437 instr->hydrogen()->PointersToHereCheckForValue()); 5437 instr->hydrogen()->PointersToHereCheckForValue());
5438 } 5438 }
5439 } 5439 }
5440 5440
5441 5441
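A minimal sketch (not V8 code) of the destination choice in DoStoreNamedField above: in-object fields live directly inside the JSObject, while out-of-object fields live in a separately allocated properties backing store that must be loaded first. The layout here is illustrative.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct JSObject {
  std::vector<int64_t> inobject_fields;
  std::vector<int64_t>* properties;  // out-of-object backing store
};

void StoreNamedField(JSObject* object, bool in_object, size_t index,
                     int64_t value) {
  std::vector<int64_t>* destination =
      in_object ? &object->inobject_fields : object->properties;
  (*destination)[index] = value;
}

int main() {
  std::vector<int64_t> props(4, 0);
  JSObject obj{{0, 0}, &props};
  StoreNamedField(&obj, true, 1, 42);  // in-object
  StoreNamedField(&obj, false, 2, 7);  // properties backing store
  assert(obj.inobject_fields[1] == 42 && props[2] == 7);
  return 0;
}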
5442 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 5442 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
5443 ASSERT(ToRegister(instr->context()).is(cp)); 5443 DCHECK(ToRegister(instr->context()).is(cp));
5444 ASSERT(ToRegister(instr->object()).is(StoreIC::ReceiverRegister())); 5444 DCHECK(ToRegister(instr->object()).is(StoreIC::ReceiverRegister()));
5445 ASSERT(ToRegister(instr->value()).is(StoreIC::ValueRegister())); 5445 DCHECK(ToRegister(instr->value()).is(StoreIC::ValueRegister()));
5446 5446
5447 __ Mov(StoreIC::NameRegister(), Operand(instr->name())); 5447 __ Mov(StoreIC::NameRegister(), Operand(instr->name()));
5448 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); 5448 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
5449 CallCode(ic, RelocInfo::CODE_TARGET, instr); 5449 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5450 } 5450 }
5451 5451
5452 5452
5453 void LCodeGen::DoStringAdd(LStringAdd* instr) { 5453 void LCodeGen::DoStringAdd(LStringAdd* instr) {
5454 ASSERT(ToRegister(instr->context()).is(cp)); 5454 DCHECK(ToRegister(instr->context()).is(cp));
5455 ASSERT(ToRegister(instr->left()).Is(x1)); 5455 DCHECK(ToRegister(instr->left()).Is(x1));
5456 ASSERT(ToRegister(instr->right()).Is(x0)); 5456 DCHECK(ToRegister(instr->right()).Is(x0));
5457 StringAddStub stub(isolate(), 5457 StringAddStub stub(isolate(),
5458 instr->hydrogen()->flags(), 5458 instr->hydrogen()->flags(),
5459 instr->hydrogen()->pretenure_flag()); 5459 instr->hydrogen()->pretenure_flag());
5460 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 5460 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5461 } 5461 }
5462 5462
5463 5463
5464 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 5464 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
5465 class DeferredStringCharCodeAt: public LDeferredCode { 5465 class DeferredStringCharCodeAt: public LDeferredCode {
5466 public: 5466 public:
(...skipping 48 matching lines...)
5515 : LDeferredCode(codegen), instr_(instr) { } 5515 : LDeferredCode(codegen), instr_(instr) { }
5516 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } 5516 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
5517 virtual LInstruction* instr() { return instr_; } 5517 virtual LInstruction* instr() { return instr_; }
5518 private: 5518 private:
5519 LStringCharFromCode* instr_; 5519 LStringCharFromCode* instr_;
5520 }; 5520 };
5521 5521
5522 DeferredStringCharFromCode* deferred = 5522 DeferredStringCharFromCode* deferred =
5523 new(zone()) DeferredStringCharFromCode(this, instr); 5523 new(zone()) DeferredStringCharFromCode(this, instr);
5524 5524
5525 ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); 5525 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
5526 Register char_code = ToRegister32(instr->char_code()); 5526 Register char_code = ToRegister32(instr->char_code());
5527 Register result = ToRegister(instr->result()); 5527 Register result = ToRegister(instr->result());
5528 5528
5529 __ Cmp(char_code, String::kMaxOneByteCharCode); 5529 __ Cmp(char_code, String::kMaxOneByteCharCode);
5530 __ B(hi, deferred->entry()); 5530 __ B(hi, deferred->entry());
5531 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); 5531 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
5532 __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag); 5532 __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
5533 __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2)); 5533 __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
5534 __ CompareRoot(result, Heap::kUndefinedValueRootIndex); 5534 __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
5535 __ B(eq, deferred->entry()); 5535 __ B(eq, deferred->entry());
(...skipping 11 matching lines...)
5547 __ Mov(result, 0); 5547 __ Mov(result, 0);
5548 5548
5549 PushSafepointRegistersScope scope(this); 5549 PushSafepointRegistersScope scope(this);
5550 __ SmiTagAndPush(char_code); 5550 __ SmiTagAndPush(char_code);
5551 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); 5551 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
5552 __ StoreToSafepointRegisterSlot(x0, result); 5552 __ StoreToSafepointRegisterSlot(x0, result);
5553 } 5553 }
5554 5554
5555 5555
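A minimal sketch (not V8 code) of the fast path in DoStringCharFromCode above: codes up to String::kMaxOneByteCharCode index a cache of single-character strings, and a hole in the cache (the undefined root) falls through to the deferred runtime call, just like an out-of-range code. The cache representation here is illustrative.

#include <cassert>
#include <string>
#include <vector>

const int kMaxOneByteCharCode = 0xff;
std::vector<std::string> single_char_cache(kMaxOneByteCharCode + 1);

std::string CharFromCodeSlow(int code) {             // stands in for the deferred
  return std::string(1, static_cast<char>(code));    // Runtime::kCharFromCode call
}

std::string CharFromCode(int code) {
  if (code > kMaxOneByteCharCode) return CharFromCodeSlow(code);
  if (single_char_cache[code].empty()) {  // "undefined" hole in the cache
    single_char_cache[code] = CharFromCodeSlow(code);
  }
  return single_char_cache[code];
}

int main() {
  assert(CharFromCode('A') == "A");
  return 0;
}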
5556 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 5556 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
5557 ASSERT(ToRegister(instr->context()).is(cp)); 5557 DCHECK(ToRegister(instr->context()).is(cp));
5558 Token::Value op = instr->op(); 5558 Token::Value op = instr->op();
5559 5559
5560 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); 5560 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
5561 CallCode(ic, RelocInfo::CODE_TARGET, instr); 5561 CallCode(ic, RelocInfo::CODE_TARGET, instr);
5562 InlineSmiCheckInfo::EmitNotInlined(masm()); 5562 InlineSmiCheckInfo::EmitNotInlined(masm());
5563 5563
5564 Condition condition = TokenToCondition(op, false); 5564 Condition condition = TokenToCondition(op, false);
5565 5565
5566 EmitCompareAndBranch(instr, condition, x0, 0); 5566 EmitCompareAndBranch(instr, condition, x0, 0);
5567 } 5567 }
(...skipping 122 matching lines...)
5690 } 5690 }
5691 5691
5692 5692
5693 void LCodeGen::DoThisFunction(LThisFunction* instr) { 5693 void LCodeGen::DoThisFunction(LThisFunction* instr) {
5694 Register result = ToRegister(instr->result()); 5694 Register result = ToRegister(instr->result());
5695 __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); 5695 __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5696 } 5696 }
5697 5697
5698 5698
5699 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { 5699 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5700 ASSERT(ToRegister(instr->value()).Is(x0)); 5700 DCHECK(ToRegister(instr->value()).Is(x0));
5701 ASSERT(ToRegister(instr->result()).Is(x0)); 5701 DCHECK(ToRegister(instr->result()).Is(x0));
5702 __ Push(x0); 5702 __ Push(x0);
5703 CallRuntime(Runtime::kToFastProperties, 1, instr); 5703 CallRuntime(Runtime::kToFastProperties, 1, instr);
5704 } 5704 }
5705 5705
5706 5706
5707 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { 5707 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5708 ASSERT(ToRegister(instr->context()).is(cp)); 5708 DCHECK(ToRegister(instr->context()).is(cp));
5709 Label materialized; 5709 Label materialized;
5710 // Registers will be used as follows: 5710 // Registers will be used as follows:
5711 // x7 = literals array. 5711 // x7 = literals array.
5712 // x1 = regexp literal. 5712 // x1 = regexp literal.
5713 // x0 = regexp literal clone. 5713 // x0 = regexp literal clone.
5714 // x10-x12 are used as temporaries. 5714 // x10-x12 are used as temporaries.
5715 int literal_offset = 5715 int literal_offset =
5716 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); 5716 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5717 __ LoadObject(x7, instr->hydrogen()->literals()); 5717 __ LoadObject(x7, instr->hydrogen()->literals());
5718 __ Ldr(x1, FieldMemOperand(x7, literal_offset)); 5718 __ Ldr(x1, FieldMemOperand(x7, literal_offset));
(...skipping 47 matching lines...)
5766 __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(), 5766 __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
5767 kDontSaveFPRegs); 5767 kDontSaveFPRegs);
5768 } else { 5768 } else {
5769 { 5769 {
5770 UseScratchRegisterScope temps(masm()); 5770 UseScratchRegisterScope temps(masm());
5771 // Use the temp register only in a restricted scope - the codegen checks 5771 // Use the temp register only in a restricted scope - the codegen checks
5772 // that we do not use any register across a call. 5772 // that we do not use any register across a call.
5773 __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable, 5773 __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
5774 DONT_DO_SMI_CHECK); 5774 DONT_DO_SMI_CHECK);
5775 } 5775 }
5776 ASSERT(object.is(x0)); 5776 DCHECK(object.is(x0));
5777 ASSERT(ToRegister(instr->context()).is(cp)); 5777 DCHECK(ToRegister(instr->context()).is(cp));
5778 PushSafepointRegistersScope scope(this); 5778 PushSafepointRegistersScope scope(this);
5779 __ Mov(x1, Operand(to_map)); 5779 __ Mov(x1, Operand(to_map));
5780 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; 5780 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
5781 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); 5781 TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
5782 __ CallStub(&stub); 5782 __ CallStub(&stub);
5783 RecordSafepointWithRegisters( 5783 RecordSafepointWithRegisters(
5784 instr->pointer_map(), 0, Safepoint::kLazyDeopt); 5784 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
5785 } 5785 }
5786 __ Bind(&not_applicable); 5786 __ Bind(&not_applicable);
5787 } 5787 }
(...skipping 29 matching lines...)
5817 5817
5818 5818
5819 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { 5819 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5820 Handle<String> type_name = instr->type_literal(); 5820 Handle<String> type_name = instr->type_literal();
5821 Label* true_label = instr->TrueLabel(chunk_); 5821 Label* true_label = instr->TrueLabel(chunk_);
5822 Label* false_label = instr->FalseLabel(chunk_); 5822 Label* false_label = instr->FalseLabel(chunk_);
5823 Register value = ToRegister(instr->value()); 5823 Register value = ToRegister(instr->value());
5824 5824
5825 Factory* factory = isolate()->factory(); 5825 Factory* factory = isolate()->factory();
5826 if (String::Equals(type_name, factory->number_string())) { 5826 if (String::Equals(type_name, factory->number_string())) {
5827 ASSERT(instr->temp1() != NULL); 5827 DCHECK(instr->temp1() != NULL);
5828 Register map = ToRegister(instr->temp1()); 5828 Register map = ToRegister(instr->temp1());
5829 5829
5830 __ JumpIfSmi(value, true_label); 5830 __ JumpIfSmi(value, true_label);
5831 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); 5831 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
5832 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); 5832 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5833 EmitBranch(instr, eq); 5833 EmitBranch(instr, eq);
5834 5834
5835 } else if (String::Equals(type_name, factory->string_string())) { 5835 } else if (String::Equals(type_name, factory->string_string())) {
5836 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); 5836 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
5837 Register map = ToRegister(instr->temp1()); 5837 Register map = ToRegister(instr->temp1());
5838 Register scratch = ToRegister(instr->temp2()); 5838 Register scratch = ToRegister(instr->temp2());
5839 5839
5840 __ JumpIfSmi(value, false_label); 5840 __ JumpIfSmi(value, false_label);
5841 __ JumpIfObjectType( 5841 __ JumpIfObjectType(
5842 value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge); 5842 value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
5843 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); 5843 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
5844 EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable); 5844 EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
5845 5845
5846 } else if (String::Equals(type_name, factory->symbol_string())) { 5846 } else if (String::Equals(type_name, factory->symbol_string())) {
5847 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); 5847 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
5848 Register map = ToRegister(instr->temp1()); 5848 Register map = ToRegister(instr->temp1());
5849 Register scratch = ToRegister(instr->temp2()); 5849 Register scratch = ToRegister(instr->temp2());
5850 5850
5851 __ JumpIfSmi(value, false_label); 5851 __ JumpIfSmi(value, false_label);
5852 __ CompareObjectType(value, map, scratch, SYMBOL_TYPE); 5852 __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
5853 EmitBranch(instr, eq); 5853 EmitBranch(instr, eq);
5854 5854
5855 } else if (String::Equals(type_name, factory->boolean_string())) { 5855 } else if (String::Equals(type_name, factory->boolean_string())) {
5856 __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label); 5856 __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
5857 __ CompareRoot(value, Heap::kFalseValueRootIndex); 5857 __ CompareRoot(value, Heap::kFalseValueRootIndex);
5858 EmitBranch(instr, eq); 5858 EmitBranch(instr, eq);
5859 5859
5860 } else if (String::Equals(type_name, factory->undefined_string())) { 5860 } else if (String::Equals(type_name, factory->undefined_string())) {
5861 ASSERT(instr->temp1() != NULL); 5861 DCHECK(instr->temp1() != NULL);
5862 Register scratch = ToRegister(instr->temp1()); 5862 Register scratch = ToRegister(instr->temp1());
5863 5863
5864 __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label); 5864 __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
5865 __ JumpIfSmi(value, false_label); 5865 __ JumpIfSmi(value, false_label);
5866 // Check for undetectable objects and jump to the true branch in this case. 5866 // Check for undetectable objects and jump to the true branch in this case.
5867 __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); 5867 __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5868 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); 5868 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
5869 EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable); 5869 EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
5870 5870
5871 } else if (String::Equals(type_name, factory->function_string())) { 5871 } else if (String::Equals(type_name, factory->function_string())) {
5872 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 5872 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5873 ASSERT(instr->temp1() != NULL); 5873 DCHECK(instr->temp1() != NULL);
5874 Register type = ToRegister(instr->temp1()); 5874 Register type = ToRegister(instr->temp1());
5875 5875
5876 __ JumpIfSmi(value, false_label); 5876 __ JumpIfSmi(value, false_label);
5877 __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label); 5877 __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
5878 // HeapObject's type has been loaded into type register by JumpIfObjectType. 5878 // HeapObject's type has been loaded into type register by JumpIfObjectType.
5879 EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE); 5879 EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
5880 5880
5881 } else if (String::Equals(type_name, factory->object_string())) { 5881 } else if (String::Equals(type_name, factory->object_string())) {
5882 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); 5882 DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
5883 Register map = ToRegister(instr->temp1()); 5883 Register map = ToRegister(instr->temp1());
5884 Register scratch = ToRegister(instr->temp2()); 5884 Register scratch = ToRegister(instr->temp2());
5885 5885
5886 __ JumpIfSmi(value, false_label); 5886 __ JumpIfSmi(value, false_label);
5887 __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label); 5887 __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
5888 __ JumpIfObjectType(value, map, scratch, 5888 __ JumpIfObjectType(value, map, scratch,
5889 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt); 5889 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
5890 __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); 5890 __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5891 __ B(gt, false_label); 5891 __ B(gt, false_label);
5892 // Check for undetectable objects => false. 5892 // Check for undetectable objects => false.
(...skipping 153 matching lines...)
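A minimal sketch (not V8 code) of the typeof dispatch in DoTypeofIsAndBranch above. The branches worth noting: undetectable objects (e.g. document.all) report "undefined", and both JS_FUNCTION and JS_FUNCTION_PROXY report "function". The type enum here is illustrative.

#include <cassert>
#include <string>

enum class Type { kSmi, kHeapNumber, kString, kSymbol, kBoolean,
                  kUndefined, kUndetectable, kFunction, kObject };

std::string TypeOf(Type t) {
  switch (t) {
    case Type::kSmi:
    case Type::kHeapNumber:   return "number";
    case Type::kString:       return "string";
    case Type::kSymbol:       return "symbol";
    case Type::kBoolean:      return "boolean";
    case Type::kUndefined:
    case Type::kUndetectable: return "undefined";
    case Type::kFunction:     return "function";
    case Type::kObject:       return "object";
  }
  return "object";
}

int main() {
  assert(TypeOf(Type::kUndetectable) == "undefined");
  assert(TypeOf(Type::kFunction) == "function");
  return 0;
}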
6046 Handle<ScopeInfo> scope_info = instr->scope_info(); 6046 Handle<ScopeInfo> scope_info = instr->scope_info();
6047 __ Push(scope_info); 6047 __ Push(scope_info);
6048 __ Push(ToRegister(instr->function())); 6048 __ Push(ToRegister(instr->function()));
6049 CallRuntime(Runtime::kPushBlockContext, 2, instr); 6049 CallRuntime(Runtime::kPushBlockContext, 2, instr);
6050 RecordSafepoint(Safepoint::kNoLazyDeopt); 6050 RecordSafepoint(Safepoint::kNoLazyDeopt);
6051 } 6051 }
6052 6052
6053 6053
6054 6054
6055 } } // namespace v8::internal 6055 } } // namespace v8::internal