Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(121)

Side by Side Diff: src/a64/lithium-codegen-a64.cc

Issue 148593004: A64: Synchronize with r18084. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/a64/lithium-codegen-a64.h ('k') | src/a64/macro-assembler-a64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 599 matching lines...) Expand 10 before | Expand all | Expand 10 after
610 FrameScope frame_scope(masm_, StackFrame::NONE); 610 FrameScope frame_scope(masm_, StackFrame::NONE);
611 611
612 return GeneratePrologue() && 612 return GeneratePrologue() &&
613 GenerateBody() && 613 GenerateBody() &&
614 GenerateDeferredCode() && 614 GenerateDeferredCode() &&
615 GenerateDeoptJumpTable() && 615 GenerateDeoptJumpTable() &&
616 GenerateSafepointTable(); 616 GenerateSafepointTable();
617 } 617 }
618 618
619 619
620 void LCodeGen::SaveCallerDoubles() {
621 ASSERT(info()->saves_caller_doubles());
622 ASSERT(NeedsEagerFrame());
623 Comment(";;; Save clobbered callee double registers");
624 BitVector* doubles = chunk()->allocated_double_registers();
625 BitVector::Iterator iterator(doubles);
626 int count = 0;
627 while (!iterator.Done()) {
628 // TODO(all): Is this supposed to save just the callee-saved doubles? It
629 // looks like it's saving all of them.
630 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
631 __ Poke(value, count * kDoubleSize);
632 iterator.Advance();
633 count++;
634 }
635 }
636
637
638 void LCodeGen::RestoreCallerDoubles() {
639 ASSERT(info()->saves_caller_doubles());
640 ASSERT(NeedsEagerFrame());
641 Comment(";;; Restore clobbered callee double registers");
642 BitVector* doubles = chunk()->allocated_double_registers();
643 BitVector::Iterator iterator(doubles);
644 int count = 0;
645 while (!iterator.Done()) {
646 // TODO(all): Is this supposed to restore just the callee-saved doubles? It
647 // looks like it's restoring all of them.
648 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
649 __ Peek(value, count * kDoubleSize);
650 iterator.Advance();
651 count++;
652 }
653 }
654
655
620 bool LCodeGen::GeneratePrologue() { 656 bool LCodeGen::GeneratePrologue() {
621 ASSERT(is_generating()); 657 ASSERT(is_generating());
622 658
623 if (info()->IsOptimizing()) { 659 if (info()->IsOptimizing()) {
624 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 660 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
625 661
626 // TODO(all): Add support for stop_t FLAG in DEBUG mode. 662 // TODO(all): Add support for stop_t FLAG in DEBUG mode.
627 663
628 // Strict mode functions and builtins need to replace the receiver 664 // Strict mode functions and builtins need to replace the receiver
629 // with undefined when called as functions (without an explicit 665 // with undefined when called as functions (without an explicit
(...skipping 18 matching lines...) Expand all
648 info_->AddNoFrameRange(0, masm_->pc_offset()); 684 info_->AddNoFrameRange(0, masm_->pc_offset());
649 } 685 }
650 686
651 // Reserve space for the stack slots needed by the code. 687 // Reserve space for the stack slots needed by the code.
652 int slots = GetStackSlotCount(); 688 int slots = GetStackSlotCount();
653 if (slots > 0) { 689 if (slots > 0) {
654 __ Claim(slots, kPointerSize); 690 __ Claim(slots, kPointerSize);
655 } 691 }
656 692
657 if (info()->saves_caller_doubles()) { 693 if (info()->saves_caller_doubles()) {
658 Comment(";;; Save clobbered callee double registers"); 694 SaveCallerDoubles();
659 ASSERT(NeedsEagerFrame());
660 BitVector* doubles = chunk()->allocated_double_registers();
661 BitVector::Iterator iterator(doubles);
662 int count = 0;
663 while (!iterator.Done()) {
664 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
665 __ Poke(value, count * kDoubleSize);
666 iterator.Advance();
667 count++;
668 }
669 } 695 }
670 696
671 // Allocate a local context if needed. 697 // Allocate a local context if needed.
672 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 698 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
673 if (heap_slots > 0) { 699 if (heap_slots > 0) {
674 Comment(";;; Allocate local context"); 700 Comment(";;; Allocate local context");
675 // Argument to NewContext is the function, which is in x1. 701 // Argument to NewContext is the function, which is in x1.
676 __ Push(x1); 702 __ Push(x1);
677 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 703 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
678 FastNewContextStub stub(heap_slots); 704 FastNewContextStub stub(heap_slots);
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after
796 __ Bind(&deopt_jump_table_[i].label); 822 __ Bind(&deopt_jump_table_[i].label);
797 Address entry = deopt_jump_table_[i].address; 823 Address entry = deopt_jump_table_[i].address;
798 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; 824 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
799 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); 825 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
800 if (id == Deoptimizer::kNotDeoptimizationEntry) { 826 if (id == Deoptimizer::kNotDeoptimizationEntry) {
801 Comment(";;; jump table entry %d.", i); 827 Comment(";;; jump table entry %d.", i);
802 } else { 828 } else {
803 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); 829 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
804 } 830 }
805 if (deopt_jump_table_[i].needs_frame) { 831 if (deopt_jump_table_[i].needs_frame) {
832 ASSERT(!info()->saves_caller_doubles());
806 __ Mov(__ Tmp0(), Operand(ExternalReference::ForDeoptEntry(entry))); 833 __ Mov(__ Tmp0(), Operand(ExternalReference::ForDeoptEntry(entry)));
807 if (needs_frame.is_bound()) { 834 if (needs_frame.is_bound()) {
808 __ B(&needs_frame); 835 __ B(&needs_frame);
809 } else { 836 } else {
810 __ Bind(&needs_frame); 837 __ Bind(&needs_frame);
811 // This variant of deopt can only be used with stubs. Since we don't 838 // This variant of deopt can only be used with stubs. Since we don't
812 // have a function pointer to install in the stack frame that we're 839 // have a function pointer to install in the stack frame that we're
813 // building, install a special marker there instead. 840 // building, install a special marker there instead.
814 // TODO(jochen): Revisit the use of TmpX(). 841 // TODO(jochen): Revisit the use of TmpX().
815 ASSERT(info()->IsStub()); 842 ASSERT(info()->IsStub());
816 __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB))); 843 __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB)));
817 __ Push(lr, fp, cp, __ Tmp1()); 844 __ Push(lr, fp, cp, __ Tmp1());
818 __ Add(fp, __ StackPointer(), 2 * kPointerSize); 845 __ Add(fp, __ StackPointer(), 2 * kPointerSize);
819 __ Call(__ Tmp0()); 846 __ Call(__ Tmp0());
820 } 847 }
821 } else { 848 } else {
849 if (info()->saves_caller_doubles()) {
850 ASSERT(info()->IsStub());
851 RestoreCallerDoubles();
852 }
822 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 853 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
823 } 854 }
824 masm()->CheckConstPool(false, false); 855 masm()->CheckConstPool(false, false);
825 } 856 }
826 857
827 // Force constant pool emission at the end of the deopt jump table to make 858 // Force constant pool emission at the end of the deopt jump table to make
828 // sure that no constant pools are emitted after. 859 // sure that no constant pools are emitted after.
829 masm()->CheckConstPool(true, false); 860 masm()->CheckConstPool(true, false);
830 861
831 // The deoptimization jump table is the last part of the instruction 862 // The deoptimization jump table is the last part of the instruction
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after
965 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 996 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
966 return; 997 return;
967 } 998 }
968 999
969 if (info()->ShouldTrapOnDeopt()) { 1000 if (info()->ShouldTrapOnDeopt()) {
970 __ Debug("trap_on_deopt", __LINE__, BREAK); 1001 __ Debug("trap_on_deopt", __LINE__, BREAK);
971 } 1002 }
972 1003
973 1004
974 ASSERT(info()->IsStub() || frame_is_built_); 1005 ASSERT(info()->IsStub() || frame_is_built_);
975 if (frame_is_built_) { 1006 // Go through jump table if we need to build frame, or restore caller doubles.
1007 if (frame_is_built_ && !info()->saves_caller_doubles()) {
976 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 1008 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
977 } else { 1009 } else {
978 // We often have several deopts to the same entry, reuse the last 1010 // We often have several deopts to the same entry, reuse the last
979 // jump entry if this is the case. 1011 // jump entry if this is the case.
980 if (deopt_jump_table_.is_empty() || 1012 if (deopt_jump_table_.is_empty() ||
981 (deopt_jump_table_.last().address != entry) || 1013 (deopt_jump_table_.last().address != entry) ||
982 (deopt_jump_table_.last().bailout_type != bailout_type) || 1014 (deopt_jump_table_.last().bailout_type != bailout_type) ||
983 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { 1015 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
984 Deoptimizer::JumpTableEntry table_entry(entry, 1016 Deoptimizer::JumpTableEntry table_entry(entry,
985 bailout_type, 1017 bailout_type,
(...skipping 2716 matching lines...) Expand 10 before | Expand all | Expand 10 after
3702 3734
// Emits Math.cos by calling the transcendental cache stub in its UNTAGGED
// mode. The register allocator pins the result to d0 (asserted on entry and
// again after the call); the stub leaves its answer in d0.
// NOTE(review): the two asserts use `.is(d0)` and `.Is(d0)` respectively —
// confirm both spellings are valid on this register type, or unify them.
void LCodeGen::DoMathCos(LMathCos* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d0));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  ASSERT(ToDoubleRegister(instr->result()).Is(d0));
}
3710 3742
3711 3743
// TODO(all): This will disappear when Math.random is rewritten in JavaScript.
//
// Generates a pseudo-random double in [0.0, 1.0) from two 32-bit seeds kept
// in the native context's random-seed ByteArray. If the seeds are not yet
// initialized (state[0] == 0) the deferred path calls into the runtime to
// seed them. Register usage is fixed because the instruction is marked as a
// call: global object arrives in x0, the result is produced in d7.
void LCodeGen::DoRandom(LRandom* instr) {
  // Deferred path: falls back to the C runtime to (re)initialize the seeds.
  class DeferredDoRandom: public LDeferredCode {
   public:
    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
    virtual LInstruction* instr() { return instr_; }

   private:
    LRandom* instr_;
  };

  DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);

  // Having marked this instruction as a call we can use any registers.
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToDoubleRegister(instr->result()).is(d7));
  ASSERT(ToRegister(instr->global_object()).is(x0));

  // The two seeds are stored as adjacent uint32_t values in one pointer-sized
  // field; the STATIC_ASSERT pins that layout assumption.
  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == 2 * kSeedSize);

  // Load the native context, then the random-seed FixedArray, reusing x0.
  Register global_object = x0;
  __ Ldr(global_object,
         FieldMemOperand(global_object, GlobalObject::kNativeContextOffset));
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  __ Ldr(x1, FieldMemOperand(global_object, kRandomSeedOffset));
  // x1: FixedArray of the native context's random seeds

  // Load state[0].
  __ Ldr(w2, FieldMemOperand(x1, ByteArray::kHeaderSize));
  // If state[0] == 0, call runtime to initialize seeds.
  __ Cbz(w2, deferred->entry());
  // Load state[1].
  __ Ldr(w3, FieldMemOperand(x1, ByteArray::kHeaderSize + kSeedSize));

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  __ And(w4, w2, 0xFFFF);
  __ Mov(w5, 18273);
  __ Mul(w5, w5, w4);
  __ Add(w2, w5, Operand(w2, LSR, 16));
  // Save state[0].
  __ Str(w2, FieldMemOperand(x1, ByteArray::kHeaderSize));

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ And(w4, w3, 0xFFFF);
  __ Mov(w5, 36969);
  __ Mul(w5, w5, w4);
  __ Add(w3, w5, Operand(w3, LSR, 16));
  // Save state[1].
  __ Str(w3, FieldMemOperand(x1, ByteArray::kHeaderSize + kSeedSize));

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  __ And(w3, w3, 0x3FFFF);
  __ Add(w0, w3, Operand(w2, LSL, 14));

  // The deferred runtime call also returns its 32 random bits in w0 (see
  // DoDeferredRandom), so both paths converge here.
  __ Bind(deferred->exit());
  // Interpret the 32 random bits as a 0.32 fixed point number, and convert to
  // a double in the range 0.0 <= number < 1.0.
  __ Ucvtf(d7, w0, 32);
}
3775
3776
// TODO(all): This will disappear when Math.random is rewritten in JavaScript.
//
// Deferred slow path for DoRandom: calls the C function that (re)seeds the
// generator and produces 32 random bits. The single argument (the global
// object) is already in x0 at the call site.
void LCodeGen::DoDeferredRandom(LRandom* instr) {
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
  // Return value is in x0.
}
3782
3783
3784 void LCodeGen::DoMathExp(LMathExp* instr) { 3744 void LCodeGen::DoMathExp(LMathExp* instr) {
3785 DoubleRegister input = ToDoubleRegister(instr->value()); 3745 DoubleRegister input = ToDoubleRegister(instr->value());
3786 DoubleRegister result = ToDoubleRegister(instr->result()); 3746 DoubleRegister result = ToDoubleRegister(instr->result());
3787 DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1()); 3747 DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
3788 DoubleRegister double_temp2 = double_scratch(); 3748 DoubleRegister double_temp2 = double_scratch();
3789 Register temp1 = ToRegister(instr->temp1()); 3749 Register temp1 = ToRegister(instr->temp1());
3790 Register temp2 = ToRegister(instr->temp2()); 3750 Register temp2 = ToRegister(instr->temp2());
3791 Register temp3 = ToRegister(instr->temp3()); 3751 Register temp3 = ToRegister(instr->temp3());
3792 3752
3793 MathExpGenerator::EmitMathExp(masm(), input, result, 3753 MathExpGenerator::EmitMathExp(masm(), input, result,
(...skipping 681 matching lines...) Expand 10 before | Expand all | Expand 10 after
4475 4435
4476 void LCodeGen::DoReturn(LReturn* instr) { 4436 void LCodeGen::DoReturn(LReturn* instr) {
4477 if (FLAG_trace && info()->IsOptimizing()) { 4437 if (FLAG_trace && info()->IsOptimizing()) {
4478 // Push the return value on the stack as the parameter. 4438 // Push the return value on the stack as the parameter.
4479 // Runtime::TraceExit returns its parameter in x0. 4439 // Runtime::TraceExit returns its parameter in x0.
4480 __ Push(x0); 4440 __ Push(x0);
4481 __ CallRuntime(Runtime::kTraceExit, 1); 4441 __ CallRuntime(Runtime::kTraceExit, 1);
4482 } 4442 }
4483 4443
4484 if (info()->saves_caller_doubles()) { 4444 if (info()->saves_caller_doubles()) {
4485 ASSERT(NeedsEagerFrame()); 4445 RestoreCallerDoubles();
4486 BitVector* doubles = chunk()->allocated_double_registers();
4487 BitVector::Iterator iterator(doubles);
4488 int count = 0;
4489 while (!iterator.Done()) {
4490 FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
4491 __ Peek(value, count * kDoubleSize);
4492 iterator.Advance();
4493 count++;
4494 }
4495 } 4446 }
4496 4447
4497 int no_frame_start = -1; 4448 int no_frame_start = -1;
4498 if (NeedsEagerFrame()) { 4449 if (NeedsEagerFrame()) {
4499 Register stack_pointer = masm()->StackPointer(); 4450 Register stack_pointer = masm()->StackPointer();
4500 __ Mov(stack_pointer, fp); 4451 __ Mov(stack_pointer, fp);
4501 no_frame_start = masm_->pc_offset(); 4452 no_frame_start = masm_->pc_offset();
4502 __ Pop(fp, lr); 4453 __ Pop(fp, lr);
4503 } 4454 }
4504 4455
(...skipping 1231 matching lines...) Expand 10 before | Expand all | Expand 10 after
5736 __ Bind(&out_of_object); 5687 __ Bind(&out_of_object);
5737 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 5688 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5738 // Index is equal to negated out of object property index plus 1. 5689 // Index is equal to negated out of object property index plus 1.
5739 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); 5690 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
5740 __ Ldr(result, FieldMemOperand(result, 5691 __ Ldr(result, FieldMemOperand(result,
5741 FixedArray::kHeaderSize - kPointerSize)); 5692 FixedArray::kHeaderSize - kPointerSize));
5742 __ Bind(&done); 5693 __ Bind(&done);
5743 } 5694 }
5744 5695
5745 } } // namespace v8::internal 5696 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/a64/lithium-codegen-a64.h ('k') | src/a64/macro-assembler-a64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698