| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/arm64/lithium-codegen-arm64.h" | 7 #include "src/arm64/lithium-codegen-arm64.h" |
| 8 #include "src/arm64/lithium-gap-resolver-arm64.h" | 8 #include "src/arm64/lithium-gap-resolver-arm64.h" |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
| (...skipping 575 matching lines...) |
| 586 bool LCodeGen::GenerateCode() { | 586 bool LCodeGen::GenerateCode() { |
| 587 LPhase phase("Z_Code generation", chunk()); | 587 LPhase phase("Z_Code generation", chunk()); |
| 588 DCHECK(is_unused()); | 588 DCHECK(is_unused()); |
| 589 status_ = GENERATING; | 589 status_ = GENERATING; |
| 590 | 590 |
| 591 // Open a frame scope to indicate that there is a frame on the stack. The | 591 // Open a frame scope to indicate that there is a frame on the stack. The |
| 592 // NONE indicates that the scope shouldn't actually generate code to set up | 592 // NONE indicates that the scope shouldn't actually generate code to set up |
| 593 // the frame (that is done in GeneratePrologue). | 593 // the frame (that is done in GeneratePrologue). |
| 594 FrameScope frame_scope(masm_, StackFrame::NONE); | 594 FrameScope frame_scope(masm_, StackFrame::NONE); |
| 595 | 595 |
| 596 return GeneratePrologue() && | 596 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && |
| 597 GenerateBody() && | 597 GenerateJumpTable() && GenerateSafepointTable(); |
| 598 GenerateDeferredCode() && | |
| 599 GenerateDeoptJumpTable() && | |
| 600 GenerateSafepointTable(); | |
| 601 } | 598 } |
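Note: besides the reflow, this hunk renames GenerateDeoptJumpTable() to GenerateJumpTable(). The && chain short-circuits, so each phase runs only if the previous one succeeded; spelled out, the control flow is equivalent to this sketch (illustrative only, not code from the CL):

    if (!GeneratePrologue()) return false;
    if (!GenerateBody()) return false;
    if (!GenerateDeferredCode()) return false;
    if (!GenerateJumpTable()) return false;
    return GenerateSafepointTable();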
| 602 | 599 |
| 603 | 600 |
| 604 void LCodeGen::SaveCallerDoubles() { | 601 void LCodeGen::SaveCallerDoubles() { |
| 605 DCHECK(info()->saves_caller_doubles()); | 602 DCHECK(info()->saves_caller_doubles()); |
| 606 DCHECK(NeedsEagerFrame()); | 603 DCHECK(NeedsEagerFrame()); |
| 607 Comment(";;; Save clobbered callee double registers"); | 604 Comment(";;; Save clobbered callee double registers"); |
| 608 BitVector* doubles = chunk()->allocated_double_registers(); | 605 BitVector* doubles = chunk()->allocated_double_registers(); |
| 609 BitVector::Iterator iterator(doubles); | 606 BitVector::Iterator iterator(doubles); |
| 610 int count = 0; | 607 int count = 0; |
| (...skipping 209 matching lines...) |
| 820 // Force constant pool emission at the end of the deferred code to make | 817 // Force constant pool emission at the end of the deferred code to make |
| 821 // sure that no constant pools are emitted after deferred code because | 818 // sure that no constant pools are emitted after deferred code because |
| 822 // deferred code generation is the last step which generates code. The two | 819 // deferred code generation is the last step which generates code. The two |
| 823 // following steps will only output data used by Crankshaft. | 820 // following steps will only output data used by Crankshaft. |
| 824 masm()->CheckConstPool(true, false); | 821 masm()->CheckConstPool(true, false); |
| 825 | 822 |
| 826 return !is_aborted(); | 823 return !is_aborted(); |
| 827 } | 824 } |
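The forced CheckConstPool call is what guarantees the invariant the comment above describes. Assuming the (force_emit, require_jump) parameter order used by the ARM-family assemblers, the call reads as in this annotated sketch:

    // force_emit = true: flush any pending constant-pool entries now instead
    // of waiting for the distance-based trigger.
    // require_jump = false: no branch over the pool is needed, since the
    // remaining steps emit only data, not code.
    masm()->CheckConstPool(true, false);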
| 828 | 825 |
| 829 | 826 |
| 830 bool LCodeGen::GenerateDeoptJumpTable() { | 827 bool LCodeGen::GenerateJumpTable() { |
| 831 Label needs_frame, restore_caller_doubles, call_deopt_entry; | 828 Label needs_frame, restore_caller_doubles, call_deopt_entry; |
| 832 | 829 |
| 833 if (deopt_jump_table_.length() > 0) { | 830 if (jump_table_.length() > 0) { |
| 834 Comment(";;; -------------------- Jump table --------------------"); | 831 Comment(";;; -------------------- Jump table --------------------"); |
| 835 Address base = deopt_jump_table_[0]->address; | 832 Address base = jump_table_[0]->address; |
| 836 | 833 |
| 837 UseScratchRegisterScope temps(masm()); | 834 UseScratchRegisterScope temps(masm()); |
| 838 Register entry_offset = temps.AcquireX(); | 835 Register entry_offset = temps.AcquireX(); |
| 839 | 836 |
| 840 int length = deopt_jump_table_.length(); | 837 int length = jump_table_.length(); |
| 841 for (int i = 0; i < length; i++) { | 838 for (int i = 0; i < length; i++) { |
| 842 __ Bind(&deopt_jump_table_[i]->label); | 839 Deoptimizer::JumpTableEntry* table_entry = jump_table_[i]; |
| 840 __ Bind(&table_entry->label); |
| 843 | 841 |
| 844 Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type; | 842 Deoptimizer::BailoutType type = table_entry->bailout_type; |
| 845 Address entry = deopt_jump_table_[i]->address; | 843 Address entry = table_entry->address; |
| 846 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 844 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
| 847 if (id == Deoptimizer::kNotDeoptimizationEntry) { | 845 if (id == Deoptimizer::kNotDeoptimizationEntry) { |
| 848 Comment(";;; jump table entry %d.", i); | 846 Comment(";;; jump table entry %d.", i); |
| 849 } else { | 847 } else { |
| 850 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); | 848 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); |
| 851 } | 849 } |
| 850 DeoptComment(table_entry->mnemonic, table_entry->reason); |
| 852 | 851 |
| 853 // Second-level deopt table entries are contiguous and small, so instead | 852 // Second-level deopt table entries are contiguous and small, so instead |
| 854 // of loading the full, absolute address of each one, load the base | 853 // of loading the full, absolute address of each one, load the base |
| 855 // address and add an immediate offset. | 854 // address and add an immediate offset. |
| 856 __ Mov(entry_offset, entry - base); | 855 __ Mov(entry_offset, entry - base); |
| 857 | 856 |
| 858 // The last entry can fall through into `call_deopt_entry`, avoiding a | 857 // The last entry can fall through into `call_deopt_entry`, avoiding a |
| 859 // branch. | 858 // branch. |
| 860 bool last_entry = (i + 1) == length; | 859 bool last_entry = (i + 1) == length; |
| 861 | 860 |
| 862 if (deopt_jump_table_[i]->needs_frame) { | 861 if (table_entry->needs_frame) { |
| 863 DCHECK(!info()->saves_caller_doubles()); | 862 DCHECK(!info()->saves_caller_doubles()); |
| 864 if (!needs_frame.is_bound()) { | 863 if (!needs_frame.is_bound()) { |
| 865 // This variant of deopt can only be used with stubs. Since we don't | 864 // This variant of deopt can only be used with stubs. Since we don't |
| 866 // have a function pointer to install in the stack frame that we're | 865 // have a function pointer to install in the stack frame that we're |
| 867 // building, install a special marker there instead. | 866 // building, install a special marker there instead. |
| 868 DCHECK(info()->IsStub()); | 867 DCHECK(info()->IsStub()); |
| 869 | 868 |
| 870 UseScratchRegisterScope temps(masm()); | 869 UseScratchRegisterScope temps(masm()); |
| 871 Register stub_marker = temps.AcquireX(); | 870 Register stub_marker = temps.AcquireX(); |
| 872 __ Bind(&needs_frame); | 871 __ Bind(&needs_frame); |
| (...skipping 117 matching lines...) |
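The skipped region contains the shared tail that consumes entry_offset. Since each entry loads only a small delta, the absolute base must be materialized once at the common call site; a hypothetical shape of that elided tail, assuming it mirrors the base-plus-offset scheme described in the comment above (deopt_entry is an illustrative scratch register, not from the CL):

    // Per entry (shown above): entry_offset = entry - base.
    // Shared tail (assumed, not from the CL):
    __ Mov(deopt_entry, Operand(reinterpret_cast<intptr_t>(base)));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Call(deopt_entry);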
| 990 | 989 |
| 991 for (int i = 0, length = inlined_closures->length(); i < length; i++) { | 990 for (int i = 0, length = inlined_closures->length(); i < length; i++) { |
| 992 DefineDeoptimizationLiteral(inlined_closures->at(i)); | 991 DefineDeoptimizationLiteral(inlined_closures->at(i)); |
| 993 } | 992 } |
| 994 | 993 |
| 995 inlined_function_count_ = deoptimization_literals_.length(); | 994 inlined_function_count_ = deoptimization_literals_.length(); |
| 996 } | 995 } |
| 997 | 996 |
| 998 | 997 |
| 999 void LCodeGen::DeoptimizeBranch( | 998 void LCodeGen::DeoptimizeBranch( |
| 1000 LInstruction* instr, BranchType branch_type, Register reg, int bit, | 999 LInstruction* instr, const char* reason, BranchType branch_type, |
| 1001 Deoptimizer::BailoutType* override_bailout_type) { | 1000 Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) { |
| 1002 LEnvironment* environment = instr->environment(); | 1001 LEnvironment* environment = instr->environment(); |
| 1003 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 1002 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 1004 Deoptimizer::BailoutType bailout_type = | 1003 Deoptimizer::BailoutType bailout_type = |
| 1005 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; | 1004 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; |
| 1006 | 1005 |
| 1007 if (override_bailout_type != NULL) { | 1006 if (override_bailout_type != NULL) { |
| 1008 bailout_type = *override_bailout_type; | 1007 bailout_type = *override_bailout_type; |
| 1009 } | 1008 } |
| 1010 | 1009 |
| 1011 DCHECK(environment->HasBeenRegistered()); | 1010 DCHECK(environment->HasBeenRegistered()); |
| (...skipping 33 matching lines...) |
| 1045 Label dont_trap; | 1044 Label dont_trap; |
| 1046 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); | 1045 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); |
| 1047 __ Debug("trap_on_deopt", __LINE__, BREAK); | 1046 __ Debug("trap_on_deopt", __LINE__, BREAK); |
| 1048 __ Bind(&dont_trap); | 1047 __ Bind(&dont_trap); |
| 1049 } | 1048 } |
| 1050 | 1049 |
| 1051 DCHECK(info()->IsStub() || frame_is_built_); | 1050 DCHECK(info()->IsStub() || frame_is_built_); |
| 1052 // Go through jump table if we need to build frame, or restore caller doubles. | 1051 // Go through jump table if we need to build frame, or restore caller doubles. |
| 1053 if (branch_type == always && | 1052 if (branch_type == always && |
| 1054 frame_is_built_ && !info()->saves_caller_doubles()) { | 1053 frame_is_built_ && !info()->saves_caller_doubles()) { |
| 1054 DeoptComment(instr->Mnemonic(), reason); |
| 1055 __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 1055 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
| 1056 } else { | 1056 } else { |
| 1057 // We often have several deopts to the same entry; reuse the last | 1057 // We often have several deopts to the same entry; reuse the last |
| 1058 // jump entry if this is the case. | 1058 // jump entry if this is the case. |
| 1059 if (deopt_jump_table_.is_empty() || | 1059 if (jump_table_.is_empty() || (jump_table_.last()->address != entry) || |
| 1060 (deopt_jump_table_.last()->address != entry) || | 1060 (jump_table_.last()->bailout_type != bailout_type) || |
| 1061 (deopt_jump_table_.last()->bailout_type != bailout_type) || | 1061 (jump_table_.last()->needs_frame != !frame_is_built_)) { |
| 1062 (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) { | |
| 1063 Deoptimizer::JumpTableEntry* table_entry = | 1062 Deoptimizer::JumpTableEntry* table_entry = |
| 1064 new(zone()) Deoptimizer::JumpTableEntry(entry, | 1063 new (zone()) Deoptimizer::JumpTableEntry( |
| 1065 bailout_type, | 1064 entry, instr->Mnemonic(), reason, bailout_type, !frame_is_built_); |
| 1066 !frame_is_built_); | 1065 jump_table_.Add(table_entry, zone()); |
| 1067 deopt_jump_table_.Add(table_entry, zone()); | |
| 1068 } | 1066 } |
| 1069 __ B(&deopt_jump_table_.last()->label, | 1067 __ B(&jump_table_.last()->label, branch_type, reg, bit); |
| 1070 branch_type, reg, bit); | |
| 1071 } | 1068 } |
| 1072 } | 1069 } |
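End-to-end, the reason string now travels from the call site into either the inline DeoptComment (frame built, no doubles to restore) or the stored JumpTableEntry. A hypothetical call site, with the register name and reason string chosen purely for illustration:

    // Deopt when the dividend is zero; "division by zero" becomes the
    // recorded deopt reason rather than a lost assembler comment.
    DeoptimizeIfZero(dividend, instr, "division by zero");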
| 1073 | 1070 |
| 1074 | 1071 |
| 1075 void LCodeGen::Deoptimize(LInstruction* instr, | 1072 void LCodeGen::Deoptimize(LInstruction* instr, |
| 1076 Deoptimizer::BailoutType* override_bailout_type) { | 1073 Deoptimizer::BailoutType* override_bailout_type, |
| 1077 DeoptimizeBranch(instr, always, NoReg, -1, override_bailout_type); | 1074 const char* reason) { |
| 1075 DeoptimizeBranch(instr, reason, always, NoReg, -1, override_bailout_type); |
| 1078 } | 1076 } |
| 1079 | 1077 |
| 1080 | 1078 |
| 1081 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr) { | 1079 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, |
| 1082 DeoptimizeBranch(instr, static_cast<BranchType>(cond)); | 1080 const char* reason) { |
| 1081 DeoptimizeBranch(instr, reason, static_cast<BranchType>(cond)); |
| 1083 } | 1082 } |
| 1084 | 1083 |
| 1085 | 1084 |
| 1086 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr) { | 1085 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr, |
| 1087 DeoptimizeBranch(instr, reg_zero, rt); | 1086 const char* reason) { |
| 1087 DeoptimizeBranch(instr, reason, reg_zero, rt); |
| 1088 } | 1088 } |
| 1089 | 1089 |
| 1090 | 1090 |
| 1091 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr) { | 1091 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr, |
| 1092 DeoptimizeBranch(instr, reg_not_zero, rt); | 1092 const char* reason) { |
| 1093 DeoptimizeBranch(instr, reason, reg_not_zero, rt); |
| 1093 } | 1094 } |
| 1094 | 1095 |
| 1095 | 1096 |
| 1096 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr) { | 1097 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr, |
| 1098 const char* reason) { |
| 1097 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; | 1099 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; |
| 1098 DeoptimizeIfBitSet(rt, sign_bit, instr); | 1100 DeoptimizeIfBitSet(rt, sign_bit, instr, reason); |
| 1099 } | 1101 } |
| 1100 | 1102 |
| 1101 | 1103 |
| 1102 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr) { | 1104 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr, |
| 1103 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr); | 1105 const char* reason) { |
| 1106 DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, reason); |
| 1104 } | 1107 } |
| 1105 | 1108 |
| 1106 | 1109 |
| 1107 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr) { | 1110 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr, |
| 1108 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr); | 1111 const char* reason) { |
| 1112 DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, reason); |
| 1109 } | 1113 } |
| 1110 | 1114 |
| 1111 | 1115 |
| 1112 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, | 1116 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index, |
| 1113 LInstruction* instr) { | 1117 LInstruction* instr, const char* reason) { |
| 1114 __ CompareRoot(rt, index); | 1118 __ CompareRoot(rt, index); |
| 1115 DeoptimizeIf(eq, instr); | 1119 DeoptimizeIf(eq, instr, reason); |
| 1116 } | 1120 } |
| 1117 | 1121 |
| 1118 | 1122 |
| 1119 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, | 1123 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index, |
| 1120 LInstruction* instr) { | 1124 LInstruction* instr, const char* reason) { |
| 1121 __ CompareRoot(rt, index); | 1125 __ CompareRoot(rt, index); |
| 1122 DeoptimizeIf(ne, instr); | 1126 DeoptimizeIf(ne, instr, reason); |
| 1123 } | 1127 } |
| 1124 | 1128 |
| 1125 | 1129 |
| 1126 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, | 1130 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr, |
| 1127 LInstruction* instr) { | 1131 const char* reason) { |
| 1128 __ TestForMinusZero(input); | 1132 __ TestForMinusZero(input); |
| 1129 DeoptimizeIf(vs, instr); | 1133 DeoptimizeIf(vs, instr, reason); |
| 1130 } | 1134 } |
| 1131 | 1135 |
| 1132 | 1136 |
| 1133 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr) { | 1137 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr, |
| 1134 DeoptimizeBranch(instr, reg_bit_set, rt, bit); | 1138 const char* reason) { |
| 1139 DeoptimizeBranch(instr, reason, reg_bit_set, rt, bit); |
| 1135 } | 1140 } |
| 1136 | 1141 |
| 1137 | 1142 |
| 1138 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr) { | 1143 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr, |
| 1139 DeoptimizeBranch(instr, reg_bit_clear, rt, bit); | 1144 const char* reason) { |
| 1145 DeoptimizeBranch(instr, reason, reg_bit_clear, rt, bit); |
| 1140 } | 1146 } |
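All of the wrappers above follow the same pattern: optionally emit a compare or test first (as the Root and MinusZero variants do), then forward reason to DeoptimizeBranch with the matching branch type. Hypothetical call sites for the family (registers, root index, and reason strings are illustrative, not from the CL):

    DeoptimizeIfNotSmi(x0, instr, "not a Smi");
    DeoptimizeIfRoot(x1, Heap::kUndefinedValueRootIndex, instr, "undefined");
    DeoptimizeIfMinusZero(d0, instr, "minus zero");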
| 1141 | 1147 |
| 1142 | 1148 |
| 1143 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { | 1149 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
| 1144 if (!info()->IsStub()) { | 1150 if (!info()->IsStub()) { |
| 1145 // Ensure that we have enough space after the previous lazy-bailout | 1151 // Ensure that we have enough space after the previous lazy-bailout |
| 1146 // instruction for patching the code here. | 1152 // instruction for patching the code here. |
| 1147 intptr_t current_pc = masm()->pc_offset(); | 1153 intptr_t current_pc = masm()->pc_offset(); |
| 1148 | 1154 |
| 1149 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { | 1155 if (current_pc < (last_lazy_deopt_pc_ + space_needed)) { |
| (...skipping 1528 matching lines...) |
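The body of the if above falls inside the skipped region; it pads the stream with nops so the lazy-deopt patcher always finds enough room after the last bailout. A plausible reconstruction, assuming kInstructionSize is ARM64's fixed 4-byte instruction width (not the CL's verbatim code):

    ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK((padding_size % kInstructionSize) == 0);
    while (padding_size > 0) {
      __ nop();
      padding_size -= kInstructionSize;
    }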
| 2678 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | 2684 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
| 2679 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | 2685 Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
| 2680 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | 2686 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the |
| 2681 // needed return address), even though the implementation of LAZY and EAGER is | 2687 // needed return address), even though the implementation of LAZY and EAGER is |
| 2682 // now identical. When LAZY is eventually completely folded into EAGER, remove | 2688 // now identical. When LAZY is eventually completely folded into EAGER, remove |
| 2683 // the special case below. | 2689 // the special case below. |
| 2684 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { | 2690 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { |
| 2685 type = Deoptimizer::LAZY; | 2691 type = Deoptimizer::LAZY; |
| 2686 } | 2692 } |
| 2687 | 2693 |
| 2688 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); | 2694 Deoptimize(instr, &type, instr->hydrogen()->reason()); |
| 2689 Deoptimize(instr, &type); | |
| 2690 } | 2695 } |
| 2691 | 2696 |
| 2692 | 2697 |
| 2693 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 2698 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 2694 Register dividend = ToRegister32(instr->dividend()); | 2699 Register dividend = ToRegister32(instr->dividend()); |
| 2695 int32_t divisor = instr->divisor(); | 2700 int32_t divisor = instr->divisor(); |
| 2696 Register result = ToRegister32(instr->result()); | 2701 Register result = ToRegister32(instr->result()); |
| 2697 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 2702 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 2698 DCHECK(!result.is(dividend)); | 2703 DCHECK(!result.is(dividend)); |
| 2699 | 2704 |
| (...skipping 3335 matching lines...) |
| 6035 Handle<ScopeInfo> scope_info = instr->scope_info(); | 6040 Handle<ScopeInfo> scope_info = instr->scope_info(); |
| 6036 __ Push(scope_info); | 6041 __ Push(scope_info); |
| 6037 __ Push(ToRegister(instr->function())); | 6042 __ Push(ToRegister(instr->function())); |
| 6038 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 6043 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 6039 RecordSafepoint(Safepoint::kNoLazyDeopt); | 6044 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 6040 } | 6045 } |
| 6041 | 6046 |
| 6042 | 6047 |
| 6043 | 6048 |
| 6044 } } // namespace v8::internal | 6049 } } // namespace v8::internal |