OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_X64 | 7 #if V8_TARGET_ARCH_X64 |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
(...skipping 705 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
716 int pc_offset = masm()->pc_offset(); | 716 int pc_offset = masm()->pc_offset(); |
717 environment->Register(deoptimization_index, | 717 environment->Register(deoptimization_index, |
718 translation.index(), | 718 translation.index(), |
719 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 719 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
720 deoptimizations_.Add(environment, environment->zone()); | 720 deoptimizations_.Add(environment, environment->zone()); |
721 } | 721 } |
722 } | 722 } |
723 | 723 |
724 | 724 |
725 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, | 725 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, |
726 const char* detail, | 726 Deoptimizer::DeoptReason deopt_reason, |
727 Deoptimizer::BailoutType bailout_type) { | 727 Deoptimizer::BailoutType bailout_type) { |
728 LEnvironment* environment = instr->environment(); | 728 LEnvironment* environment = instr->environment(); |
729 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 729 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
730 DCHECK(environment->HasBeenRegistered()); | 730 DCHECK(environment->HasBeenRegistered()); |
731 int id = environment->deoptimization_index(); | 731 int id = environment->deoptimization_index(); |
732 DCHECK(info()->IsOptimizing() || info()->IsStub()); | 732 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
733 Address entry = | 733 Address entry = |
734 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 734 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
735 if (entry == NULL) { | 735 if (entry == NULL) { |
736 Abort(kBailoutWasNotPrepared); | 736 Abort(kBailoutWasNotPrepared); |
(...skipping 25 matching lines...) Expand all Loading... |
762 if (info()->ShouldTrapOnDeopt()) { | 762 if (info()->ShouldTrapOnDeopt()) { |
763 Label done; | 763 Label done; |
764 if (cc != no_condition) { | 764 if (cc != no_condition) { |
765 __ j(NegateCondition(cc), &done, Label::kNear); | 765 __ j(NegateCondition(cc), &done, Label::kNear); |
766 } | 766 } |
767 __ int3(); | 767 __ int3(); |
768 __ bind(&done); | 768 __ bind(&done); |
769 } | 769 } |
770 | 770 |
771 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), | 771 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), |
772 instr->Mnemonic(), detail); | 772 instr->Mnemonic(), deopt_reason); |
773 DCHECK(info()->IsStub() || frame_is_built_); | 773 DCHECK(info()->IsStub() || frame_is_built_); |
774 // Go through jump table if we need to handle condition, build frame, or | 774 // Go through jump table if we need to handle condition, build frame, or |
775 // restore caller doubles. | 775 // restore caller doubles. |
776 if (cc == no_condition && frame_is_built_ && | 776 if (cc == no_condition && frame_is_built_ && |
777 !info()->saves_caller_doubles()) { | 777 !info()->saves_caller_doubles()) { |
778 DeoptComment(reason); | 778 DeoptComment(reason); |
779 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 779 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
780 } else { | 780 } else { |
781 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, | 781 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, |
782 !frame_is_built_); | 782 !frame_is_built_); |
783 // We often have several deopts to the same entry, reuse the last | 783 // We often have several deopts to the same entry, reuse the last |
784 // jump entry if this is the case. | 784 // jump entry if this is the case. |
785 if (jump_table_.is_empty() || | 785 if (jump_table_.is_empty() || |
786 !table_entry.IsEquivalentTo(jump_table_.last())) { | 786 !table_entry.IsEquivalentTo(jump_table_.last())) { |
787 jump_table_.Add(table_entry, zone()); | 787 jump_table_.Add(table_entry, zone()); |
788 } | 788 } |
789 if (cc == no_condition) { | 789 if (cc == no_condition) { |
790 __ jmp(&jump_table_.last().label); | 790 __ jmp(&jump_table_.last().label); |
791 } else { | 791 } else { |
792 __ j(cc, &jump_table_.last().label); | 792 __ j(cc, &jump_table_.last().label); |
793 } | 793 } |
794 } | 794 } |
795 } | 795 } |
796 | 796 |
797 | 797 |
798 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, | 798 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, |
799 const char* detail) { | 799 Deoptimizer::DeoptReason deopt_reason) { |
800 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 800 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
801 ? Deoptimizer::LAZY | 801 ? Deoptimizer::LAZY |
802 : Deoptimizer::EAGER; | 802 : Deoptimizer::EAGER; |
803 DeoptimizeIf(cc, instr, detail, bailout_type); | 803 DeoptimizeIf(cc, instr, deopt_reason, bailout_type); |
804 } | 804 } |
805 | 805 |
806 | 806 |
807 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 807 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
808 int length = deoptimizations_.length(); | 808 int length = deoptimizations_.length(); |
809 if (length == 0) return; | 809 if (length == 0) return; |
810 Handle<DeoptimizationInputData> data = | 810 Handle<DeoptimizationInputData> data = |
811 DeoptimizationInputData::New(isolate(), length, TENURED); | 811 DeoptimizationInputData::New(isolate(), length, TENURED); |
812 | 812 |
813 Handle<ByteArray> translations = | 813 Handle<ByteArray> translations = |
(...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1025 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1025 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
1026 Label dividend_is_not_negative, done; | 1026 Label dividend_is_not_negative, done; |
1027 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1027 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
1028 __ testl(dividend, dividend); | 1028 __ testl(dividend, dividend); |
1029 __ j(not_sign, &dividend_is_not_negative, Label::kNear); | 1029 __ j(not_sign, &dividend_is_not_negative, Label::kNear); |
1030 // Note that this is correct even for kMinInt operands. | 1030 // Note that this is correct even for kMinInt operands. |
1031 __ negl(dividend); | 1031 __ negl(dividend); |
1032 __ andl(dividend, Immediate(mask)); | 1032 __ andl(dividend, Immediate(mask)); |
1033 __ negl(dividend); | 1033 __ negl(dividend); |
1034 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1034 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1035 DeoptimizeIf(zero, instr, "minus zero"); | 1035 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
1036 } | 1036 } |
1037 __ jmp(&done, Label::kNear); | 1037 __ jmp(&done, Label::kNear); |
1038 } | 1038 } |
1039 | 1039 |
1040 __ bind(&dividend_is_not_negative); | 1040 __ bind(&dividend_is_not_negative); |
1041 __ andl(dividend, Immediate(mask)); | 1041 __ andl(dividend, Immediate(mask)); |
1042 __ bind(&done); | 1042 __ bind(&done); |
1043 } | 1043 } |
1044 | 1044 |
1045 | 1045 |
1046 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1046 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
1047 Register dividend = ToRegister(instr->dividend()); | 1047 Register dividend = ToRegister(instr->dividend()); |
1048 int32_t divisor = instr->divisor(); | 1048 int32_t divisor = instr->divisor(); |
1049 DCHECK(ToRegister(instr->result()).is(rax)); | 1049 DCHECK(ToRegister(instr->result()).is(rax)); |
1050 | 1050 |
1051 if (divisor == 0) { | 1051 if (divisor == 0) { |
1052 DeoptimizeIf(no_condition, instr, "division by zero"); | 1052 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); |
1053 return; | 1053 return; |
1054 } | 1054 } |
1055 | 1055 |
1056 __ TruncatingDiv(dividend, Abs(divisor)); | 1056 __ TruncatingDiv(dividend, Abs(divisor)); |
1057 __ imull(rdx, rdx, Immediate(Abs(divisor))); | 1057 __ imull(rdx, rdx, Immediate(Abs(divisor))); |
1058 __ movl(rax, dividend); | 1058 __ movl(rax, dividend); |
1059 __ subl(rax, rdx); | 1059 __ subl(rax, rdx); |
1060 | 1060 |
1061 // Check for negative zero. | 1061 // Check for negative zero. |
1062 HMod* hmod = instr->hydrogen(); | 1062 HMod* hmod = instr->hydrogen(); |
1063 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1063 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1064 Label remainder_not_zero; | 1064 Label remainder_not_zero; |
1065 __ j(not_zero, &remainder_not_zero, Label::kNear); | 1065 __ j(not_zero, &remainder_not_zero, Label::kNear); |
1066 __ cmpl(dividend, Immediate(0)); | 1066 __ cmpl(dividend, Immediate(0)); |
1067 DeoptimizeIf(less, instr, "minus zero"); | 1067 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero); |
1068 __ bind(&remainder_not_zero); | 1068 __ bind(&remainder_not_zero); |
1069 } | 1069 } |
1070 } | 1070 } |
1071 | 1071 |
1072 | 1072 |
1073 void LCodeGen::DoModI(LModI* instr) { | 1073 void LCodeGen::DoModI(LModI* instr) { |
1074 HMod* hmod = instr->hydrogen(); | 1074 HMod* hmod = instr->hydrogen(); |
1075 | 1075 |
1076 Register left_reg = ToRegister(instr->left()); | 1076 Register left_reg = ToRegister(instr->left()); |
1077 DCHECK(left_reg.is(rax)); | 1077 DCHECK(left_reg.is(rax)); |
1078 Register right_reg = ToRegister(instr->right()); | 1078 Register right_reg = ToRegister(instr->right()); |
1079 DCHECK(!right_reg.is(rax)); | 1079 DCHECK(!right_reg.is(rax)); |
1080 DCHECK(!right_reg.is(rdx)); | 1080 DCHECK(!right_reg.is(rdx)); |
1081 Register result_reg = ToRegister(instr->result()); | 1081 Register result_reg = ToRegister(instr->result()); |
1082 DCHECK(result_reg.is(rdx)); | 1082 DCHECK(result_reg.is(rdx)); |
1083 | 1083 |
1084 Label done; | 1084 Label done; |
1085 // Check for x % 0, idiv would signal a divide error. We have to | 1085 // Check for x % 0, idiv would signal a divide error. We have to |
1086 // deopt in this case because we can't return a NaN. | 1086 // deopt in this case because we can't return a NaN. |
1087 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1087 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
1088 __ testl(right_reg, right_reg); | 1088 __ testl(right_reg, right_reg); |
1089 DeoptimizeIf(zero, instr, "division by zero"); | 1089 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero); |
1090 } | 1090 } |
1091 | 1091 |
1092 // Check for kMinInt % -1, idiv would signal a divide error. We | 1092 // Check for kMinInt % -1, idiv would signal a divide error. We |
1093 // have to deopt if we care about -0, because we can't return that. | 1093 // have to deopt if we care about -0, because we can't return that. |
1094 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1094 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
1095 Label no_overflow_possible; | 1095 Label no_overflow_possible; |
1096 __ cmpl(left_reg, Immediate(kMinInt)); | 1096 __ cmpl(left_reg, Immediate(kMinInt)); |
1097 __ j(not_zero, &no_overflow_possible, Label::kNear); | 1097 __ j(not_zero, &no_overflow_possible, Label::kNear); |
1098 __ cmpl(right_reg, Immediate(-1)); | 1098 __ cmpl(right_reg, Immediate(-1)); |
1099 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1099 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1100 DeoptimizeIf(equal, instr, "minus zero"); | 1100 DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero); |
1101 } else { | 1101 } else { |
1102 __ j(not_equal, &no_overflow_possible, Label::kNear); | 1102 __ j(not_equal, &no_overflow_possible, Label::kNear); |
1103 __ Set(result_reg, 0); | 1103 __ Set(result_reg, 0); |
1104 __ jmp(&done, Label::kNear); | 1104 __ jmp(&done, Label::kNear); |
1105 } | 1105 } |
1106 __ bind(&no_overflow_possible); | 1106 __ bind(&no_overflow_possible); |
1107 } | 1107 } |
1108 | 1108 |
1109 // Sign extend dividend in eax into edx:eax, since we are using only the low | 1109 // Sign extend dividend in eax into edx:eax, since we are using only the low |
1110 // 32 bits of the values. | 1110 // 32 bits of the values. |
1111 __ cdq(); | 1111 __ cdq(); |
1112 | 1112 |
1113 // If we care about -0, test if the dividend is <0 and the result is 0. | 1113 // If we care about -0, test if the dividend is <0 and the result is 0. |
1114 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1114 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1115 Label positive_left; | 1115 Label positive_left; |
1116 __ testl(left_reg, left_reg); | 1116 __ testl(left_reg, left_reg); |
1117 __ j(not_sign, &positive_left, Label::kNear); | 1117 __ j(not_sign, &positive_left, Label::kNear); |
1118 __ idivl(right_reg); | 1118 __ idivl(right_reg); |
1119 __ testl(result_reg, result_reg); | 1119 __ testl(result_reg, result_reg); |
1120 DeoptimizeIf(zero, instr, "minus zero"); | 1120 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
1121 __ jmp(&done, Label::kNear); | 1121 __ jmp(&done, Label::kNear); |
1122 __ bind(&positive_left); | 1122 __ bind(&positive_left); |
1123 } | 1123 } |
1124 __ idivl(right_reg); | 1124 __ idivl(right_reg); |
1125 __ bind(&done); | 1125 __ bind(&done); |
1126 } | 1126 } |
1127 | 1127 |
1128 | 1128 |
1129 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 1129 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
1130 Register dividend = ToRegister(instr->dividend()); | 1130 Register dividend = ToRegister(instr->dividend()); |
1131 int32_t divisor = instr->divisor(); | 1131 int32_t divisor = instr->divisor(); |
1132 DCHECK(dividend.is(ToRegister(instr->result()))); | 1132 DCHECK(dividend.is(ToRegister(instr->result()))); |
1133 | 1133 |
1134 // If the divisor is positive, things are easy: There can be no deopts and we | 1134 // If the divisor is positive, things are easy: There can be no deopts and we |
1135 // can simply do an arithmetic right shift. | 1135 // can simply do an arithmetic right shift. |
1136 if (divisor == 1) return; | 1136 if (divisor == 1) return; |
1137 int32_t shift = WhichPowerOf2Abs(divisor); | 1137 int32_t shift = WhichPowerOf2Abs(divisor); |
1138 if (divisor > 1) { | 1138 if (divisor > 1) { |
1139 __ sarl(dividend, Immediate(shift)); | 1139 __ sarl(dividend, Immediate(shift)); |
1140 return; | 1140 return; |
1141 } | 1141 } |
1142 | 1142 |
1143 // If the divisor is negative, we have to negate and handle edge cases. | 1143 // If the divisor is negative, we have to negate and handle edge cases. |
1144 __ negl(dividend); | 1144 __ negl(dividend); |
1145 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1145 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1146 DeoptimizeIf(zero, instr, "minus zero"); | 1146 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
1147 } | 1147 } |
1148 | 1148 |
1149 // Dividing by -1 is basically negation, unless we overflow. | 1149 // Dividing by -1 is basically negation, unless we overflow. |
1150 if (divisor == -1) { | 1150 if (divisor == -1) { |
1151 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1151 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
1152 DeoptimizeIf(overflow, instr, "overflow"); | 1152 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
1153 } | 1153 } |
1154 return; | 1154 return; |
1155 } | 1155 } |
1156 | 1156 |
1157 // If the negation could not overflow, simply shifting is OK. | 1157 // If the negation could not overflow, simply shifting is OK. |
1158 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1158 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
1159 __ sarl(dividend, Immediate(shift)); | 1159 __ sarl(dividend, Immediate(shift)); |
1160 return; | 1160 return; |
1161 } | 1161 } |
1162 | 1162 |
1163 Label not_kmin_int, done; | 1163 Label not_kmin_int, done; |
1164 __ j(no_overflow, &not_kmin_int, Label::kNear); | 1164 __ j(no_overflow, &not_kmin_int, Label::kNear); |
1165 __ movl(dividend, Immediate(kMinInt / divisor)); | 1165 __ movl(dividend, Immediate(kMinInt / divisor)); |
1166 __ jmp(&done, Label::kNear); | 1166 __ jmp(&done, Label::kNear); |
1167 __ bind(&not_kmin_int); | 1167 __ bind(&not_kmin_int); |
1168 __ sarl(dividend, Immediate(shift)); | 1168 __ sarl(dividend, Immediate(shift)); |
1169 __ bind(&done); | 1169 __ bind(&done); |
1170 } | 1170 } |
1171 | 1171 |
1172 | 1172 |
1173 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1173 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
1174 Register dividend = ToRegister(instr->dividend()); | 1174 Register dividend = ToRegister(instr->dividend()); |
1175 int32_t divisor = instr->divisor(); | 1175 int32_t divisor = instr->divisor(); |
1176 DCHECK(ToRegister(instr->result()).is(rdx)); | 1176 DCHECK(ToRegister(instr->result()).is(rdx)); |
1177 | 1177 |
1178 if (divisor == 0) { | 1178 if (divisor == 0) { |
1179 DeoptimizeIf(no_condition, instr, "division by zero"); | 1179 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); |
1180 return; | 1180 return; |
1181 } | 1181 } |
1182 | 1182 |
1183 // Check for (0 / -x) that will produce negative zero. | 1183 // Check for (0 / -x) that will produce negative zero. |
1184 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1184 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
1185 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1185 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1186 __ testl(dividend, dividend); | 1186 __ testl(dividend, dividend); |
1187 DeoptimizeIf(zero, instr, "minus zero"); | 1187 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
1188 } | 1188 } |
1189 | 1189 |
1190 // Easy case: We need no dynamic check for the dividend and the flooring | 1190 // Easy case: We need no dynamic check for the dividend and the flooring |
1191 // division is the same as the truncating division. | 1191 // division is the same as the truncating division. |
1192 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1192 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
1193 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1193 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
1194 __ TruncatingDiv(dividend, Abs(divisor)); | 1194 __ TruncatingDiv(dividend, Abs(divisor)); |
1195 if (divisor < 0) __ negl(rdx); | 1195 if (divisor < 0) __ negl(rdx); |
1196 return; | 1196 return; |
1197 } | 1197 } |
(...skipping 26 matching lines...) Expand all Loading... |
1224 Register result = ToRegister(instr->result()); | 1224 Register result = ToRegister(instr->result()); |
1225 DCHECK(dividend.is(rax)); | 1225 DCHECK(dividend.is(rax)); |
1226 DCHECK(remainder.is(rdx)); | 1226 DCHECK(remainder.is(rdx)); |
1227 DCHECK(result.is(rax)); | 1227 DCHECK(result.is(rax)); |
1228 DCHECK(!divisor.is(rax)); | 1228 DCHECK(!divisor.is(rax)); |
1229 DCHECK(!divisor.is(rdx)); | 1229 DCHECK(!divisor.is(rdx)); |
1230 | 1230 |
1231 // Check for x / 0. | 1231 // Check for x / 0. |
1232 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1232 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
1233 __ testl(divisor, divisor); | 1233 __ testl(divisor, divisor); |
1234 DeoptimizeIf(zero, instr, "division by zero"); | 1234 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero); |
1235 } | 1235 } |
1236 | 1236 |
1237 // Check for (0 / -x) that will produce negative zero. | 1237 // Check for (0 / -x) that will produce negative zero. |
1238 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1238 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1239 Label dividend_not_zero; | 1239 Label dividend_not_zero; |
1240 __ testl(dividend, dividend); | 1240 __ testl(dividend, dividend); |
1241 __ j(not_zero, &dividend_not_zero, Label::kNear); | 1241 __ j(not_zero, &dividend_not_zero, Label::kNear); |
1242 __ testl(divisor, divisor); | 1242 __ testl(divisor, divisor); |
1243 DeoptimizeIf(sign, instr, "minus zero"); | 1243 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
1244 __ bind(&dividend_not_zero); | 1244 __ bind(&dividend_not_zero); |
1245 } | 1245 } |
1246 | 1246 |
1247 // Check for (kMinInt / -1). | 1247 // Check for (kMinInt / -1). |
1248 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1248 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
1249 Label dividend_not_min_int; | 1249 Label dividend_not_min_int; |
1250 __ cmpl(dividend, Immediate(kMinInt)); | 1250 __ cmpl(dividend, Immediate(kMinInt)); |
1251 __ j(not_zero, &dividend_not_min_int, Label::kNear); | 1251 __ j(not_zero, &dividend_not_min_int, Label::kNear); |
1252 __ cmpl(divisor, Immediate(-1)); | 1252 __ cmpl(divisor, Immediate(-1)); |
1253 DeoptimizeIf(zero, instr, "overflow"); | 1253 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow); |
1254 __ bind(&dividend_not_min_int); | 1254 __ bind(&dividend_not_min_int); |
1255 } | 1255 } |
1256 | 1256 |
1257 // Sign extend to rdx (= remainder). | 1257 // Sign extend to rdx (= remainder). |
1258 __ cdq(); | 1258 __ cdq(); |
1259 __ idivl(divisor); | 1259 __ idivl(divisor); |
1260 | 1260 |
1261 Label done; | 1261 Label done; |
1262 __ testl(remainder, remainder); | 1262 __ testl(remainder, remainder); |
1263 __ j(zero, &done, Label::kNear); | 1263 __ j(zero, &done, Label::kNear); |
1264 __ xorl(remainder, divisor); | 1264 __ xorl(remainder, divisor); |
1265 __ sarl(remainder, Immediate(31)); | 1265 __ sarl(remainder, Immediate(31)); |
1266 __ addl(result, remainder); | 1266 __ addl(result, remainder); |
1267 __ bind(&done); | 1267 __ bind(&done); |
1268 } | 1268 } |
1269 | 1269 |
1270 | 1270 |
1271 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1271 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
1272 Register dividend = ToRegister(instr->dividend()); | 1272 Register dividend = ToRegister(instr->dividend()); |
1273 int32_t divisor = instr->divisor(); | 1273 int32_t divisor = instr->divisor(); |
1274 Register result = ToRegister(instr->result()); | 1274 Register result = ToRegister(instr->result()); |
1275 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1275 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
1276 DCHECK(!result.is(dividend)); | 1276 DCHECK(!result.is(dividend)); |
1277 | 1277 |
1278 // Check for (0 / -x) that will produce negative zero. | 1278 // Check for (0 / -x) that will produce negative zero. |
1279 HDiv* hdiv = instr->hydrogen(); | 1279 HDiv* hdiv = instr->hydrogen(); |
1280 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1280 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1281 __ testl(dividend, dividend); | 1281 __ testl(dividend, dividend); |
1282 DeoptimizeIf(zero, instr, "minus zero"); | 1282 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
1283 } | 1283 } |
1284 // Check for (kMinInt / -1). | 1284 // Check for (kMinInt / -1). |
1285 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1285 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
1286 __ cmpl(dividend, Immediate(kMinInt)); | 1286 __ cmpl(dividend, Immediate(kMinInt)); |
1287 DeoptimizeIf(zero, instr, "overflow"); | 1287 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow); |
1288 } | 1288 } |
1289 // Deoptimize if remainder will not be 0. | 1289 // Deoptimize if remainder will not be 0. |
1290 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1290 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
1291 divisor != 1 && divisor != -1) { | 1291 divisor != 1 && divisor != -1) { |
1292 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1292 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
1293 __ testl(dividend, Immediate(mask)); | 1293 __ testl(dividend, Immediate(mask)); |
1294 DeoptimizeIf(not_zero, instr, "lost precision"); | 1294 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision); |
1295 } | 1295 } |
1296 __ Move(result, dividend); | 1296 __ Move(result, dividend); |
1297 int32_t shift = WhichPowerOf2Abs(divisor); | 1297 int32_t shift = WhichPowerOf2Abs(divisor); |
1298 if (shift > 0) { | 1298 if (shift > 0) { |
1299 // The arithmetic shift is always OK, the 'if' is an optimization only. | 1299 // The arithmetic shift is always OK, the 'if' is an optimization only. |
1300 if (shift > 1) __ sarl(result, Immediate(31)); | 1300 if (shift > 1) __ sarl(result, Immediate(31)); |
1301 __ shrl(result, Immediate(32 - shift)); | 1301 __ shrl(result, Immediate(32 - shift)); |
1302 __ addl(result, dividend); | 1302 __ addl(result, dividend); |
1303 __ sarl(result, Immediate(shift)); | 1303 __ sarl(result, Immediate(shift)); |
1304 } | 1304 } |
1305 if (divisor < 0) __ negl(result); | 1305 if (divisor < 0) __ negl(result); |
1306 } | 1306 } |
1307 | 1307 |
1308 | 1308 |
1309 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1309 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
1310 Register dividend = ToRegister(instr->dividend()); | 1310 Register dividend = ToRegister(instr->dividend()); |
1311 int32_t divisor = instr->divisor(); | 1311 int32_t divisor = instr->divisor(); |
1312 DCHECK(ToRegister(instr->result()).is(rdx)); | 1312 DCHECK(ToRegister(instr->result()).is(rdx)); |
1313 | 1313 |
1314 if (divisor == 0) { | 1314 if (divisor == 0) { |
1315 DeoptimizeIf(no_condition, instr, "division by zero"); | 1315 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); |
1316 return; | 1316 return; |
1317 } | 1317 } |
1318 | 1318 |
1319 // Check for (0 / -x) that will produce negative zero. | 1319 // Check for (0 / -x) that will produce negative zero. |
1320 HDiv* hdiv = instr->hydrogen(); | 1320 HDiv* hdiv = instr->hydrogen(); |
1321 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1321 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1322 __ testl(dividend, dividend); | 1322 __ testl(dividend, dividend); |
1323 DeoptimizeIf(zero, instr, "minus zero"); | 1323 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
1324 } | 1324 } |
1325 | 1325 |
1326 __ TruncatingDiv(dividend, Abs(divisor)); | 1326 __ TruncatingDiv(dividend, Abs(divisor)); |
1327 if (divisor < 0) __ negl(rdx); | 1327 if (divisor < 0) __ negl(rdx); |
1328 | 1328 |
1329 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1329 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
1330 __ movl(rax, rdx); | 1330 __ movl(rax, rdx); |
1331 __ imull(rax, rax, Immediate(divisor)); | 1331 __ imull(rax, rax, Immediate(divisor)); |
1332 __ subl(rax, dividend); | 1332 __ subl(rax, dividend); |
1333 DeoptimizeIf(not_equal, instr, "lost precision"); | 1333 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); |
1334 } | 1334 } |
1335 } | 1335 } |
1336 | 1336 |
1337 | 1337 |
1338 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1338 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
1339 void LCodeGen::DoDivI(LDivI* instr) { | 1339 void LCodeGen::DoDivI(LDivI* instr) { |
1340 HBinaryOperation* hdiv = instr->hydrogen(); | 1340 HBinaryOperation* hdiv = instr->hydrogen(); |
1341 Register dividend = ToRegister(instr->dividend()); | 1341 Register dividend = ToRegister(instr->dividend()); |
1342 Register divisor = ToRegister(instr->divisor()); | 1342 Register divisor = ToRegister(instr->divisor()); |
1343 Register remainder = ToRegister(instr->temp()); | 1343 Register remainder = ToRegister(instr->temp()); |
1344 DCHECK(dividend.is(rax)); | 1344 DCHECK(dividend.is(rax)); |
1345 DCHECK(remainder.is(rdx)); | 1345 DCHECK(remainder.is(rdx)); |
1346 DCHECK(ToRegister(instr->result()).is(rax)); | 1346 DCHECK(ToRegister(instr->result()).is(rax)); |
1347 DCHECK(!divisor.is(rax)); | 1347 DCHECK(!divisor.is(rax)); |
1348 DCHECK(!divisor.is(rdx)); | 1348 DCHECK(!divisor.is(rdx)); |
1349 | 1349 |
1350 // Check for x / 0. | 1350 // Check for x / 0. |
1351 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1351 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
1352 __ testl(divisor, divisor); | 1352 __ testl(divisor, divisor); |
1353 DeoptimizeIf(zero, instr, "division by zero"); | 1353 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero); |
1354 } | 1354 } |
1355 | 1355 |
1356 // Check for (0 / -x) that will produce negative zero. | 1356 // Check for (0 / -x) that will produce negative zero. |
1357 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1357 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1358 Label dividend_not_zero; | 1358 Label dividend_not_zero; |
1359 __ testl(dividend, dividend); | 1359 __ testl(dividend, dividend); |
1360 __ j(not_zero, &dividend_not_zero, Label::kNear); | 1360 __ j(not_zero, &dividend_not_zero, Label::kNear); |
1361 __ testl(divisor, divisor); | 1361 __ testl(divisor, divisor); |
1362 DeoptimizeIf(sign, instr, "minus zero"); | 1362 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
1363 __ bind(&dividend_not_zero); | 1363 __ bind(&dividend_not_zero); |
1364 } | 1364 } |
1365 | 1365 |
1366 // Check for (kMinInt / -1). | 1366 // Check for (kMinInt / -1). |
1367 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1367 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
1368 Label dividend_not_min_int; | 1368 Label dividend_not_min_int; |
1369 __ cmpl(dividend, Immediate(kMinInt)); | 1369 __ cmpl(dividend, Immediate(kMinInt)); |
1370 __ j(not_zero, &dividend_not_min_int, Label::kNear); | 1370 __ j(not_zero, &dividend_not_min_int, Label::kNear); |
1371 __ cmpl(divisor, Immediate(-1)); | 1371 __ cmpl(divisor, Immediate(-1)); |
1372 DeoptimizeIf(zero, instr, "overflow"); | 1372 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow); |
1373 __ bind(&dividend_not_min_int); | 1373 __ bind(&dividend_not_min_int); |
1374 } | 1374 } |
1375 | 1375 |
1376 // Sign extend to rdx (= remainder). | 1376 // Sign extend to rdx (= remainder). |
1377 __ cdq(); | 1377 __ cdq(); |
1378 __ idivl(divisor); | 1378 __ idivl(divisor); |
1379 | 1379 |
1380 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1380 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
1381 // Deoptimize if remainder is not 0. | 1381 // Deoptimize if remainder is not 0. |
1382 __ testl(remainder, remainder); | 1382 __ testl(remainder, remainder); |
1383 DeoptimizeIf(not_zero, instr, "lost precision"); | 1383 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision); |
1384 } | 1384 } |
1385 } | 1385 } |
1386 | 1386 |
1387 | 1387 |
1388 void LCodeGen::DoMulI(LMulI* instr) { | 1388 void LCodeGen::DoMulI(LMulI* instr) { |
1389 Register left = ToRegister(instr->left()); | 1389 Register left = ToRegister(instr->left()); |
1390 LOperand* right = instr->right(); | 1390 LOperand* right = instr->right(); |
1391 | 1391 |
1392 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1392 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1393 if (instr->hydrogen_value()->representation().IsSmi()) { | 1393 if (instr->hydrogen_value()->representation().IsSmi()) { |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1450 } else { | 1450 } else { |
1451 if (instr->hydrogen_value()->representation().IsSmi()) { | 1451 if (instr->hydrogen_value()->representation().IsSmi()) { |
1452 __ SmiToInteger64(left, left); | 1452 __ SmiToInteger64(left, left); |
1453 __ imulp(left, ToRegister(right)); | 1453 __ imulp(left, ToRegister(right)); |
1454 } else { | 1454 } else { |
1455 __ imull(left, ToRegister(right)); | 1455 __ imull(left, ToRegister(right)); |
1456 } | 1456 } |
1457 } | 1457 } |
1458 | 1458 |
1459 if (can_overflow) { | 1459 if (can_overflow) { |
1460 DeoptimizeIf(overflow, instr, "overflow"); | 1460 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
1461 } | 1461 } |
1462 | 1462 |
1463 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1463 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1464 // Bail out if the result is supposed to be negative zero. | 1464 // Bail out if the result is supposed to be negative zero. |
1465 Label done; | 1465 Label done; |
1466 if (instr->hydrogen_value()->representation().IsSmi()) { | 1466 if (instr->hydrogen_value()->representation().IsSmi()) { |
1467 __ testp(left, left); | 1467 __ testp(left, left); |
1468 } else { | 1468 } else { |
1469 __ testl(left, left); | 1469 __ testl(left, left); |
1470 } | 1470 } |
1471 __ j(not_zero, &done, Label::kNear); | 1471 __ j(not_zero, &done, Label::kNear); |
1472 if (right->IsConstantOperand()) { | 1472 if (right->IsConstantOperand()) { |
1473 // Constant can't be represented as 32-bit Smi due to immediate size | 1473 // Constant can't be represented as 32-bit Smi due to immediate size |
1474 // limit. | 1474 // limit. |
1475 DCHECK(SmiValuesAre32Bits() | 1475 DCHECK(SmiValuesAre32Bits() |
1476 ? !instr->hydrogen_value()->representation().IsSmi() | 1476 ? !instr->hydrogen_value()->representation().IsSmi() |
1477 : SmiValuesAre31Bits()); | 1477 : SmiValuesAre31Bits()); |
1478 if (ToInteger32(LConstantOperand::cast(right)) < 0) { | 1478 if (ToInteger32(LConstantOperand::cast(right)) < 0) { |
1479 DeoptimizeIf(no_condition, instr, "minus zero"); | 1479 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); |
1480 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { | 1480 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { |
1481 __ cmpl(kScratchRegister, Immediate(0)); | 1481 __ cmpl(kScratchRegister, Immediate(0)); |
1482 DeoptimizeIf(less, instr, "minus zero"); | 1482 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero); |
1483 } | 1483 } |
1484 } else if (right->IsStackSlot()) { | 1484 } else if (right->IsStackSlot()) { |
1485 if (instr->hydrogen_value()->representation().IsSmi()) { | 1485 if (instr->hydrogen_value()->representation().IsSmi()) { |
1486 __ orp(kScratchRegister, ToOperand(right)); | 1486 __ orp(kScratchRegister, ToOperand(right)); |
1487 } else { | 1487 } else { |
1488 __ orl(kScratchRegister, ToOperand(right)); | 1488 __ orl(kScratchRegister, ToOperand(right)); |
1489 } | 1489 } |
1490 DeoptimizeIf(sign, instr, "minus zero"); | 1490 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
1491 } else { | 1491 } else { |
1492 // Test the non-zero operand for negative sign. | 1492 // Test the non-zero operand for negative sign. |
1493 if (instr->hydrogen_value()->representation().IsSmi()) { | 1493 if (instr->hydrogen_value()->representation().IsSmi()) { |
1494 __ orp(kScratchRegister, ToRegister(right)); | 1494 __ orp(kScratchRegister, ToRegister(right)); |
1495 } else { | 1495 } else { |
1496 __ orl(kScratchRegister, ToRegister(right)); | 1496 __ orl(kScratchRegister, ToRegister(right)); |
1497 } | 1497 } |
1498 DeoptimizeIf(sign, instr, "minus zero"); | 1498 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
1499 } | 1499 } |
1500 __ bind(&done); | 1500 __ bind(&done); |
1501 } | 1501 } |
1502 } | 1502 } |
1503 | 1503 |
1504 | 1504 |
1505 void LCodeGen::DoBitI(LBitI* instr) { | 1505 void LCodeGen::DoBitI(LBitI* instr) { |
1506 LOperand* left = instr->left(); | 1506 LOperand* left = instr->left(); |
1507 LOperand* right = instr->right(); | 1507 LOperand* right = instr->right(); |
1508 DCHECK(left->Equals(instr->result())); | 1508 DCHECK(left->Equals(instr->result())); |
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1601 case Token::ROR: | 1601 case Token::ROR: |
1602 __ rorl_cl(ToRegister(left)); | 1602 __ rorl_cl(ToRegister(left)); |
1603 break; | 1603 break; |
1604 case Token::SAR: | 1604 case Token::SAR: |
1605 __ sarl_cl(ToRegister(left)); | 1605 __ sarl_cl(ToRegister(left)); |
1606 break; | 1606 break; |
1607 case Token::SHR: | 1607 case Token::SHR: |
1608 __ shrl_cl(ToRegister(left)); | 1608 __ shrl_cl(ToRegister(left)); |
1609 if (instr->can_deopt()) { | 1609 if (instr->can_deopt()) { |
1610 __ testl(ToRegister(left), ToRegister(left)); | 1610 __ testl(ToRegister(left), ToRegister(left)); |
1611 DeoptimizeIf(negative, instr, "negative value"); | 1611 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue); |
1612 } | 1612 } |
1613 break; | 1613 break; |
1614 case Token::SHL: | 1614 case Token::SHL: |
1615 __ shll_cl(ToRegister(left)); | 1615 __ shll_cl(ToRegister(left)); |
1616 break; | 1616 break; |
1617 default: | 1617 default: |
1618 UNREACHABLE(); | 1618 UNREACHABLE(); |
1619 break; | 1619 break; |
1620 } | 1620 } |
1621 } else { | 1621 } else { |
1622 int32_t value = ToInteger32(LConstantOperand::cast(right)); | 1622 int32_t value = ToInteger32(LConstantOperand::cast(right)); |
1623 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); | 1623 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); |
1624 switch (instr->op()) { | 1624 switch (instr->op()) { |
1625 case Token::ROR: | 1625 case Token::ROR: |
1626 if (shift_count != 0) { | 1626 if (shift_count != 0) { |
1627 __ rorl(ToRegister(left), Immediate(shift_count)); | 1627 __ rorl(ToRegister(left), Immediate(shift_count)); |
1628 } | 1628 } |
1629 break; | 1629 break; |
1630 case Token::SAR: | 1630 case Token::SAR: |
1631 if (shift_count != 0) { | 1631 if (shift_count != 0) { |
1632 __ sarl(ToRegister(left), Immediate(shift_count)); | 1632 __ sarl(ToRegister(left), Immediate(shift_count)); |
1633 } | 1633 } |
1634 break; | 1634 break; |
1635 case Token::SHR: | 1635 case Token::SHR: |
1636 if (shift_count != 0) { | 1636 if (shift_count != 0) { |
1637 __ shrl(ToRegister(left), Immediate(shift_count)); | 1637 __ shrl(ToRegister(left), Immediate(shift_count)); |
1638 } else if (instr->can_deopt()) { | 1638 } else if (instr->can_deopt()) { |
1639 __ testl(ToRegister(left), ToRegister(left)); | 1639 __ testl(ToRegister(left), ToRegister(left)); |
1640 DeoptimizeIf(negative, instr, "negative value"); | 1640 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue); |
1641 } | 1641 } |
1642 break; | 1642 break; |
1643 case Token::SHL: | 1643 case Token::SHL: |
1644 if (shift_count != 0) { | 1644 if (shift_count != 0) { |
1645 if (instr->hydrogen_value()->representation().IsSmi()) { | 1645 if (instr->hydrogen_value()->representation().IsSmi()) { |
1646 if (SmiValuesAre32Bits()) { | 1646 if (SmiValuesAre32Bits()) { |
1647 __ shlp(ToRegister(left), Immediate(shift_count)); | 1647 __ shlp(ToRegister(left), Immediate(shift_count)); |
1648 } else { | 1648 } else { |
1649 DCHECK(SmiValuesAre31Bits()); | 1649 DCHECK(SmiValuesAre31Bits()); |
1650 if (instr->can_deopt()) { | 1650 if (instr->can_deopt()) { |
1651 if (shift_count != 1) { | 1651 if (shift_count != 1) { |
1652 __ shll(ToRegister(left), Immediate(shift_count - 1)); | 1652 __ shll(ToRegister(left), Immediate(shift_count - 1)); |
1653 } | 1653 } |
1654 __ Integer32ToSmi(ToRegister(left), ToRegister(left)); | 1654 __ Integer32ToSmi(ToRegister(left), ToRegister(left)); |
1655 DeoptimizeIf(overflow, instr, "overflow"); | 1655 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
1656 } else { | 1656 } else { |
1657 __ shll(ToRegister(left), Immediate(shift_count)); | 1657 __ shll(ToRegister(left), Immediate(shift_count)); |
1658 } | 1658 } |
1659 } | 1659 } |
1660 } else { | 1660 } else { |
1661 __ shll(ToRegister(left), Immediate(shift_count)); | 1661 __ shll(ToRegister(left), Immediate(shift_count)); |
1662 } | 1662 } |
1663 } | 1663 } |
1664 break; | 1664 break; |
1665 default: | 1665 default: |
(...skipping 22 matching lines...) Expand all Loading... |
1688 } | 1688 } |
1689 } else { | 1689 } else { |
1690 if (instr->hydrogen_value()->representation().IsSmi()) { | 1690 if (instr->hydrogen_value()->representation().IsSmi()) { |
1691 __ subp(ToRegister(left), ToOperand(right)); | 1691 __ subp(ToRegister(left), ToOperand(right)); |
1692 } else { | 1692 } else { |
1693 __ subl(ToRegister(left), ToOperand(right)); | 1693 __ subl(ToRegister(left), ToOperand(right)); |
1694 } | 1694 } |
1695 } | 1695 } |
1696 | 1696 |
1697 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1697 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
1698 DeoptimizeIf(overflow, instr, "overflow"); | 1698 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
1699 } | 1699 } |
1700 } | 1700 } |
1701 | 1701 |
1702 | 1702 |
1703 void LCodeGen::DoConstantI(LConstantI* instr) { | 1703 void LCodeGen::DoConstantI(LConstantI* instr) { |
1704 Register dst = ToRegister(instr->result()); | 1704 Register dst = ToRegister(instr->result()); |
1705 if (instr->value() == 0) { | 1705 if (instr->value() == 0) { |
1706 __ xorl(dst, dst); | 1706 __ xorl(dst, dst); |
1707 } else { | 1707 } else { |
1708 __ movl(dst, Immediate(instr->value())); | 1708 __ movl(dst, Immediate(instr->value())); |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1741 | 1741 |
1742 void LCodeGen::DoDateField(LDateField* instr) { | 1742 void LCodeGen::DoDateField(LDateField* instr) { |
1743 Register object = ToRegister(instr->date()); | 1743 Register object = ToRegister(instr->date()); |
1744 Register result = ToRegister(instr->result()); | 1744 Register result = ToRegister(instr->result()); |
1745 Smi* index = instr->index(); | 1745 Smi* index = instr->index(); |
1746 Label runtime, done, not_date_object; | 1746 Label runtime, done, not_date_object; |
1747 DCHECK(object.is(result)); | 1747 DCHECK(object.is(result)); |
1748 DCHECK(object.is(rax)); | 1748 DCHECK(object.is(rax)); |
1749 | 1749 |
1750 Condition cc = masm()->CheckSmi(object); | 1750 Condition cc = masm()->CheckSmi(object); |
1751 DeoptimizeIf(cc, instr, "Smi"); | 1751 DeoptimizeIf(cc, instr, Deoptimizer::kSmi); |
1752 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister); | 1752 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister); |
1753 DeoptimizeIf(not_equal, instr, "not a date object"); | 1753 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject); |
1754 | 1754 |
1755 if (index->value() == 0) { | 1755 if (index->value() == 0) { |
1756 __ movp(result, FieldOperand(object, JSDate::kValueOffset)); | 1756 __ movp(result, FieldOperand(object, JSDate::kValueOffset)); |
1757 } else { | 1757 } else { |
1758 if (index->value() < JSDate::kFirstUncachedField) { | 1758 if (index->value() < JSDate::kFirstUncachedField) { |
1759 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1759 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
1760 Operand stamp_operand = __ ExternalOperand(stamp); | 1760 Operand stamp_operand = __ ExternalOperand(stamp); |
1761 __ movp(kScratchRegister, stamp_operand); | 1761 __ movp(kScratchRegister, stamp_operand); |
1762 __ cmpp(kScratchRegister, FieldOperand(object, | 1762 __ cmpp(kScratchRegister, FieldOperand(object, |
1763 JSDate::kCacheStampOffset)); | 1763 JSDate::kCacheStampOffset)); |
(...skipping 143 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1907 __ addl(ToRegister(left), ToRegister(right)); | 1907 __ addl(ToRegister(left), ToRegister(right)); |
1908 } | 1908 } |
1909 } else { | 1909 } else { |
1910 if (is_p) { | 1910 if (is_p) { |
1911 __ addp(ToRegister(left), ToOperand(right)); | 1911 __ addp(ToRegister(left), ToOperand(right)); |
1912 } else { | 1912 } else { |
1913 __ addl(ToRegister(left), ToOperand(right)); | 1913 __ addl(ToRegister(left), ToOperand(right)); |
1914 } | 1914 } |
1915 } | 1915 } |
1916 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1916 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
1917 DeoptimizeIf(overflow, instr, "overflow"); | 1917 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
1918 } | 1918 } |
1919 } | 1919 } |
1920 } | 1920 } |
1921 | 1921 |
1922 | 1922 |
1923 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1923 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
1924 LOperand* left = instr->left(); | 1924 LOperand* left = instr->left(); |
1925 LOperand* right = instr->right(); | 1925 LOperand* right = instr->right(); |
1926 DCHECK(left->Equals(instr->result())); | 1926 DCHECK(left->Equals(instr->result())); |
1927 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1927 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
(...skipping 245 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2173 } | 2173 } |
2174 | 2174 |
2175 if (expected.Contains(ToBooleanStub::SMI)) { | 2175 if (expected.Contains(ToBooleanStub::SMI)) { |
2176 // Smis: 0 -> false, all other -> true. | 2176 // Smis: 0 -> false, all other -> true. |
2177 __ Cmp(reg, Smi::FromInt(0)); | 2177 __ Cmp(reg, Smi::FromInt(0)); |
2178 __ j(equal, instr->FalseLabel(chunk_)); | 2178 __ j(equal, instr->FalseLabel(chunk_)); |
2179 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2179 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
2180 } else if (expected.NeedsMap()) { | 2180 } else if (expected.NeedsMap()) { |
2181 // If we need a map later and have a Smi -> deopt. | 2181 // If we need a map later and have a Smi -> deopt. |
2182 __ testb(reg, Immediate(kSmiTagMask)); | 2182 __ testb(reg, Immediate(kSmiTagMask)); |
2183 DeoptimizeIf(zero, instr, "Smi"); | 2183 DeoptimizeIf(zero, instr, Deoptimizer::kSmi); |
2184 } | 2184 } |
2185 | 2185 |
2186 const Register map = kScratchRegister; | 2186 const Register map = kScratchRegister; |
2187 if (expected.NeedsMap()) { | 2187 if (expected.NeedsMap()) { |
2188 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset)); | 2188 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset)); |
2189 | 2189 |
2190 if (expected.CanBeUndetectable()) { | 2190 if (expected.CanBeUndetectable()) { |
2191 // Undetectable -> false. | 2191 // Undetectable -> false. |
2192 __ testb(FieldOperand(map, Map::kBitFieldOffset), | 2192 __ testb(FieldOperand(map, Map::kBitFieldOffset), |
2193 Immediate(1 << Map::kIsUndetectable)); | 2193 Immediate(1 << Map::kIsUndetectable)); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2227 __ xorps(xmm_scratch, xmm_scratch); | 2227 __ xorps(xmm_scratch, xmm_scratch); |
2228 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | 2228 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); |
2229 __ j(zero, instr->FalseLabel(chunk_)); | 2229 __ j(zero, instr->FalseLabel(chunk_)); |
2230 __ jmp(instr->TrueLabel(chunk_)); | 2230 __ jmp(instr->TrueLabel(chunk_)); |
2231 __ bind(¬_heap_number); | 2231 __ bind(¬_heap_number); |
2232 } | 2232 } |
2233 | 2233 |
2234 if (!expected.IsGeneric()) { | 2234 if (!expected.IsGeneric()) { |
2235 // We've seen something for the first time -> deopt. | 2235 // We've seen something for the first time -> deopt. |
2236 // This can only happen if we are not generic already. | 2236 // This can only happen if we are not generic already. |
2237 DeoptimizeIf(no_condition, instr, "unexpected object"); | 2237 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject); |
2238 } | 2238 } |
2239 } | 2239 } |
2240 } | 2240 } |
2241 } | 2241 } |
2242 | 2242 |
2243 | 2243 |
2244 void LCodeGen::EmitGoto(int block) { | 2244 void LCodeGen::EmitGoto(int block) { |
2245 if (!IsNextEmittedBlock(block)) { | 2245 if (!IsNextEmittedBlock(block)) { |
2246 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); | 2246 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); |
2247 } | 2247 } |
(...skipping 596 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2844 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); | 2844 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); |
2845 } | 2845 } |
2846 } | 2846 } |
2847 | 2847 |
2848 | 2848 |
// Loads the value of a global property cell into the instruction's result
// register. If the cell may have been invalidated (hole check required),
// deoptimizes when the loaded value is the hole sentinel, since that means
// the global was deleted and the optimized load is no longer valid.
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  // Emit the cell load; the cell handle is embedded in the generated code.
  __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // the_hole in the cell marks a deleted global property -> bail out.
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr, Deoptimizer::kHole);
  }
}
2857 | 2857 |
2858 | 2858 |
2859 template <class T> | 2859 template <class T> |
2860 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 2860 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
2861 DCHECK(FLAG_vector_ics); | 2861 DCHECK(FLAG_vector_ics); |
2862 Register vector_register = ToRegister(instr->temp_vector()); | 2862 Register vector_register = ToRegister(instr->temp_vector()); |
2863 Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 2863 Register slot_register = VectorLoadICDescriptor::SlotRegister(); |
2864 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 2864 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2897 // If the cell we are storing to contains the hole it could have | 2897 // If the cell we are storing to contains the hole it could have |
2898 // been deleted from the property dictionary. In that case, we need | 2898 // been deleted from the property dictionary. In that case, we need |
2899 // to update the property details in the property dictionary to mark | 2899 // to update the property details in the property dictionary to mark |
2900 // it as no longer deleted. We deoptimize in that case. | 2900 // it as no longer deleted. We deoptimize in that case. |
2901 if (instr->hydrogen()->RequiresHoleCheck()) { | 2901 if (instr->hydrogen()->RequiresHoleCheck()) { |
2902 // We have a temp because CompareRoot might clobber kScratchRegister. | 2902 // We have a temp because CompareRoot might clobber kScratchRegister. |
2903 Register cell = ToRegister(instr->temp()); | 2903 Register cell = ToRegister(instr->temp()); |
2904 DCHECK(!value.is(cell)); | 2904 DCHECK(!value.is(cell)); |
2905 __ Move(cell, cell_handle, RelocInfo::CELL); | 2905 __ Move(cell, cell_handle, RelocInfo::CELL); |
2906 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex); | 2906 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex); |
2907 DeoptimizeIf(equal, instr, "hole"); | 2907 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
2908 // Store the value. | 2908 // Store the value. |
2909 __ movp(Operand(cell, 0), value); | 2909 __ movp(Operand(cell, 0), value); |
2910 } else { | 2910 } else { |
2911 // Store the value. | 2911 // Store the value. |
2912 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL); | 2912 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL); |
2913 __ movp(Operand(kScratchRegister, 0), value); | 2913 __ movp(Operand(kScratchRegister, 0), value); |
2914 } | 2914 } |
2915 // Cells are always rescanned, so no write barrier here. | 2915 // Cells are always rescanned, so no write barrier here. |
2916 } | 2916 } |
2917 | 2917 |
2918 | 2918 |
// Loads a slot from a context object into the result register. When a hole
// check is required, the hole value is handled in one of two ways depending
// on the hydrogen instruction: either deoptimize (the slot is expected to be
// initialized, e.g. a `let` binding read before assignment in full-codegen
// would have thrown), or silently convert the hole to undefined.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movp(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      // Reading the hole here invalidates the optimization assumptions.
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    } else {
      // Non-deopting variant: map the hole to undefined and fall through.
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}
2935 | 2935 |
2936 | 2936 |
2937 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 2937 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
2938 Register context = ToRegister(instr->context()); | 2938 Register context = ToRegister(instr->context()); |
2939 Register value = ToRegister(instr->value()); | 2939 Register value = ToRegister(instr->value()); |
2940 | 2940 |
2941 Operand target = ContextOperand(context, instr->slot_index()); | 2941 Operand target = ContextOperand(context, instr->slot_index()); |
2942 | 2942 |
2943 Label skip_assignment; | 2943 Label skip_assignment; |
2944 if (instr->hydrogen()->RequiresHoleCheck()) { | 2944 if (instr->hydrogen()->RequiresHoleCheck()) { |
2945 __ CompareRoot(target, Heap::kTheHoleValueRootIndex); | 2945 __ CompareRoot(target, Heap::kTheHoleValueRootIndex); |
2946 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2946 if (instr->hydrogen()->DeoptimizesOnHole()) { |
2947 DeoptimizeIf(equal, instr, "hole"); | 2947 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
2948 } else { | 2948 } else { |
2949 __ j(not_equal, &skip_assignment); | 2949 __ j(not_equal, &skip_assignment); |
2950 } | 2950 } |
2951 } | 2951 } |
2952 __ movp(target, value); | 2952 __ movp(target, value); |
2953 | 2953 |
2954 if (instr->hydrogen()->NeedsWriteBarrier()) { | 2954 if (instr->hydrogen()->NeedsWriteBarrier()) { |
2955 SmiCheck check_needed = | 2955 SmiCheck check_needed = |
2956 instr->hydrogen()->value()->type().IsHeapObject() | 2956 instr->hydrogen()->value()->type().IsHeapObject() |
2957 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 2957 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3037 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { | 3037 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
3038 Register function = ToRegister(instr->function()); | 3038 Register function = ToRegister(instr->function()); |
3039 Register result = ToRegister(instr->result()); | 3039 Register result = ToRegister(instr->result()); |
3040 | 3040 |
3041 // Get the prototype or initial map from the function. | 3041 // Get the prototype or initial map from the function. |
3042 __ movp(result, | 3042 __ movp(result, |
3043 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3043 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
3044 | 3044 |
3045 // Check that the function has a prototype or an initial map. | 3045 // Check that the function has a prototype or an initial map. |
3046 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); | 3046 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
3047 DeoptimizeIf(equal, instr, "hole"); | 3047 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
3048 | 3048 |
3049 // If the function does not have an initial map, we're done. | 3049 // If the function does not have an initial map, we're done. |
3050 Label done; | 3050 Label done; |
3051 __ CmpObjectType(result, MAP_TYPE, kScratchRegister); | 3051 __ CmpObjectType(result, MAP_TYPE, kScratchRegister); |
3052 __ j(not_equal, &done, Label::kNear); | 3052 __ j(not_equal, &done, Label::kNear); |
3053 | 3053 |
3054 // Get the prototype from the initial map. | 3054 // Get the prototype from the initial map. |
3055 __ movp(result, FieldOperand(result, Map::kPrototypeOffset)); | 3055 __ movp(result, FieldOperand(result, Map::kPrototypeOffset)); |
3056 | 3056 |
3057 // All done. | 3057 // All done. |
(...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3149 break; | 3149 break; |
3150 case EXTERNAL_INT32_ELEMENTS: | 3150 case EXTERNAL_INT32_ELEMENTS: |
3151 case INT32_ELEMENTS: | 3151 case INT32_ELEMENTS: |
3152 __ movl(result, operand); | 3152 __ movl(result, operand); |
3153 break; | 3153 break; |
3154 case EXTERNAL_UINT32_ELEMENTS: | 3154 case EXTERNAL_UINT32_ELEMENTS: |
3155 case UINT32_ELEMENTS: | 3155 case UINT32_ELEMENTS: |
3156 __ movl(result, operand); | 3156 __ movl(result, operand); |
3157 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3157 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
3158 __ testl(result, result); | 3158 __ testl(result, result); |
3159 DeoptimizeIf(negative, instr, "negative value"); | 3159 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue); |
3160 } | 3160 } |
3161 break; | 3161 break; |
3162 case EXTERNAL_FLOAT32_ELEMENTS: | 3162 case EXTERNAL_FLOAT32_ELEMENTS: |
3163 case EXTERNAL_FLOAT64_ELEMENTS: | 3163 case EXTERNAL_FLOAT64_ELEMENTS: |
3164 case FLOAT32_ELEMENTS: | 3164 case FLOAT32_ELEMENTS: |
3165 case FLOAT64_ELEMENTS: | 3165 case FLOAT64_ELEMENTS: |
3166 case FAST_ELEMENTS: | 3166 case FAST_ELEMENTS: |
3167 case FAST_SMI_ELEMENTS: | 3167 case FAST_SMI_ELEMENTS: |
3168 case FAST_DOUBLE_ELEMENTS: | 3168 case FAST_DOUBLE_ELEMENTS: |
3169 case FAST_HOLEY_ELEMENTS: | 3169 case FAST_HOLEY_ELEMENTS: |
(...skipping 18 matching lines...) Expand all Loading... |
3188 __ movsxlq(ToRegister(key), ToRegister(key)); | 3188 __ movsxlq(ToRegister(key), ToRegister(key)); |
3189 } | 3189 } |
3190 if (instr->hydrogen()->RequiresHoleCheck()) { | 3190 if (instr->hydrogen()->RequiresHoleCheck()) { |
3191 Operand hole_check_operand = BuildFastArrayOperand( | 3191 Operand hole_check_operand = BuildFastArrayOperand( |
3192 instr->elements(), | 3192 instr->elements(), |
3193 key, | 3193 key, |
3194 instr->hydrogen()->key()->representation(), | 3194 instr->hydrogen()->key()->representation(), |
3195 FAST_DOUBLE_ELEMENTS, | 3195 FAST_DOUBLE_ELEMENTS, |
3196 instr->base_offset() + sizeof(kHoleNanLower32)); | 3196 instr->base_offset() + sizeof(kHoleNanLower32)); |
3197 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); | 3197 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); |
3198 DeoptimizeIf(equal, instr, "hole"); | 3198 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
3199 } | 3199 } |
3200 | 3200 |
3201 Operand double_load_operand = BuildFastArrayOperand( | 3201 Operand double_load_operand = BuildFastArrayOperand( |
3202 instr->elements(), | 3202 instr->elements(), |
3203 key, | 3203 key, |
3204 instr->hydrogen()->key()->representation(), | 3204 instr->hydrogen()->key()->representation(), |
3205 FAST_DOUBLE_ELEMENTS, | 3205 FAST_DOUBLE_ELEMENTS, |
3206 instr->base_offset()); | 3206 instr->base_offset()); |
3207 __ movsd(result, double_load_operand); | 3207 __ movsd(result, double_load_operand); |
3208 } | 3208 } |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3245 __ Load(result, | 3245 __ Load(result, |
3246 BuildFastArrayOperand(instr->elements(), key, | 3246 BuildFastArrayOperand(instr->elements(), key, |
3247 instr->hydrogen()->key()->representation(), | 3247 instr->hydrogen()->key()->representation(), |
3248 FAST_ELEMENTS, offset), | 3248 FAST_ELEMENTS, offset), |
3249 representation); | 3249 representation); |
3250 | 3250 |
3251 // Check for the hole value. | 3251 // Check for the hole value. |
3252 if (requires_hole_check) { | 3252 if (requires_hole_check) { |
3253 if (IsFastSmiElementsKind(hinstr->elements_kind())) { | 3253 if (IsFastSmiElementsKind(hinstr->elements_kind())) { |
3254 Condition smi = __ CheckSmi(result); | 3254 Condition smi = __ CheckSmi(result); |
3255 DeoptimizeIf(NegateCondition(smi), instr, "not a Smi"); | 3255 DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi); |
3256 } else { | 3256 } else { |
3257 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); | 3257 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
3258 DeoptimizeIf(equal, instr, "hole"); | 3258 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
3259 } | 3259 } |
3260 } | 3260 } |
3261 } | 3261 } |
3262 | 3262 |
3263 | 3263 |
3264 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3264 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
3265 if (instr->is_typed_elements()) { | 3265 if (instr->is_typed_elements()) { |
3266 DoLoadKeyedExternalArray(instr); | 3266 DoLoadKeyedExternalArray(instr); |
3267 } else if (instr->hydrogen()->representation().IsDouble()) { | 3267 } else if (instr->hydrogen()->representation().IsDouble()) { |
3268 DoLoadKeyedFixedDoubleArray(instr); | 3268 DoLoadKeyedFixedDoubleArray(instr); |
(...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3395 } | 3395 } |
3396 | 3396 |
3397 // Normal function. Replace undefined or null with global receiver. | 3397 // Normal function. Replace undefined or null with global receiver. |
3398 __ CompareRoot(receiver, Heap::kNullValueRootIndex); | 3398 __ CompareRoot(receiver, Heap::kNullValueRootIndex); |
3399 __ j(equal, &global_object, Label::kNear); | 3399 __ j(equal, &global_object, Label::kNear); |
3400 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); | 3400 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); |
3401 __ j(equal, &global_object, Label::kNear); | 3401 __ j(equal, &global_object, Label::kNear); |
3402 | 3402 |
3403 // The receiver should be a JS object. | 3403 // The receiver should be a JS object. |
3404 Condition is_smi = __ CheckSmi(receiver); | 3404 Condition is_smi = __ CheckSmi(receiver); |
3405 DeoptimizeIf(is_smi, instr, "Smi"); | 3405 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi); |
3406 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister); | 3406 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister); |
3407 DeoptimizeIf(below, instr, "not a JavaScript object"); | 3407 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject); |
3408 | 3408 |
3409 __ jmp(&receiver_ok, Label::kNear); | 3409 __ jmp(&receiver_ok, Label::kNear); |
3410 __ bind(&global_object); | 3410 __ bind(&global_object); |
3411 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset)); | 3411 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset)); |
3412 __ movp(receiver, | 3412 __ movp(receiver, |
3413 Operand(receiver, | 3413 Operand(receiver, |
3414 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 3414 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
3415 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset)); | 3415 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset)); |
3416 | 3416 |
3417 __ bind(&receiver_ok); | 3417 __ bind(&receiver_ok); |
3418 } | 3418 } |
3419 | 3419 |
3420 | 3420 |
3421 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { | 3421 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { |
3422 Register receiver = ToRegister(instr->receiver()); | 3422 Register receiver = ToRegister(instr->receiver()); |
3423 Register function = ToRegister(instr->function()); | 3423 Register function = ToRegister(instr->function()); |
3424 Register length = ToRegister(instr->length()); | 3424 Register length = ToRegister(instr->length()); |
3425 Register elements = ToRegister(instr->elements()); | 3425 Register elements = ToRegister(instr->elements()); |
3426 DCHECK(receiver.is(rax)); // Used for parameter count. | 3426 DCHECK(receiver.is(rax)); // Used for parameter count. |
3427 DCHECK(function.is(rdi)); // Required by InvokeFunction. | 3427 DCHECK(function.is(rdi)); // Required by InvokeFunction. |
3428 DCHECK(ToRegister(instr->result()).is(rax)); | 3428 DCHECK(ToRegister(instr->result()).is(rax)); |
3429 | 3429 |
3430 // Copy the arguments to this function possibly from the | 3430 // Copy the arguments to this function possibly from the |
3431 // adaptor frame below it. | 3431 // adaptor frame below it. |
3432 const uint32_t kArgumentsLimit = 1 * KB; | 3432 const uint32_t kArgumentsLimit = 1 * KB; |
3433 __ cmpp(length, Immediate(kArgumentsLimit)); | 3433 __ cmpp(length, Immediate(kArgumentsLimit)); |
3434 DeoptimizeIf(above, instr, "too many arguments"); | 3434 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments); |
3435 | 3435 |
3436 __ Push(receiver); | 3436 __ Push(receiver); |
3437 __ movp(receiver, length); | 3437 __ movp(receiver, length); |
3438 | 3438 |
3439 // Loop through the arguments pushing them onto the execution | 3439 // Loop through the arguments pushing them onto the execution |
3440 // stack. | 3440 // stack. |
3441 Label invoke, loop; | 3441 Label invoke, loop; |
3442 // length is a small non-negative integer, due to the test above. | 3442 // length is a small non-negative integer, due to the test above. |
3443 __ testl(length, length); | 3443 __ testl(length, length); |
3444 __ j(zero, &invoke, Label::kNear); | 3444 __ j(zero, &invoke, Label::kNear); |
(...skipping 193 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3638 __ Call(target); | 3638 __ Call(target); |
3639 } | 3639 } |
3640 generator.AfterCall(); | 3640 generator.AfterCall(); |
3641 } | 3641 } |
3642 | 3642 |
3643 | 3643 |
3644 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | 3644 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
3645 Register input_reg = ToRegister(instr->value()); | 3645 Register input_reg = ToRegister(instr->value()); |
3646 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 3646 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
3647 Heap::kHeapNumberMapRootIndex); | 3647 Heap::kHeapNumberMapRootIndex); |
3648 DeoptimizeIf(not_equal, instr, "not a heap number"); | 3648 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
3649 | 3649 |
3650 Label slow, allocated, done; | 3650 Label slow, allocated, done; |
3651 Register tmp = input_reg.is(rax) ? rcx : rax; | 3651 Register tmp = input_reg.is(rax) ? rcx : rax; |
3652 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx; | 3652 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx; |
3653 | 3653 |
3654 // Preserve the value of all registers. | 3654 // Preserve the value of all registers. |
3655 PushSafepointRegistersScope scope(this); | 3655 PushSafepointRegistersScope scope(this); |
3656 | 3656 |
3657 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); | 3657 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
3658 // Check the sign of the argument. If the argument is positive, just | 3658 // Check the sign of the argument. If the argument is positive, just |
(...skipping 25 matching lines...) Expand all Loading... |
3684 __ bind(&done); | 3684 __ bind(&done); |
3685 } | 3685 } |
3686 | 3686 |
3687 | 3687 |
3688 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3688 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
3689 Register input_reg = ToRegister(instr->value()); | 3689 Register input_reg = ToRegister(instr->value()); |
3690 __ testl(input_reg, input_reg); | 3690 __ testl(input_reg, input_reg); |
3691 Label is_positive; | 3691 Label is_positive; |
3692 __ j(not_sign, &is_positive, Label::kNear); | 3692 __ j(not_sign, &is_positive, Label::kNear); |
3693 __ negl(input_reg); // Sets flags. | 3693 __ negl(input_reg); // Sets flags. |
3694 DeoptimizeIf(negative, instr, "overflow"); | 3694 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow); |
3695 __ bind(&is_positive); | 3695 __ bind(&is_positive); |
3696 } | 3696 } |
3697 | 3697 |
3698 | 3698 |
3699 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { | 3699 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { |
3700 Register input_reg = ToRegister(instr->value()); | 3700 Register input_reg = ToRegister(instr->value()); |
3701 __ testp(input_reg, input_reg); | 3701 __ testp(input_reg, input_reg); |
3702 Label is_positive; | 3702 Label is_positive; |
3703 __ j(not_sign, &is_positive, Label::kNear); | 3703 __ j(not_sign, &is_positive, Label::kNear); |
3704 __ negp(input_reg); // Sets flags. | 3704 __ negp(input_reg); // Sets flags. |
3705 DeoptimizeIf(negative, instr, "overflow"); | 3705 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow); |
3706 __ bind(&is_positive); | 3706 __ bind(&is_positive); |
3707 } | 3707 } |
3708 | 3708 |
3709 | 3709 |
3710 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3710 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
3711 // Class for deferred case. | 3711 // Class for deferred case. |
3712 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3712 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
3713 public: | 3713 public: |
3714 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3714 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
3715 : LDeferredCode(codegen), instr_(instr) { } | 3715 : LDeferredCode(codegen), instr_(instr) { } |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3751 XMMRegister xmm_scratch = double_scratch0(); | 3751 XMMRegister xmm_scratch = double_scratch0(); |
3752 Register output_reg = ToRegister(instr->result()); | 3752 Register output_reg = ToRegister(instr->result()); |
3753 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3753 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3754 | 3754 |
3755 if (CpuFeatures::IsSupported(SSE4_1)) { | 3755 if (CpuFeatures::IsSupported(SSE4_1)) { |
3756 CpuFeatureScope scope(masm(), SSE4_1); | 3756 CpuFeatureScope scope(masm(), SSE4_1); |
3757 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3757 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3758 // Deoptimize if minus zero. | 3758 // Deoptimize if minus zero. |
3759 __ movq(output_reg, input_reg); | 3759 __ movq(output_reg, input_reg); |
3760 __ subq(output_reg, Immediate(1)); | 3760 __ subq(output_reg, Immediate(1)); |
3761 DeoptimizeIf(overflow, instr, "minus zero"); | 3761 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero); |
3762 } | 3762 } |
3763 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); | 3763 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); |
3764 __ cvttsd2si(output_reg, xmm_scratch); | 3764 __ cvttsd2si(output_reg, xmm_scratch); |
3765 __ cmpl(output_reg, Immediate(0x1)); | 3765 __ cmpl(output_reg, Immediate(0x1)); |
3766 DeoptimizeIf(overflow, instr, "overflow"); | 3766 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
3767 } else { | 3767 } else { |
3768 Label negative_sign, done; | 3768 Label negative_sign, done; |
3769 // Deoptimize on unordered. | 3769 // Deoptimize on unordered. |
3770 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | 3770 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
3771 __ ucomisd(input_reg, xmm_scratch); | 3771 __ ucomisd(input_reg, xmm_scratch); |
3772 DeoptimizeIf(parity_even, instr, "NaN"); | 3772 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); |
3773 __ j(below, &negative_sign, Label::kNear); | 3773 __ j(below, &negative_sign, Label::kNear); |
3774 | 3774 |
3775 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3775 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3776 // Check for negative zero. | 3776 // Check for negative zero. |
3777 Label positive_sign; | 3777 Label positive_sign; |
3778 __ j(above, &positive_sign, Label::kNear); | 3778 __ j(above, &positive_sign, Label::kNear); |
3779 __ movmskpd(output_reg, input_reg); | 3779 __ movmskpd(output_reg, input_reg); |
3780 __ testq(output_reg, Immediate(1)); | 3780 __ testq(output_reg, Immediate(1)); |
3781 DeoptimizeIf(not_zero, instr, "minus zero"); | 3781 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
3782 __ Set(output_reg, 0); | 3782 __ Set(output_reg, 0); |
3783 __ jmp(&done); | 3783 __ jmp(&done); |
3784 __ bind(&positive_sign); | 3784 __ bind(&positive_sign); |
3785 } | 3785 } |
3786 | 3786 |
3787 // Use truncating instruction (OK because input is positive). | 3787 // Use truncating instruction (OK because input is positive). |
3788 __ cvttsd2si(output_reg, input_reg); | 3788 __ cvttsd2si(output_reg, input_reg); |
3789 // Overflow is signalled with minint. | 3789 // Overflow is signalled with minint. |
3790 __ cmpl(output_reg, Immediate(0x1)); | 3790 __ cmpl(output_reg, Immediate(0x1)); |
3791 DeoptimizeIf(overflow, instr, "overflow"); | 3791 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
3792 __ jmp(&done, Label::kNear); | 3792 __ jmp(&done, Label::kNear); |
3793 | 3793 |
3794 // Non-zero negative reaches here. | 3794 // Non-zero negative reaches here. |
3795 __ bind(&negative_sign); | 3795 __ bind(&negative_sign); |
3796 // Truncate, then compare and compensate. | 3796 // Truncate, then compare and compensate. |
3797 __ cvttsd2si(output_reg, input_reg); | 3797 __ cvttsd2si(output_reg, input_reg); |
3798 __ Cvtlsi2sd(xmm_scratch, output_reg); | 3798 __ Cvtlsi2sd(xmm_scratch, output_reg); |
3799 __ ucomisd(input_reg, xmm_scratch); | 3799 __ ucomisd(input_reg, xmm_scratch); |
3800 __ j(equal, &done, Label::kNear); | 3800 __ j(equal, &done, Label::kNear); |
3801 __ subl(output_reg, Immediate(1)); | 3801 __ subl(output_reg, Immediate(1)); |
3802 DeoptimizeIf(overflow, instr, "overflow"); | 3802 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
3803 | 3803 |
3804 __ bind(&done); | 3804 __ bind(&done); |
3805 } | 3805 } |
3806 } | 3806 } |
3807 | 3807 |
3808 | 3808 |
3809 void LCodeGen::DoMathRound(LMathRound* instr) { | 3809 void LCodeGen::DoMathRound(LMathRound* instr) { |
3810 const XMMRegister xmm_scratch = double_scratch0(); | 3810 const XMMRegister xmm_scratch = double_scratch0(); |
3811 Register output_reg = ToRegister(instr->result()); | 3811 Register output_reg = ToRegister(instr->result()); |
3812 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3812 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3813 XMMRegister input_temp = ToDoubleRegister(instr->temp()); | 3813 XMMRegister input_temp = ToDoubleRegister(instr->temp()); |
3814 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 | 3814 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 |
3815 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 | 3815 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 |
3816 | 3816 |
3817 Label done, round_to_zero, below_one_half; | 3817 Label done, round_to_zero, below_one_half; |
3818 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 3818 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
3819 __ movq(kScratchRegister, one_half); | 3819 __ movq(kScratchRegister, one_half); |
3820 __ movq(xmm_scratch, kScratchRegister); | 3820 __ movq(xmm_scratch, kScratchRegister); |
3821 __ ucomisd(xmm_scratch, input_reg); | 3821 __ ucomisd(xmm_scratch, input_reg); |
3822 __ j(above, &below_one_half, Label::kNear); | 3822 __ j(above, &below_one_half, Label::kNear); |
3823 | 3823 |
3824 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). | 3824 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). |
3825 __ addsd(xmm_scratch, input_reg); | 3825 __ addsd(xmm_scratch, input_reg); |
3826 __ cvttsd2si(output_reg, xmm_scratch); | 3826 __ cvttsd2si(output_reg, xmm_scratch); |
3827 // Overflow is signalled with minint. | 3827 // Overflow is signalled with minint. |
3828 __ cmpl(output_reg, Immediate(0x1)); | 3828 __ cmpl(output_reg, Immediate(0x1)); |
3829 DeoptimizeIf(overflow, instr, "overflow"); | 3829 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
3830 __ jmp(&done, dist); | 3830 __ jmp(&done, dist); |
3831 | 3831 |
3832 __ bind(&below_one_half); | 3832 __ bind(&below_one_half); |
3833 __ movq(kScratchRegister, minus_one_half); | 3833 __ movq(kScratchRegister, minus_one_half); |
3834 __ movq(xmm_scratch, kScratchRegister); | 3834 __ movq(xmm_scratch, kScratchRegister); |
3835 __ ucomisd(xmm_scratch, input_reg); | 3835 __ ucomisd(xmm_scratch, input_reg); |
3836 __ j(below_equal, &round_to_zero, Label::kNear); | 3836 __ j(below_equal, &round_to_zero, Label::kNear); |
3837 | 3837 |
3838 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then | 3838 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then |
3839 // compare and compensate. | 3839 // compare and compensate. |
3840 __ movq(input_temp, input_reg); // Do not alter input_reg. | 3840 __ movq(input_temp, input_reg); // Do not alter input_reg. |
3841 __ subsd(input_temp, xmm_scratch); | 3841 __ subsd(input_temp, xmm_scratch); |
3842 __ cvttsd2si(output_reg, input_temp); | 3842 __ cvttsd2si(output_reg, input_temp); |
3843 // Catch minint due to overflow, and to prevent overflow when compensating. | 3843 // Catch minint due to overflow, and to prevent overflow when compensating. |
3844 __ cmpl(output_reg, Immediate(0x1)); | 3844 __ cmpl(output_reg, Immediate(0x1)); |
3845 DeoptimizeIf(overflow, instr, "overflow"); | 3845 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
3846 | 3846 |
3847 __ Cvtlsi2sd(xmm_scratch, output_reg); | 3847 __ Cvtlsi2sd(xmm_scratch, output_reg); |
3848 __ ucomisd(xmm_scratch, input_temp); | 3848 __ ucomisd(xmm_scratch, input_temp); |
3849 __ j(equal, &done, dist); | 3849 __ j(equal, &done, dist); |
3850 __ subl(output_reg, Immediate(1)); | 3850 __ subl(output_reg, Immediate(1)); |
3851 // No overflow because we already ruled out minint. | 3851 // No overflow because we already ruled out minint. |
3852 __ jmp(&done, dist); | 3852 __ jmp(&done, dist); |
3853 | 3853 |
3854 __ bind(&round_to_zero); | 3854 __ bind(&round_to_zero); |
3855 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if | 3855 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if |
3856 // we can ignore the difference between a result of -0 and +0. | 3856 // we can ignore the difference between a result of -0 and +0. |
3857 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3857 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3858 __ movq(output_reg, input_reg); | 3858 __ movq(output_reg, input_reg); |
3859 __ testq(output_reg, output_reg); | 3859 __ testq(output_reg, output_reg); |
3860 DeoptimizeIf(negative, instr, "minus zero"); | 3860 DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero); |
3861 } | 3861 } |
3862 __ Set(output_reg, 0); | 3862 __ Set(output_reg, 0); |
3863 __ bind(&done); | 3863 __ bind(&done); |
3864 } | 3864 } |
3865 | 3865 |
3866 | 3866 |
3867 void LCodeGen::DoMathFround(LMathFround* instr) { | 3867 void LCodeGen::DoMathFround(LMathFround* instr) { |
3868 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3868 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3869 XMMRegister output_reg = ToDoubleRegister(instr->result()); | 3869 XMMRegister output_reg = ToDoubleRegister(instr->result()); |
3870 __ cvtsd2ss(output_reg, input_reg); | 3870 __ cvtsd2ss(output_reg, input_reg); |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3929 DCHECK(ToDoubleRegister(instr->left()).is(xmm2)); | 3929 DCHECK(ToDoubleRegister(instr->left()).is(xmm2)); |
3930 DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); | 3930 DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); |
3931 | 3931 |
3932 if (exponent_type.IsSmi()) { | 3932 if (exponent_type.IsSmi()) { |
3933 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3933 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
3934 __ CallStub(&stub); | 3934 __ CallStub(&stub); |
3935 } else if (exponent_type.IsTagged()) { | 3935 } else if (exponent_type.IsTagged()) { |
3936 Label no_deopt; | 3936 Label no_deopt; |
3937 __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear); | 3937 __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear); |
3938 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx); | 3938 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx); |
3939 DeoptimizeIf(not_equal, instr, "not a heap number"); | 3939 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
3940 __ bind(&no_deopt); | 3940 __ bind(&no_deopt); |
3941 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3941 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
3942 __ CallStub(&stub); | 3942 __ CallStub(&stub); |
3943 } else if (exponent_type.IsInteger32()) { | 3943 } else if (exponent_type.IsInteger32()) { |
3944 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3944 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
3945 __ CallStub(&stub); | 3945 __ CallStub(&stub); |
3946 } else { | 3946 } else { |
3947 DCHECK(exponent_type.IsDouble()); | 3947 DCHECK(exponent_type.IsDouble()); |
3948 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3948 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
3949 __ CallStub(&stub); | 3949 __ CallStub(&stub); |
(...skipping 373 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4323 __ cmpl(length, index); | 4323 __ cmpl(length, index); |
4324 } | 4324 } |
4325 } | 4325 } |
4326 } | 4326 } |
4327 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4327 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
4328 Label done; | 4328 Label done; |
4329 __ j(NegateCondition(cc), &done, Label::kNear); | 4329 __ j(NegateCondition(cc), &done, Label::kNear); |
4330 __ int3(); | 4330 __ int3(); |
4331 __ bind(&done); | 4331 __ bind(&done); |
4332 } else { | 4332 } else { |
4333 DeoptimizeIf(cc, instr, "out of bounds"); | 4333 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); |
4334 } | 4334 } |
4335 } | 4335 } |
4336 | 4336 |
4337 | 4337 |
4338 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4338 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
4339 ElementsKind elements_kind = instr->elements_kind(); | 4339 ElementsKind elements_kind = instr->elements_kind(); |
4340 LOperand* key = instr->key(); | 4340 LOperand* key = instr->key(); |
4341 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) { | 4341 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) { |
4342 Register key_reg = ToRegister(key); | 4342 Register key_reg = ToRegister(key); |
4343 Representation key_representation = | 4343 Representation key_representation = |
(...skipping 220 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4564 } | 4564 } |
4565 __ bind(¬_applicable); | 4565 __ bind(¬_applicable); |
4566 } | 4566 } |
4567 | 4567 |
4568 | 4568 |
4569 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4569 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
4570 Register object = ToRegister(instr->object()); | 4570 Register object = ToRegister(instr->object()); |
4571 Register temp = ToRegister(instr->temp()); | 4571 Register temp = ToRegister(instr->temp()); |
4572 Label no_memento_found; | 4572 Label no_memento_found; |
4573 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4573 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
4574 DeoptimizeIf(equal, instr, "memento found"); | 4574 DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound); |
4575 __ bind(&no_memento_found); | 4575 __ bind(&no_memento_found); |
4576 } | 4576 } |
4577 | 4577 |
4578 | 4578 |
4579 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4579 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
4580 DCHECK(ToRegister(instr->context()).is(rsi)); | 4580 DCHECK(ToRegister(instr->context()).is(rsi)); |
4581 DCHECK(ToRegister(instr->left()).is(rdx)); | 4581 DCHECK(ToRegister(instr->left()).is(rdx)); |
4582 DCHECK(ToRegister(instr->right()).is(rax)); | 4582 DCHECK(ToRegister(instr->right()).is(rax)); |
4583 StringAddStub stub(isolate(), | 4583 StringAddStub stub(isolate(), |
4584 instr->hydrogen()->flags(), | 4584 instr->hydrogen()->flags(), |
(...skipping 299 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4884 } | 4884 } |
4885 | 4885 |
4886 | 4886 |
4887 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4887 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
4888 HChange* hchange = instr->hydrogen(); | 4888 HChange* hchange = instr->hydrogen(); |
4889 Register input = ToRegister(instr->value()); | 4889 Register input = ToRegister(instr->value()); |
4890 Register output = ToRegister(instr->result()); | 4890 Register output = ToRegister(instr->result()); |
4891 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4891 if (hchange->CheckFlag(HValue::kCanOverflow) && |
4892 hchange->value()->CheckFlag(HValue::kUint32)) { | 4892 hchange->value()->CheckFlag(HValue::kUint32)) { |
4893 Condition is_smi = __ CheckUInteger32ValidSmiValue(input); | 4893 Condition is_smi = __ CheckUInteger32ValidSmiValue(input); |
4894 DeoptimizeIf(NegateCondition(is_smi), instr, "overflow"); | 4894 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow); |
4895 } | 4895 } |
4896 __ Integer32ToSmi(output, input); | 4896 __ Integer32ToSmi(output, input); |
4897 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4897 if (hchange->CheckFlag(HValue::kCanOverflow) && |
4898 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4898 !hchange->value()->CheckFlag(HValue::kUint32)) { |
4899 DeoptimizeIf(overflow, instr, "overflow"); | 4899 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
4900 } | 4900 } |
4901 } | 4901 } |
4902 | 4902 |
4903 | 4903 |
4904 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4904 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
4905 DCHECK(instr->value()->Equals(instr->result())); | 4905 DCHECK(instr->value()->Equals(instr->result())); |
4906 Register input = ToRegister(instr->value()); | 4906 Register input = ToRegister(instr->value()); |
4907 if (instr->needs_check()) { | 4907 if (instr->needs_check()) { |
4908 Condition is_smi = __ CheckSmi(input); | 4908 Condition is_smi = __ CheckSmi(input); |
4909 DeoptimizeIf(NegateCondition(is_smi), instr, "not a Smi"); | 4909 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi); |
4910 } else { | 4910 } else { |
4911 __ AssertSmi(input); | 4911 __ AssertSmi(input); |
4912 } | 4912 } |
4913 __ SmiToInteger32(input, input); | 4913 __ SmiToInteger32(input, input); |
4914 } | 4914 } |
4915 | 4915 |
4916 | 4916 |
4917 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4917 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
4918 XMMRegister result_reg, NumberUntagDMode mode) { | 4918 XMMRegister result_reg, NumberUntagDMode mode) { |
4919 bool can_convert_undefined_to_nan = | 4919 bool can_convert_undefined_to_nan = |
(...skipping 10 matching lines...) Expand all Loading... |
4930 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 4930 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
4931 Heap::kHeapNumberMapRootIndex); | 4931 Heap::kHeapNumberMapRootIndex); |
4932 | 4932 |
4933 // On x64 it is safe to load at heap number offset before evaluating the map | 4933 // On x64 it is safe to load at heap number offset before evaluating the map |
4934 // check, since all heap objects are at least two words long. | 4934 // check, since all heap objects are at least two words long. |
4935 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4935 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
4936 | 4936 |
4937 if (can_convert_undefined_to_nan) { | 4937 if (can_convert_undefined_to_nan) { |
4938 __ j(not_equal, &convert, Label::kNear); | 4938 __ j(not_equal, &convert, Label::kNear); |
4939 } else { | 4939 } else { |
4940 DeoptimizeIf(not_equal, instr, "not a heap number"); | 4940 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
4941 } | 4941 } |
4942 | 4942 |
4943 if (deoptimize_on_minus_zero) { | 4943 if (deoptimize_on_minus_zero) { |
4944 XMMRegister xmm_scratch = double_scratch0(); | 4944 XMMRegister xmm_scratch = double_scratch0(); |
4945 __ xorps(xmm_scratch, xmm_scratch); | 4945 __ xorps(xmm_scratch, xmm_scratch); |
4946 __ ucomisd(xmm_scratch, result_reg); | 4946 __ ucomisd(xmm_scratch, result_reg); |
4947 __ j(not_equal, &done, Label::kNear); | 4947 __ j(not_equal, &done, Label::kNear); |
4948 __ movmskpd(kScratchRegister, result_reg); | 4948 __ movmskpd(kScratchRegister, result_reg); |
4949 __ testq(kScratchRegister, Immediate(1)); | 4949 __ testq(kScratchRegister, Immediate(1)); |
4950 DeoptimizeIf(not_zero, instr, "minus zero"); | 4950 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
4951 } | 4951 } |
4952 __ jmp(&done, Label::kNear); | 4952 __ jmp(&done, Label::kNear); |
4953 | 4953 |
4954 if (can_convert_undefined_to_nan) { | 4954 if (can_convert_undefined_to_nan) { |
4955 __ bind(&convert); | 4955 __ bind(&convert); |
4956 | 4956 |
4957 // Convert undefined (and hole) to NaN. Compute NaN as 0/0. | 4957 // Convert undefined (and hole) to NaN. Compute NaN as 0/0. |
4958 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); | 4958 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); |
4959 DeoptimizeIf(not_equal, instr, "not a heap number/undefined"); | 4959 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); |
4960 | 4960 |
4961 __ pcmpeqd(result_reg, result_reg); | 4961 __ pcmpeqd(result_reg, result_reg); |
4962 __ jmp(&done, Label::kNear); | 4962 __ jmp(&done, Label::kNear); |
4963 } | 4963 } |
4964 } else { | 4964 } else { |
4965 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4965 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
4966 } | 4966 } |
4967 | 4967 |
4968 // Smi to XMM conversion | 4968 // Smi to XMM conversion |
4969 __ bind(&load_smi); | 4969 __ bind(&load_smi); |
(...skipping 25 matching lines...) Expand all Loading... |
4995 __ jmp(done); | 4995 __ jmp(done); |
4996 | 4996 |
4997 __ bind(&check_bools); | 4997 __ bind(&check_bools); |
4998 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex); | 4998 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex); |
4999 __ j(not_equal, &check_false, Label::kNear); | 4999 __ j(not_equal, &check_false, Label::kNear); |
5000 __ Set(input_reg, 1); | 5000 __ Set(input_reg, 1); |
5001 __ jmp(done); | 5001 __ jmp(done); |
5002 | 5002 |
5003 __ bind(&check_false); | 5003 __ bind(&check_false); |
5004 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); | 5004 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); |
5005 DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false"); | 5005 DeoptimizeIf(not_equal, instr, |
| 5006 Deoptimizer::kNotAHeapNumberUndefinedBoolean); |
5006 __ Set(input_reg, 0); | 5007 __ Set(input_reg, 0); |
5007 } else { | 5008 } else { |
5008 XMMRegister scratch = ToDoubleRegister(instr->temp()); | 5009 XMMRegister scratch = ToDoubleRegister(instr->temp()); |
5009 DCHECK(!scratch.is(xmm0)); | 5010 DCHECK(!scratch.is(xmm0)); |
5010 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 5011 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
5011 Heap::kHeapNumberMapRootIndex); | 5012 Heap::kHeapNumberMapRootIndex); |
5012 DeoptimizeIf(not_equal, instr, "not a heap number"); | 5013 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
5013 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5014 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
5014 __ cvttsd2si(input_reg, xmm0); | 5015 __ cvttsd2si(input_reg, xmm0); |
5015 __ Cvtlsi2sd(scratch, input_reg); | 5016 __ Cvtlsi2sd(scratch, input_reg); |
5016 __ ucomisd(xmm0, scratch); | 5017 __ ucomisd(xmm0, scratch); |
5017 DeoptimizeIf(not_equal, instr, "lost precision"); | 5018 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); |
5018 DeoptimizeIf(parity_even, instr, "NaN"); | 5019 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); |
5019 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { | 5020 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { |
5020 __ testl(input_reg, input_reg); | 5021 __ testl(input_reg, input_reg); |
5021 __ j(not_zero, done); | 5022 __ j(not_zero, done); |
5022 __ movmskpd(input_reg, xmm0); | 5023 __ movmskpd(input_reg, xmm0); |
5023 __ andl(input_reg, Immediate(1)); | 5024 __ andl(input_reg, Immediate(1)); |
5024 DeoptimizeIf(not_zero, instr, "minus zero"); | 5025 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
5025 } | 5026 } |
5026 } | 5027 } |
5027 } | 5028 } |
5028 | 5029 |
5029 | 5030 |
5030 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5031 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
5031 class DeferredTaggedToI FINAL : public LDeferredCode { | 5032 class DeferredTaggedToI FINAL : public LDeferredCode { |
5032 public: | 5033 public: |
5033 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5034 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
5034 : LDeferredCode(codegen), instr_(instr) { } | 5035 : LDeferredCode(codegen), instr_(instr) { } |
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5085 __ TruncateDoubleToI(result_reg, input_reg); | 5086 __ TruncateDoubleToI(result_reg, input_reg); |
5086 } else { | 5087 } else { |
5087 Label lost_precision, is_nan, minus_zero, done; | 5088 Label lost_precision, is_nan, minus_zero, done; |
5088 XMMRegister xmm_scratch = double_scratch0(); | 5089 XMMRegister xmm_scratch = double_scratch0(); |
5089 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 5090 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
5090 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 5091 __ DoubleToI(result_reg, input_reg, xmm_scratch, |
5091 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, | 5092 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, |
5092 &is_nan, &minus_zero, dist); | 5093 &is_nan, &minus_zero, dist); |
5093 __ jmp(&done, dist); | 5094 __ jmp(&done, dist); |
5094 __ bind(&lost_precision); | 5095 __ bind(&lost_precision); |
5095 DeoptimizeIf(no_condition, instr, "lost precision"); | 5096 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision); |
5096 __ bind(&is_nan); | 5097 __ bind(&is_nan); |
5097 DeoptimizeIf(no_condition, instr, "NaN"); | 5098 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN); |
5098 __ bind(&minus_zero); | 5099 __ bind(&minus_zero); |
5099 DeoptimizeIf(no_condition, instr, "minus zero"); | 5100 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); |
5100 __ bind(&done); | 5101 __ bind(&done); |
5101 } | 5102 } |
5102 } | 5103 } |
5103 | 5104 |
5104 | 5105 |
5105 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5106 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
5106 LOperand* input = instr->value(); | 5107 LOperand* input = instr->value(); |
5107 DCHECK(input->IsDoubleRegister()); | 5108 DCHECK(input->IsDoubleRegister()); |
5108 LOperand* result = instr->result(); | 5109 LOperand* result = instr->result(); |
5109 DCHECK(result->IsRegister()); | 5110 DCHECK(result->IsRegister()); |
5110 | 5111 |
5111 XMMRegister input_reg = ToDoubleRegister(input); | 5112 XMMRegister input_reg = ToDoubleRegister(input); |
5112 Register result_reg = ToRegister(result); | 5113 Register result_reg = ToRegister(result); |
5113 | 5114 |
5114 Label lost_precision, is_nan, minus_zero, done; | 5115 Label lost_precision, is_nan, minus_zero, done; |
5115 XMMRegister xmm_scratch = double_scratch0(); | 5116 XMMRegister xmm_scratch = double_scratch0(); |
5116 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 5117 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
5117 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 5118 __ DoubleToI(result_reg, input_reg, xmm_scratch, |
5118 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan, | 5119 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan, |
5119 &minus_zero, dist); | 5120 &minus_zero, dist); |
5120 __ jmp(&done, dist); | 5121 __ jmp(&done, dist); |
5121 __ bind(&lost_precision); | 5122 __ bind(&lost_precision); |
5122 DeoptimizeIf(no_condition, instr, "lost precision"); | 5123 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision); |
5123 __ bind(&is_nan); | 5124 __ bind(&is_nan); |
5124 DeoptimizeIf(no_condition, instr, "NaN"); | 5125 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN); |
5125 __ bind(&minus_zero); | 5126 __ bind(&minus_zero); |
5126 DeoptimizeIf(no_condition, instr, "minus zero"); | 5127 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); |
5127 __ bind(&done); | 5128 __ bind(&done); |
5128 __ Integer32ToSmi(result_reg, result_reg); | 5129 __ Integer32ToSmi(result_reg, result_reg); |
5129 DeoptimizeIf(overflow, instr, "overflow"); | 5130 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
5130 } | 5131 } |
5131 | 5132 |
5132 | 5133 |
5133 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5134 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
5134 LOperand* input = instr->value(); | 5135 LOperand* input = instr->value(); |
5135 Condition cc = masm()->CheckSmi(ToRegister(input)); | 5136 Condition cc = masm()->CheckSmi(ToRegister(input)); |
5136 DeoptimizeIf(NegateCondition(cc), instr, "not a Smi"); | 5137 DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi); |
5137 } | 5138 } |
5138 | 5139 |
5139 | 5140 |
5140 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5141 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
5141 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5142 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
5142 LOperand* input = instr->value(); | 5143 LOperand* input = instr->value(); |
5143 Condition cc = masm()->CheckSmi(ToRegister(input)); | 5144 Condition cc = masm()->CheckSmi(ToRegister(input)); |
5144 DeoptimizeIf(cc, instr, "Smi"); | 5145 DeoptimizeIf(cc, instr, Deoptimizer::kSmi); |
5145 } | 5146 } |
5146 } | 5147 } |
5147 | 5148 |
5148 | 5149 |
5149 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5150 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
5150 Register input = ToRegister(instr->value()); | 5151 Register input = ToRegister(instr->value()); |
5151 | 5152 |
5152 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset)); | 5153 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset)); |
5153 | 5154 |
5154 if (instr->hydrogen()->is_interval_check()) { | 5155 if (instr->hydrogen()->is_interval_check()) { |
5155 InstanceType first; | 5156 InstanceType first; |
5156 InstanceType last; | 5157 InstanceType last; |
5157 instr->hydrogen()->GetCheckInterval(&first, &last); | 5158 instr->hydrogen()->GetCheckInterval(&first, &last); |
5158 | 5159 |
5159 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), | 5160 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), |
5160 Immediate(static_cast<int8_t>(first))); | 5161 Immediate(static_cast<int8_t>(first))); |
5161 | 5162 |
5162 // If there is only one type in the interval check for equality. | 5163 // If there is only one type in the interval check for equality. |
5163 if (first == last) { | 5164 if (first == last) { |
5164 DeoptimizeIf(not_equal, instr, "wrong instance type"); | 5165 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType); |
5165 } else { | 5166 } else { |
5166 DeoptimizeIf(below, instr, "wrong instance type"); | 5167 DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType); |
5167 // Omit check for the last type. | 5168 // Omit check for the last type. |
5168 if (last != LAST_TYPE) { | 5169 if (last != LAST_TYPE) { |
5169 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), | 5170 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), |
5170 Immediate(static_cast<int8_t>(last))); | 5171 Immediate(static_cast<int8_t>(last))); |
5171 DeoptimizeIf(above, instr, "wrong instance type"); | 5172 DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType); |
5172 } | 5173 } |
5173 } | 5174 } |
5174 } else { | 5175 } else { |
5175 uint8_t mask; | 5176 uint8_t mask; |
5176 uint8_t tag; | 5177 uint8_t tag; |
5177 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5178 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
5178 | 5179 |
5179 if (base::bits::IsPowerOfTwo32(mask)) { | 5180 if (base::bits::IsPowerOfTwo32(mask)) { |
5180 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5181 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
5181 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), | 5182 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), |
5182 Immediate(mask)); | 5183 Immediate(mask)); |
5183 DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type"); | 5184 DeoptimizeIf(tag == 0 ? not_zero : zero, instr, |
| 5185 Deoptimizer::kWrongInstanceType); |
5184 } else { | 5186 } else { |
5185 __ movzxbl(kScratchRegister, | 5187 __ movzxbl(kScratchRegister, |
5186 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); | 5188 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); |
5187 __ andb(kScratchRegister, Immediate(mask)); | 5189 __ andb(kScratchRegister, Immediate(mask)); |
5188 __ cmpb(kScratchRegister, Immediate(tag)); | 5190 __ cmpb(kScratchRegister, Immediate(tag)); |
5189 DeoptimizeIf(not_equal, instr, "wrong instance type"); | 5191 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType); |
5190 } | 5192 } |
5191 } | 5193 } |
5192 } | 5194 } |
5193 | 5195 |
5194 | 5196 |
5195 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5197 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
5196 Register reg = ToRegister(instr->value()); | 5198 Register reg = ToRegister(instr->value()); |
5197 __ Cmp(reg, instr->hydrogen()->object().handle()); | 5199 __ Cmp(reg, instr->hydrogen()->object().handle()); |
5198 DeoptimizeIf(not_equal, instr, "value mismatch"); | 5200 DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch); |
5199 } | 5201 } |
5200 | 5202 |
5201 | 5203 |
5202 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5204 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
5203 { | 5205 { |
5204 PushSafepointRegistersScope scope(this); | 5206 PushSafepointRegistersScope scope(this); |
5205 __ Push(object); | 5207 __ Push(object); |
5206 __ Set(rsi, 0); | 5208 __ Set(rsi, 0); |
5207 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5209 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
5208 RecordSafepointWithRegisters( | 5210 RecordSafepointWithRegisters( |
5209 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5211 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
5210 | 5212 |
5211 __ testp(rax, Immediate(kSmiTagMask)); | 5213 __ testp(rax, Immediate(kSmiTagMask)); |
5212 } | 5214 } |
5213 DeoptimizeIf(zero, instr, "instance migration failed"); | 5215 DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed); |
5214 } | 5216 } |
5215 | 5217 |
5216 | 5218 |
5217 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5219 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
5218 class DeferredCheckMaps FINAL : public LDeferredCode { | 5220 class DeferredCheckMaps FINAL : public LDeferredCode { |
5219 public: | 5221 public: |
5220 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5222 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
5221 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5223 : LDeferredCode(codegen), instr_(instr), object_(object) { |
5222 SetExit(check_maps()); | 5224 SetExit(check_maps()); |
5223 } | 5225 } |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5257 Handle<Map> map = maps->at(i).handle(); | 5259 Handle<Map> map = maps->at(i).handle(); |
5258 __ CompareMap(reg, map); | 5260 __ CompareMap(reg, map); |
5259 __ j(equal, &success, Label::kNear); | 5261 __ j(equal, &success, Label::kNear); |
5260 } | 5262 } |
5261 | 5263 |
5262 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5264 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
5263 __ CompareMap(reg, map); | 5265 __ CompareMap(reg, map); |
5264 if (instr->hydrogen()->HasMigrationTarget()) { | 5266 if (instr->hydrogen()->HasMigrationTarget()) { |
5265 __ j(not_equal, deferred->entry()); | 5267 __ j(not_equal, deferred->entry()); |
5266 } else { | 5268 } else { |
5267 DeoptimizeIf(not_equal, instr, "wrong map"); | 5269 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); |
5268 } | 5270 } |
5269 | 5271 |
5270 __ bind(&success); | 5272 __ bind(&success); |
5271 } | 5273 } |
5272 | 5274 |
5273 | 5275 |
5274 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5276 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
5275 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5277 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); |
5276 XMMRegister xmm_scratch = double_scratch0(); | 5278 XMMRegister xmm_scratch = double_scratch0(); |
5277 Register result_reg = ToRegister(instr->result()); | 5279 Register result_reg = ToRegister(instr->result()); |
(...skipping 18 matching lines...) Expand all Loading... |
5296 __ JumpIfSmi(input_reg, &is_smi, dist); | 5298 __ JumpIfSmi(input_reg, &is_smi, dist); |
5297 | 5299 |
5298 // Check for heap number | 5300 // Check for heap number |
5299 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 5301 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
5300 factory()->heap_number_map()); | 5302 factory()->heap_number_map()); |
5301 __ j(equal, &heap_number, Label::kNear); | 5303 __ j(equal, &heap_number, Label::kNear); |
5302 | 5304 |
5303 // Check for undefined. Undefined is converted to zero for clamping | 5305 // Check for undefined. Undefined is converted to zero for clamping |
5304 // conversions. | 5306 // conversions. |
5305 __ Cmp(input_reg, factory()->undefined_value()); | 5307 __ Cmp(input_reg, factory()->undefined_value()); |
5306 DeoptimizeIf(not_equal, instr, "not a heap number/undefined"); | 5308 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); |
5307 __ xorl(input_reg, input_reg); | 5309 __ xorl(input_reg, input_reg); |
5308 __ jmp(&done, Label::kNear); | 5310 __ jmp(&done, Label::kNear); |
5309 | 5311 |
5310 // Heap number | 5312 // Heap number |
5311 __ bind(&heap_number); | 5313 __ bind(&heap_number); |
5312 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5314 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
5313 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); | 5315 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); |
5314 __ jmp(&done, Label::kNear); | 5316 __ jmp(&done, Label::kNear); |
5315 | 5317 |
5316 // smi | 5318 // smi |
(...skipping 458 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5775 DCHECK(!environment->HasBeenRegistered()); | 5777 DCHECK(!environment->HasBeenRegistered()); |
5776 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5778 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
5777 | 5779 |
5778 GenerateOsrPrologue(); | 5780 GenerateOsrPrologue(); |
5779 } | 5781 } |
5780 | 5782 |
5781 | 5783 |
5782 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5784 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
5783 DCHECK(ToRegister(instr->context()).is(rsi)); | 5785 DCHECK(ToRegister(instr->context()).is(rsi)); |
5784 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); | 5786 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
5785 DeoptimizeIf(equal, instr, "undefined"); | 5787 DeoptimizeIf(equal, instr, Deoptimizer::kUndefined); |
5786 | 5788 |
5787 Register null_value = rdi; | 5789 Register null_value = rdi; |
5788 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5790 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
5789 __ cmpp(rax, null_value); | 5791 __ cmpp(rax, null_value); |
5790 DeoptimizeIf(equal, instr, "null"); | 5792 DeoptimizeIf(equal, instr, Deoptimizer::kNull); |
5791 | 5793 |
5792 Condition cc = masm()->CheckSmi(rax); | 5794 Condition cc = masm()->CheckSmi(rax); |
5793 DeoptimizeIf(cc, instr, "Smi"); | 5795 DeoptimizeIf(cc, instr, Deoptimizer::kSmi); |
5794 | 5796 |
5795 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5797 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
5796 __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx); | 5798 __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx); |
5797 DeoptimizeIf(below_equal, instr, "wrong instance type"); | 5799 DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType); |
5798 | 5800 |
5799 Label use_cache, call_runtime; | 5801 Label use_cache, call_runtime; |
5800 __ CheckEnumCache(null_value, &call_runtime); | 5802 __ CheckEnumCache(null_value, &call_runtime); |
5801 | 5803 |
5802 __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset)); | 5804 __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset)); |
5803 __ jmp(&use_cache, Label::kNear); | 5805 __ jmp(&use_cache, Label::kNear); |
5804 | 5806 |
5805 // Get the set of properties to enumerate. | 5807 // Get the set of properties to enumerate. |
5806 __ bind(&call_runtime); | 5808 __ bind(&call_runtime); |
5807 __ Push(rax); | 5809 __ Push(rax); |
5808 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5810 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
5809 | 5811 |
5810 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), | 5812 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), |
5811 Heap::kMetaMapRootIndex); | 5813 Heap::kMetaMapRootIndex); |
5812 DeoptimizeIf(not_equal, instr, "wrong map"); | 5814 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); |
5813 __ bind(&use_cache); | 5815 __ bind(&use_cache); |
5814 } | 5816 } |
5815 | 5817 |
5816 | 5818 |
5817 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5819 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
5818 Register map = ToRegister(instr->map()); | 5820 Register map = ToRegister(instr->map()); |
5819 Register result = ToRegister(instr->result()); | 5821 Register result = ToRegister(instr->result()); |
5820 Label load_cache, done; | 5822 Label load_cache, done; |
5821 __ EnumLength(result, map); | 5823 __ EnumLength(result, map); |
5822 __ Cmp(result, Smi::FromInt(0)); | 5824 __ Cmp(result, Smi::FromInt(0)); |
5823 __ j(not_equal, &load_cache, Label::kNear); | 5825 __ j(not_equal, &load_cache, Label::kNear); |
5824 __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex); | 5826 __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex); |
5825 __ jmp(&done, Label::kNear); | 5827 __ jmp(&done, Label::kNear); |
5826 __ bind(&load_cache); | 5828 __ bind(&load_cache); |
5827 __ LoadInstanceDescriptors(map, result); | 5829 __ LoadInstanceDescriptors(map, result); |
5828 __ movp(result, | 5830 __ movp(result, |
5829 FieldOperand(result, DescriptorArray::kEnumCacheOffset)); | 5831 FieldOperand(result, DescriptorArray::kEnumCacheOffset)); |
5830 __ movp(result, | 5832 __ movp(result, |
5831 FieldOperand(result, FixedArray::SizeFor(instr->idx()))); | 5833 FieldOperand(result, FixedArray::SizeFor(instr->idx()))); |
5832 __ bind(&done); | 5834 __ bind(&done); |
5833 Condition cc = masm()->CheckSmi(result); | 5835 Condition cc = masm()->CheckSmi(result); |
5834 DeoptimizeIf(cc, instr, "no cache"); | 5836 DeoptimizeIf(cc, instr, Deoptimizer::kNoCache); |
5835 } | 5837 } |
5836 | 5838 |
5837 | 5839 |
5838 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5840 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
5839 Register object = ToRegister(instr->value()); | 5841 Register object = ToRegister(instr->value()); |
5840 __ cmpp(ToRegister(instr->map()), | 5842 __ cmpp(ToRegister(instr->map()), |
5841 FieldOperand(object, HeapObject::kMapOffset)); | 5843 FieldOperand(object, HeapObject::kMapOffset)); |
5842 DeoptimizeIf(not_equal, instr, "wrong map"); | 5844 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); |
5843 } | 5845 } |
5844 | 5846 |
5845 | 5847 |
5846 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5848 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
5847 Register object, | 5849 Register object, |
5848 Register index) { | 5850 Register index) { |
5849 PushSafepointRegistersScope scope(this); | 5851 PushSafepointRegistersScope scope(this); |
5850 __ Push(object); | 5852 __ Push(object); |
5851 __ Push(index); | 5853 __ Push(index); |
5852 __ xorp(rsi, rsi); | 5854 __ xorp(rsi, rsi); |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5928 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5930 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
5929 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5931 RecordSafepoint(Safepoint::kNoLazyDeopt); |
5930 } | 5932 } |
5931 | 5933 |
5932 | 5934 |
5933 #undef __ | 5935 #undef __ |
5934 | 5936 |
5935 } } // namespace v8::internal | 5937 } } // namespace v8::internal |
5936 | 5938 |
5937 #endif // V8_TARGET_ARCH_X64 | 5939 #endif // V8_TARGET_ARCH_X64 |
OLD | NEW |