| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/code-factory.h" | 7 #include "src/code-factory.h" |
| 8 #include "src/code-stubs.h" | 8 #include "src/code-stubs.h" |
| 9 #include "src/hydrogen-osr.h" | 9 #include "src/hydrogen-osr.h" |
| 10 #include "src/ic/ic.h" | 10 #include "src/ic/ic.h" |
| (...skipping 748 matching lines...) |
| 759 environment->Register(deoptimization_index, | 759 environment->Register(deoptimization_index, |
| 760 translation.index(), | 760 translation.index(), |
| 761 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 761 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 762 deoptimizations_.Add(environment, zone()); | 762 deoptimizations_.Add(environment, zone()); |
| 763 } | 763 } |
| 764 } | 764 } |
| 765 | 765 |
| 766 | 766 |
| 767 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 767 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 768 Deoptimizer::BailoutType bailout_type, | 768 Deoptimizer::BailoutType bailout_type, |
| 769 Register src1, const Operand& src2, | 769 const char* detail, Register src1, |
| 770 const char* detail) { | 770 const Operand& src2) { |
| 771 LEnvironment* environment = instr->environment(); | 771 LEnvironment* environment = instr->environment(); |
| 772 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 772 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 773 DCHECK(environment->HasBeenRegistered()); | 773 DCHECK(environment->HasBeenRegistered()); |
| 774 int id = environment->deoptimization_index(); | 774 int id = environment->deoptimization_index(); |
| 775 DCHECK(info()->IsOptimizing() || info()->IsStub()); | 775 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
| 776 Address entry = | 776 Address entry = |
| 777 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 777 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 778 if (entry == NULL) { | 778 if (entry == NULL) { |
| 779 Abort(kBailoutWasNotPrepared); | 779 Abort(kBailoutWasNotPrepared); |
| 780 return; | 780 return; |
| (...skipping 44 matching lines...) |
| 825 if (jump_table_.is_empty() || | 825 if (jump_table_.is_empty() || |
| 826 !table_entry.IsEquivalentTo(jump_table_.last())) { | 826 !table_entry.IsEquivalentTo(jump_table_.last())) { |
| 827 jump_table_.Add(table_entry, zone()); | 827 jump_table_.Add(table_entry, zone()); |
| 828 } | 828 } |
| 829 __ Branch(&jump_table_.last().label, condition, src1, src2); | 829 __ Branch(&jump_table_.last().label, condition, src1, src2); |
| 830 } | 830 } |
| 831 } | 831 } |
| 832 | 832 |
| 833 | 833 |
| 834 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 834 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 835 Register src1, const Operand& src2, | 835 const char* detail, Register src1, |
| 836 const char* detail) { | 836 const Operand& src2) { |
| 837 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 837 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| 838 ? Deoptimizer::LAZY | 838 ? Deoptimizer::LAZY |
| 839 : Deoptimizer::EAGER; | 839 : Deoptimizer::EAGER; |
| 840 DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail); | 840 DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2); |
| 841 } | 841 } |
| 842 | 842 |
| 843 | 843 |
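The reordering above moves the bailout `detail` string ahead of the optional comparison operands, so a call site can name its deopt reason without also spelling out `src1`/`src2`. A sketch of the matching declarations follows; the header (`lithium-codegen-mips64.h`) is not part of this hunk, so the default arguments shown are an assumption inferred from call sites such as `DeoptimizeIf(al, instr, "division by zero")`.

```cpp
// Assumed declarations (sketch, not shown in this diff): `detail` sits
// between the bailout type and the defaulted operand pair, so two- and
// three-argument calls keep working while gaining a reason string.
void DeoptimizeIf(Condition condition, LInstruction* instr,
                  Deoptimizer::BailoutType bailout_type,
                  const char* detail = NULL, Register src1 = zero_reg,
                  const Operand& src2 = Operand(zero_reg));
void DeoptimizeIf(Condition condition, LInstruction* instr,
                  const char* detail = NULL, Register src1 = zero_reg,
                  const Operand& src2 = Operand(zero_reg));
```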
| 844 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 844 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 845 int length = deoptimizations_.length(); | 845 int length = deoptimizations_.length(); |
| 846 if (length == 0) return; | 846 if (length == 0) return; |
| 847 Handle<DeoptimizationInputData> data = | 847 Handle<DeoptimizationInputData> data = |
| 848 DeoptimizationInputData::New(isolate(), length, TENURED); | 848 DeoptimizationInputData::New(isolate(), length, TENURED); |
| 849 | 849 |
| 850 Handle<ByteArray> translations = | 850 Handle<ByteArray> translations = |
| (...skipping 209 matching lines...) |
| 1060 HMod* hmod = instr->hydrogen(); | 1060 HMod* hmod = instr->hydrogen(); |
| 1061 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1061 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1062 Label dividend_is_not_negative, done; | 1062 Label dividend_is_not_negative, done; |
| 1063 | 1063 |
| 1064 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1064 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 1065 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); | 1065 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); |
| 1066 // Note: The code below even works when right contains kMinInt. | 1066 // Note: The code below even works when right contains kMinInt. |
| 1067 __ dsubu(dividend, zero_reg, dividend); | 1067 __ dsubu(dividend, zero_reg, dividend); |
| 1068 __ And(dividend, dividend, Operand(mask)); | 1068 __ And(dividend, dividend, Operand(mask)); |
| 1069 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1069 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1070 DeoptimizeIf(eq, instr, dividend, Operand(zero_reg)); | 1070 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1071 } | 1071 } |
| 1072 __ Branch(USE_DELAY_SLOT, &done); | 1072 __ Branch(USE_DELAY_SLOT, &done); |
| 1073 __ dsubu(dividend, zero_reg, dividend); | 1073 __ dsubu(dividend, zero_reg, dividend); |
| 1074 } | 1074 } |
| 1075 | 1075 |
| 1076 __ bind(&dividend_is_not_negative); | 1076 __ bind(&dividend_is_not_negative); |
| 1077 __ And(dividend, dividend, Operand(mask)); | 1077 __ And(dividend, dividend, Operand(mask)); |
| 1078 __ bind(&done); | 1078 __ bind(&done); |
| 1079 } | 1079 } |
| 1080 | 1080 |
| 1081 | 1081 |
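DoModByPowerOf2I above reduces `x % (±2^k)` to a bitwise AND with `|divisor| - 1`; negative dividends are negated, masked, and negated back so the remainder keeps the dividend's sign, and the "minus zero" deopt fires when a negative dividend leaves a zero remainder (JS would need -0 there). A standalone sketch of the same arithmetic, with `ModByPowerOf2` as a hypothetical helper:

```cpp
#include <cstdint>

// Mirrors DoModByPowerOf2I (sketch): divisor is +/-2^k and the result takes
// the sign of the dividend, matching truncating integer modulo.
int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);  // |divisor| - 1
  if (dividend >= 0) return dividend & mask;
  // Negate, mask, negate back. Done in unsigned arithmetic so a kMinInt
  // dividend (whose negation overflows in signed arithmetic) still works.
  uint32_t negated = 0u - static_cast<uint32_t>(dividend);
  return -static_cast<int32_t>(negated & static_cast<uint32_t>(mask));
}
// ModByPowerOf2(-7, 4) == -3 and ModByPowerOf2(7, -4) == 3, the same values
// as C++'s -7 % 4 and 7 % -4.
```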
| 1082 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1082 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 1083 Register dividend = ToRegister(instr->dividend()); | 1083 Register dividend = ToRegister(instr->dividend()); |
| 1084 int32_t divisor = instr->divisor(); | 1084 int32_t divisor = instr->divisor(); |
| 1085 Register result = ToRegister(instr->result()); | 1085 Register result = ToRegister(instr->result()); |
| 1086 DCHECK(!dividend.is(result)); | 1086 DCHECK(!dividend.is(result)); |
| 1087 | 1087 |
| 1088 if (divisor == 0) { | 1088 if (divisor == 0) { |
| 1089 DeoptimizeIf(al, instr); | 1089 DeoptimizeIf(al, instr, "division by zero"); |
| 1090 return; | 1090 return; |
| 1091 } | 1091 } |
| 1092 | 1092 |
| 1093 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1093 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1094 __ Dmul(result, result, Operand(Abs(divisor))); | 1094 __ Dmul(result, result, Operand(Abs(divisor))); |
| 1095 __ Dsubu(result, dividend, Operand(result)); | 1095 __ Dsubu(result, dividend, Operand(result)); |
| 1096 | 1096 |
| 1097 // Check for negative zero. | 1097 // Check for negative zero. |
| 1098 HMod* hmod = instr->hydrogen(); | 1098 HMod* hmod = instr->hydrogen(); |
| 1099 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1099 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1100 Label remainder_not_zero; | 1100 Label remainder_not_zero; |
| 1101 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); | 1101 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); |
| 1102 DeoptimizeIf(lt, instr, dividend, Operand(zero_reg)); | 1102 DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1103 __ bind(&remainder_not_zero); | 1103 __ bind(&remainder_not_zero); |
| 1104 } | 1104 } |
| 1105 } | 1105 } |
| 1106 | 1106 |
| 1107 | 1107 |
| 1108 void LCodeGen::DoModI(LModI* instr) { | 1108 void LCodeGen::DoModI(LModI* instr) { |
| 1109 HMod* hmod = instr->hydrogen(); | 1109 HMod* hmod = instr->hydrogen(); |
| 1110 const Register left_reg = ToRegister(instr->left()); | 1110 const Register left_reg = ToRegister(instr->left()); |
| 1111 const Register right_reg = ToRegister(instr->right()); | 1111 const Register right_reg = ToRegister(instr->right()); |
| 1112 const Register result_reg = ToRegister(instr->result()); | 1112 const Register result_reg = ToRegister(instr->result()); |
| 1113 | 1113 |
| 1114 // div runs in the background while we check for special cases. | 1114 // div runs in the background while we check for special cases. |
| 1115 __ Dmod(result_reg, left_reg, right_reg); | 1115 __ Dmod(result_reg, left_reg, right_reg); |
| 1116 | 1116 |
| 1117 Label done; | 1117 Label done; |
| 1118 // Check for x % 0, we have to deopt in this case because we can't return a | 1118 // Check for x % 0, we have to deopt in this case because we can't return a |
| 1119 // NaN. | 1119 // NaN. |
| 1120 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1120 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1121 DeoptimizeIf(eq, instr, right_reg, Operand(zero_reg)); | 1121 DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg)); |
| 1122 } | 1122 } |
| 1123 | 1123 |
| 1124 // Check for kMinInt % -1, div will return kMinInt, which is not what we | 1124 // Check for kMinInt % -1, div will return kMinInt, which is not what we |
| 1125 // want. We have to deopt if we care about -0, because we can't return that. | 1125 // want. We have to deopt if we care about -0, because we can't return that. |
| 1126 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1126 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 1127 Label no_overflow_possible; | 1127 Label no_overflow_possible; |
| 1128 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); | 1128 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); |
| 1129 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1129 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1130 DeoptimizeIf(eq, instr, right_reg, Operand(-1)); | 1130 DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1)); |
| 1131 } else { | 1131 } else { |
| 1132 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); | 1132 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); |
| 1133 __ Branch(USE_DELAY_SLOT, &done); | 1133 __ Branch(USE_DELAY_SLOT, &done); |
| 1134 __ mov(result_reg, zero_reg); | 1134 __ mov(result_reg, zero_reg); |
| 1135 } | 1135 } |
| 1136 __ bind(&no_overflow_possible); | 1136 __ bind(&no_overflow_possible); |
| 1137 } | 1137 } |
| 1138 | 1138 |
| 1139 // If we care about -0, test if the dividend is <0 and the result is 0. | 1139 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1140 __ Branch(&done, ge, left_reg, Operand(zero_reg)); | 1140 __ Branch(&done, ge, left_reg, Operand(zero_reg)); |
| 1141 | 1141 |
| 1142 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1142 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1143 DeoptimizeIf(eq, instr, result_reg, Operand(zero_reg)); | 1143 DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg)); |
| 1144 } | 1144 } |
| 1145 __ bind(&done); | 1145 __ bind(&done); |
| 1146 } | 1146 } |
| 1147 | 1147 |
| 1148 | 1148 |
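DoModI above issues `Dmod` up front and folds the JS edge cases into deopts: `x % 0` would be NaN, `kMinInt % -1` would be -0 for code that cares about -0 (otherwise returning 0 is fine), and any negative dividend with a zero remainder would be -0. A compact restatement of that decision logic (sketch; `Deopt` is a stand-in for the generated bailout and `bailout_on_minus_zero` corresponds to `HValue::kBailoutOnMinusZero`):

```cpp
#include <climits>
#include <cstdint>
#include <cstdlib>

[[noreturn]] void Deopt(const char* detail) { std::abort(); }  // placeholder bailout

int32_t ModI(int32_t left, int32_t right, bool bailout_on_minus_zero) {
  if (right == 0) Deopt("division by zero");          // x % 0 is NaN in JS
  if (left == INT_MIN && right == -1) {
    // The remainder is 0, but a negative dividend means JS wants -0 here,
    // which an int32 cannot represent.
    if (bailout_on_minus_zero) Deopt("minus zero");
    return 0;
  }
  int32_t result = left % right;                      // Dmod in the generated code
  if (bailout_on_minus_zero && left < 0 && result == 0) Deopt("minus zero");
  return result;
}
```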
| 1149 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1149 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1150 Register dividend = ToRegister(instr->dividend()); | 1150 Register dividend = ToRegister(instr->dividend()); |
| 1151 int32_t divisor = instr->divisor(); | 1151 int32_t divisor = instr->divisor(); |
| 1152 Register result = ToRegister(instr->result()); | 1152 Register result = ToRegister(instr->result()); |
| 1153 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1153 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1154 DCHECK(!result.is(dividend)); | 1154 DCHECK(!result.is(dividend)); |
| 1155 | 1155 |
| 1156 // Check for (0 / -x) that will produce negative zero. | 1156 // Check for (0 / -x) that will produce negative zero. |
| 1157 HDiv* hdiv = instr->hydrogen(); | 1157 HDiv* hdiv = instr->hydrogen(); |
| 1158 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1158 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1159 DeoptimizeIf(eq, instr, dividend, Operand(zero_reg)); | 1159 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1160 } | 1160 } |
| 1161 // Check for (kMinInt / -1). | 1161 // Check for (kMinInt / -1). |
| 1162 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1162 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1163 DeoptimizeIf(eq, instr, dividend, Operand(kMinInt)); | 1163 DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt)); |
| 1164 } | 1164 } |
| 1165 // Deoptimize if remainder will not be 0. | 1165 // Deoptimize if remainder will not be 0. |
| 1166 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1166 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1167 divisor != 1 && divisor != -1) { | 1167 divisor != 1 && divisor != -1) { |
| 1168 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1168 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1169 __ And(at, dividend, Operand(mask)); | 1169 __ And(at, dividend, Operand(mask)); |
| 1170 DeoptimizeIf(ne, instr, at, Operand(zero_reg)); | 1170 DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg)); |
| 1171 } | 1171 } |
| 1172 | 1172 |
| 1173 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1173 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 1174 __ Dsubu(result, zero_reg, dividend); | 1174 __ Dsubu(result, zero_reg, dividend); |
| 1175 return; | 1175 return; |
| 1176 } | 1176 } |
| 1177 uint16_t shift = WhichPowerOf2Abs(divisor); | 1177 uint16_t shift = WhichPowerOf2Abs(divisor); |
| 1178 if (shift == 0) { | 1178 if (shift == 0) { |
| 1179 __ Move(result, dividend); | 1179 __ Move(result, dividend); |
| 1180 } else if (shift == 1) { | 1180 } else if (shift == 1) { |
| 1181 __ dsrl32(result, dividend, 31); | 1181 __ dsrl32(result, dividend, 31); |
| 1182 __ Daddu(result, dividend, Operand(result)); | 1182 __ Daddu(result, dividend, Operand(result)); |
| 1183 } else { | 1183 } else { |
| 1184 __ dsra32(result, dividend, 31); | 1184 __ dsra32(result, dividend, 31); |
| 1185 __ dsrl32(result, result, 32 - shift); | 1185 __ dsrl32(result, result, 32 - shift); |
| 1186 __ Daddu(result, dividend, Operand(result)); | 1186 __ Daddu(result, dividend, Operand(result)); |
| 1187 } | 1187 } |
| 1188 if (shift > 0) __ dsra(result, result, shift); | 1188 if (shift > 0) __ dsra(result, result, shift); |
| 1189 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1189 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
| 1190 } | 1190 } |
| 1191 | 1191 |
| 1192 | 1192 |
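DoDivByPowerOf2I divides by ±2^k with shifts: an arithmetic shift alone rounds toward minus infinity, so the dsrl32/dsra32 sequence first adds `2^k - 1` to negative dividends to get truncation toward zero, and the earlier mask test deopts with "lost precision" whenever the remainder would be non-zero and not all uses truncate. The same rounding fix in plain C++ (hypothetical helper, assuming an arithmetic right shift of negative values):

```cpp
#include <cstdint>

// Truncating division by +/-2^k via shift-and-bias, mirroring the shift
// paths of DoDivByPowerOf2I (sketch). The kMinInt / -1 case deopts before
// reaching this point in the generated code.
int32_t DivByPowerOf2(int32_t dividend, int32_t divisor, uint16_t shift) {
  uint32_t mask = (1u << shift) - 1;                       // 2^shift - 1
  int32_t bias = dividend < 0 ? static_cast<int32_t>(mask) : 0;
  int32_t result = (dividend + bias) >> shift;
  return divisor < 0 ? -result : result;
}
// DivByPowerOf2(-7, 4, 2) == -1 (not the floored -2), matching -7 / 4 in C++.
```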
| 1193 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1193 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1194 Register dividend = ToRegister(instr->dividend()); | 1194 Register dividend = ToRegister(instr->dividend()); |
| 1195 int32_t divisor = instr->divisor(); | 1195 int32_t divisor = instr->divisor(); |
| 1196 Register result = ToRegister(instr->result()); | 1196 Register result = ToRegister(instr->result()); |
| 1197 DCHECK(!dividend.is(result)); | 1197 DCHECK(!dividend.is(result)); |
| 1198 | 1198 |
| 1199 if (divisor == 0) { | 1199 if (divisor == 0) { |
| 1200 DeoptimizeIf(al, instr); | 1200 DeoptimizeIf(al, instr, "division by zero"); |
| 1201 return; | 1201 return; |
| 1202 } | 1202 } |
| 1203 | 1203 |
| 1204 // Check for (0 / -x) that will produce negative zero. | 1204 // Check for (0 / -x) that will produce negative zero. |
| 1205 HDiv* hdiv = instr->hydrogen(); | 1205 HDiv* hdiv = instr->hydrogen(); |
| 1206 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1206 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1207 DeoptimizeIf(eq, instr, dividend, Operand(zero_reg)); | 1207 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1208 } | 1208 } |
| 1209 | 1209 |
| 1210 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1210 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1211 if (divisor < 0) __ Subu(result, zero_reg, result); | 1211 if (divisor < 0) __ Subu(result, zero_reg, result); |
| 1212 | 1212 |
| 1213 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1213 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1214 __ Dmul(scratch0(), result, Operand(divisor)); | 1214 __ Dmul(scratch0(), result, Operand(divisor)); |
| 1215 __ Dsubu(scratch0(), scratch0(), dividend); | 1215 __ Dsubu(scratch0(), scratch0(), dividend); |
| 1216 DeoptimizeIf(ne, instr, scratch0(), Operand(zero_reg)); | 1216 DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg)); |
| 1217 } | 1217 } |
| 1218 } | 1218 } |
| 1219 | 1219 |
| 1220 | 1220 |
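DoDivByConstI relies on `TruncatingDiv` (a multiply by a precomputed magic constant) and, when not every use truncates, verifies exactness by multiplying the quotient back and deopting with "lost precision" on a non-zero remainder. A sketch of that check with an ordinary division standing in for the magic-constant multiply; it assumes the divisor is a non-zero constant whose absolute value is not a power of two, since those cases take the other code paths:

```cpp
#include <cstdint>
#include <cstdlib>

[[noreturn]] void Deopt(const char* detail) { std::abort(); }  // placeholder bailout

// Mirrors the non-truncating path of DoDivByConstI (sketch).
int32_t DivByConstChecked(int32_t dividend, int32_t divisor) {
  int32_t abs_divisor = divisor < 0 ? -divisor : divisor;
  int32_t result = dividend / abs_divisor;   // TruncatingDiv(result, dividend, |divisor|)
  if (divisor < 0) result = -result;         // Subu(result, zero_reg, result)
  if (result * divisor != dividend) Deopt("lost precision");
  return result;
}
// DivByConstChecked(9, 3) == 3; DivByConstChecked(10, 3) deoptimizes because
// 10 / 3 is not an integer.
```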
| 1221 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1221 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1222 void LCodeGen::DoDivI(LDivI* instr) { | 1222 void LCodeGen::DoDivI(LDivI* instr) { |
| 1223 HBinaryOperation* hdiv = instr->hydrogen(); | 1223 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1224 Register dividend = ToRegister(instr->dividend()); | 1224 Register dividend = ToRegister(instr->dividend()); |
| 1225 Register divisor = ToRegister(instr->divisor()); | 1225 Register divisor = ToRegister(instr->divisor()); |
| 1226 const Register result = ToRegister(instr->result()); | 1226 const Register result = ToRegister(instr->result()); |
| 1227 | 1227 |
| 1228 // On MIPS div is asynchronous - it will run in the background while we | 1228 // On MIPS div is asynchronous - it will run in the background while we |
| 1229 // check for special cases. | 1229 // check for special cases. |
| 1230 __ Ddiv(result, dividend, divisor); | 1230 __ Ddiv(result, dividend, divisor); |
| 1231 | 1231 |
| 1232 // Check for x / 0. | 1232 // Check for x / 0. |
| 1233 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1233 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1234 DeoptimizeIf(eq, instr, divisor, Operand(zero_reg)); | 1234 DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg)); |
| 1235 } | 1235 } |
| 1236 | 1236 |
| 1237 // Check for (0 / -x) that will produce negative zero. | 1237 // Check for (0 / -x) that will produce negative zero. |
| 1238 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1238 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1239 Label left_not_zero; | 1239 Label left_not_zero; |
| 1240 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); | 1240 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); |
| 1241 DeoptimizeIf(lt, instr, divisor, Operand(zero_reg)); | 1241 DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg)); |
| 1242 __ bind(&left_not_zero); | 1242 __ bind(&left_not_zero); |
| 1243 } | 1243 } |
| 1244 | 1244 |
| 1245 // Check for (kMinInt / -1). | 1245 // Check for (kMinInt / -1). |
| 1246 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1246 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1247 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1247 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1248 Label left_not_min_int; | 1248 Label left_not_min_int; |
| 1249 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); | 1249 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); |
| 1250 DeoptimizeIf(eq, instr, divisor, Operand(-1)); | 1250 DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1)); |
| 1251 __ bind(&left_not_min_int); | 1251 __ bind(&left_not_min_int); |
| 1252 } | 1252 } |
| 1253 | 1253 |
| 1254 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1254 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1255 // Calculate remainder. | 1255 // Calculate remainder. |
| 1256 Register remainder = ToRegister(instr->temp()); | 1256 Register remainder = ToRegister(instr->temp()); |
| 1257 if (kArchVariant != kMips64r6) { | 1257 if (kArchVariant != kMips64r6) { |
| 1258 __ mfhi(remainder); | 1258 __ mfhi(remainder); |
| 1259 } else { | 1259 } else { |
| 1260 __ dmod(remainder, dividend, divisor); | 1260 __ dmod(remainder, dividend, divisor); |
| 1261 } | 1261 } |
| 1262 DeoptimizeIf(ne, instr, remainder, Operand(zero_reg)); | 1262 DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg)); |
| 1263 } | 1263 } |
| 1264 } | 1264 } |
| 1265 | 1265 |
| 1266 | 1266 |
| 1267 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1267 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
| 1268 DoubleRegister addend = ToDoubleRegister(instr->addend()); | 1268 DoubleRegister addend = ToDoubleRegister(instr->addend()); |
| 1269 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1269 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); |
| 1270 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1270 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
| 1271 | 1271 |
| 1272 // This is computed in-place. | 1272 // This is computed in-place. |
| (...skipping 24 matching lines...) |
| 1297 return; | 1297 return; |
| 1298 } | 1298 } |
| 1299 | 1299 |
| 1300 // If the divisor is negative, we have to negate and handle edge cases. | 1300 // If the divisor is negative, we have to negate and handle edge cases. |
| 1301 // Dividend can be the same register as result so save the value of it | 1301 // Dividend can be the same register as result so save the value of it |
| 1302 // for checking overflow. | 1302 // for checking overflow. |
| 1303 __ Move(scratch, dividend); | 1303 __ Move(scratch, dividend); |
| 1304 | 1304 |
| 1305 __ Dsubu(result, zero_reg, dividend); | 1305 __ Dsubu(result, zero_reg, dividend); |
| 1306 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1306 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1307 DeoptimizeIf(eq, instr, result, Operand(zero_reg)); | 1307 DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg)); |
| 1308 } | 1308 } |
| 1309 | 1309 |
| 1310 __ Xor(scratch, scratch, result); | 1310 __ Xor(scratch, scratch, result); |
| 1311 // Dividing by -1 is basically negation, unless we overflow. | 1311 // Dividing by -1 is basically negation, unless we overflow. |
| 1312 if (divisor == -1) { | 1312 if (divisor == -1) { |
| 1313 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1313 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1314 DeoptimizeIf(gt, instr, result, Operand(kMaxInt)); | 1314 DeoptimizeIf(gt, instr, "overflow", result, Operand(kMaxInt)); |
| 1315 } | 1315 } |
| 1316 return; | 1316 return; |
| 1317 } | 1317 } |
| 1318 | 1318 |
| 1319 // If the negation could not overflow, simply shifting is OK. | 1319 // If the negation could not overflow, simply shifting is OK. |
| 1320 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1320 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1321 __ dsra(result, result, shift); | 1321 __ dsra(result, result, shift); |
| 1322 return; | 1322 return; |
| 1323 } | 1323 } |
| 1324 | 1324 |
| 1325 Label no_overflow, done; | 1325 Label no_overflow, done; |
| 1326 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); | 1326 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); |
| 1327 __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE); | 1327 __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE); |
| 1328 __ Branch(&done); | 1328 __ Branch(&done); |
| 1329 __ bind(&no_overflow); | 1329 __ bind(&no_overflow); |
| 1330 __ dsra(result, result, shift); | 1330 __ dsra(result, result, shift); |
| 1331 __ bind(&done); | 1331 __ bind(&done); |
| 1332 } | 1332 } |
| 1333 | 1333 |
| 1334 | 1334 |
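DoFlooringDivByPowerOf2I implements `Math.floor(x / ±2^k)`: for a positive divisor an arithmetic shift already rounds toward minus infinity, while a negative divisor negates the dividend first, deopting on "minus zero" (a zero dividend) or "overflow" (only possible for divisor == -1), and loading the precomputed constant `kMinInt / divisor` when negating kMinInt would overflow. A sketch of the arithmetic, assuming divisor != -1 and an arithmetic right shift of negative values:

```cpp
#include <climits>
#include <cstdint>

// Flooring division by +/-2^k, mirroring DoFlooringDivByPowerOf2I (sketch).
// divisor == -1 is excluded: the generated code handles it as a checked
// negation and deopts on overflow instead of shifting.
int32_t FlooringDivByPowerOf2(int32_t dividend, int32_t divisor, uint16_t shift) {
  if (divisor > 0) return dividend >> shift;           // arithmetic shift floors
  if (dividend == INT_MIN) return INT_MIN / divisor;   // negation would overflow
  return (-dividend) >> shift;                         // floor(x / -2^k) == (-x) >> k
}
// FlooringDivByPowerOf2(-7, 4, 2) == -2 and FlooringDivByPowerOf2(7, -4, 2) == -2,
// both Math.floor(-1.75).
```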
| 1335 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1335 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1336 Register dividend = ToRegister(instr->dividend()); | 1336 Register dividend = ToRegister(instr->dividend()); |
| 1337 int32_t divisor = instr->divisor(); | 1337 int32_t divisor = instr->divisor(); |
| 1338 Register result = ToRegister(instr->result()); | 1338 Register result = ToRegister(instr->result()); |
| 1339 DCHECK(!dividend.is(result)); | 1339 DCHECK(!dividend.is(result)); |
| 1340 | 1340 |
| 1341 if (divisor == 0) { | 1341 if (divisor == 0) { |
| 1342 DeoptimizeIf(al, instr); | 1342 DeoptimizeIf(al, instr, "division by zero"); |
| 1343 return; | 1343 return; |
| 1344 } | 1344 } |
| 1345 | 1345 |
| 1346 // Check for (0 / -x) that will produce negative zero. | 1346 // Check for (0 / -x) that will produce negative zero. |
| 1347 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1347 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1348 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1348 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1349 DeoptimizeIf(eq, instr, dividend, Operand(zero_reg)); | 1349 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1350 } | 1350 } |
| 1351 | 1351 |
| 1352 // Easy case: We need no dynamic check for the dividend and the flooring | 1352 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1353 // division is the same as the truncating division. | 1353 // division is the same as the truncating division. |
| 1354 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1354 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1355 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1355 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1356 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1356 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1357 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1357 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
| 1358 return; | 1358 return; |
| 1359 } | 1359 } |
| (...skipping 23 matching lines...) |
| 1383 Register dividend = ToRegister(instr->dividend()); | 1383 Register dividend = ToRegister(instr->dividend()); |
| 1384 Register divisor = ToRegister(instr->divisor()); | 1384 Register divisor = ToRegister(instr->divisor()); |
| 1385 const Register result = ToRegister(instr->result()); | 1385 const Register result = ToRegister(instr->result()); |
| 1386 | 1386 |
| 1387 // On MIPS div is asynchronous - it will run in the background while we | 1387 // On MIPS div is asynchronous - it will run in the background while we |
| 1388 // check for special cases. | 1388 // check for special cases. |
| 1389 __ Ddiv(result, dividend, divisor); | 1389 __ Ddiv(result, dividend, divisor); |
| 1390 | 1390 |
| 1391 // Check for x / 0. | 1391 // Check for x / 0. |
| 1392 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1392 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1393 DeoptimizeIf(eq, instr, divisor, Operand(zero_reg)); | 1393 DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg)); |
| 1394 } | 1394 } |
| 1395 | 1395 |
| 1396 // Check for (0 / -x) that will produce negative zero. | 1396 // Check for (0 / -x) that will produce negative zero. |
| 1397 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1397 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1398 Label left_not_zero; | 1398 Label left_not_zero; |
| 1399 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); | 1399 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); |
| 1400 DeoptimizeIf(lt, instr, divisor, Operand(zero_reg)); | 1400 DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg)); |
| 1401 __ bind(&left_not_zero); | 1401 __ bind(&left_not_zero); |
| 1402 } | 1402 } |
| 1403 | 1403 |
| 1404 // Check for (kMinInt / -1). | 1404 // Check for (kMinInt / -1). |
| 1405 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1405 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1406 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1406 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1407 Label left_not_min_int; | 1407 Label left_not_min_int; |
| 1408 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); | 1408 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); |
| 1409 DeoptimizeIf(eq, instr, divisor, Operand(-1)); | 1409 DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1)); |
| 1410 __ bind(&left_not_min_int); | 1410 __ bind(&left_not_min_int); |
| 1411 } | 1411 } |
| 1412 | 1412 |
| 1413 // We performed a truncating division. Correct the result if necessary. | 1413 // We performed a truncating division. Correct the result if necessary. |
| 1414 Label done; | 1414 Label done; |
| 1415 Register remainder = scratch0(); | 1415 Register remainder = scratch0(); |
| 1416 if (kArchVariant != kMips64r6) { | 1416 if (kArchVariant != kMips64r6) { |
| 1417 __ mfhi(remainder); | 1417 __ mfhi(remainder); |
| 1418 } else { | 1418 } else { |
| 1419 __ dmod(remainder, dividend, divisor); | 1419 __ dmod(remainder, dividend, divisor); |
| (...skipping 16 matching lines...) |
| 1436 bool bailout_on_minus_zero = | 1436 bool bailout_on_minus_zero = |
| 1437 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1437 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 1438 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1438 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1439 | 1439 |
| 1440 if (right_op->IsConstantOperand()) { | 1440 if (right_op->IsConstantOperand()) { |
| 1441 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1441 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 1442 | 1442 |
| 1443 if (bailout_on_minus_zero && (constant < 0)) { | 1443 if (bailout_on_minus_zero && (constant < 0)) { |
| 1444 // The case of a null constant will be handled separately. | 1444 // The case of a null constant will be handled separately. |
| 1445 // If constant is negative and left is null, the result should be -0. | 1445 // If constant is negative and left is null, the result should be -0. |
| 1446 DeoptimizeIf(eq, instr, left, Operand(zero_reg)); | 1446 DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg)); |
| 1447 } | 1447 } |
| 1448 | 1448 |
| 1449 switch (constant) { | 1449 switch (constant) { |
| 1450 case -1: | 1450 case -1: |
| 1451 if (overflow) { | 1451 if (overflow) { |
| 1452 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); | 1452 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); |
| 1453 DeoptimizeIf(gt, instr, scratch, Operand(kMaxInt)); | 1453 DeoptimizeIf(gt, instr, "overflow", scratch, Operand(kMaxInt)); |
| 1454 } else { | 1454 } else { |
| 1455 __ Dsubu(result, zero_reg, left); | 1455 __ Dsubu(result, zero_reg, left); |
| 1456 } | 1456 } |
| 1457 break; | 1457 break; |
| 1458 case 0: | 1458 case 0: |
| 1459 if (bailout_on_minus_zero) { | 1459 if (bailout_on_minus_zero) { |
| 1460 // If left is strictly negative and the constant is null, the | 1460 // If left is strictly negative and the constant is null, the |
| 1461 // result is -0. Deoptimize if required, otherwise return 0. | 1461 // result is -0. Deoptimize if required, otherwise return 0. |
| 1462 DeoptimizeIf(lt, instr, left, Operand(zero_reg)); | 1462 DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg)); |
| 1463 } | 1463 } |
| 1464 __ mov(result, zero_reg); | 1464 __ mov(result, zero_reg); |
| 1465 break; | 1465 break; |
| 1466 case 1: | 1466 case 1: |
| 1467 // Nothing to do. | 1467 // Nothing to do. |
| 1468 __ Move(result, left); | 1468 __ Move(result, left); |
| 1469 break; | 1469 break; |
| 1470 default: | 1470 default: |
| 1471 // Multiplying by powers of two and powers of two plus or minus | 1471 // Multiplying by powers of two and powers of two plus or minus |
| 1472 // one can be done faster with shifted operands. | 1472 // one can be done faster with shifted operands. |
| (...skipping 34 matching lines...) |
| 1507 if (instr->hydrogen()->representation().IsSmi()) { | 1507 if (instr->hydrogen()->representation().IsSmi()) { |
| 1508 __ Dmulh(result, left, right); | 1508 __ Dmulh(result, left, right); |
| 1509 } else { | 1509 } else { |
| 1510 __ Dmul(result, left, right); | 1510 __ Dmul(result, left, right); |
| 1511 } | 1511 } |
| 1512 __ dsra32(scratch, result, 0); | 1512 __ dsra32(scratch, result, 0); |
| 1513 __ sra(at, result, 31); | 1513 __ sra(at, result, 31); |
| 1514 if (instr->hydrogen()->representation().IsSmi()) { | 1514 if (instr->hydrogen()->representation().IsSmi()) { |
| 1515 __ SmiTag(result); | 1515 __ SmiTag(result); |
| 1516 } | 1516 } |
| 1517 DeoptimizeIf(ne, instr, scratch, Operand(at)); | 1517 DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at)); |
| 1518 } else { | 1518 } else { |
| 1519 if (instr->hydrogen()->representation().IsSmi()) { | 1519 if (instr->hydrogen()->representation().IsSmi()) { |
| 1520 __ SmiUntag(result, left); | 1520 __ SmiUntag(result, left); |
| 1521 __ Dmul(result, result, right); | 1521 __ Dmul(result, result, right); |
| 1522 } else { | 1522 } else { |
| 1523 __ Dmul(result, left, right); | 1523 __ Dmul(result, left, right); |
| 1524 } | 1524 } |
| 1525 } | 1525 } |
| 1526 | 1526 |
| 1527 if (bailout_on_minus_zero) { | 1527 if (bailout_on_minus_zero) { |
| 1528 Label done; | 1528 Label done; |
| 1529 __ Xor(at, left, right); | 1529 __ Xor(at, left, right); |
| 1530 __ Branch(&done, ge, at, Operand(zero_reg)); | 1530 __ Branch(&done, ge, at, Operand(zero_reg)); |
| 1531 // Bail out if the result is minus zero. | 1531 // Bail out if the result is minus zero. |
| 1532 DeoptimizeIf(eq, instr, result, Operand(zero_reg)); | 1532 DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg)); |
| 1533 __ bind(&done); | 1533 __ bind(&done); |
| 1534 } | 1534 } |
| 1535 } | 1535 } |
| 1536 } | 1536 } |
| 1537 | 1537 |
| 1538 | 1538 |
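When the multiply can overflow, DoMulI computes the full product (Dmulh/Dmul) and compares its upper 32 bits (`dsra32 ..., 0`) against the sign-extension of the low word (`sra ..., 31`); a mismatch means the product does not fit in an int32 and triggers the "overflow" deopt, while the separate xor/branch pair catches a zero product from operands of opposite signs, which JS would need to be -0. The overflow test restated as standalone C++ (sketch; `Deopt` and `MulCheckOverflow` are placeholders):

```cpp
#include <cstdint>
#include <cstdlib>

[[noreturn]] void Deopt(const char* detail) { std::abort(); }  // placeholder bailout

int32_t MulCheckOverflow(int32_t left, int32_t right) {
  int64_t product = static_cast<int64_t>(left) * right;      // Dmul
  int32_t hi = static_cast<int32_t>(product >> 32);          // dsra32 result, 0
  int32_t lo_sign = static_cast<int32_t>(product) >> 31;     // sra at, result, 31
  if (hi != lo_sign) Deopt("overflow");  // product does not fit in 32 bits
  return static_cast<int32_t>(product);
}
// 0x10000 * 0x10000 deoptimizes; -3 * 5 returns -15.
```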
| 1539 void LCodeGen::DoBitI(LBitI* instr) { | 1539 void LCodeGen::DoBitI(LBitI* instr) { |
| 1540 LOperand* left_op = instr->left(); | 1540 LOperand* left_op = instr->left(); |
| 1541 LOperand* right_op = instr->right(); | 1541 LOperand* right_op = instr->right(); |
| 1542 DCHECK(left_op->IsRegister()); | 1542 DCHECK(left_op->IsRegister()); |
| (...skipping 43 matching lines...) |
| 1586 case Token::ROR: | 1586 case Token::ROR: |
| 1587 __ Ror(result, left, Operand(ToRegister(right_op))); | 1587 __ Ror(result, left, Operand(ToRegister(right_op))); |
| 1588 break; | 1588 break; |
| 1589 case Token::SAR: | 1589 case Token::SAR: |
| 1590 __ srav(result, left, ToRegister(right_op)); | 1590 __ srav(result, left, ToRegister(right_op)); |
| 1591 break; | 1591 break; |
| 1592 case Token::SHR: | 1592 case Token::SHR: |
| 1593 __ srlv(result, left, ToRegister(right_op)); | 1593 __ srlv(result, left, ToRegister(right_op)); |
| 1594 if (instr->can_deopt()) { | 1594 if (instr->can_deopt()) { |
| 1595 // TODO(yy): (-1) >>> 0. anything else? | 1595 // TODO(yy): (-1) >>> 0. anything else? |
| 1596 DeoptimizeIf(lt, instr, result, Operand(zero_reg)); | 1596 DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg)); |
| 1597 DeoptimizeIf(gt, instr, result, Operand(kMaxInt)); | 1597 DeoptimizeIf(gt, instr, "negative value", result, Operand(kMaxInt)); |
| 1598 } | 1598 } |
| 1599 break; | 1599 break; |
| 1600 case Token::SHL: | 1600 case Token::SHL: |
| 1601 __ sllv(result, left, ToRegister(right_op)); | 1601 __ sllv(result, left, ToRegister(right_op)); |
| 1602 break; | 1602 break; |
| 1603 default: | 1603 default: |
| 1604 UNREACHABLE(); | 1604 UNREACHABLE(); |
| 1605 break; | 1605 break; |
| 1606 } | 1606 } |
| 1607 } else { | 1607 } else { |
| (...skipping 14 matching lines...) |
| 1622 } else { | 1622 } else { |
| 1623 __ Move(result, left); | 1623 __ Move(result, left); |
| 1624 } | 1624 } |
| 1625 break; | 1625 break; |
| 1626 case Token::SHR: | 1626 case Token::SHR: |
| 1627 if (shift_count != 0) { | 1627 if (shift_count != 0) { |
| 1628 __ srl(result, left, shift_count); | 1628 __ srl(result, left, shift_count); |
| 1629 } else { | 1629 } else { |
| 1630 if (instr->can_deopt()) { | 1630 if (instr->can_deopt()) { |
| 1631 __ And(at, left, Operand(0x80000000)); | 1631 __ And(at, left, Operand(0x80000000)); |
| 1632 DeoptimizeIf(ne, instr, at, Operand(zero_reg)); | 1632 DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg)); |
| 1633 } | 1633 } |
| 1634 __ Move(result, left); | 1634 __ Move(result, left); |
| 1635 } | 1635 } |
| 1636 break; | 1636 break; |
| 1637 case Token::SHL: | 1637 case Token::SHL: |
| 1638 if (shift_count != 0) { | 1638 if (shift_count != 0) { |
| 1639 if (instr->hydrogen_value()->representation().IsSmi()) { | 1639 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1640 __ dsll(result, left, shift_count); | 1640 __ dsll(result, left, shift_count); |
| 1641 } else { | 1641 } else { |
| 1642 __ sll(result, left, shift_count); | 1642 __ sll(result, left, shift_count); |
| (...skipping 35 matching lines...) |
| 1678 overflow); // Reg at also used as scratch. | 1678 overflow); // Reg at also used as scratch. |
| 1679 } else { | 1679 } else { |
| 1680 DCHECK(right->IsRegister()); | 1680 DCHECK(right->IsRegister()); |
| 1681 // Due to overflow check macros not supporting constant operands, | 1681 // Due to overflow check macros not supporting constant operands, |
| 1682 // handling the IsConstantOperand case was moved to prev if clause. | 1682 // handling the IsConstantOperand case was moved to prev if clause. |
| 1683 __ SubuAndCheckForOverflow(ToRegister(result), | 1683 __ SubuAndCheckForOverflow(ToRegister(result), |
| 1684 ToRegister(left), | 1684 ToRegister(left), |
| 1685 ToRegister(right), | 1685 ToRegister(right), |
| 1686 overflow); // Reg at also used as scratch. | 1686 overflow); // Reg at also used as scratch. |
| 1687 } | 1687 } |
| 1688 DeoptimizeIf(lt, instr, overflow, Operand(zero_reg)); | 1688 DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg)); |
| 1689 if (!instr->hydrogen()->representation().IsSmi()) { | 1689 if (!instr->hydrogen()->representation().IsSmi()) { |
| 1690 DeoptimizeIf(gt, instr, ToRegister(result), Operand(kMaxInt)); | 1690 DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt)); |
| 1691 DeoptimizeIf(lt, instr, ToRegister(result), Operand(kMinInt)); | 1691 DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt)); |
| 1692 } | 1692 } |
| 1693 } | 1693 } |
| 1694 } | 1694 } |
| 1695 | 1695 |
| 1696 | 1696 |
| 1697 void LCodeGen::DoConstantI(LConstantI* instr) { | 1697 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 1698 __ li(ToRegister(instr->result()), Operand(instr->value())); | 1698 __ li(ToRegister(instr->result()), Operand(instr->value())); |
| 1699 } | 1699 } |
| 1700 | 1700 |
| 1701 | 1701 |
| (...skipping 34 matching lines...) |
| 1736 Register result = ToRegister(instr->result()); | 1736 Register result = ToRegister(instr->result()); |
| 1737 Register scratch = ToRegister(instr->temp()); | 1737 Register scratch = ToRegister(instr->temp()); |
| 1738 Smi* index = instr->index(); | 1738 Smi* index = instr->index(); |
| 1739 Label runtime, done; | 1739 Label runtime, done; |
| 1740 DCHECK(object.is(a0)); | 1740 DCHECK(object.is(a0)); |
| 1741 DCHECK(result.is(v0)); | 1741 DCHECK(result.is(v0)); |
| 1742 DCHECK(!scratch.is(scratch0())); | 1742 DCHECK(!scratch.is(scratch0())); |
| 1743 DCHECK(!scratch.is(object)); | 1743 DCHECK(!scratch.is(object)); |
| 1744 | 1744 |
| 1745 __ SmiTst(object, at); | 1745 __ SmiTst(object, at); |
| 1746 DeoptimizeIf(eq, instr, at, Operand(zero_reg)); | 1746 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 1747 __ GetObjectType(object, scratch, scratch); | 1747 __ GetObjectType(object, scratch, scratch); |
| 1748 DeoptimizeIf(ne, instr, scratch, Operand(JS_DATE_TYPE)); | 1748 DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE)); |
| 1749 | 1749 |
| 1750 if (index->value() == 0) { | 1750 if (index->value() == 0) { |
| 1751 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1751 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); |
| 1752 } else { | 1752 } else { |
| 1753 if (index->value() < JSDate::kFirstUncachedField) { | 1753 if (index->value() < JSDate::kFirstUncachedField) { |
| 1754 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1754 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| 1755 __ li(scratch, Operand(stamp)); | 1755 __ li(scratch, Operand(stamp)); |
| 1756 __ ld(scratch, MemOperand(scratch)); | 1756 __ ld(scratch, MemOperand(scratch)); |
| 1757 __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | 1757 __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); |
| 1758 __ Branch(&runtime, ne, scratch, Operand(scratch0())); | 1758 __ Branch(&runtime, ne, scratch, Operand(scratch0())); |
| (...skipping 114 matching lines...) |
| 1873 overflow); // Reg at also used as scratch. | 1873 overflow); // Reg at also used as scratch. |
| 1874 } else { | 1874 } else { |
| 1875 DCHECK(right->IsRegister()); | 1875 DCHECK(right->IsRegister()); |
| 1876 // Due to overflow check macros not supporting constant operands, | 1876 // Due to overflow check macros not supporting constant operands, |
| 1877 // handling the IsConstantOperand case was moved to prev if clause. | 1877 // handling the IsConstantOperand case was moved to prev if clause. |
| 1878 __ AdduAndCheckForOverflow(ToRegister(result), | 1878 __ AdduAndCheckForOverflow(ToRegister(result), |
| 1879 ToRegister(left), | 1879 ToRegister(left), |
| 1880 ToRegister(right), | 1880 ToRegister(right), |
| 1881 overflow); // Reg at also used as scratch. | 1881 overflow); // Reg at also used as scratch. |
| 1882 } | 1882 } |
| 1883 DeoptimizeIf(lt, instr, overflow, Operand(zero_reg)); | 1883 DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg)); |
| 1884 // if not smi, it must int32. | 1884 // if not smi, it must int32. |
| 1885 if (!instr->hydrogen()->representation().IsSmi()) { | 1885 if (!instr->hydrogen()->representation().IsSmi()) { |
| 1886 DeoptimizeIf(gt, instr, ToRegister(result), Operand(kMaxInt)); | 1886 DeoptimizeIf(gt, instr, "overflow", ToRegister(result), Operand(kMaxInt)); |
| 1887 DeoptimizeIf(lt, instr, ToRegister(result), Operand(kMinInt)); | 1887 DeoptimizeIf(lt, instr, "overflow", ToRegister(result), Operand(kMinInt)); |
| 1888 } | 1888 } |
| 1889 } | 1889 } |
| 1890 } | 1890 } |
| 1891 | 1891 |
| 1892 | 1892 |
| 1893 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1893 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 1894 LOperand* left = instr->left(); | 1894 LOperand* left = instr->left(); |
| 1895 LOperand* right = instr->right(); | 1895 LOperand* right = instr->right(); |
| 1896 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1896 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| 1897 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 1897 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; |
| (...skipping 241 matching lines...) |
| 2139 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); | 2139 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); |
| 2140 } | 2140 } |
| 2141 | 2141 |
| 2142 if (expected.Contains(ToBooleanStub::SMI)) { | 2142 if (expected.Contains(ToBooleanStub::SMI)) { |
| 2143 // Smis: 0 -> false, all other -> true. | 2143 // Smis: 0 -> false, all other -> true. |
| 2144 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); | 2144 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); |
| 2145 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2145 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2146 } else if (expected.NeedsMap()) { | 2146 } else if (expected.NeedsMap()) { |
| 2147 // If we need a map later and have a Smi -> deopt. | 2147 // If we need a map later and have a Smi -> deopt. |
| 2148 __ SmiTst(reg, at); | 2148 __ SmiTst(reg, at); |
| 2149 DeoptimizeIf(eq, instr, at, Operand(zero_reg)); | 2149 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 2150 } | 2150 } |
| 2151 | 2151 |
| 2152 const Register map = scratch0(); | 2152 const Register map = scratch0(); |
| 2153 if (expected.NeedsMap()) { | 2153 if (expected.NeedsMap()) { |
| 2154 __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2154 __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 2155 if (expected.CanBeUndetectable()) { | 2155 if (expected.CanBeUndetectable()) { |
| 2156 // Undetectable -> false. | 2156 // Undetectable -> false. |
| 2157 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); | 2157 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 2158 __ And(at, at, Operand(1 << Map::kIsUndetectable)); | 2158 __ And(at, at, Operand(1 << Map::kIsUndetectable)); |
| 2159 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); | 2159 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); |
| (...skipping 35 matching lines...) |
| 2195 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), | 2195 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), |
| 2196 ne, dbl_scratch, kDoubleRegZero); | 2196 ne, dbl_scratch, kDoubleRegZero); |
| 2197 // Falls through if dbl_scratch == 0. | 2197 // Falls through if dbl_scratch == 0. |
| 2198 __ Branch(instr->FalseLabel(chunk_)); | 2198 __ Branch(instr->FalseLabel(chunk_)); |
| 2199 __ bind(&not_heap_number); | 2199 __ bind(&not_heap_number); |
| 2200 } | 2200 } |
| 2201 | 2201 |
| 2202 if (!expected.IsGeneric()) { | 2202 if (!expected.IsGeneric()) { |
| 2203 // We've seen something for the first time -> deopt. | 2203 // We've seen something for the first time -> deopt. |
| 2204 // This can only happen if we are not generic already. | 2204 // This can only happen if we are not generic already. |
| 2205 DeoptimizeIf(al, instr, zero_reg, Operand(zero_reg)); | 2205 DeoptimizeIf(al, instr, "unexpected object", zero_reg, |
| | 2206 Operand(zero_reg)); |
| 2206 } | 2207 } |
| 2207 } | 2208 } |
| 2208 } | 2209 } |
| 2209 } | 2210 } |
| 2210 | 2211 |
| 2211 | 2212 |
| 2212 void LCodeGen::EmitGoto(int block) { | 2213 void LCodeGen::EmitGoto(int block) { |
| 2213 if (!IsNextEmittedBlock(block)) { | 2214 if (!IsNextEmittedBlock(block)) { |
| 2214 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2215 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| 2215 } | 2216 } |
| (...skipping 625 matching lines...) |
| 2841 } | 2842 } |
| 2842 } | 2843 } |
| 2843 | 2844 |
| 2844 | 2845 |
| 2845 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2846 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 2846 Register result = ToRegister(instr->result()); | 2847 Register result = ToRegister(instr->result()); |
| 2847 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 2848 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
| 2848 __ ld(result, FieldMemOperand(at, Cell::kValueOffset)); | 2849 __ ld(result, FieldMemOperand(at, Cell::kValueOffset)); |
| 2849 if (instr->hydrogen()->RequiresHoleCheck()) { | 2850 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2850 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2851 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2851 DeoptimizeIf(eq, instr, result, Operand(at)); | 2852 DeoptimizeIf(eq, instr, "hole", result, Operand(at)); |
| 2852 } | 2853 } |
| 2853 } | 2854 } |
| 2854 | 2855 |
| 2855 | 2856 |
| 2856 template <class T> | 2857 template <class T> |
| 2857 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 2858 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
| 2858 DCHECK(FLAG_vector_ics); | 2859 DCHECK(FLAG_vector_ics); |
| 2859 Register vector = ToRegister(instr->temp_vector()); | 2860 Register vector = ToRegister(instr->temp_vector()); |
| 2860 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); | 2861 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); |
| 2861 __ li(vector, instr->hydrogen()->feedback_vector()); | 2862 __ li(vector, instr->hydrogen()->feedback_vector()); |
| (...skipping 29 matching lines...) |
| 2891 | 2892 |
| 2892 // If the cell we are storing to contains the hole it could have | 2893 // If the cell we are storing to contains the hole it could have |
| 2893 // been deleted from the property dictionary. In that case, we need | 2894 // been deleted from the property dictionary. In that case, we need |
| 2894 // to update the property details in the property dictionary to mark | 2895 // to update the property details in the property dictionary to mark |
| 2895 // it as no longer deleted. | 2896 // it as no longer deleted. |
| 2896 if (instr->hydrogen()->RequiresHoleCheck()) { | 2897 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2897 // We use a temp to check the payload. | 2898 // We use a temp to check the payload. |
| 2898 Register payload = ToRegister(instr->temp()); | 2899 Register payload = ToRegister(instr->temp()); |
| 2899 __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 2900 __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
| 2900 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2901 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2901 DeoptimizeIf(eq, instr, payload, Operand(at)); | 2902 DeoptimizeIf(eq, instr, "hole", payload, Operand(at)); |
| 2902 } | 2903 } |
| 2903 | 2904 |
| 2904 // Store the value. | 2905 // Store the value. |
| 2905 __ sd(value, FieldMemOperand(cell, Cell::kValueOffset)); | 2906 __ sd(value, FieldMemOperand(cell, Cell::kValueOffset)); |
| 2906 // Cells are always rescanned, so no write barrier here. | 2907 // Cells are always rescanned, so no write barrier here. |
| 2907 } | 2908 } |
| 2908 | 2909 |
| 2909 | 2910 |
| 2910 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 2911 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2911 Register context = ToRegister(instr->context()); | 2912 Register context = ToRegister(instr->context()); |
| 2912 Register result = ToRegister(instr->result()); | 2913 Register result = ToRegister(instr->result()); |
| 2913 | 2914 |
| 2914 __ ld(result, ContextOperand(context, instr->slot_index())); | 2915 __ ld(result, ContextOperand(context, instr->slot_index())); |
| 2915 if (instr->hydrogen()->RequiresHoleCheck()) { | 2916 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2916 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2917 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2917 | 2918 |
| 2918 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2919 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2919 DeoptimizeIf(eq, instr, result, Operand(at)); | 2920 DeoptimizeIf(eq, instr, "hole", result, Operand(at)); |
| 2920 } else { | 2921 } else { |
| 2921 Label is_not_hole; | 2922 Label is_not_hole; |
| 2922 __ Branch(&is_not_hole, ne, result, Operand(at)); | 2923 __ Branch(&is_not_hole, ne, result, Operand(at)); |
| 2923 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 2924 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 2924 __ bind(&is_not_hole); | 2925 __ bind(&is_not_hole); |
| 2925 } | 2926 } |
| 2926 } | 2927 } |
| 2927 } | 2928 } |
| 2928 | 2929 |
| 2929 | 2930 |
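The cell and context-slot accessors above share one hole-handling pattern: load the value, compare it with the-hole, and either deoptimize with detail "hole" or treat the slot as undefined, depending on `DeoptimizesOnHole()` (DoStoreContextSlot in the next hunk uses the same comparison but simply skips the store when the slot no longer holds the hole). A schematic restatement with placeholder values:

```cpp
#include <cstdlib>

enum class Value { kHole, kUndefined, kOther };  // stand-ins for heap values

[[noreturn]] void Deopt(const char* detail) { std::abort(); }  // placeholder bailout

// Hole-check pattern of DoLoadGlobalCell / DoLoadContextSlot (sketch).
Value LoadWithHoleCheck(Value loaded, bool deoptimizes_on_hole) {
  if (loaded != Value::kHole) return loaded;
  if (deoptimizes_on_hole) Deopt("hole");
  return Value::kUndefined;  // the non-deopting load reads the hole as undefined
}
```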
| 2930 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 2931 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 2931 Register context = ToRegister(instr->context()); | 2932 Register context = ToRegister(instr->context()); |
| 2932 Register value = ToRegister(instr->value()); | 2933 Register value = ToRegister(instr->value()); |
| 2933 Register scratch = scratch0(); | 2934 Register scratch = scratch0(); |
| 2934 MemOperand target = ContextOperand(context, instr->slot_index()); | 2935 MemOperand target = ContextOperand(context, instr->slot_index()); |
| 2935 | 2936 |
| 2936 Label skip_assignment; | 2937 Label skip_assignment; |
| 2937 | 2938 |
| 2938 if (instr->hydrogen()->RequiresHoleCheck()) { | 2939 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2939 __ ld(scratch, target); | 2940 __ ld(scratch, target); |
| 2940 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2941 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2941 | 2942 |
| 2942 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2943 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2943 DeoptimizeIf(eq, instr, scratch, Operand(at)); | 2944 DeoptimizeIf(eq, instr, "hole", scratch, Operand(at)); |
| 2944 } else { | 2945 } else { |
| 2945 __ Branch(&skip_assignment, ne, scratch, Operand(at)); | 2946 __ Branch(&skip_assignment, ne, scratch, Operand(at)); |
| 2946 } | 2947 } |
| 2947 } | 2948 } |
| 2948 | 2949 |
| 2949 __ sd(value, target); | 2950 __ sd(value, target); |
| 2950 if (instr->hydrogen()->NeedsWriteBarrier()) { | 2951 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 2951 SmiCheck check_needed = | 2952 SmiCheck check_needed = |
| 2952 instr->hydrogen()->value()->type().IsHeapObject() | 2953 instr->hydrogen()->value()->type().IsHeapObject() |
| 2953 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 2954 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 73 matching lines...) |
| 3027 Register scratch = scratch0(); | 3028 Register scratch = scratch0(); |
| 3028 Register function = ToRegister(instr->function()); | 3029 Register function = ToRegister(instr->function()); |
| 3029 Register result = ToRegister(instr->result()); | 3030 Register result = ToRegister(instr->result()); |
| 3030 | 3031 |
| 3031 // Get the prototype or initial map from the function. | 3032 // Get the prototype or initial map from the function. |
| 3032 __ ld(result, | 3033 __ ld(result, |
| 3033 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3034 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 3034 | 3035 |
| 3035 // Check that the function has a prototype or an initial map. | 3036 // Check that the function has a prototype or an initial map. |
| 3036 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 3037 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 3037 DeoptimizeIf(eq, instr, result, Operand(at)); | 3038 DeoptimizeIf(eq, instr, "hole", result, Operand(at)); |
| 3038 | 3039 |
| 3039 // If the function does not have an initial map, we're done. | 3040 // If the function does not have an initial map, we're done. |
| 3040 Label done; | 3041 Label done; |
| 3041 __ GetObjectType(result, scratch, scratch); | 3042 __ GetObjectType(result, scratch, scratch); |
| 3042 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); | 3043 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); |
| 3043 | 3044 |
| 3044 // Get the prototype from the initial map. | 3045 // Get the prototype from the initial map. |
| 3045 __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3046 __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 3046 | 3047 |
| 3047 // All done. | 3048 // All done. |
| (...skipping 125 matching lines...) |
| 3173 __ lhu(result, mem_operand); | 3174 __ lhu(result, mem_operand); |
| 3174 break; | 3175 break; |
| 3175 case EXTERNAL_INT32_ELEMENTS: | 3176 case EXTERNAL_INT32_ELEMENTS: |
| 3176 case INT32_ELEMENTS: | 3177 case INT32_ELEMENTS: |
| 3177 __ lw(result, mem_operand); | 3178 __ lw(result, mem_operand); |
| 3178 break; | 3179 break; |
| 3179 case EXTERNAL_UINT32_ELEMENTS: | 3180 case EXTERNAL_UINT32_ELEMENTS: |
| 3180 case UINT32_ELEMENTS: | 3181 case UINT32_ELEMENTS: |
| 3181 __ lw(result, mem_operand); | 3182 __ lw(result, mem_operand); |
| 3182 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3183 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 3183 DeoptimizeIf(Ugreater_equal, instr, result, Operand(0x80000000)); | 3184 DeoptimizeIf(Ugreater_equal, instr, "negative value", result, |
| 3185 Operand(0x80000000)); |
| 3184 } | 3186 } |
| 3185 break; | 3187 break; |
| 3186 case FLOAT32_ELEMENTS: | 3188 case FLOAT32_ELEMENTS: |
| 3187 case FLOAT64_ELEMENTS: | 3189 case FLOAT64_ELEMENTS: |
| 3188 case EXTERNAL_FLOAT32_ELEMENTS: | 3190 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3189 case EXTERNAL_FLOAT64_ELEMENTS: | 3191 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3190 case FAST_DOUBLE_ELEMENTS: | 3192 case FAST_DOUBLE_ELEMENTS: |
| 3191 case FAST_ELEMENTS: | 3193 case FAST_ELEMENTS: |
| 3192 case FAST_SMI_ELEMENTS: | 3194 case FAST_SMI_ELEMENTS: |
| 3193 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3195 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| (...skipping 39 matching lines...) |
| 3233 } else { | 3235 } else { |
| 3234 __ dsra(at, key, -shift_size); | 3236 __ dsra(at, key, -shift_size); |
| 3235 } | 3237 } |
| 3236 __ Daddu(scratch, scratch, at); | 3238 __ Daddu(scratch, scratch, at); |
| 3237 } | 3239 } |
| 3238 | 3240 |
| 3239 __ ldc1(result, MemOperand(scratch)); | 3241 __ ldc1(result, MemOperand(scratch)); |
| 3240 | 3242 |
| 3241 if (instr->hydrogen()->RequiresHoleCheck()) { | 3243 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3242 __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); | 3244 __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); |
| 3243 DeoptimizeIf(eq, instr, scratch, Operand(kHoleNanUpper32)); | 3245 DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32)); |
| 3244 } | 3246 } |
| 3245 } | 3247 } |
| 3246 | 3248 |
| 3247 | 3249 |
| 3248 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3250 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 3249 HLoadKeyed* hinstr = instr->hydrogen(); | 3251 HLoadKeyed* hinstr = instr->hydrogen(); |
| 3250 Register elements = ToRegister(instr->elements()); | 3252 Register elements = ToRegister(instr->elements()); |
| 3251 Register result = ToRegister(instr->result()); | 3253 Register result = ToRegister(instr->result()); |
| 3252 Register scratch = scratch0(); | 3254 Register scratch = scratch0(); |
| 3253 Register store_base = scratch; | 3255 Register store_base = scratch; |
| (...skipping 33 matching lines...) |
| 3287 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); | 3289 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
| 3288 offset += kPointerSize / 2; | 3290 offset += kPointerSize / 2; |
| 3289 } | 3291 } |
| 3290 | 3292 |
| 3291 __ Load(result, MemOperand(store_base, offset), representation); | 3293 __ Load(result, MemOperand(store_base, offset), representation); |
| 3292 | 3294 |
| 3293 // Check for the hole value. | 3295 // Check for the hole value. |
| 3294 if (hinstr->RequiresHoleCheck()) { | 3296 if (hinstr->RequiresHoleCheck()) { |
| 3295 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3297 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 3296 __ SmiTst(result, scratch); | 3298 __ SmiTst(result, scratch); |
| 3297 DeoptimizeIf(ne, instr, scratch, Operand(zero_reg)); | 3299 DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg)); |
| 3298 } else { | 3300 } else { |
| 3299 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3301 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 3300 DeoptimizeIf(eq, instr, result, Operand(scratch)); | 3302 DeoptimizeIf(eq, instr, "hole", result, Operand(scratch)); |
| 3301 } | 3303 } |
| 3302 } | 3304 } |
| 3303 } | 3305 } |
| 3304 | 3306 |
| 3305 | 3307 |
| 3306 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3308 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 3307 if (instr->is_typed_elements()) { | 3309 if (instr->is_typed_elements()) { |
| 3308 DoLoadKeyedExternalArray(instr); | 3310 DoLoadKeyedExternalArray(instr); |
| 3309 } else if (instr->hydrogen()->representation().IsDouble()) { | 3311 } else if (instr->hydrogen()->representation().IsDouble()) { |
| 3310 DoLoadKeyedFixedDoubleArray(instr); | 3312 DoLoadKeyedFixedDoubleArray(instr); |
| (...skipping 135 matching lines...) |
| 3446 } | 3448 } |
| 3447 | 3449 |
| 3448 // Normal function. Replace undefined or null with global receiver. | 3450 // Normal function. Replace undefined or null with global receiver. |
| 3449 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3451 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 3450 __ Branch(&global_object, eq, receiver, Operand(scratch)); | 3452 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 3451 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3453 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3452 __ Branch(&global_object, eq, receiver, Operand(scratch)); | 3454 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 3453 | 3455 |
| 3454 // Deoptimize if the receiver is not a JS object. | 3456 // Deoptimize if the receiver is not a JS object. |
| 3455 __ SmiTst(receiver, scratch); | 3457 __ SmiTst(receiver, scratch); |
| 3456 DeoptimizeIf(eq, instr, scratch, Operand(zero_reg)); | 3458 DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg)); |
| 3457 | 3459 |
| 3458 __ GetObjectType(receiver, scratch, scratch); | 3460 __ GetObjectType(receiver, scratch, scratch); |
| 3459 DeoptimizeIf(lt, instr, scratch, Operand(FIRST_SPEC_OBJECT_TYPE)); | 3461 DeoptimizeIf(lt, instr, "not a JavaScript object", scratch, |
| 3462 Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 3460 __ Branch(&result_in_receiver); | 3463 __ Branch(&result_in_receiver); |
| 3461 | 3464 |
| 3462 __ bind(&global_object); | 3465 __ bind(&global_object); |
| 3463 __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3466 __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 3464 __ ld(result, | 3467 __ ld(result, |
| 3465 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 3468 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
| 3466 __ ld(result, | 3469 __ ld(result, |
| 3467 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 3470 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
| 3468 | 3471 |
| 3469 if (result.is(receiver)) { | 3472 if (result.is(receiver)) { |
| (...skipping 14 matching lines...) |
| 3484 Register length = ToRegister(instr->length()); | 3487 Register length = ToRegister(instr->length()); |
| 3485 Register elements = ToRegister(instr->elements()); | 3488 Register elements = ToRegister(instr->elements()); |
| 3486 Register scratch = scratch0(); | 3489 Register scratch = scratch0(); |
| 3487 DCHECK(receiver.is(a0)); // Used for parameter count. | 3490 DCHECK(receiver.is(a0)); // Used for parameter count. |
| 3488 DCHECK(function.is(a1)); // Required by InvokeFunction. | 3491 DCHECK(function.is(a1)); // Required by InvokeFunction. |
| 3489 DCHECK(ToRegister(instr->result()).is(v0)); | 3492 DCHECK(ToRegister(instr->result()).is(v0)); |
| 3490 | 3493 |
| 3491 // Copy the arguments to this function possibly from the | 3494 // Copy the arguments to this function possibly from the |
| 3492 // adaptor frame below it. | 3495 // adaptor frame below it. |
| 3493 const uint32_t kArgumentsLimit = 1 * KB; | 3496 const uint32_t kArgumentsLimit = 1 * KB; |
| 3494 DeoptimizeIf(hi, instr, length, Operand(kArgumentsLimit)); | 3497 DeoptimizeIf(hi, instr, "too many arguments", length, |
| 3498 Operand(kArgumentsLimit)); |
| 3495 | 3499 |
| 3496 // Push the receiver and use the register to keep the original | 3500 // Push the receiver and use the register to keep the original |
| 3497 // number of arguments. | 3501 // number of arguments. |
| 3498 __ push(receiver); | 3502 __ push(receiver); |
| 3499 __ Move(receiver, length); | 3503 __ Move(receiver, length); |
| 3500 // The arguments are at a one pointer size offset from elements. | 3504 // The arguments are at a one pointer size offset from elements. |
| 3501 __ Daddu(elements, elements, Operand(1 * kPointerSize)); | 3505 __ Daddu(elements, elements, Operand(1 * kPointerSize)); |
| 3502 | 3506 |
| 3503 // Loop through the arguments pushing them onto the execution | 3507 // Loop through the arguments pushing them onto the execution |
| 3504 // stack. | 3508 // stack. |
| (...skipping 109 matching lines...) |
| 3614 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | 3618 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
| 3615 DCHECK(instr->context() != NULL); | 3619 DCHECK(instr->context() != NULL); |
| 3616 DCHECK(ToRegister(instr->context()).is(cp)); | 3620 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3617 Register input = ToRegister(instr->value()); | 3621 Register input = ToRegister(instr->value()); |
| 3618 Register result = ToRegister(instr->result()); | 3622 Register result = ToRegister(instr->result()); |
| 3619 Register scratch = scratch0(); | 3623 Register scratch = scratch0(); |
| 3620 | 3624 |
| 3621 // Deoptimize if not a heap number. | 3625 // Deoptimize if not a heap number. |
| 3622 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3626 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3623 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3627 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3624 DeoptimizeIf(ne, instr, scratch, Operand(at)); | 3628 DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at)); |
| 3625 | 3629 |
| 3626 Label done; | 3630 Label done; |
| 3627 Register exponent = scratch0(); | 3631 Register exponent = scratch0(); |
| 3628 scratch = no_reg; | 3632 scratch = no_reg; |
| 3629 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3633 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 3630 // Check the sign of the argument. If the argument is positive, just | 3634 // Check the sign of the argument. If the argument is positive, just |
| 3631 // return it. | 3635 // return it. |
| 3632 __ Move(result, input); | 3636 __ Move(result, input); |
| 3633 __ And(at, exponent, Operand(HeapNumber::kSignMask)); | 3637 __ And(at, exponent, Operand(HeapNumber::kSignMask)); |
| 3634 __ Branch(&done, eq, at, Operand(zero_reg)); | 3638 __ Branch(&done, eq, at, Operand(zero_reg)); |
| (...skipping 46 matching lines...) |
| 3681 | 3685 |
| 3682 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3686 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3683 Register input = ToRegister(instr->value()); | 3687 Register input = ToRegister(instr->value()); |
| 3684 Register result = ToRegister(instr->result()); | 3688 Register result = ToRegister(instr->result()); |
| 3685 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | 3689 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
| 3686 Label done; | 3690 Label done; |
| 3687 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); | 3691 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); |
| 3688 __ mov(result, input); | 3692 __ mov(result, input); |
| 3689 __ dsubu(result, zero_reg, input); | 3693 __ dsubu(result, zero_reg, input); |
| 3690 // Overflow if result is still negative, i.e. 0x80000000. | 3694 // Overflow if result is still negative, i.e. 0x80000000. |
| 3691 DeoptimizeIf(lt, instr, result, Operand(zero_reg)); | 3695 DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg)); |
| 3692 __ bind(&done); | 3696 __ bind(&done); |
| 3693 } | 3697 } |
| 3694 | 3698 |
| 3695 | 3699 |
| 3696 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3700 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3697 // Class for deferred case. | 3701 // Class for deferred case. |
| 3698 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3702 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
| 3699 public: | 3703 public: |
| 3700 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3704 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
| 3701 : LDeferredCode(codegen), instr_(instr) { } | 3705 : LDeferredCode(codegen), instr_(instr) { } |
| (...skipping 33 matching lines...) |
| 3735 Register except_flag = ToRegister(instr->temp()); | 3739 Register except_flag = ToRegister(instr->temp()); |
| 3736 | 3740 |
| 3737 __ EmitFPUTruncate(kRoundToMinusInf, | 3741 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3738 result, | 3742 result, |
| 3739 input, | 3743 input, |
| 3740 scratch1, | 3744 scratch1, |
| 3741 double_scratch0(), | 3745 double_scratch0(), |
| 3742 except_flag); | 3746 except_flag); |
| 3743 | 3747 |
| 3744 // Deopt if the operation did not succeed. | 3748 // Deopt if the operation did not succeed. |
| 3745 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); | 3749 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 3750 Operand(zero_reg)); |
| 3746 | 3751 |
| 3747 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3752 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3748 // Test for -0. | 3753 // Test for -0. |
| 3749 Label done; | 3754 Label done; |
| 3750 __ Branch(&done, ne, result, Operand(zero_reg)); | 3755 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 3751 __ mfhc1(scratch1, input); // Get exponent/sign bits. | 3756 __ mfhc1(scratch1, input); // Get exponent/sign bits. |
| 3752 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 3757 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 3753 DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg)); | 3758 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 3754 __ bind(&done); | 3759 __ bind(&done); |
| 3755 } | 3760 } |
| 3756 } | 3761 } |
| 3757 | 3762 |
| 3758 | 3763 |
| 3759 void LCodeGen::DoMathRound(LMathRound* instr) { | 3764 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3760 DoubleRegister input = ToDoubleRegister(instr->value()); | 3765 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3761 Register result = ToRegister(instr->result()); | 3766 Register result = ToRegister(instr->result()); |
| 3762 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3767 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
| 3763 Register scratch = scratch0(); | 3768 Register scratch = scratch0(); |
| (...skipping 12 matching lines...) |
| 3776 __ mov(result, zero_reg); | 3781 __ mov(result, zero_reg); |
| 3777 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3782 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3778 __ Branch(&check_sign_on_zero); | 3783 __ Branch(&check_sign_on_zero); |
| 3779 } else { | 3784 } else { |
| 3780 __ Branch(&done); | 3785 __ Branch(&done); |
| 3781 } | 3786 } |
| 3782 __ bind(&skip1); | 3787 __ bind(&skip1); |
| 3783 | 3788 |
| 3784 // The following conversion will not work with numbers | 3789 // The following conversion will not work with numbers |
| 3785 // outside of ]-2^32, 2^32[. | 3790 // outside of ]-2^32, 2^32[. |
| 3786 DeoptimizeIf(ge, instr, scratch, Operand(HeapNumber::kExponentBias + 32)); | 3791 DeoptimizeIf(ge, instr, "overflow", scratch, |
| 3792 Operand(HeapNumber::kExponentBias + 32)); |
| 3787 | 3793 |
| 3788 // Save the original sign for later comparison. | 3794 // Save the original sign for later comparison. |
| 3789 __ And(scratch, result, Operand(HeapNumber::kSignMask)); | 3795 __ And(scratch, result, Operand(HeapNumber::kSignMask)); |
| 3790 | 3796 |
| 3791 __ Move(double_scratch0(), 0.5); | 3797 __ Move(double_scratch0(), 0.5); |
| 3792 __ add_d(double_scratch0(), input, double_scratch0()); | 3798 __ add_d(double_scratch0(), input, double_scratch0()); |
| 3793 | 3799 |
| 3794 // Check sign of the result: if the sign changed, the input | 3800 // Check sign of the result: if the sign changed, the input |
| 3795 // value was in ]-0.5, 0[ and the result should be -0. | 3801 // value was in ]-0.5, 0[ and the result should be -0. |
| 3796 __ mfhc1(result, double_scratch0()); | 3802 __ mfhc1(result, double_scratch0()); |
| 3797 // mfhc1 sign-extends, clear the upper bits. | 3803 // mfhc1 sign-extends, clear the upper bits. |
| 3798 __ dsll32(result, result, 0); | 3804 __ dsll32(result, result, 0); |
| 3799 __ dsrl32(result, result, 0); | 3805 __ dsrl32(result, result, 0); |
| 3800 __ Xor(result, result, Operand(scratch)); | 3806 __ Xor(result, result, Operand(scratch)); |
| 3801 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3807 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3802 // ARM uses 'mi' here, which is 'lt' | 3808 // ARM uses 'mi' here, which is 'lt' |
| 3803 DeoptimizeIf(lt, instr, result, Operand(zero_reg)); | 3809 DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg)); |
| 3804 } else { | 3810 } else { |
| 3805 Label skip2; | 3811 Label skip2; |
| 3806 // ARM uses 'mi' here, which is 'lt' | 3812 // ARM uses 'mi' here, which is 'lt' |
| 3807 // Negating it results in 'ge' | 3813 // Negating it results in 'ge' |
| 3808 __ Branch(&skip2, ge, result, Operand(zero_reg)); | 3814 __ Branch(&skip2, ge, result, Operand(zero_reg)); |
| 3809 __ mov(result, zero_reg); | 3815 __ mov(result, zero_reg); |
| 3810 __ Branch(&done); | 3816 __ Branch(&done); |
| 3811 __ bind(&skip2); | 3817 __ bind(&skip2); |
| 3812 } | 3818 } |
| 3813 | 3819 |
| 3814 Register except_flag = scratch; | 3820 Register except_flag = scratch; |
| 3815 __ EmitFPUTruncate(kRoundToMinusInf, | 3821 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3816 result, | 3822 result, |
| 3817 double_scratch0(), | 3823 double_scratch0(), |
| 3818 at, | 3824 at, |
| 3819 double_scratch1, | 3825 double_scratch1, |
| 3820 except_flag); | 3826 except_flag); |
| 3821 | 3827 |
| 3822 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); | 3828 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 3829 Operand(zero_reg)); |
| 3823 | 3830 |
| 3824 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3831 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3825 // Test for -0. | 3832 // Test for -0. |
| 3826 __ Branch(&done, ne, result, Operand(zero_reg)); | 3833 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 3827 __ bind(&check_sign_on_zero); | 3834 __ bind(&check_sign_on_zero); |
| 3828 __ mfhc1(scratch, input); // Get exponent/sign bits. | 3835 __ mfhc1(scratch, input); // Get exponent/sign bits. |
| 3829 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); | 3836 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); |
| 3830 DeoptimizeIf(ne, instr, scratch, Operand(zero_reg)); | 3837 DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg)); |
| 3831 } | 3838 } |
| 3832 __ bind(&done); | 3839 __ bind(&done); |
| 3833 } | 3840 } |
| 3834 | 3841 |
| 3835 | 3842 |
| 3836 void LCodeGen::DoMathFround(LMathFround* instr) { | 3843 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3837 DoubleRegister input = ToDoubleRegister(instr->value()); | 3844 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3838 DoubleRegister result = ToDoubleRegister(instr->result()); | 3845 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3839 __ cvt_s_d(result, input); | 3846 __ cvt_s_d(result, input); |
| 3840 __ cvt_d_s(result, result); | 3847 __ cvt_d_s(result, result); |
| (...skipping 45 matching lines...) |
| 3886 | 3893 |
| 3887 if (exponent_type.IsSmi()) { | 3894 if (exponent_type.IsSmi()) { |
| 3888 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3895 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3889 __ CallStub(&stub); | 3896 __ CallStub(&stub); |
| 3890 } else if (exponent_type.IsTagged()) { | 3897 } else if (exponent_type.IsTagged()) { |
| 3891 Label no_deopt; | 3898 Label no_deopt; |
| 3892 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3899 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 3893 DCHECK(!a7.is(tagged_exponent)); | 3900 DCHECK(!a7.is(tagged_exponent)); |
| 3894 __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3901 __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
| 3895 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3902 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3896 DeoptimizeIf(ne, instr, a7, Operand(at)); | 3903 DeoptimizeIf(ne, instr, "not a heap number", a7, Operand(at)); |
| 3897 __ bind(&no_deopt); | 3904 __ bind(&no_deopt); |
| 3898 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3905 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3899 __ CallStub(&stub); | 3906 __ CallStub(&stub); |
| 3900 } else if (exponent_type.IsInteger32()) { | 3907 } else if (exponent_type.IsInteger32()) { |
| 3901 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3908 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3902 __ CallStub(&stub); | 3909 __ CallStub(&stub); |
| 3903 } else { | 3910 } else { |
| 3904 DCHECK(exponent_type.IsDouble()); | 3911 DCHECK(exponent_type.IsDouble()); |
| 3905 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3912 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3906 __ CallStub(&stub); | 3913 __ CallStub(&stub); |
| (...skipping 321 matching lines...) |
| 4228 } else { | 4235 } else { |
| 4229 reg = ToRegister(instr->index()); | 4236 reg = ToRegister(instr->index()); |
| 4230 operand = ToOperand(instr->length()); | 4237 operand = ToOperand(instr->length()); |
| 4231 } | 4238 } |
| 4232 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4239 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 4233 Label done; | 4240 Label done; |
| 4234 __ Branch(&done, NegateCondition(cc), reg, operand); | 4241 __ Branch(&done, NegateCondition(cc), reg, operand); |
| 4235 __ stop("eliminated bounds check failed"); | 4242 __ stop("eliminated bounds check failed"); |
| 4236 __ bind(&done); | 4243 __ bind(&done); |
| 4237 } else { | 4244 } else { |
| 4238 DeoptimizeIf(cc, instr, reg, operand); | 4245 DeoptimizeIf(cc, instr, "out of bounds", reg, operand); |
| 4239 } | 4246 } |
| 4240 } | 4247 } |
| 4241 | 4248 |
| 4242 | 4249 |
| 4243 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4250 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4244 Register external_pointer = ToRegister(instr->elements()); | 4251 Register external_pointer = ToRegister(instr->elements()); |
| 4245 Register key = no_reg; | 4252 Register key = no_reg; |
| 4246 ElementsKind elements_kind = instr->elements_kind(); | 4253 ElementsKind elements_kind = instr->elements_kind(); |
| 4247 bool key_is_constant = instr->key()->IsConstantOperand(); | 4254 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4248 int constant_key = 0; | 4255 int constant_key = 0; |
| (...skipping 270 matching lines...) |
| 4519 __ bind(¬_applicable); | 4526 __ bind(¬_applicable); |
| 4520 } | 4527 } |
| 4521 | 4528 |
| 4522 | 4529 |
| 4523 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4530 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4524 Register object = ToRegister(instr->object()); | 4531 Register object = ToRegister(instr->object()); |
| 4525 Register temp = ToRegister(instr->temp()); | 4532 Register temp = ToRegister(instr->temp()); |
| 4526 Label no_memento_found; | 4533 Label no_memento_found; |
| 4527 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found, | 4534 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found, |
| 4528 ne, &no_memento_found); | 4535 ne, &no_memento_found); |
| 4529 DeoptimizeIf(al, instr); | 4536 DeoptimizeIf(al, instr, "memento found"); |
| 4530 __ bind(&no_memento_found); | 4537 __ bind(&no_memento_found); |
| 4531 } | 4538 } |
| 4532 | 4539 |
| 4533 | 4540 |
| 4534 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4541 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4535 DCHECK(ToRegister(instr->context()).is(cp)); | 4542 DCHECK(ToRegister(instr->context()).is(cp)); |
| 4536 DCHECK(ToRegister(instr->left()).is(a1)); | 4543 DCHECK(ToRegister(instr->left()).is(a1)); |
| 4537 DCHECK(ToRegister(instr->right()).is(a0)); | 4544 DCHECK(ToRegister(instr->right()).is(a0)); |
| 4538 StringAddStub stub(isolate(), | 4545 StringAddStub stub(isolate(), |
| 4539 instr->hydrogen()->flags(), | 4546 instr->hydrogen()->flags(), |
| (...skipping 282 matching lines...) |
| 4822 } | 4829 } |
| 4823 | 4830 |
| 4824 | 4831 |
| 4825 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4832 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4826 HChange* hchange = instr->hydrogen(); | 4833 HChange* hchange = instr->hydrogen(); |
| 4827 Register input = ToRegister(instr->value()); | 4834 Register input = ToRegister(instr->value()); |
| 4828 Register output = ToRegister(instr->result()); | 4835 Register output = ToRegister(instr->result()); |
| 4829 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4836 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4830 hchange->value()->CheckFlag(HValue::kUint32)) { | 4837 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4831 __ And(at, input, Operand(0x80000000)); | 4838 __ And(at, input, Operand(0x80000000)); |
| 4832 DeoptimizeIf(ne, instr, at, Operand(zero_reg)); | 4839 DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg)); |
| 4833 } | 4840 } |
| 4834 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4841 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4835 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4842 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4836 __ SmiTagCheckOverflow(output, input, at); | 4843 __ SmiTagCheckOverflow(output, input, at); |
| 4837 DeoptimizeIf(lt, instr, at, Operand(zero_reg)); | 4844 DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg)); |
| 4838 } else { | 4845 } else { |
| 4839 __ SmiTag(output, input); | 4846 __ SmiTag(output, input); |
| 4840 } | 4847 } |
| 4841 } | 4848 } |
| 4842 | 4849 |
| 4843 | 4850 |
| 4844 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4851 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4845 Register scratch = scratch0(); | 4852 Register scratch = scratch0(); |
| 4846 Register input = ToRegister(instr->value()); | 4853 Register input = ToRegister(instr->value()); |
| 4847 Register result = ToRegister(instr->result()); | 4854 Register result = ToRegister(instr->result()); |
| 4848 if (instr->needs_check()) { | 4855 if (instr->needs_check()) { |
| 4849 STATIC_ASSERT(kHeapObjectTag == 1); | 4856 STATIC_ASSERT(kHeapObjectTag == 1); |
| 4850 // If the input is a HeapObject, value of scratch won't be zero. | 4857 // If the input is a HeapObject, value of scratch won't be zero. |
| 4851 __ And(scratch, input, Operand(kHeapObjectTag)); | 4858 __ And(scratch, input, Operand(kHeapObjectTag)); |
| 4852 __ SmiUntag(result, input); | 4859 __ SmiUntag(result, input); |
| 4853 DeoptimizeIf(ne, instr, scratch, Operand(zero_reg)); | 4860 DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg)); |
| 4854 } else { | 4861 } else { |
| 4855 __ SmiUntag(result, input); | 4862 __ SmiUntag(result, input); |
| 4856 } | 4863 } |
| 4857 } | 4864 } |
| 4858 | 4865 |
| 4859 | 4866 |
| 4860 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4867 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4861 DoubleRegister result_reg, | 4868 DoubleRegister result_reg, |
| 4862 NumberUntagDMode mode) { | 4869 NumberUntagDMode mode) { |
| 4863 bool can_convert_undefined_to_nan = | 4870 bool can_convert_undefined_to_nan = |
| 4864 instr->hydrogen()->can_convert_undefined_to_nan(); | 4871 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4865 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4872 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4866 | 4873 |
| 4867 Register scratch = scratch0(); | 4874 Register scratch = scratch0(); |
| 4868 Label convert, load_smi, done; | 4875 Label convert, load_smi, done; |
| 4869 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4876 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4870 // Smi check. | 4877 // Smi check. |
| 4871 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4878 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
| 4872 // Heap number map check. | 4879 // Heap number map check. |
| 4873 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4880 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 4874 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 4881 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 4875 if (can_convert_undefined_to_nan) { | 4882 if (can_convert_undefined_to_nan) { |
| 4876 __ Branch(&convert, ne, scratch, Operand(at)); | 4883 __ Branch(&convert, ne, scratch, Operand(at)); |
| 4877 } else { | 4884 } else { |
| 4878 DeoptimizeIf(ne, instr, scratch, Operand(at)); | 4885 DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at)); |
| 4879 } | 4886 } |
| 4880 // Load heap number. | 4887 // Load heap number. |
| 4881 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 4888 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 4882 if (deoptimize_on_minus_zero) { | 4889 if (deoptimize_on_minus_zero) { |
| 4883 __ mfc1(at, result_reg); | 4890 __ mfc1(at, result_reg); |
| 4884 __ Branch(&done, ne, at, Operand(zero_reg)); | 4891 __ Branch(&done, ne, at, Operand(zero_reg)); |
| 4885 __ mfhc1(scratch, result_reg); // Get exponent/sign bits. | 4892 __ mfhc1(scratch, result_reg); // Get exponent/sign bits. |
| 4886 DeoptimizeIf(eq, instr, scratch, Operand(HeapNumber::kSignMask)); | 4893 DeoptimizeIf(eq, instr, "minus zero", scratch, |
| 4894 Operand(HeapNumber::kSignMask)); |
| 4887 } | 4895 } |
| 4888 __ Branch(&done); | 4896 __ Branch(&done); |
| 4889 if (can_convert_undefined_to_nan) { | 4897 if (can_convert_undefined_to_nan) { |
| 4890 __ bind(&convert); | 4898 __ bind(&convert); |
| 4891 // Convert undefined (and hole) to NaN. | 4899 // Convert undefined (and hole) to NaN. |
| 4892 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 4900 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 4893 DeoptimizeIf(ne, instr, input_reg, Operand(at)); | 4901 DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg, |
| 4902 Operand(at)); |
| 4894 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4903 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4895 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4904 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
| 4896 __ Branch(&done); | 4905 __ Branch(&done); |
| 4897 } | 4906 } |
| 4898 } else { | 4907 } else { |
| 4899 __ SmiUntag(scratch, input_reg); | 4908 __ SmiUntag(scratch, input_reg); |
| 4900 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4909 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4901 } | 4910 } |
| 4902 // Smi to double register conversion | 4911 // Smi to double register conversion |
| 4903 __ bind(&load_smi); | 4912 __ bind(&load_smi); |
| (...skipping 43 matching lines...) |
| 4947 __ mov(input_reg, zero_reg); // In delay slot. | 4956 __ mov(input_reg, zero_reg); // In delay slot. |
| 4948 | 4957 |
| 4949 __ bind(&check_bools); | 4958 __ bind(&check_bools); |
| 4950 __ LoadRoot(at, Heap::kTrueValueRootIndex); | 4959 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
| 4951 __ Branch(&check_false, ne, scratch2, Operand(at)); | 4960 __ Branch(&check_false, ne, scratch2, Operand(at)); |
| 4952 __ Branch(USE_DELAY_SLOT, &done); | 4961 __ Branch(USE_DELAY_SLOT, &done); |
| 4953 __ li(input_reg, Operand(1)); // In delay slot. | 4962 __ li(input_reg, Operand(1)); // In delay slot. |
| 4954 | 4963 |
| 4955 __ bind(&check_false); | 4964 __ bind(&check_false); |
| 4956 __ LoadRoot(at, Heap::kFalseValueRootIndex); | 4965 __ LoadRoot(at, Heap::kFalseValueRootIndex); |
| 4957 DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate"); | 4966 DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2, |
| 4967 Operand(at)); |
| 4958 __ Branch(USE_DELAY_SLOT, &done); | 4968 __ Branch(USE_DELAY_SLOT, &done); |
| 4959 __ mov(input_reg, zero_reg); // In delay slot. | 4969 __ mov(input_reg, zero_reg); // In delay slot. |
| 4960 } else { | 4970 } else { |
| 4961 DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number"); | 4971 DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at)); |
| 4962 | 4972 |
| 4963 // Load the double value. | 4973 // Load the double value. |
| 4964 __ ldc1(double_scratch, | 4974 __ ldc1(double_scratch, |
| 4965 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 4975 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 4966 | 4976 |
| 4967 Register except_flag = scratch2; | 4977 Register except_flag = scratch2; |
| 4968 __ EmitFPUTruncate(kRoundToZero, | 4978 __ EmitFPUTruncate(kRoundToZero, |
| 4969 input_reg, | 4979 input_reg, |
| 4970 double_scratch, | 4980 double_scratch, |
| 4971 scratch1, | 4981 scratch1, |
| 4972 double_scratch2, | 4982 double_scratch2, |
| 4973 except_flag, | 4983 except_flag, |
| 4974 kCheckForInexactConversion); | 4984 kCheckForInexactConversion); |
| 4975 | 4985 |
| 4976 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg), | 4986 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 4977 "lost precision or NaN"); | 4987 Operand(zero_reg)); |
| 4978 | 4988 |
| 4979 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4989 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4980 __ Branch(&done, ne, input_reg, Operand(zero_reg)); | 4990 __ Branch(&done, ne, input_reg, Operand(zero_reg)); |
| 4981 | 4991 |
| 4982 __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. | 4992 __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. |
| 4983 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 4993 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 4984 DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero"); | 4994 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 4985 } | 4995 } |
| 4986 } | 4996 } |
| 4987 __ bind(&done); | 4997 __ bind(&done); |
| 4988 } | 4998 } |
| 4989 | 4999 |
| 4990 | 5000 |
| 4991 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5001 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 4992 class DeferredTaggedToI FINAL : public LDeferredCode { | 5002 class DeferredTaggedToI FINAL : public LDeferredCode { |
| 4993 public: | 5003 public: |
| 4994 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5004 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| (...skipping 56 matching lines...) |
| 5051 | 5061 |
| 5052 __ EmitFPUTruncate(kRoundToMinusInf, | 5062 __ EmitFPUTruncate(kRoundToMinusInf, |
| 5053 result_reg, | 5063 result_reg, |
| 5054 double_input, | 5064 double_input, |
| 5055 scratch1, | 5065 scratch1, |
| 5056 double_scratch0(), | 5066 double_scratch0(), |
| 5057 except_flag, | 5067 except_flag, |
| 5058 kCheckForInexactConversion); | 5068 kCheckForInexactConversion); |
| 5059 | 5069 |
| 5060 // Deopt if the operation did not succeed (except_flag != 0). | 5070 // Deopt if the operation did not succeed (except_flag != 0). |
| 5061 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); | 5071 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 5072 Operand(zero_reg)); |
| 5062 | 5073 |
| 5063 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5074 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5064 Label done; | 5075 Label done; |
| 5065 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | 5076 __ Branch(&done, ne, result_reg, Operand(zero_reg)); |
| 5066 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. | 5077 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. |
| 5067 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 5078 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 5068 DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg)); | 5079 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 5069 __ bind(&done); | 5080 __ bind(&done); |
| 5070 } | 5081 } |
| 5071 } | 5082 } |
| 5072 } | 5083 } |
| 5073 | 5084 |
| 5074 | 5085 |
| 5075 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5086 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 5076 Register result_reg = ToRegister(instr->result()); | 5087 Register result_reg = ToRegister(instr->result()); |
| 5077 Register scratch1 = LCodeGen::scratch0(); | 5088 Register scratch1 = LCodeGen::scratch0(); |
| 5078 DoubleRegister double_input = ToDoubleRegister(instr->value()); | 5089 DoubleRegister double_input = ToDoubleRegister(instr->value()); |
| 5079 | 5090 |
| 5080 if (instr->truncating()) { | 5091 if (instr->truncating()) { |
| 5081 __ TruncateDoubleToI(result_reg, double_input); | 5092 __ TruncateDoubleToI(result_reg, double_input); |
| 5082 } else { | 5093 } else { |
| 5083 Register except_flag = LCodeGen::scratch1(); | 5094 Register except_flag = LCodeGen::scratch1(); |
| 5084 | 5095 |
| 5085 __ EmitFPUTruncate(kRoundToMinusInf, | 5096 __ EmitFPUTruncate(kRoundToMinusInf, |
| 5086 result_reg, | 5097 result_reg, |
| 5087 double_input, | 5098 double_input, |
| 5088 scratch1, | 5099 scratch1, |
| 5089 double_scratch0(), | 5100 double_scratch0(), |
| 5090 except_flag, | 5101 except_flag, |
| 5091 kCheckForInexactConversion); | 5102 kCheckForInexactConversion); |
| 5092 | 5103 |
| 5093 // Deopt if the operation did not succeed (except_flag != 0). | 5104 // Deopt if the operation did not succeed (except_flag != 0). |
| 5094 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); | 5105 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 5106 Operand(zero_reg)); |
| 5095 | 5107 |
| 5096 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5108 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5097 Label done; | 5109 Label done; |
| 5098 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | 5110 __ Branch(&done, ne, result_reg, Operand(zero_reg)); |
| 5099 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. | 5111 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. |
| 5100 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 5112 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 5101 DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg)); | 5113 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 5102 __ bind(&done); | 5114 __ bind(&done); |
| 5103 } | 5115 } |
| 5104 } | 5116 } |
| 5105 __ SmiTag(result_reg, result_reg); | 5117 __ SmiTag(result_reg, result_reg); |
| 5106 } | 5118 } |
| 5107 | 5119 |
| 5108 | 5120 |
| 5109 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5121 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 5110 LOperand* input = instr->value(); | 5122 LOperand* input = instr->value(); |
| 5111 __ SmiTst(ToRegister(input), at); | 5123 __ SmiTst(ToRegister(input), at); |
| 5112 DeoptimizeIf(ne, instr, at, Operand(zero_reg)); | 5124 DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg)); |
| 5113 } | 5125 } |
| 5114 | 5126 |
| 5115 | 5127 |
| 5116 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5128 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 5117 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5129 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5118 LOperand* input = instr->value(); | 5130 LOperand* input = instr->value(); |
| 5119 __ SmiTst(ToRegister(input), at); | 5131 __ SmiTst(ToRegister(input), at); |
| 5120 DeoptimizeIf(eq, instr, at, Operand(zero_reg)); | 5132 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 5121 } | 5133 } |
| 5122 } | 5134 } |
| 5123 | 5135 |
| 5124 | 5136 |
| 5125 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5137 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 5126 Register input = ToRegister(instr->value()); | 5138 Register input = ToRegister(instr->value()); |
| 5127 Register scratch = scratch0(); | 5139 Register scratch = scratch0(); |
| 5128 | 5140 |
| 5129 __ GetObjectType(input, scratch, scratch); | 5141 __ GetObjectType(input, scratch, scratch); |
| 5130 | 5142 |
| 5131 if (instr->hydrogen()->is_interval_check()) { | 5143 if (instr->hydrogen()->is_interval_check()) { |
| 5132 InstanceType first; | 5144 InstanceType first; |
| 5133 InstanceType last; | 5145 InstanceType last; |
| 5134 instr->hydrogen()->GetCheckInterval(&first, &last); | 5146 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 5135 | 5147 |
| 5136 // If there is only one type in the interval check for equality. | 5148 // If there is only one type in the interval check for equality. |
| 5137 if (first == last) { | 5149 if (first == last) { |
| 5138 DeoptimizeIf(ne, instr, scratch, Operand(first)); | 5150 DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first)); |
| 5139 } else { | 5151 } else { |
| 5140 DeoptimizeIf(lo, instr, scratch, Operand(first)); | 5152 DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first)); |
| 5141 // Omit check for the last type. | 5153 // Omit check for the last type. |
| 5142 if (last != LAST_TYPE) { | 5154 if (last != LAST_TYPE) { |
| 5143 DeoptimizeIf(hi, instr, scratch, Operand(last)); | 5155 DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last)); |
| 5144 } | 5156 } |
| 5145 } | 5157 } |
| 5146 } else { | 5158 } else { |
| 5147 uint8_t mask; | 5159 uint8_t mask; |
| 5148 uint8_t tag; | 5160 uint8_t tag; |
| 5149 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5161 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 5150 | 5162 |
| 5151 if (base::bits::IsPowerOfTwo32(mask)) { | 5163 if (base::bits::IsPowerOfTwo32(mask)) { |
| 5152 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5164 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
| 5153 __ And(at, scratch, mask); | 5165 __ And(at, scratch, mask); |
| 5154 DeoptimizeIf(tag == 0 ? ne : eq, instr, at, Operand(zero_reg)); | 5166 DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at, |
| 5167 Operand(zero_reg)); |
| 5155 } else { | 5168 } else { |
| 5156 __ And(scratch, scratch, Operand(mask)); | 5169 __ And(scratch, scratch, Operand(mask)); |
| 5157 DeoptimizeIf(ne, instr, scratch, Operand(tag)); | 5170 DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag)); |
| 5158 } | 5171 } |
| 5159 } | 5172 } |
| 5160 } | 5173 } |
| 5161 | 5174 |
| 5162 | 5175 |
| 5163 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5176 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 5164 Register reg = ToRegister(instr->value()); | 5177 Register reg = ToRegister(instr->value()); |
| 5165 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5178 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
| 5166 AllowDeferredHandleDereference smi_check; | 5179 AllowDeferredHandleDereference smi_check; |
| 5167 if (isolate()->heap()->InNewSpace(*object)) { | 5180 if (isolate()->heap()->InNewSpace(*object)) { |
| 5168 Register reg = ToRegister(instr->value()); | 5181 Register reg = ToRegister(instr->value()); |
| 5169 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5182 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 5170 __ li(at, Operand(Handle<Object>(cell))); | 5183 __ li(at, Operand(Handle<Object>(cell))); |
| 5171 __ ld(at, FieldMemOperand(at, Cell::kValueOffset)); | 5184 __ ld(at, FieldMemOperand(at, Cell::kValueOffset)); |
| 5172 DeoptimizeIf(ne, instr, reg, Operand(at)); | 5185 DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at)); |
| 5173 } else { | 5186 } else { |
| 5174 DeoptimizeIf(ne, instr, reg, Operand(object)); | 5187 DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object)); |
| 5175 } | 5188 } |
| 5176 } | 5189 } |
| 5177 | 5190 |
| 5178 | 5191 |
| 5179 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5192 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 5180 { | 5193 { |
| 5181 PushSafepointRegistersScope scope(this); | 5194 PushSafepointRegistersScope scope(this); |
| 5182 __ push(object); | 5195 __ push(object); |
| 5183 __ mov(cp, zero_reg); | 5196 __ mov(cp, zero_reg); |
| 5184 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5197 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 5185 RecordSafepointWithRegisters( | 5198 RecordSafepointWithRegisters( |
| 5186 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5199 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 5187 __ StoreToSafepointRegisterSlot(v0, scratch0()); | 5200 __ StoreToSafepointRegisterSlot(v0, scratch0()); |
| 5188 } | 5201 } |
| 5189 __ SmiTst(scratch0(), at); | 5202 __ SmiTst(scratch0(), at); |
| 5190 DeoptimizeIf(eq, instr, at, Operand(zero_reg)); | 5203 DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg)); |
| 5191 } | 5204 } |
| 5192 | 5205 |
| 5193 | 5206 |
| 5194 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5207 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5195 class DeferredCheckMaps FINAL : public LDeferredCode { | 5208 class DeferredCheckMaps FINAL : public LDeferredCode { |
| 5196 public: | 5209 public: |
| 5197 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5210 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 5198 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5211 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 5199 SetExit(check_maps()); | 5212 SetExit(check_maps()); |
| 5200 } | 5213 } |
| (...skipping 32 matching lines...) |
| 5233 Label success; | 5246 Label success; |
| 5234 for (int i = 0; i < maps->size() - 1; i++) { | 5247 for (int i = 0; i < maps->size() - 1; i++) { |
| 5235 Handle<Map> map = maps->at(i).handle(); | 5248 Handle<Map> map = maps->at(i).handle(); |
| 5236 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); | 5249 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); |
| 5237 } | 5250 } |
| 5238 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5251 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 5239 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). | 5252 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). |
| 5240 if (instr->hydrogen()->HasMigrationTarget()) { | 5253 if (instr->hydrogen()->HasMigrationTarget()) { |
| 5241 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); | 5254 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); |
| 5242 } else { | 5255 } else { |
| 5243 DeoptimizeIf(ne, instr, map_reg, Operand(map)); | 5256 DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map)); |
| 5244 } | 5257 } |
| 5245 | 5258 |
| 5246 __ bind(&success); | 5259 __ bind(&success); |
| 5247 } | 5260 } |
| 5248 | 5261 |
| 5249 | 5262 |
| 5250 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5263 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5251 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5264 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5252 Register result_reg = ToRegister(instr->result()); | 5265 Register result_reg = ToRegister(instr->result()); |
| 5253 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 5266 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); |
| (...skipping 17 matching lines...) |
| 5271 | 5284 |
| 5272 // Both smi and heap number cases are handled. | 5285 // Both smi and heap number cases are handled. |
| 5273 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); | 5286 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); |
| 5274 | 5287 |
| 5275 // Check for heap number | 5288 // Check for heap number |
| 5276 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5289 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 5277 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); | 5290 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); |
| 5278 | 5291 |
| 5279 // Check for undefined. Undefined is converted to zero for clamping | 5292 // Check for undefined. Undefined is converted to zero for clamping |
| 5280 // conversions. | 5293 // conversions. |
| 5281 DeoptimizeIf(ne, instr, input_reg, Operand(factory()->undefined_value())); | 5294 DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg, |
| 5295 Operand(factory()->undefined_value())); |
| 5282 __ mov(result_reg, zero_reg); | 5296 __ mov(result_reg, zero_reg); |
| 5283 __ jmp(&done); | 5297 __ jmp(&done); |
| 5284 | 5298 |
| 5285 // Heap number | 5299 // Heap number |
| 5286 __ bind(&heap_number); | 5300 __ bind(&heap_number); |
| 5287 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, | 5301 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, |
| 5288 HeapNumber::kValueOffset)); | 5302 HeapNumber::kValueOffset)); |
| 5289 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); | 5303 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); |
| 5290 __ jmp(&done); | 5304 __ jmp(&done); |
| 5291 | 5305 |
| (...skipping 404 matching lines...) |
| 5696 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | 5710 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
| 5697 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | 5711 Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
| 5698 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | 5712 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the |
| 5699 // needed return address), even though the implementation of LAZY and EAGER is | 5713 // needed return address), even though the implementation of LAZY and EAGER is |
| 5700 // now identical. When LAZY is eventually completely folded into EAGER, remove | 5714 // now identical. When LAZY is eventually completely folded into EAGER, remove |
| 5701 // the special case below. | 5715 // the special case below. |
| 5702 if (info()->IsStub() && type == Deoptimizer::EAGER) { | 5716 if (info()->IsStub() && type == Deoptimizer::EAGER) { |
| 5703 type = Deoptimizer::LAZY; | 5717 type = Deoptimizer::LAZY; |
| 5704 } | 5718 } |
| 5705 | 5719 |
| 5706 DeoptimizeIf(al, instr, type, zero_reg, Operand(zero_reg), | 5720 DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg, |
| 5707 instr->hydrogen()->reason()); | 5721 Operand(zero_reg)); |
| 5708 } | 5722 } |
| 5709 | 5723 |
| 5710 | 5724 |
| 5711 void LCodeGen::DoDummy(LDummy* instr) { | 5725 void LCodeGen::DoDummy(LDummy* instr) { |
| 5712 // Nothing to see here, move on! | 5726 // Nothing to see here, move on! |
| 5713 } | 5727 } |
| 5714 | 5728 |
| 5715 | 5729 |
| 5716 void LCodeGen::DoDummyUse(LDummyUse* instr) { | 5730 void LCodeGen::DoDummyUse(LDummyUse* instr) { |
| 5717 // Nothing to see here, move on! | 5731 // Nothing to see here, move on! |
| (...skipping 70 matching lines...) |
| 5788 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5802 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 5789 | 5803 |
| 5790 GenerateOsrPrologue(); | 5804 GenerateOsrPrologue(); |
| 5791 } | 5805 } |
| 5792 | 5806 |
| 5793 | 5807 |
| 5794 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5808 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| 5795 Register result = ToRegister(instr->result()); | 5809 Register result = ToRegister(instr->result()); |
| 5796 Register object = ToRegister(instr->object()); | 5810 Register object = ToRegister(instr->object()); |
| 5797 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 5811 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 5798 DeoptimizeIf(eq, instr, object, Operand(at)); | 5812 DeoptimizeIf(eq, instr, "undefined", object, Operand(at)); |
| 5799 | 5813 |
| 5800 Register null_value = a5; | 5814 Register null_value = a5; |
| 5801 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5815 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| 5802 DeoptimizeIf(eq, instr, object, Operand(null_value)); | 5816 DeoptimizeIf(eq, instr, "null", object, Operand(null_value)); |
| 5803 | 5817 |
| 5804 __ And(at, object, kSmiTagMask); | 5818 __ And(at, object, kSmiTagMask); |
| 5805 DeoptimizeIf(eq, instr, at, Operand(zero_reg)); | 5819 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 5806 | 5820 |
| 5807 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5821 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| 5808 __ GetObjectType(object, a1, a1); | 5822 __ GetObjectType(object, a1, a1); |
| 5809 DeoptimizeIf(le, instr, a1, Operand(LAST_JS_PROXY_TYPE)); | 5823 DeoptimizeIf(le, instr, "not a JavaScript object", a1, |
| 5824 Operand(LAST_JS_PROXY_TYPE)); |
| 5810 | 5825 |
| 5811 Label use_cache, call_runtime; | 5826 Label use_cache, call_runtime; |
| 5812 DCHECK(object.is(a0)); | 5827 DCHECK(object.is(a0)); |
| 5813 __ CheckEnumCache(null_value, &call_runtime); | 5828 __ CheckEnumCache(null_value, &call_runtime); |
| 5814 | 5829 |
| 5815 __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset)); | 5830 __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5816 __ Branch(&use_cache); | 5831 __ Branch(&use_cache); |
| 5817 | 5832 |
| 5818 // Get the set of properties to enumerate. | 5833 // Get the set of properties to enumerate. |
| 5819 __ bind(&call_runtime); | 5834 __ bind(&call_runtime); |
| 5820 __ push(object); | 5835 __ push(object); |
| 5821 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5836 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| 5822 | 5837 |
| 5823 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); | 5838 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 5824 DCHECK(result.is(v0)); | 5839 DCHECK(result.is(v0)); |
| 5825 __ LoadRoot(at, Heap::kMetaMapRootIndex); | 5840 __ LoadRoot(at, Heap::kMetaMapRootIndex); |
| 5826 DeoptimizeIf(ne, instr, a1, Operand(at)); | 5841 DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at)); |
| 5827 __ bind(&use_cache); | 5842 __ bind(&use_cache); |
| 5828 } | 5843 } |
| 5829 | 5844 |
| 5830 | 5845 |
| 5831 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5846 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| 5832 Register map = ToRegister(instr->map()); | 5847 Register map = ToRegister(instr->map()); |
| 5833 Register result = ToRegister(instr->result()); | 5848 Register result = ToRegister(instr->result()); |
| 5834 Label load_cache, done; | 5849 Label load_cache, done; |
| 5835 __ EnumLength(result, map); | 5850 __ EnumLength(result, map); |
| 5836 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); | 5851 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); |
| 5837 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); | 5852 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 5838 __ jmp(&done); | 5853 __ jmp(&done); |
| 5839 | 5854 |
| 5840 __ bind(&load_cache); | 5855 __ bind(&load_cache); |
| 5841 __ LoadInstanceDescriptors(map, result); | 5856 __ LoadInstanceDescriptors(map, result); |
| 5842 __ ld(result, | 5857 __ ld(result, |
| 5843 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5858 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5844 __ ld(result, | 5859 __ ld(result, |
| 5845 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5860 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5846 DeoptimizeIf(eq, instr, result, Operand(zero_reg)); | 5861 DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg)); |
| 5847 | 5862 |
| 5848 __ bind(&done); | 5863 __ bind(&done); |
| 5849 } | 5864 } |
| 5850 | 5865 |
| 5851 | 5866 |
| 5852 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5867 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5853 Register object = ToRegister(instr->value()); | 5868 Register object = ToRegister(instr->value()); |
| 5854 Register map = ToRegister(instr->map()); | 5869 Register map = ToRegister(instr->map()); |
| 5855 __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5870 __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5856 DeoptimizeIf(ne, instr, map, Operand(scratch0())); | 5871 DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0())); |
| 5857 } | 5872 } |
| 5858 | 5873 |
| 5859 | 5874 |
| 5860 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5875 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5861 Register result, | 5876 Register result, |
| 5862 Register object, | 5877 Register object, |
| 5863 Register index) { | 5878 Register index) { |
| 5864 PushSafepointRegistersScope scope(this); | 5879 PushSafepointRegistersScope scope(this); |
| 5865 __ Push(object, index); | 5880 __ Push(object, index); |
| 5866 __ mov(cp, zero_reg); | 5881 __ mov(cp, zero_reg); |
| (...skipping 73 matching lines...) |
| 5940 __ li(at, scope_info); | 5955 __ li(at, scope_info); |
| 5941 __ Push(at, ToRegister(instr->function())); | 5956 __ Push(at, ToRegister(instr->function())); |
| 5942 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5957 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 5943 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5958 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 5944 } | 5959 } |
| 5945 | 5960 |
| 5946 | 5961 |
| 5947 #undef __ | 5962 #undef __ |
| 5948 | 5963 |
| 5949 } } // namespace v8::internal | 5964 } } // namespace v8::internal |
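For reviewers skimming the call sites above: a minimal, hypothetical sketch of the argument reordering this patch applies at every DeoptimizeIf call site, inferred only from the OLD/NEW columns in this diff. The deopt reason string now sits between the LInstruction* and the comparison operands (e.g. DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg))), where it previously trailed them. The types and the printf-based body below are mock stand-ins, not V8 code; only the parameter ordering mirrors the patch.

// Self-contained mock -- NOT V8 code.  The Condition, Register, Operand and
// LInstruction types here are invented stand-ins; only the call-site shape
// (reason before operands) reflects what this diff changes.
#include <cstdio>

struct Operand { long immediate; };
struct Register { const char* name; };
struct LInstruction {};

// New-style shape: condition, instruction, reason, then comparison operands.
void DeoptimizeIf(const char* condition, LInstruction* instr,
                  const char* reason, Register src1, Operand src2) {
  (void)instr;  // The real code registers the deopt environment here.
  std::printf("deopt if %s: reason=%s, %s vs %ld\n", condition, reason,
              src1.name, src2.immediate);
}

int main() {
  LInstruction instr;
  Register scratch{"scratch"};
  // Mirrors e.g. DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg)).
  DeoptimizeIf("eq", &instr, "Smi", scratch, Operand{0});
  return 0;
}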