| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 796 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 807 int pc_offset = masm()->pc_offset(); | 807 int pc_offset = masm()->pc_offset(); |
| 808 environment->Register(deoptimization_index, | 808 environment->Register(deoptimization_index, |
| 809 translation.index(), | 809 translation.index(), |
| 810 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 810 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 811 deoptimizations_.Add(environment, zone()); | 811 deoptimizations_.Add(environment, zone()); |
| 812 } | 812 } |
| 813 } | 813 } |
| 814 | 814 |
| 815 | 815 |
| 816 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 816 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 817 Deoptimizer::DeoptReason deopt_reason, | |
| 818 Deoptimizer::BailoutType bailout_type, | 817 Deoptimizer::BailoutType bailout_type, |
| 819 Register src1, const Operand& src2) { | 818 const char* detail, Register src1, |
| 819 const Operand& src2) { |
| 820 LEnvironment* environment = instr->environment(); | 820 LEnvironment* environment = instr->environment(); |
| 821 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 821 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 822 DCHECK(environment->HasBeenRegistered()); | 822 DCHECK(environment->HasBeenRegistered()); |
| 823 int id = environment->deoptimization_index(); | 823 int id = environment->deoptimization_index(); |
| 824 DCHECK(info()->IsOptimizing() || info()->IsStub()); | 824 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
| 825 Address entry = | 825 Address entry = |
| 826 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 826 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 827 if (entry == NULL) { | 827 if (entry == NULL) { |
| 828 Abort(kBailoutWasNotPrepared); | 828 Abort(kBailoutWasNotPrepared); |
| 829 return; | 829 return; |
| (...skipping 21 matching lines...) Expand all Loading... |
| 851 if (info()->ShouldTrapOnDeopt()) { | 851 if (info()->ShouldTrapOnDeopt()) { |
| 852 Label skip; | 852 Label skip; |
| 853 if (condition != al) { | 853 if (condition != al) { |
| 854 __ Branch(&skip, NegateCondition(condition), src1, src2); | 854 __ Branch(&skip, NegateCondition(condition), src1, src2); |
| 855 } | 855 } |
| 856 __ stop("trap_on_deopt"); | 856 __ stop("trap_on_deopt"); |
| 857 __ bind(&skip); | 857 __ bind(&skip); |
| 858 } | 858 } |
| 859 | 859 |
| 860 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), | 860 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), |
| 861 instr->Mnemonic(), deopt_reason); | 861 instr->Mnemonic(), detail); |
| 862 DCHECK(info()->IsStub() || frame_is_built_); | 862 DCHECK(info()->IsStub() || frame_is_built_); |
| 863 // Go through jump table if we need to handle condition, build frame, or | 863 // Go through jump table if we need to handle condition, build frame, or |
| 864 // restore caller doubles. | 864 // restore caller doubles. |
| 865 if (condition == al && frame_is_built_ && | 865 if (condition == al && frame_is_built_ && |
| 866 !info()->saves_caller_doubles()) { | 866 !info()->saves_caller_doubles()) { |
| 867 DeoptComment(reason); | 867 DeoptComment(reason); |
| 868 __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); | 868 __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); |
| 869 } else { | 869 } else { |
| 870 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, | 870 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, |
| 871 !frame_is_built_); | 871 !frame_is_built_); |
| 872 // We often have several deopts to the same entry, reuse the last | 872 // We often have several deopts to the same entry, reuse the last |
| 873 // jump entry if this is the case. | 873 // jump entry if this is the case. |
| 874 if (jump_table_.is_empty() || | 874 if (jump_table_.is_empty() || |
| 875 !table_entry.IsEquivalentTo(jump_table_.last())) { | 875 !table_entry.IsEquivalentTo(jump_table_.last())) { |
| 876 jump_table_.Add(table_entry, zone()); | 876 jump_table_.Add(table_entry, zone()); |
| 877 } | 877 } |
| 878 __ Branch(&jump_table_.last().label, condition, src1, src2); | 878 __ Branch(&jump_table_.last().label, condition, src1, src2); |
| 879 } | 879 } |
| 880 } | 880 } |
| 881 | 881 |
| 882 | 882 |
| 883 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 883 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 884 Deoptimizer::DeoptReason deopt_reason, | 884 const char* detail, Register src1, |
| 885 Register src1, const Operand& src2) { | 885 const Operand& src2) { |
| 886 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 886 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| 887 ? Deoptimizer::LAZY | 887 ? Deoptimizer::LAZY |
| 888 : Deoptimizer::EAGER; | 888 : Deoptimizer::EAGER; |
| 889 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2); | 889 DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2); |
| 890 } | 890 } |
| 891 | 891 |
| 892 | 892 |
| 893 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 893 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 894 int length = deoptimizations_.length(); | 894 int length = deoptimizations_.length(); |
| 895 if (length == 0) return; | 895 if (length == 0) return; |
| 896 Handle<DeoptimizationInputData> data = | 896 Handle<DeoptimizationInputData> data = |
| 897 DeoptimizationInputData::New(isolate(), length, TENURED); | 897 DeoptimizationInputData::New(isolate(), length, TENURED); |
| 898 | 898 |
| 899 Handle<ByteArray> translations = | 899 Handle<ByteArray> translations = |
| (...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1110 HMod* hmod = instr->hydrogen(); | 1110 HMod* hmod = instr->hydrogen(); |
| 1111 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1111 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1112 Label dividend_is_not_negative, done; | 1112 Label dividend_is_not_negative, done; |
| 1113 | 1113 |
| 1114 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1114 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 1115 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); | 1115 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); |
| 1116 // Note: The code below even works when right contains kMinInt. | 1116 // Note: The code below even works when right contains kMinInt. |
| 1117 __ subu(dividend, zero_reg, dividend); | 1117 __ subu(dividend, zero_reg, dividend); |
| 1118 __ And(dividend, dividend, Operand(mask)); | 1118 __ And(dividend, dividend, Operand(mask)); |
| 1119 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1119 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1120 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1120 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1121 Operand(zero_reg)); | |
| 1122 } | 1121 } |
| 1123 __ Branch(USE_DELAY_SLOT, &done); | 1122 __ Branch(USE_DELAY_SLOT, &done); |
| 1124 __ subu(dividend, zero_reg, dividend); | 1123 __ subu(dividend, zero_reg, dividend); |
| 1125 } | 1124 } |
| 1126 | 1125 |
| 1127 __ bind(&dividend_is_not_negative); | 1126 __ bind(&dividend_is_not_negative); |
| 1128 __ And(dividend, dividend, Operand(mask)); | 1127 __ And(dividend, dividend, Operand(mask)); |
| 1129 __ bind(&done); | 1128 __ bind(&done); |
| 1130 } | 1129 } |
| 1131 | 1130 |
| (...skipping 11 matching lines...) Expand all Loading... |
| 1143 | 1142 |
| 1144 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1143 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1145 __ Mul(result, result, Operand(Abs(divisor))); | 1144 __ Mul(result, result, Operand(Abs(divisor))); |
| 1146 __ Subu(result, dividend, Operand(result)); | 1145 __ Subu(result, dividend, Operand(result)); |
| 1147 | 1146 |
| 1148 // Check for negative zero. | 1147 // Check for negative zero. |
| 1149 HMod* hmod = instr->hydrogen(); | 1148 HMod* hmod = instr->hydrogen(); |
| 1150 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1149 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1151 Label remainder_not_zero; | 1150 Label remainder_not_zero; |
| 1152 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); | 1151 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); |
| 1153 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend, | 1152 DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1154 Operand(zero_reg)); | |
| 1155 __ bind(&remainder_not_zero); | 1153 __ bind(&remainder_not_zero); |
| 1156 } | 1154 } |
| 1157 } | 1155 } |
| 1158 | 1156 |
| 1159 | 1157 |
| 1160 void LCodeGen::DoModI(LModI* instr) { | 1158 void LCodeGen::DoModI(LModI* instr) { |
| 1161 HMod* hmod = instr->hydrogen(); | 1159 HMod* hmod = instr->hydrogen(); |
| 1162 const Register left_reg = ToRegister(instr->left()); | 1160 const Register left_reg = ToRegister(instr->left()); |
| 1163 const Register right_reg = ToRegister(instr->right()); | 1161 const Register right_reg = ToRegister(instr->right()); |
| 1164 const Register result_reg = ToRegister(instr->result()); | 1162 const Register result_reg = ToRegister(instr->result()); |
| 1165 | 1163 |
| 1166 // div runs in the background while we check for special cases. | 1164 // div runs in the background while we check for special cases. |
| 1167 __ Mod(result_reg, left_reg, right_reg); | 1165 __ Mod(result_reg, left_reg, right_reg); |
| 1168 | 1166 |
| 1169 Label done; | 1167 Label done; |
| 1170 // Check for x % 0, we have to deopt in this case because we can't return a | 1168 // Check for x % 0, we have to deopt in this case because we can't return a |
| 1171 // NaN. | 1169 // NaN. |
| 1172 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1170 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1173 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg, | 1171 DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg)); |
| 1174 Operand(zero_reg)); | |
| 1175 } | 1172 } |
| 1176 | 1173 |
| 1177 // Check for kMinInt % -1, div will return kMinInt, which is not what we | 1174 // Check for kMinInt % -1, div will return kMinInt, which is not what we |
| 1178 // want. We have to deopt if we care about -0, because we can't return that. | 1175 // want. We have to deopt if we care about -0, because we can't return that. |
| 1179 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1176 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 1180 Label no_overflow_possible; | 1177 Label no_overflow_possible; |
| 1181 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); | 1178 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); |
| 1182 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1179 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1183 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1)); | 1180 DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1)); |
| 1184 } else { | 1181 } else { |
| 1185 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); | 1182 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); |
| 1186 __ Branch(USE_DELAY_SLOT, &done); | 1183 __ Branch(USE_DELAY_SLOT, &done); |
| 1187 __ mov(result_reg, zero_reg); | 1184 __ mov(result_reg, zero_reg); |
| 1188 } | 1185 } |
| 1189 __ bind(&no_overflow_possible); | 1186 __ bind(&no_overflow_possible); |
| 1190 } | 1187 } |
| 1191 | 1188 |
| 1192 // If we care about -0, test if the dividend is <0 and the result is 0. | 1189 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1193 __ Branch(&done, ge, left_reg, Operand(zero_reg)); | 1190 __ Branch(&done, ge, left_reg, Operand(zero_reg)); |
| 1194 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1191 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1195 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg, | 1192 DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg)); |
| 1196 Operand(zero_reg)); | |
| 1197 } | 1193 } |
| 1198 __ bind(&done); | 1194 __ bind(&done); |
| 1199 } | 1195 } |
| 1200 | 1196 |
| 1201 | 1197 |
| 1202 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1198 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1203 Register dividend = ToRegister(instr->dividend()); | 1199 Register dividend = ToRegister(instr->dividend()); |
| 1204 int32_t divisor = instr->divisor(); | 1200 int32_t divisor = instr->divisor(); |
| 1205 Register result = ToRegister(instr->result()); | 1201 Register result = ToRegister(instr->result()); |
| 1206 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1202 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1207 DCHECK(!result.is(dividend)); | 1203 DCHECK(!result.is(dividend)); |
| 1208 | 1204 |
| 1209 // Check for (0 / -x) that will produce negative zero. | 1205 // Check for (0 / -x) that will produce negative zero. |
| 1210 HDiv* hdiv = instr->hydrogen(); | 1206 HDiv* hdiv = instr->hydrogen(); |
| 1211 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1207 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1212 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1208 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1213 Operand(zero_reg)); | |
| 1214 } | 1209 } |
| 1215 // Check for (kMinInt / -1). | 1210 // Check for (kMinInt / -1). |
| 1216 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1211 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1217 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt)); | 1212 DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt)); |
| 1218 } | 1213 } |
| 1219 // Deoptimize if remainder will not be 0. | 1214 // Deoptimize if remainder will not be 0. |
| 1220 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1215 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1221 divisor != 1 && divisor != -1) { | 1216 divisor != 1 && divisor != -1) { |
| 1222 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1217 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1223 __ And(at, dividend, Operand(mask)); | 1218 __ And(at, dividend, Operand(mask)); |
| 1224 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg)); | 1219 DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg)); |
| 1225 } | 1220 } |
| 1226 | 1221 |
| 1227 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1222 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 1228 __ Subu(result, zero_reg, dividend); | 1223 __ Subu(result, zero_reg, dividend); |
| 1229 return; | 1224 return; |
| 1230 } | 1225 } |
| 1231 uint16_t shift = WhichPowerOf2Abs(divisor); | 1226 uint16_t shift = WhichPowerOf2Abs(divisor); |
| 1232 if (shift == 0) { | 1227 if (shift == 0) { |
| 1233 __ Move(result, dividend); | 1228 __ Move(result, dividend); |
| 1234 } else if (shift == 1) { | 1229 } else if (shift == 1) { |
| (...skipping 16 matching lines...) Expand all Loading... |
| 1251 DCHECK(!dividend.is(result)); | 1246 DCHECK(!dividend.is(result)); |
| 1252 | 1247 |
| 1253 if (divisor == 0) { | 1248 if (divisor == 0) { |
| 1254 DeoptimizeIf(al, instr); | 1249 DeoptimizeIf(al, instr); |
| 1255 return; | 1250 return; |
| 1256 } | 1251 } |
| 1257 | 1252 |
| 1258 // Check for (0 / -x) that will produce negative zero. | 1253 // Check for (0 / -x) that will produce negative zero. |
| 1259 HDiv* hdiv = instr->hydrogen(); | 1254 HDiv* hdiv = instr->hydrogen(); |
| 1260 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1255 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1261 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1256 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1262 Operand(zero_reg)); | |
| 1263 } | 1257 } |
| 1264 | 1258 |
| 1265 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1259 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1266 if (divisor < 0) __ Subu(result, zero_reg, result); | 1260 if (divisor < 0) __ Subu(result, zero_reg, result); |
| 1267 | 1261 |
| 1268 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1262 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1269 __ Mul(scratch0(), result, Operand(divisor)); | 1263 __ Mul(scratch0(), result, Operand(divisor)); |
| 1270 __ Subu(scratch0(), scratch0(), dividend); | 1264 __ Subu(scratch0(), scratch0(), dividend); |
| 1271 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(), | 1265 DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg)); |
| 1272 Operand(zero_reg)); | |
| 1273 } | 1266 } |
| 1274 } | 1267 } |
| 1275 | 1268 |
| 1276 | 1269 |
| 1277 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1270 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1278 void LCodeGen::DoDivI(LDivI* instr) { | 1271 void LCodeGen::DoDivI(LDivI* instr) { |
| 1279 HBinaryOperation* hdiv = instr->hydrogen(); | 1272 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1280 Register dividend = ToRegister(instr->dividend()); | 1273 Register dividend = ToRegister(instr->dividend()); |
| 1281 Register divisor = ToRegister(instr->divisor()); | 1274 Register divisor = ToRegister(instr->divisor()); |
| 1282 const Register result = ToRegister(instr->result()); | 1275 const Register result = ToRegister(instr->result()); |
| 1283 Register remainder = ToRegister(instr->temp()); | 1276 Register remainder = ToRegister(instr->temp()); |
| 1284 | 1277 |
| 1285 // On MIPS div is asynchronous - it will run in the background while we | 1278 // On MIPS div is asynchronous - it will run in the background while we |
| 1286 // check for special cases. | 1279 // check for special cases. |
| 1287 __ Div(remainder, result, dividend, divisor); | 1280 __ Div(remainder, result, dividend, divisor); |
| 1288 | 1281 |
| 1289 // Check for x / 0. | 1282 // Check for x / 0. |
| 1290 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1283 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1291 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor, | 1284 DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg)); |
| 1292 Operand(zero_reg)); | |
| 1293 } | 1285 } |
| 1294 | 1286 |
| 1295 // Check for (0 / -x) that will produce negative zero. | 1287 // Check for (0 / -x) that will produce negative zero. |
| 1296 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1288 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1297 Label left_not_zero; | 1289 Label left_not_zero; |
| 1298 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); | 1290 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); |
| 1299 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor, | 1291 DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg)); |
| 1300 Operand(zero_reg)); | |
| 1301 __ bind(&left_not_zero); | 1292 __ bind(&left_not_zero); |
| 1302 } | 1293 } |
| 1303 | 1294 |
| 1304 // Check for (kMinInt / -1). | 1295 // Check for (kMinInt / -1). |
| 1305 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1296 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1306 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1297 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1307 Label left_not_min_int; | 1298 Label left_not_min_int; |
| 1308 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); | 1299 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); |
| 1309 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1)); | 1300 DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1)); |
| 1310 __ bind(&left_not_min_int); | 1301 __ bind(&left_not_min_int); |
| 1311 } | 1302 } |
| 1312 | 1303 |
| 1313 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1304 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1314 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder, | 1305 DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg)); |
| 1315 Operand(zero_reg)); | |
| 1316 } | 1306 } |
| 1317 } | 1307 } |
| 1318 | 1308 |
| 1319 | 1309 |
| 1320 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1310 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
| 1321 DoubleRegister addend = ToDoubleRegister(instr->addend()); | 1311 DoubleRegister addend = ToDoubleRegister(instr->addend()); |
| 1322 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1312 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); |
| 1323 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1313 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
| 1324 | 1314 |
| 1325 // This is computed in-place. | 1315 // This is computed in-place. |
| (...skipping 25 matching lines...) Expand all Loading... |
| 1351 } | 1341 } |
| 1352 | 1342 |
| 1353 // If the divisor is negative, we have to negate and handle edge cases. | 1343 // If the divisor is negative, we have to negate and handle edge cases. |
| 1354 | 1344 |
| 1355 // dividend can be the same register as result so save the value of it | 1345 // dividend can be the same register as result so save the value of it |
| 1356 // for checking overflow. | 1346 // for checking overflow. |
| 1357 __ Move(scratch, dividend); | 1347 __ Move(scratch, dividend); |
| 1358 | 1348 |
| 1359 __ Subu(result, zero_reg, dividend); | 1349 __ Subu(result, zero_reg, dividend); |
| 1360 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1350 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1361 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg)); | 1351 DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg)); |
| 1362 } | 1352 } |
| 1363 | 1353 |
| 1364 // Dividing by -1 is basically negation, unless we overflow. | 1354 // Dividing by -1 is basically negation, unless we overflow. |
| 1365 __ Xor(scratch, scratch, result); | 1355 __ Xor(scratch, scratch, result); |
| 1366 if (divisor == -1) { | 1356 if (divisor == -1) { |
| 1367 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1357 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1368 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch, | 1358 DeoptimizeIf(ge, instr, "overflow", scratch, Operand(zero_reg)); |
| 1369 Operand(zero_reg)); | |
| 1370 } | 1359 } |
| 1371 return; | 1360 return; |
| 1372 } | 1361 } |
| 1373 | 1362 |
| 1374 // If the negation could not overflow, simply shifting is OK. | 1363 // If the negation could not overflow, simply shifting is OK. |
| 1375 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1364 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1376 __ sra(result, result, shift); | 1365 __ sra(result, result, shift); |
| 1377 return; | 1366 return; |
| 1378 } | 1367 } |
| 1379 | 1368 |
| (...skipping 14 matching lines...) Expand all Loading... |
| 1394 DCHECK(!dividend.is(result)); | 1383 DCHECK(!dividend.is(result)); |
| 1395 | 1384 |
| 1396 if (divisor == 0) { | 1385 if (divisor == 0) { |
| 1397 DeoptimizeIf(al, instr); | 1386 DeoptimizeIf(al, instr); |
| 1398 return; | 1387 return; |
| 1399 } | 1388 } |
| 1400 | 1389 |
| 1401 // Check for (0 / -x) that will produce negative zero. | 1390 // Check for (0 / -x) that will produce negative zero. |
| 1402 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1391 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1403 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1392 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1404 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1393 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg)); |
| 1405 Operand(zero_reg)); | |
| 1406 } | 1394 } |
| 1407 | 1395 |
| 1408 // Easy case: We need no dynamic check for the dividend and the flooring | 1396 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1409 // division is the same as the truncating division. | 1397 // division is the same as the truncating division. |
| 1410 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1398 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1411 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1399 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1412 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1400 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1413 if (divisor < 0) __ Subu(result, zero_reg, result); | 1401 if (divisor < 0) __ Subu(result, zero_reg, result); |
| 1414 return; | 1402 return; |
| 1415 } | 1403 } |
| (...skipping 23 matching lines...) Expand all Loading... |
| 1439 Register dividend = ToRegister(instr->dividend()); | 1427 Register dividend = ToRegister(instr->dividend()); |
| 1440 Register divisor = ToRegister(instr->divisor()); | 1428 Register divisor = ToRegister(instr->divisor()); |
| 1441 const Register result = ToRegister(instr->result()); | 1429 const Register result = ToRegister(instr->result()); |
| 1442 Register remainder = scratch0(); | 1430 Register remainder = scratch0(); |
| 1443 // On MIPS div is asynchronous - it will run in the background while we | 1431 // On MIPS div is asynchronous - it will run in the background while we |
| 1444 // check for special cases. | 1432 // check for special cases. |
| 1445 __ Div(remainder, result, dividend, divisor); | 1433 __ Div(remainder, result, dividend, divisor); |
| 1446 | 1434 |
| 1447 // Check for x / 0. | 1435 // Check for x / 0. |
| 1448 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1436 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1449 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor, | 1437 DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg)); |
| 1450 Operand(zero_reg)); | |
| 1451 } | 1438 } |
| 1452 | 1439 |
| 1453 // Check for (0 / -x) that will produce negative zero. | 1440 // Check for (0 / -x) that will produce negative zero. |
| 1454 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1441 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1455 Label left_not_zero; | 1442 Label left_not_zero; |
| 1456 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); | 1443 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); |
| 1457 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor, | 1444 DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg)); |
| 1458 Operand(zero_reg)); | |
| 1459 __ bind(&left_not_zero); | 1445 __ bind(&left_not_zero); |
| 1460 } | 1446 } |
| 1461 | 1447 |
| 1462 // Check for (kMinInt / -1). | 1448 // Check for (kMinInt / -1). |
| 1463 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1449 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1464 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1450 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1465 Label left_not_min_int; | 1451 Label left_not_min_int; |
| 1466 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); | 1452 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); |
| 1467 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1)); | 1453 DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1)); |
| 1468 __ bind(&left_not_min_int); | 1454 __ bind(&left_not_min_int); |
| 1469 } | 1455 } |
| 1470 | 1456 |
| 1471 // We performed a truncating division. Correct the result if necessary. | 1457 // We performed a truncating division. Correct the result if necessary. |
| 1472 Label done; | 1458 Label done; |
| 1473 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); | 1459 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); |
| 1474 __ Xor(remainder, remainder, Operand(divisor)); | 1460 __ Xor(remainder, remainder, Operand(divisor)); |
| 1475 __ Branch(&done, ge, remainder, Operand(zero_reg)); | 1461 __ Branch(&done, ge, remainder, Operand(zero_reg)); |
| 1476 __ Subu(result, result, Operand(1)); | 1462 __ Subu(result, result, Operand(1)); |
| 1477 __ bind(&done); | 1463 __ bind(&done); |
| (...skipping 10 matching lines...) Expand all Loading... |
| 1488 bool bailout_on_minus_zero = | 1474 bool bailout_on_minus_zero = |
| 1489 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1475 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 1490 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1476 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1491 | 1477 |
| 1492 if (right_op->IsConstantOperand()) { | 1478 if (right_op->IsConstantOperand()) { |
| 1493 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1479 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 1494 | 1480 |
| 1495 if (bailout_on_minus_zero && (constant < 0)) { | 1481 if (bailout_on_minus_zero && (constant < 0)) { |
| 1496 // The case of a null constant will be handled separately. | 1482 // The case of a null constant will be handled separately. |
| 1497 // If constant is negative and left is null, the result should be -0. | 1483 // If constant is negative and left is null, the result should be -0. |
| 1498 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg)); | 1484 DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg)); |
| 1499 } | 1485 } |
| 1500 | 1486 |
| 1501 switch (constant) { | 1487 switch (constant) { |
| 1502 case -1: | 1488 case -1: |
| 1503 if (overflow) { | 1489 if (overflow) { |
| 1504 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); | 1490 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); |
| 1505 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch, | 1491 DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg)); |
| 1506 Operand(zero_reg)); | |
| 1507 } else { | 1492 } else { |
| 1508 __ Subu(result, zero_reg, left); | 1493 __ Subu(result, zero_reg, left); |
| 1509 } | 1494 } |
| 1510 break; | 1495 break; |
| 1511 case 0: | 1496 case 0: |
| 1512 if (bailout_on_minus_zero) { | 1497 if (bailout_on_minus_zero) { |
| 1513 // If left is strictly negative and the constant is null, the | 1498 // If left is strictly negative and the constant is null, the |
| 1514 // result is -0. Deoptimize if required, otherwise return 0. | 1499 // result is -0. Deoptimize if required, otherwise return 0. |
| 1515 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left, | 1500 DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg)); |
| 1516 Operand(zero_reg)); | |
| 1517 } | 1501 } |
| 1518 __ mov(result, zero_reg); | 1502 __ mov(result, zero_reg); |
| 1519 break; | 1503 break; |
| 1520 case 1: | 1504 case 1: |
| 1521 // Nothing to do. | 1505 // Nothing to do. |
| 1522 __ Move(result, left); | 1506 __ Move(result, left); |
| 1523 break; | 1507 break; |
| 1524 default: | 1508 default: |
| 1525 // Multiplying by powers of two and powers of two plus or minus | 1509 // Multiplying by powers of two and powers of two plus or minus |
| 1526 // one can be done faster with shifted operands. | 1510 // one can be done faster with shifted operands. |
| (...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1558 | 1542 |
| 1559 if (overflow) { | 1543 if (overflow) { |
| 1560 // hi:lo = left * right. | 1544 // hi:lo = left * right. |
| 1561 if (instr->hydrogen()->representation().IsSmi()) { | 1545 if (instr->hydrogen()->representation().IsSmi()) { |
| 1562 __ SmiUntag(result, left); | 1546 __ SmiUntag(result, left); |
| 1563 __ Mul(scratch, result, result, right); | 1547 __ Mul(scratch, result, result, right); |
| 1564 } else { | 1548 } else { |
| 1565 __ Mul(scratch, result, left, right); | 1549 __ Mul(scratch, result, left, right); |
| 1566 } | 1550 } |
| 1567 __ sra(at, result, 31); | 1551 __ sra(at, result, 31); |
| 1568 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at)); | 1552 DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at)); |
| 1569 } else { | 1553 } else { |
| 1570 if (instr->hydrogen()->representation().IsSmi()) { | 1554 if (instr->hydrogen()->representation().IsSmi()) { |
| 1571 __ SmiUntag(result, left); | 1555 __ SmiUntag(result, left); |
| 1572 __ Mul(result, result, right); | 1556 __ Mul(result, result, right); |
| 1573 } else { | 1557 } else { |
| 1574 __ Mul(result, left, right); | 1558 __ Mul(result, left, right); |
| 1575 } | 1559 } |
| 1576 } | 1560 } |
| 1577 | 1561 |
| 1578 if (bailout_on_minus_zero) { | 1562 if (bailout_on_minus_zero) { |
| 1579 Label done; | 1563 Label done; |
| 1580 __ Xor(at, left, right); | 1564 __ Xor(at, left, right); |
| 1581 __ Branch(&done, ge, at, Operand(zero_reg)); | 1565 __ Branch(&done, ge, at, Operand(zero_reg)); |
| 1582 // Bail out if the result is minus zero. | 1566 // Bail out if the result is minus zero. |
| 1583 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, | 1567 DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg)); |
| 1584 Operand(zero_reg)); | |
| 1585 __ bind(&done); | 1568 __ bind(&done); |
| 1586 } | 1569 } |
| 1587 } | 1570 } |
| 1588 } | 1571 } |
| 1589 | 1572 |
| 1590 | 1573 |
| 1591 void LCodeGen::DoBitI(LBitI* instr) { | 1574 void LCodeGen::DoBitI(LBitI* instr) { |
| 1592 LOperand* left_op = instr->left(); | 1575 LOperand* left_op = instr->left(); |
| 1593 LOperand* right_op = instr->right(); | 1576 LOperand* right_op = instr->right(); |
| 1594 DCHECK(left_op->IsRegister()); | 1577 DCHECK(left_op->IsRegister()); |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1638 switch (instr->op()) { | 1621 switch (instr->op()) { |
| 1639 case Token::ROR: | 1622 case Token::ROR: |
| 1640 __ Ror(result, left, Operand(ToRegister(right_op))); | 1623 __ Ror(result, left, Operand(ToRegister(right_op))); |
| 1641 break; | 1624 break; |
| 1642 case Token::SAR: | 1625 case Token::SAR: |
| 1643 __ srav(result, left, ToRegister(right_op)); | 1626 __ srav(result, left, ToRegister(right_op)); |
| 1644 break; | 1627 break; |
| 1645 case Token::SHR: | 1628 case Token::SHR: |
| 1646 __ srlv(result, left, ToRegister(right_op)); | 1629 __ srlv(result, left, ToRegister(right_op)); |
| 1647 if (instr->can_deopt()) { | 1630 if (instr->can_deopt()) { |
| 1648 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result, | 1631 DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg)); |
| 1649 Operand(zero_reg)); | |
| 1650 } | 1632 } |
| 1651 break; | 1633 break; |
| 1652 case Token::SHL: | 1634 case Token::SHL: |
| 1653 __ sllv(result, left, ToRegister(right_op)); | 1635 __ sllv(result, left, ToRegister(right_op)); |
| 1654 break; | 1636 break; |
| 1655 default: | 1637 default: |
| 1656 UNREACHABLE(); | 1638 UNREACHABLE(); |
| 1657 break; | 1639 break; |
| 1658 } | 1640 } |
| 1659 } else { | 1641 } else { |
| (...skipping 14 matching lines...) Expand all Loading... |
| 1674 } else { | 1656 } else { |
| 1675 __ Move(result, left); | 1657 __ Move(result, left); |
| 1676 } | 1658 } |
| 1677 break; | 1659 break; |
| 1678 case Token::SHR: | 1660 case Token::SHR: |
| 1679 if (shift_count != 0) { | 1661 if (shift_count != 0) { |
| 1680 __ srl(result, left, shift_count); | 1662 __ srl(result, left, shift_count); |
| 1681 } else { | 1663 } else { |
| 1682 if (instr->can_deopt()) { | 1664 if (instr->can_deopt()) { |
| 1683 __ And(at, left, Operand(0x80000000)); | 1665 __ And(at, left, Operand(0x80000000)); |
| 1684 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at, | 1666 DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg)); |
| 1685 Operand(zero_reg)); | |
| 1686 } | 1667 } |
| 1687 __ Move(result, left); | 1668 __ Move(result, left); |
| 1688 } | 1669 } |
| 1689 break; | 1670 break; |
| 1690 case Token::SHL: | 1671 case Token::SHL: |
| 1691 if (shift_count != 0) { | 1672 if (shift_count != 0) { |
| 1692 if (instr->hydrogen_value()->representation().IsSmi() && | 1673 if (instr->hydrogen_value()->representation().IsSmi() && |
| 1693 instr->can_deopt()) { | 1674 instr->can_deopt()) { |
| 1694 if (shift_count != 1) { | 1675 if (shift_count != 1) { |
| 1695 __ sll(result, left, shift_count - 1); | 1676 __ sll(result, left, shift_count - 1); |
| 1696 __ SmiTagCheckOverflow(result, result, scratch); | 1677 __ SmiTagCheckOverflow(result, result, scratch); |
| 1697 } else { | 1678 } else { |
| 1698 __ SmiTagCheckOverflow(result, left, scratch); | 1679 __ SmiTagCheckOverflow(result, left, scratch); |
| 1699 } | 1680 } |
| 1700 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch, | 1681 DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg)); |
| 1701 Operand(zero_reg)); | |
| 1702 } else { | 1682 } else { |
| 1703 __ sll(result, left, shift_count); | 1683 __ sll(result, left, shift_count); |
| 1704 } | 1684 } |
| 1705 } else { | 1685 } else { |
| 1706 __ Move(result, left); | 1686 __ Move(result, left); |
| 1707 } | 1687 } |
| 1708 break; | 1688 break; |
| 1709 default: | 1689 default: |
| 1710 UNREACHABLE(); | 1690 UNREACHABLE(); |
| 1711 break; | 1691 break; |
| (...skipping 27 matching lines...) Expand all Loading... |
| 1739 overflow); // Reg at also used as scratch. | 1719 overflow); // Reg at also used as scratch. |
| 1740 } else { | 1720 } else { |
| 1741 DCHECK(right->IsRegister()); | 1721 DCHECK(right->IsRegister()); |
| 1742 // Due to overflow check macros not supporting constant operands, | 1722 // Due to overflow check macros not supporting constant operands, |
| 1743 // handling the IsConstantOperand case was moved to prev if clause. | 1723 // handling the IsConstantOperand case was moved to prev if clause. |
| 1744 __ SubuAndCheckForOverflow(ToRegister(result), | 1724 __ SubuAndCheckForOverflow(ToRegister(result), |
| 1745 ToRegister(left), | 1725 ToRegister(left), |
| 1746 ToRegister(right), | 1726 ToRegister(right), |
| 1747 overflow); // Reg at also used as scratch. | 1727 overflow); // Reg at also used as scratch. |
| 1748 } | 1728 } |
| 1749 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow, | 1729 DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg)); |
| 1750 Operand(zero_reg)); | |
| 1751 } | 1730 } |
| 1752 } | 1731 } |
| 1753 | 1732 |
| 1754 | 1733 |
| 1755 void LCodeGen::DoConstantI(LConstantI* instr) { | 1734 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 1756 __ li(ToRegister(instr->result()), Operand(instr->value())); | 1735 __ li(ToRegister(instr->result()), Operand(instr->value())); |
| 1757 } | 1736 } |
| 1758 | 1737 |
| 1759 | 1738 |
| 1760 void LCodeGen::DoConstantS(LConstantS* instr) { | 1739 void LCodeGen::DoConstantS(LConstantS* instr) { |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1808 Register result = ToRegister(instr->result()); | 1787 Register result = ToRegister(instr->result()); |
| 1809 Register scratch = ToRegister(instr->temp()); | 1788 Register scratch = ToRegister(instr->temp()); |
| 1810 Smi* index = instr->index(); | 1789 Smi* index = instr->index(); |
| 1811 Label runtime, done; | 1790 Label runtime, done; |
| 1812 DCHECK(object.is(a0)); | 1791 DCHECK(object.is(a0)); |
| 1813 DCHECK(result.is(v0)); | 1792 DCHECK(result.is(v0)); |
| 1814 DCHECK(!scratch.is(scratch0())); | 1793 DCHECK(!scratch.is(scratch0())); |
| 1815 DCHECK(!scratch.is(object)); | 1794 DCHECK(!scratch.is(object)); |
| 1816 | 1795 |
| 1817 __ SmiTst(object, at); | 1796 __ SmiTst(object, at); |
| 1818 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 1797 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 1819 __ GetObjectType(object, scratch, scratch); | 1798 __ GetObjectType(object, scratch, scratch); |
| 1820 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject, scratch, | 1799 DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE)); |
| 1821 Operand(JS_DATE_TYPE)); | |
| 1822 | 1800 |
| 1823 if (index->value() == 0) { | 1801 if (index->value() == 0) { |
| 1824 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1802 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset)); |
| 1825 } else { | 1803 } else { |
| 1826 if (index->value() < JSDate::kFirstUncachedField) { | 1804 if (index->value() < JSDate::kFirstUncachedField) { |
| 1827 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1805 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| 1828 __ li(scratch, Operand(stamp)); | 1806 __ li(scratch, Operand(stamp)); |
| 1829 __ lw(scratch, MemOperand(scratch)); | 1807 __ lw(scratch, MemOperand(scratch)); |
| 1830 __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | 1808 __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); |
| 1831 __ Branch(&runtime, ne, scratch, Operand(scratch0())); | 1809 __ Branch(&runtime, ne, scratch, Operand(scratch0())); |
| (...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1946 overflow); // Reg at also used as scratch. | 1924 overflow); // Reg at also used as scratch. |
| 1947 } else { | 1925 } else { |
| 1948 DCHECK(right->IsRegister()); | 1926 DCHECK(right->IsRegister()); |
| 1949 // Due to overflow check macros not supporting constant operands, | 1927 // Due to overflow check macros not supporting constant operands, |
| 1950 // handling the IsConstantOperand case was moved to prev if clause. | 1928 // handling the IsConstantOperand case was moved to prev if clause. |
| 1951 __ AdduAndCheckForOverflow(ToRegister(result), | 1929 __ AdduAndCheckForOverflow(ToRegister(result), |
| 1952 ToRegister(left), | 1930 ToRegister(left), |
| 1953 ToRegister(right), | 1931 ToRegister(right), |
| 1954 overflow); // Reg at also used as scratch. | 1932 overflow); // Reg at also used as scratch. |
| 1955 } | 1933 } |
| 1956 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow, | 1934 DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg)); |
| 1957 Operand(zero_reg)); | |
| 1958 } | 1935 } |
| 1959 } | 1936 } |
| 1960 | 1937 |
| 1961 | 1938 |
| 1962 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1939 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 1963 LOperand* left = instr->left(); | 1940 LOperand* left = instr->left(); |
| 1964 LOperand* right = instr->right(); | 1941 LOperand* right = instr->right(); |
| 1965 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1942 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| 1966 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 1943 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; |
| 1967 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 1944 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
| (...skipping 240 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2208 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); | 2185 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); |
| 2209 } | 2186 } |
| 2210 | 2187 |
| 2211 if (expected.Contains(ToBooleanStub::SMI)) { | 2188 if (expected.Contains(ToBooleanStub::SMI)) { |
| 2212 // Smis: 0 -> false, all other -> true. | 2189 // Smis: 0 -> false, all other -> true. |
| 2213 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); | 2190 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); |
| 2214 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2191 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2215 } else if (expected.NeedsMap()) { | 2192 } else if (expected.NeedsMap()) { |
| 2216 // If we need a map later and have a Smi -> deopt. | 2193 // If we need a map later and have a Smi -> deopt. |
| 2217 __ SmiTst(reg, at); | 2194 __ SmiTst(reg, at); |
| 2218 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 2195 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 2219 } | 2196 } |
| 2220 | 2197 |
| 2221 const Register map = scratch0(); | 2198 const Register map = scratch0(); |
| 2222 if (expected.NeedsMap()) { | 2199 if (expected.NeedsMap()) { |
| 2223 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2200 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 2224 if (expected.CanBeUndetectable()) { | 2201 if (expected.CanBeUndetectable()) { |
| 2225 // Undetectable -> false. | 2202 // Undetectable -> false. |
| 2226 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); | 2203 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 2227 __ And(at, at, Operand(1 << Map::kIsUndetectable)); | 2204 __ And(at, at, Operand(1 << Map::kIsUndetectable)); |
| 2228 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); | 2205 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2264 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), | 2241 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), |
| 2265 ne, dbl_scratch, kDoubleRegZero); | 2242 ne, dbl_scratch, kDoubleRegZero); |
| 2266 // Falls through if dbl_scratch == 0. | 2243 // Falls through if dbl_scratch == 0. |
| 2267 __ Branch(instr->FalseLabel(chunk_)); | 2244 __ Branch(instr->FalseLabel(chunk_)); |
| 2268 __ bind(¬_heap_number); | 2245 __ bind(¬_heap_number); |
| 2269 } | 2246 } |
| 2270 | 2247 |
| 2271 if (!expected.IsGeneric()) { | 2248 if (!expected.IsGeneric()) { |
| 2272 // We've seen something for the first time -> deopt. | 2249 // We've seen something for the first time -> deopt. |
| 2273 // This can only happen if we are not generic already. | 2250 // This can only happen if we are not generic already. |
| 2274 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg, | 2251 DeoptimizeIf(al, instr, "unexpected object", zero_reg, |
| 2275 Operand(zero_reg)); | 2252 Operand(zero_reg)); |
| 2276 } | 2253 } |
| 2277 } | 2254 } |
| 2278 } | 2255 } |
| 2279 } | 2256 } |
| 2280 | 2257 |
| 2281 | 2258 |
| 2282 void LCodeGen::EmitGoto(int block) { | 2259 void LCodeGen::EmitGoto(int block) { |
| 2283 if (!IsNextEmittedBlock(block)) { | 2260 if (!IsNextEmittedBlock(block)) { |
| 2284 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2261 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| (...skipping 625 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2910 } | 2887 } |
| 2911 } | 2888 } |
| 2912 | 2889 |
| 2913 | 2890 |
| 2914 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2891 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 2915 Register result = ToRegister(instr->result()); | 2892 Register result = ToRegister(instr->result()); |
| 2916 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 2893 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
| 2917 __ lw(result, FieldMemOperand(at, Cell::kValueOffset)); | 2894 __ lw(result, FieldMemOperand(at, Cell::kValueOffset)); |
| 2918 if (instr->hydrogen()->RequiresHoleCheck()) { | 2895 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2919 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2896 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2920 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | 2897 DeoptimizeIf(eq, instr, "hole", result, Operand(at)); |
| 2921 } | 2898 } |
| 2922 } | 2899 } |
| 2923 | 2900 |
| 2924 | 2901 |
| 2925 template <class T> | 2902 template <class T> |
| 2926 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 2903 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
| 2927 DCHECK(FLAG_vector_ics); | 2904 DCHECK(FLAG_vector_ics); |
| 2928 Register vector_register = ToRegister(instr->temp_vector()); | 2905 Register vector_register = ToRegister(instr->temp_vector()); |
| 2929 Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 2906 Register slot_register = VectorLoadICDescriptor::SlotRegister(); |
| 2930 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 2907 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2965 | 2942 |
| 2966 // If the cell we are storing to contains the hole it could have | 2943 // If the cell we are storing to contains the hole it could have |
| 2967 // been deleted from the property dictionary. In that case, we need | 2944 // been deleted from the property dictionary. In that case, we need |
| 2968 // to update the property details in the property dictionary to mark | 2945 // to update the property details in the property dictionary to mark |
| 2969 // it as no longer deleted. | 2946 // it as no longer deleted. |
| 2970 if (instr->hydrogen()->RequiresHoleCheck()) { | 2947 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2971 // We use a temp to check the payload. | 2948 // We use a temp to check the payload. |
| 2972 Register payload = ToRegister(instr->temp()); | 2949 Register payload = ToRegister(instr->temp()); |
| 2973 __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 2950 __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
| 2974 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2951 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2975 DeoptimizeIf(eq, instr, Deoptimizer::kHole, payload, Operand(at)); | 2952 DeoptimizeIf(eq, instr, "hole", payload, Operand(at)); |
| 2976 } | 2953 } |
| 2977 | 2954 |
| 2978 // Store the value. | 2955 // Store the value. |
| 2979 __ sw(value, FieldMemOperand(cell, Cell::kValueOffset)); | 2956 __ sw(value, FieldMemOperand(cell, Cell::kValueOffset)); |
| 2980 // Cells are always rescanned, so no write barrier here. | 2957 // Cells are always rescanned, so no write barrier here. |
| 2981 } | 2958 } |
| 2982 | 2959 |
| 2983 | 2960 |
| 2984 | 2961 |
| 2985 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 2962 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2986 Register context = ToRegister(instr->context()); | 2963 Register context = ToRegister(instr->context()); |
| 2987 Register result = ToRegister(instr->result()); | 2964 Register result = ToRegister(instr->result()); |
| 2988 | 2965 |
| 2989 __ lw(result, ContextOperand(context, instr->slot_index())); | 2966 __ lw(result, ContextOperand(context, instr->slot_index())); |
| 2990 if (instr->hydrogen()->RequiresHoleCheck()) { | 2967 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2991 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2968 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2992 | 2969 |
| 2993 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2970 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2994 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | 2971 DeoptimizeIf(eq, instr, "hole", result, Operand(at)); |
| 2995 } else { | 2972 } else { |
| 2996 Label is_not_hole; | 2973 Label is_not_hole; |
| 2997 __ Branch(&is_not_hole, ne, result, Operand(at)); | 2974 __ Branch(&is_not_hole, ne, result, Operand(at)); |
| 2998 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 2975 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 2999 __ bind(&is_not_hole); | 2976 __ bind(&is_not_hole); |
| 3000 } | 2977 } |
| 3001 } | 2978 } |
| 3002 } | 2979 } |
| 3003 | 2980 |
| 3004 | 2981 |
| 3005 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 2982 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 3006 Register context = ToRegister(instr->context()); | 2983 Register context = ToRegister(instr->context()); |
| 3007 Register value = ToRegister(instr->value()); | 2984 Register value = ToRegister(instr->value()); |
| 3008 Register scratch = scratch0(); | 2985 Register scratch = scratch0(); |
| 3009 MemOperand target = ContextOperand(context, instr->slot_index()); | 2986 MemOperand target = ContextOperand(context, instr->slot_index()); |
| 3010 | 2987 |
| 3011 Label skip_assignment; | 2988 Label skip_assignment; |
| 3012 | 2989 |
| 3013 if (instr->hydrogen()->RequiresHoleCheck()) { | 2990 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3014 __ lw(scratch, target); | 2991 __ lw(scratch, target); |
| 3015 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2992 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 3016 | 2993 |
| 3017 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2994 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 3018 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at)); | 2995 DeoptimizeIf(eq, instr, "hole", scratch, Operand(at)); |
| 3019 } else { | 2996 } else { |
| 3020 __ Branch(&skip_assignment, ne, scratch, Operand(at)); | 2997 __ Branch(&skip_assignment, ne, scratch, Operand(at)); |
| 3021 } | 2998 } |
| 3022 } | 2999 } |
| 3023 | 3000 |
| 3024 __ sw(value, target); | 3001 __ sw(value, target); |
| 3025 if (instr->hydrogen()->NeedsWriteBarrier()) { | 3002 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 3026 SmiCheck check_needed = | 3003 SmiCheck check_needed = |
| 3027 instr->hydrogen()->value()->type().IsHeapObject() | 3004 instr->hydrogen()->value()->type().IsHeapObject() |
| 3028 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 3005 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3088 Register scratch = scratch0(); | 3065 Register scratch = scratch0(); |
| 3089 Register function = ToRegister(instr->function()); | 3066 Register function = ToRegister(instr->function()); |
| 3090 Register result = ToRegister(instr->result()); | 3067 Register result = ToRegister(instr->result()); |
| 3091 | 3068 |
| 3092 // Get the prototype or initial map from the function. | 3069 // Get the prototype or initial map from the function. |
| 3093 __ lw(result, | 3070 __ lw(result, |
| 3094 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3071 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 3095 | 3072 |
| 3096 // Check that the function has a prototype or an initial map. | 3073 // Check that the function has a prototype or an initial map. |
| 3097 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 3074 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 3098 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | 3075 DeoptimizeIf(eq, instr, "hole", result, Operand(at)); |
| 3099 | 3076 |
| 3100 // If the function does not have an initial map, we're done. | 3077 // If the function does not have an initial map, we're done. |
| 3101 Label done; | 3078 Label done; |
| 3102 __ GetObjectType(result, scratch, scratch); | 3079 __ GetObjectType(result, scratch, scratch); |
| 3103 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); | 3080 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); |
| 3104 | 3081 |
| 3105 // Get the prototype from the initial map. | 3082 // Get the prototype from the initial map. |
| 3106 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3083 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 3107 | 3084 |
| 3108 // All done. | 3085 // All done. |
| (...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3224 __ lhu(result, mem_operand); | 3201 __ lhu(result, mem_operand); |
| 3225 break; | 3202 break; |
| 3226 case EXTERNAL_INT32_ELEMENTS: | 3203 case EXTERNAL_INT32_ELEMENTS: |
| 3227 case INT32_ELEMENTS: | 3204 case INT32_ELEMENTS: |
| 3228 __ lw(result, mem_operand); | 3205 __ lw(result, mem_operand); |
| 3229 break; | 3206 break; |
| 3230 case EXTERNAL_UINT32_ELEMENTS: | 3207 case EXTERNAL_UINT32_ELEMENTS: |
| 3231 case UINT32_ELEMENTS: | 3208 case UINT32_ELEMENTS: |
| 3232 __ lw(result, mem_operand); | 3209 __ lw(result, mem_operand); |
| 3233 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3210 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 3234 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue, | 3211 DeoptimizeIf(Ugreater_equal, instr, "negative value", result, |
| 3235 result, Operand(0x80000000)); | 3212 Operand(0x80000000)); |
| 3236 } | 3213 } |
| 3237 break; | 3214 break; |
| 3238 case FLOAT32_ELEMENTS: | 3215 case FLOAT32_ELEMENTS: |
| 3239 case FLOAT64_ELEMENTS: | 3216 case FLOAT64_ELEMENTS: |
| 3240 case EXTERNAL_FLOAT32_ELEMENTS: | 3217 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3241 case EXTERNAL_FLOAT64_ELEMENTS: | 3218 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3242 case FAST_DOUBLE_ELEMENTS: | 3219 case FAST_DOUBLE_ELEMENTS: |
| 3243 case FAST_ELEMENTS: | 3220 case FAST_ELEMENTS: |
| 3244 case FAST_SMI_ELEMENTS: | 3221 case FAST_SMI_ELEMENTS: |
| 3245 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3222 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3278 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 3255 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
| 3279 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 3256 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
| 3280 __ sll(at, key, shift_size); | 3257 __ sll(at, key, shift_size); |
| 3281 __ Addu(scratch, scratch, at); | 3258 __ Addu(scratch, scratch, at); |
| 3282 } | 3259 } |
| 3283 | 3260 |
| 3284 __ ldc1(result, MemOperand(scratch)); | 3261 __ ldc1(result, MemOperand(scratch)); |
| 3285 | 3262 |
| 3286 if (instr->hydrogen()->RequiresHoleCheck()) { | 3263 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3287 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset)); | 3264 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset)); |
| 3288 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, | 3265 DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32)); |
| 3289 Operand(kHoleNanUpper32)); | |
| 3290 } | 3266 } |
| 3291 } | 3267 } |
| 3292 | 3268 |
| 3293 | 3269 |
| 3294 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3270 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 3295 Register elements = ToRegister(instr->elements()); | 3271 Register elements = ToRegister(instr->elements()); |
| 3296 Register result = ToRegister(instr->result()); | 3272 Register result = ToRegister(instr->result()); |
| 3297 Register scratch = scratch0(); | 3273 Register scratch = scratch0(); |
| 3298 Register store_base = scratch; | 3274 Register store_base = scratch; |
| 3299 int offset = instr->base_offset(); | 3275 int offset = instr->base_offset(); |
| (...skipping 15 matching lines...) Expand all Loading... |
| 3315 __ sll(scratch, key, kPointerSizeLog2); | 3291 __ sll(scratch, key, kPointerSizeLog2); |
| 3316 __ addu(scratch, elements, scratch); | 3292 __ addu(scratch, elements, scratch); |
| 3317 } | 3293 } |
| 3318 } | 3294 } |
| 3319 __ lw(result, MemOperand(store_base, offset)); | 3295 __ lw(result, MemOperand(store_base, offset)); |
| 3320 | 3296 |
| 3321 // Check for the hole value. | 3297 // Check for the hole value. |
| 3322 if (instr->hydrogen()->RequiresHoleCheck()) { | 3298 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3323 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3299 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 3324 __ SmiTst(result, scratch); | 3300 __ SmiTst(result, scratch); |
| 3325 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, | 3301 DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg)); |
| 3326 Operand(zero_reg)); | |
| 3327 } else { | 3302 } else { |
| 3328 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3303 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 3329 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch)); | 3304 DeoptimizeIf(eq, instr, "hole", result, Operand(scratch)); |
| 3330 } | 3305 } |
| 3331 } | 3306 } |
| 3332 } | 3307 } |
| 3333 | 3308 |
| 3334 | 3309 |
| 3335 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3310 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 3336 if (instr->is_typed_elements()) { | 3311 if (instr->is_typed_elements()) { |
| 3337 DoLoadKeyedExternalArray(instr); | 3312 DoLoadKeyedExternalArray(instr); |
| 3338 } else if (instr->hydrogen()->representation().IsDouble()) { | 3313 } else if (instr->hydrogen()->representation().IsDouble()) { |
| 3339 DoLoadKeyedFixedDoubleArray(instr); | 3314 DoLoadKeyedFixedDoubleArray(instr); |
| (...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3465 } | 3440 } |
| 3466 | 3441 |
| 3467 // Normal function. Replace undefined or null with global receiver. | 3442 // Normal function. Replace undefined or null with global receiver. |
| 3468 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3443 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 3469 __ Branch(&global_object, eq, receiver, Operand(scratch)); | 3444 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 3470 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3445 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3471 __ Branch(&global_object, eq, receiver, Operand(scratch)); | 3446 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 3472 | 3447 |
| 3473 // Deoptimize if the receiver is not a JS object. | 3448 // Deoptimize if the receiver is not a JS object. |
| 3474 __ SmiTst(receiver, scratch); | 3449 __ SmiTst(receiver, scratch); |
| 3475 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg)); | 3450 DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg)); |
| 3476 | 3451 |
| 3477 __ GetObjectType(receiver, scratch, scratch); | 3452 __ GetObjectType(receiver, scratch, scratch); |
| 3478 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch, | 3453 DeoptimizeIf(lt, instr, "not a JavaScript object", scratch, |
| 3479 Operand(FIRST_SPEC_OBJECT_TYPE)); | 3454 Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 3480 | 3455 |
| 3481 __ Branch(&result_in_receiver); | 3456 __ Branch(&result_in_receiver); |
| 3482 __ bind(&global_object); | 3457 __ bind(&global_object); |
| 3483 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3458 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 3484 __ lw(result, | 3459 __ lw(result, |
| 3485 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 3460 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
| 3486 __ lw(result, | 3461 __ lw(result, |
| 3487 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 3462 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
| 3488 | 3463 |
| (...skipping 15 matching lines...) Expand all Loading... |
| 3504 Register length = ToRegister(instr->length()); | 3479 Register length = ToRegister(instr->length()); |
| 3505 Register elements = ToRegister(instr->elements()); | 3480 Register elements = ToRegister(instr->elements()); |
| 3506 Register scratch = scratch0(); | 3481 Register scratch = scratch0(); |
| 3507 DCHECK(receiver.is(a0)); // Used for parameter count. | 3482 DCHECK(receiver.is(a0)); // Used for parameter count. |
| 3508 DCHECK(function.is(a1)); // Required by InvokeFunction. | 3483 DCHECK(function.is(a1)); // Required by InvokeFunction. |
| 3509 DCHECK(ToRegister(instr->result()).is(v0)); | 3484 DCHECK(ToRegister(instr->result()).is(v0)); |
| 3510 | 3485 |
| 3511 // Copy the arguments to this function possibly from the | 3486 // Copy the arguments to this function possibly from the |
| 3512 // adaptor frame below it. | 3487 // adaptor frame below it. |
| 3513 const uint32_t kArgumentsLimit = 1 * KB; | 3488 const uint32_t kArgumentsLimit = 1 * KB; |
| 3514 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length, | 3489 DeoptimizeIf(hi, instr, "too many arguments", length, |
| 3515 Operand(kArgumentsLimit)); | 3490 Operand(kArgumentsLimit)); |
| 3516 | 3491 |
| 3517 // Push the receiver and use the register to keep the original | 3492 // Push the receiver and use the register to keep the original |
| 3518 // number of arguments. | 3493 // number of arguments. |
| 3519 __ push(receiver); | 3494 __ push(receiver); |
| 3520 __ Move(receiver, length); | 3495 __ Move(receiver, length); |
| 3521 // The arguments are at a one pointer size offset from elements. | 3496 // The arguments are at a one pointer size offset from elements. |
| 3522 __ Addu(elements, elements, Operand(1 * kPointerSize)); | 3497 __ Addu(elements, elements, Operand(1 * kPointerSize)); |
| 3523 | 3498 |
| 3524 // Loop through the arguments pushing them onto the execution | 3499 // Loop through the arguments pushing them onto the execution |
| (...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3630 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | 3605 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
| 3631 DCHECK(instr->context() != NULL); | 3606 DCHECK(instr->context() != NULL); |
| 3632 DCHECK(ToRegister(instr->context()).is(cp)); | 3607 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3633 Register input = ToRegister(instr->value()); | 3608 Register input = ToRegister(instr->value()); |
| 3634 Register result = ToRegister(instr->result()); | 3609 Register result = ToRegister(instr->result()); |
| 3635 Register scratch = scratch0(); | 3610 Register scratch = scratch0(); |
| 3636 | 3611 |
| 3637 // Deoptimize if not a heap number. | 3612 // Deoptimize if not a heap number. |
| 3638 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3613 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3639 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3614 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3640 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at)); | 3615 DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at)); |
| 3641 | 3616 |
| 3642 Label done; | 3617 Label done; |
| 3643 Register exponent = scratch0(); | 3618 Register exponent = scratch0(); |
| 3644 scratch = no_reg; | 3619 scratch = no_reg; |
| 3645 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3620 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 3646 // Check the sign of the argument. If the argument is positive, just | 3621 // Check the sign of the argument. If the argument is positive, just |
| 3647 // return it. | 3622 // return it. |
| 3648 __ Move(result, input); | 3623 __ Move(result, input); |
| 3649 __ And(at, exponent, Operand(HeapNumber::kSignMask)); | 3624 __ And(at, exponent, Operand(HeapNumber::kSignMask)); |
| 3650 __ Branch(&done, eq, at, Operand(zero_reg)); | 3625 __ Branch(&done, eq, at, Operand(zero_reg)); |
| (...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3697 | 3672 |
| 3698 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3673 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3699 Register input = ToRegister(instr->value()); | 3674 Register input = ToRegister(instr->value()); |
| 3700 Register result = ToRegister(instr->result()); | 3675 Register result = ToRegister(instr->result()); |
| 3701 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | 3676 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
| 3702 Label done; | 3677 Label done; |
| 3703 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); | 3678 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); |
| 3704 __ mov(result, input); | 3679 __ mov(result, input); |
| 3705 __ subu(result, zero_reg, input); | 3680 __ subu(result, zero_reg, input); |
| 3706 // Overflow if result is still negative, i.e. 0x80000000. | 3681 // Overflow if result is still negative, i.e. 0x80000000. |
| 3707 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg)); | 3682 DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg)); |
| 3708 __ bind(&done); | 3683 __ bind(&done); |
| 3709 } | 3684 } |
| 3710 | 3685 |
| 3711 | 3686 |
| 3712 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3687 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3713 // Class for deferred case. | 3688 // Class for deferred case. |
| 3714 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3689 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
| 3715 public: | 3690 public: |
| 3716 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3691 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
| 3717 : LDeferredCode(codegen), instr_(instr) { } | 3692 : LDeferredCode(codegen), instr_(instr) { } |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3752 Register except_flag = ToRegister(instr->temp()); | 3727 Register except_flag = ToRegister(instr->temp()); |
| 3753 | 3728 |
| 3754 __ EmitFPUTruncate(kRoundToMinusInf, | 3729 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3755 result, | 3730 result, |
| 3756 input, | 3731 input, |
| 3757 scratch1, | 3732 scratch1, |
| 3758 double_scratch0(), | 3733 double_scratch0(), |
| 3759 except_flag); | 3734 except_flag); |
| 3760 | 3735 |
| 3761 // Deopt if the operation did not succeed. | 3736 // Deopt if the operation did not succeed. |
| 3762 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 3737 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 3763 Operand(zero_reg)); | 3738 Operand(zero_reg)); |
| 3764 | 3739 |
| 3765 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3740 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3766 // Test for -0. | 3741 // Test for -0. |
| 3767 Label done; | 3742 Label done; |
| 3768 __ Branch(&done, ne, result, Operand(zero_reg)); | 3743 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 3769 __ Mfhc1(scratch1, input); | 3744 __ Mfhc1(scratch1, input); |
| 3770 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 3745 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 3771 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 3746 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 3772 Operand(zero_reg)); | |
| 3773 __ bind(&done); | 3747 __ bind(&done); |
| 3774 } | 3748 } |
| 3775 } | 3749 } |
| 3776 | 3750 |
| 3777 | 3751 |
| 3778 void LCodeGen::DoMathRound(LMathRound* instr) { | 3752 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3779 DoubleRegister input = ToDoubleRegister(instr->value()); | 3753 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3780 Register result = ToRegister(instr->result()); | 3754 Register result = ToRegister(instr->result()); |
| 3781 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3755 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
| 3782 Register scratch = scratch0(); | 3756 Register scratch = scratch0(); |
| (...skipping 12 matching lines...) Expand all Loading... |
| 3795 __ mov(result, zero_reg); | 3769 __ mov(result, zero_reg); |
| 3796 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3770 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3797 __ Branch(&check_sign_on_zero); | 3771 __ Branch(&check_sign_on_zero); |
| 3798 } else { | 3772 } else { |
| 3799 __ Branch(&done); | 3773 __ Branch(&done); |
| 3800 } | 3774 } |
| 3801 __ bind(&skip1); | 3775 __ bind(&skip1); |
| 3802 | 3776 |
| 3803 // The following conversion will not work with numbers | 3777 // The following conversion will not work with numbers |
| 3804 // outside of ]-2^32, 2^32[. | 3778 // outside of ]-2^32, 2^32[. |
| 3805 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch, | 3779 DeoptimizeIf(ge, instr, "overflow", scratch, |
| 3806 Operand(HeapNumber::kExponentBias + 32)); | 3780 Operand(HeapNumber::kExponentBias + 32)); |
| 3807 | 3781 |
| 3808 // Save the original sign for later comparison. | 3782 // Save the original sign for later comparison. |
| 3809 __ And(scratch, result, Operand(HeapNumber::kSignMask)); | 3783 __ And(scratch, result, Operand(HeapNumber::kSignMask)); |
| 3810 | 3784 |
| 3811 __ Move(double_scratch0(), 0.5); | 3785 __ Move(double_scratch0(), 0.5); |
| 3812 __ add_d(double_scratch0(), input, double_scratch0()); | 3786 __ add_d(double_scratch0(), input, double_scratch0()); |
| 3813 | 3787 |
| 3814 // Check sign of the result: if the sign changed, the input | 3788 // Check sign of the result: if the sign changed, the input |
| 3815 // value was in ]0.5, 0[ and the result should be -0. | 3789 // value was in ]0.5, 0[ and the result should be -0. |
| 3816 __ Mfhc1(result, double_scratch0()); | 3790 __ Mfhc1(result, double_scratch0()); |
| 3817 __ Xor(result, result, Operand(scratch)); | 3791 __ Xor(result, result, Operand(scratch)); |
| 3818 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3792 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3819 // ARM uses 'mi' here, which is 'lt' | 3793 // ARM uses 'mi' here, which is 'lt' |
| 3820 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg)); | 3794 DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg)); |
| 3821 } else { | 3795 } else { |
| 3822 Label skip2; | 3796 Label skip2; |
| 3823 // ARM uses 'mi' here, which is 'lt' | 3797 // ARM uses 'mi' here, which is 'lt' |
| 3824 // Negating it results in 'ge' | 3798 // Negating it results in 'ge' |
| 3825 __ Branch(&skip2, ge, result, Operand(zero_reg)); | 3799 __ Branch(&skip2, ge, result, Operand(zero_reg)); |
| 3826 __ mov(result, zero_reg); | 3800 __ mov(result, zero_reg); |
| 3827 __ Branch(&done); | 3801 __ Branch(&done); |
| 3828 __ bind(&skip2); | 3802 __ bind(&skip2); |
| 3829 } | 3803 } |
| 3830 | 3804 |
| 3831 Register except_flag = scratch; | 3805 Register except_flag = scratch; |
| 3832 __ EmitFPUTruncate(kRoundToMinusInf, | 3806 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3833 result, | 3807 result, |
| 3834 double_scratch0(), | 3808 double_scratch0(), |
| 3835 at, | 3809 at, |
| 3836 double_scratch1, | 3810 double_scratch1, |
| 3837 except_flag); | 3811 except_flag); |
| 3838 | 3812 |
| 3839 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 3813 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 3840 Operand(zero_reg)); | 3814 Operand(zero_reg)); |
| 3841 | 3815 |
| 3842 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3816 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3843 // Test for -0. | 3817 // Test for -0. |
| 3844 __ Branch(&done, ne, result, Operand(zero_reg)); | 3818 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 3845 __ bind(&check_sign_on_zero); | 3819 __ bind(&check_sign_on_zero); |
| 3846 __ Mfhc1(scratch, input); | 3820 __ Mfhc1(scratch, input); |
| 3847 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); | 3821 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); |
| 3848 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch, | 3822 DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg)); |
| 3849 Operand(zero_reg)); | |
| 3850 } | 3823 } |
| 3851 __ bind(&done); | 3824 __ bind(&done); |
| 3852 } | 3825 } |
| 3853 | 3826 |
| 3854 | 3827 |
| 3855 void LCodeGen::DoMathFround(LMathFround* instr) { | 3828 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3856 DoubleRegister input = ToDoubleRegister(instr->value()); | 3829 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3857 DoubleRegister result = ToDoubleRegister(instr->result()); | 3830 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3858 __ cvt_s_d(result.low(), input); | 3831 __ cvt_s_d(result.low(), input); |
| 3859 __ cvt_d_s(result, result.low()); | 3832 __ cvt_d_s(result, result.low()); |
| (...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3905 | 3878 |
| 3906 if (exponent_type.IsSmi()) { | 3879 if (exponent_type.IsSmi()) { |
| 3907 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3880 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3908 __ CallStub(&stub); | 3881 __ CallStub(&stub); |
| 3909 } else if (exponent_type.IsTagged()) { | 3882 } else if (exponent_type.IsTagged()) { |
| 3910 Label no_deopt; | 3883 Label no_deopt; |
| 3911 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3884 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 3912 DCHECK(!t3.is(tagged_exponent)); | 3885 DCHECK(!t3.is(tagged_exponent)); |
| 3913 __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3886 __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
| 3914 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3887 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3915 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at)); | 3888 DeoptimizeIf(ne, instr, "not a heap number", t3, Operand(at)); |
| 3916 __ bind(&no_deopt); | 3889 __ bind(&no_deopt); |
| 3917 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3890 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3918 __ CallStub(&stub); | 3891 __ CallStub(&stub); |
| 3919 } else if (exponent_type.IsInteger32()) { | 3892 } else if (exponent_type.IsInteger32()) { |
| 3920 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3893 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3921 __ CallStub(&stub); | 3894 __ CallStub(&stub); |
| 3922 } else { | 3895 } else { |
| 3923 DCHECK(exponent_type.IsDouble()); | 3896 DCHECK(exponent_type.IsDouble()); |
| 3924 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3897 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3925 __ CallStub(&stub); | 3898 __ CallStub(&stub); |
| (...skipping 371 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4297 } else { | 4270 } else { |
| 4298 reg = ToRegister(instr->index()); | 4271 reg = ToRegister(instr->index()); |
| 4299 operand = ToOperand(instr->length()); | 4272 operand = ToOperand(instr->length()); |
| 4300 } | 4273 } |
| 4301 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4274 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 4302 Label done; | 4275 Label done; |
| 4303 __ Branch(&done, NegateCondition(cc), reg, operand); | 4276 __ Branch(&done, NegateCondition(cc), reg, operand); |
| 4304 __ stop("eliminated bounds check failed"); | 4277 __ stop("eliminated bounds check failed"); |
| 4305 __ bind(&done); | 4278 __ bind(&done); |
| 4306 } else { | 4279 } else { |
| 4307 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand); | 4280 DeoptimizeIf(cc, instr, "out of bounds", reg, operand); |
| 4308 } | 4281 } |
| 4309 } | 4282 } |
| 4310 | 4283 |
| 4311 | 4284 |
| 4312 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4285 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4313 Register external_pointer = ToRegister(instr->elements()); | 4286 Register external_pointer = ToRegister(instr->elements()); |
| 4314 Register key = no_reg; | 4287 Register key = no_reg; |
| 4315 ElementsKind elements_kind = instr->elements_kind(); | 4288 ElementsKind elements_kind = instr->elements_kind(); |
| 4316 bool key_is_constant = instr->key()->IsConstantOperand(); | 4289 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4317 int constant_key = 0; | 4290 int constant_key = 0; |
| (...skipping 573 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4891 } | 4864 } |
| 4892 | 4865 |
| 4893 | 4866 |
| 4894 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4867 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4895 HChange* hchange = instr->hydrogen(); | 4868 HChange* hchange = instr->hydrogen(); |
| 4896 Register input = ToRegister(instr->value()); | 4869 Register input = ToRegister(instr->value()); |
| 4897 Register output = ToRegister(instr->result()); | 4870 Register output = ToRegister(instr->result()); |
| 4898 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4871 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4899 hchange->value()->CheckFlag(HValue::kUint32)) { | 4872 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4900 __ And(at, input, Operand(0xc0000000)); | 4873 __ And(at, input, Operand(0xc0000000)); |
| 4901 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); | 4874 DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg)); |
| 4902 } | 4875 } |
| 4903 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4876 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4904 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4877 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4905 __ SmiTagCheckOverflow(output, input, at); | 4878 __ SmiTagCheckOverflow(output, input, at); |
| 4906 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); | 4879 DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg)); |
| 4907 } else { | 4880 } else { |
| 4908 __ SmiTag(output, input); | 4881 __ SmiTag(output, input); |
| 4909 } | 4882 } |
| 4910 } | 4883 } |
| 4911 | 4884 |
| 4912 | 4885 |
| 4913 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4886 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4914 Register scratch = scratch0(); | 4887 Register scratch = scratch0(); |
| 4915 Register input = ToRegister(instr->value()); | 4888 Register input = ToRegister(instr->value()); |
| 4916 Register result = ToRegister(instr->result()); | 4889 Register result = ToRegister(instr->result()); |
| 4917 if (instr->needs_check()) { | 4890 if (instr->needs_check()) { |
| 4918 STATIC_ASSERT(kHeapObjectTag == 1); | 4891 STATIC_ASSERT(kHeapObjectTag == 1); |
| 4919 // If the input is a HeapObject, value of scratch won't be zero. | 4892 // If the input is a HeapObject, value of scratch won't be zero. |
| 4920 __ And(scratch, input, Operand(kHeapObjectTag)); | 4893 __ And(scratch, input, Operand(kHeapObjectTag)); |
| 4921 __ SmiUntag(result, input); | 4894 __ SmiUntag(result, input); |
| 4922 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg)); | 4895 DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg)); |
| 4923 } else { | 4896 } else { |
| 4924 __ SmiUntag(result, input); | 4897 __ SmiUntag(result, input); |
| 4925 } | 4898 } |
| 4926 } | 4899 } |
| 4927 | 4900 |
| 4928 | 4901 |
| 4929 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4902 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4930 DoubleRegister result_reg, | 4903 DoubleRegister result_reg, |
| 4931 NumberUntagDMode mode) { | 4904 NumberUntagDMode mode) { |
| 4932 bool can_convert_undefined_to_nan = | 4905 bool can_convert_undefined_to_nan = |
| 4933 instr->hydrogen()->can_convert_undefined_to_nan(); | 4906 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4934 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4907 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4935 | 4908 |
| 4936 Register scratch = scratch0(); | 4909 Register scratch = scratch0(); |
| 4937 Label convert, load_smi, done; | 4910 Label convert, load_smi, done; |
| 4938 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4911 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4939 // Smi check. | 4912 // Smi check. |
| 4940 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4913 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
| 4941 // Heap number map check. | 4914 // Heap number map check. |
| 4942 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4915 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 4943 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 4916 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 4944 if (can_convert_undefined_to_nan) { | 4917 if (can_convert_undefined_to_nan) { |
| 4945 __ Branch(&convert, ne, scratch, Operand(at)); | 4918 __ Branch(&convert, ne, scratch, Operand(at)); |
| 4946 } else { | 4919 } else { |
| 4947 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, | 4920 DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at)); |
| 4948 Operand(at)); | |
| 4949 } | 4921 } |
| 4950 // Load heap number. | 4922 // Load heap number. |
| 4951 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 4923 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 4952 if (deoptimize_on_minus_zero) { | 4924 if (deoptimize_on_minus_zero) { |
| 4953 __ mfc1(at, result_reg.low()); | 4925 __ mfc1(at, result_reg.low()); |
| 4954 __ Branch(&done, ne, at, Operand(zero_reg)); | 4926 __ Branch(&done, ne, at, Operand(zero_reg)); |
| 4955 __ Mfhc1(scratch, result_reg); | 4927 __ Mfhc1(scratch, result_reg); |
| 4956 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch, | 4928 DeoptimizeIf(eq, instr, "minus zero", scratch, |
| 4957 Operand(HeapNumber::kSignMask)); | 4929 Operand(HeapNumber::kSignMask)); |
| 4958 } | 4930 } |
| 4959 __ Branch(&done); | 4931 __ Branch(&done); |
| 4960 if (can_convert_undefined_to_nan) { | 4932 if (can_convert_undefined_to_nan) { |
| 4961 __ bind(&convert); | 4933 __ bind(&convert); |
| 4962 // Convert undefined (and hole) to NaN. | 4934 // Convert undefined (and hole) to NaN. |
| 4963 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 4935 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 4964 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, | 4936 DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg, |
| 4965 Operand(at)); | 4937 Operand(at)); |
| 4966 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4938 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4967 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4939 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
| 4968 __ Branch(&done); | 4940 __ Branch(&done); |
| 4969 } | 4941 } |
| 4970 } else { | 4942 } else { |
| 4971 __ SmiUntag(scratch, input_reg); | 4943 __ SmiUntag(scratch, input_reg); |
| 4972 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4944 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4973 } | 4945 } |
| 4974 // Smi to double register conversion | 4946 // Smi to double register conversion |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5019 __ mov(input_reg, zero_reg); // In delay slot. | 4991 __ mov(input_reg, zero_reg); // In delay slot. |
| 5020 | 4992 |
| 5021 __ bind(&check_bools); | 4993 __ bind(&check_bools); |
| 5022 __ LoadRoot(at, Heap::kTrueValueRootIndex); | 4994 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
| 5023 __ Branch(&check_false, ne, scratch2, Operand(at)); | 4995 __ Branch(&check_false, ne, scratch2, Operand(at)); |
| 5024 __ Branch(USE_DELAY_SLOT, &done); | 4996 __ Branch(USE_DELAY_SLOT, &done); |
| 5025 __ li(input_reg, Operand(1)); // In delay slot. | 4997 __ li(input_reg, Operand(1)); // In delay slot. |
| 5026 | 4998 |
| 5027 __ bind(&check_false); | 4999 __ bind(&check_false); |
| 5028 __ LoadRoot(at, Heap::kFalseValueRootIndex); | 5000 __ LoadRoot(at, Heap::kFalseValueRootIndex); |
| 5029 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean, | 5001 DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2, |
| 5030 scratch2, Operand(at)); | 5002 Operand(at)); |
| 5031 __ Branch(USE_DELAY_SLOT, &done); | 5003 __ Branch(USE_DELAY_SLOT, &done); |
| 5032 __ mov(input_reg, zero_reg); // In delay slot. | 5004 __ mov(input_reg, zero_reg); // In delay slot. |
| 5033 } else { | 5005 } else { |
| 5034 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1, | 5006 DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at)); |
| 5035 Operand(at)); | |
| 5036 | 5007 |
| 5037 // Load the double value. | 5008 // Load the double value. |
| 5038 __ ldc1(double_scratch, | 5009 __ ldc1(double_scratch, |
| 5039 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5010 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 5040 | 5011 |
| 5041 Register except_flag = scratch2; | 5012 Register except_flag = scratch2; |
| 5042 __ EmitFPUTruncate(kRoundToZero, | 5013 __ EmitFPUTruncate(kRoundToZero, |
| 5043 input_reg, | 5014 input_reg, |
| 5044 double_scratch, | 5015 double_scratch, |
| 5045 scratch1, | 5016 scratch1, |
| 5046 double_scratch2, | 5017 double_scratch2, |
| 5047 except_flag, | 5018 except_flag, |
| 5048 kCheckForInexactConversion); | 5019 kCheckForInexactConversion); |
| 5049 | 5020 |
| 5050 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 5021 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 5051 Operand(zero_reg)); | 5022 Operand(zero_reg)); |
| 5052 | 5023 |
| 5053 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5024 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5054 __ Branch(&done, ne, input_reg, Operand(zero_reg)); | 5025 __ Branch(&done, ne, input_reg, Operand(zero_reg)); |
| 5055 | 5026 |
| 5056 __ Mfhc1(scratch1, double_scratch); | 5027 __ Mfhc1(scratch1, double_scratch); |
| 5057 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 5028 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 5058 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 5029 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 5059 Operand(zero_reg)); | |
| 5060 } | 5030 } |
| 5061 } | 5031 } |
| 5062 __ bind(&done); | 5032 __ bind(&done); |
| 5063 } | 5033 } |
| 5064 | 5034 |
| 5065 | 5035 |
| 5066 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5036 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 5067 class DeferredTaggedToI FINAL : public LDeferredCode { | 5037 class DeferredTaggedToI FINAL : public LDeferredCode { |
| 5068 public: | 5038 public: |
| 5069 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5039 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5125 | 5095 |
| 5126 __ EmitFPUTruncate(kRoundToMinusInf, | 5096 __ EmitFPUTruncate(kRoundToMinusInf, |
| 5127 result_reg, | 5097 result_reg, |
| 5128 double_input, | 5098 double_input, |
| 5129 scratch1, | 5099 scratch1, |
| 5130 double_scratch0(), | 5100 double_scratch0(), |
| 5131 except_flag, | 5101 except_flag, |
| 5132 kCheckForInexactConversion); | 5102 kCheckForInexactConversion); |
| 5133 | 5103 |
| 5134 // Deopt if the operation did not succeed (except_flag != 0). | 5104 // Deopt if the operation did not succeed (except_flag != 0). |
| 5135 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 5105 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 5136 Operand(zero_reg)); | 5106 Operand(zero_reg)); |
| 5137 | 5107 |
| 5138 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5108 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5139 Label done; | 5109 Label done; |
| 5140 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | 5110 __ Branch(&done, ne, result_reg, Operand(zero_reg)); |
| 5141 __ Mfhc1(scratch1, double_input); | 5111 __ Mfhc1(scratch1, double_input); |
| 5142 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 5112 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 5143 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 5113 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 5144 Operand(zero_reg)); | |
| 5145 __ bind(&done); | 5114 __ bind(&done); |
| 5146 } | 5115 } |
| 5147 } | 5116 } |
| 5148 } | 5117 } |
| 5149 | 5118 |
| 5150 | 5119 |
| 5151 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5120 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 5152 Register result_reg = ToRegister(instr->result()); | 5121 Register result_reg = ToRegister(instr->result()); |
| 5153 Register scratch1 = LCodeGen::scratch0(); | 5122 Register scratch1 = LCodeGen::scratch0(); |
| 5154 DoubleRegister double_input = ToDoubleRegister(instr->value()); | 5123 DoubleRegister double_input = ToDoubleRegister(instr->value()); |
| 5155 | 5124 |
| 5156 if (instr->truncating()) { | 5125 if (instr->truncating()) { |
| 5157 __ TruncateDoubleToI(result_reg, double_input); | 5126 __ TruncateDoubleToI(result_reg, double_input); |
| 5158 } else { | 5127 } else { |
| 5159 Register except_flag = LCodeGen::scratch1(); | 5128 Register except_flag = LCodeGen::scratch1(); |
| 5160 | 5129 |
| 5161 __ EmitFPUTruncate(kRoundToMinusInf, | 5130 __ EmitFPUTruncate(kRoundToMinusInf, |
| 5162 result_reg, | 5131 result_reg, |
| 5163 double_input, | 5132 double_input, |
| 5164 scratch1, | 5133 scratch1, |
| 5165 double_scratch0(), | 5134 double_scratch0(), |
| 5166 except_flag, | 5135 except_flag, |
| 5167 kCheckForInexactConversion); | 5136 kCheckForInexactConversion); |
| 5168 | 5137 |
| 5169 // Deopt if the operation did not succeed (except_flag != 0). | 5138 // Deopt if the operation did not succeed (except_flag != 0). |
| 5170 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 5139 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag, |
| 5171 Operand(zero_reg)); | 5140 Operand(zero_reg)); |
| 5172 | 5141 |
| 5173 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5142 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5174 Label done; | 5143 Label done; |
| 5175 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | 5144 __ Branch(&done, ne, result_reg, Operand(zero_reg)); |
| 5176 __ Mfhc1(scratch1, double_input); | 5145 __ Mfhc1(scratch1, double_input); |
| 5177 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 5146 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 5178 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 5147 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg)); |
| 5179 Operand(zero_reg)); | |
| 5180 __ bind(&done); | 5148 __ bind(&done); |
| 5181 } | 5149 } |
| 5182 } | 5150 } |
| 5183 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1); | 5151 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1); |
| 5184 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg)); | 5152 DeoptimizeIf(lt, instr, "overflow", scratch1, Operand(zero_reg)); |
| 5185 } | 5153 } |
| 5186 | 5154 |
| 5187 | 5155 |
| 5188 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5156 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 5189 LOperand* input = instr->value(); | 5157 LOperand* input = instr->value(); |
| 5190 __ SmiTst(ToRegister(input), at); | 5158 __ SmiTst(ToRegister(input), at); |
| 5191 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg)); | 5159 DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg)); |
| 5192 } | 5160 } |
| 5193 | 5161 |
| 5194 | 5162 |
| 5195 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5163 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 5196 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5164 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5197 LOperand* input = instr->value(); | 5165 LOperand* input = instr->value(); |
| 5198 __ SmiTst(ToRegister(input), at); | 5166 __ SmiTst(ToRegister(input), at); |
| 5199 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 5167 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 5200 } | 5168 } |
| 5201 } | 5169 } |
| 5202 | 5170 |
| 5203 | 5171 |
| 5204 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5172 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 5205 Register input = ToRegister(instr->value()); | 5173 Register input = ToRegister(instr->value()); |
| 5206 Register scratch = scratch0(); | 5174 Register scratch = scratch0(); |
| 5207 | 5175 |
| 5208 __ GetObjectType(input, scratch, scratch); | 5176 __ GetObjectType(input, scratch, scratch); |
| 5209 | 5177 |
| 5210 if (instr->hydrogen()->is_interval_check()) { | 5178 if (instr->hydrogen()->is_interval_check()) { |
| 5211 InstanceType first; | 5179 InstanceType first; |
| 5212 InstanceType last; | 5180 InstanceType last; |
| 5213 instr->hydrogen()->GetCheckInterval(&first, &last); | 5181 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 5214 | 5182 |
| 5215 // If there is only one type in the interval check for equality. | 5183 // If there is only one type in the interval check for equality. |
| 5216 if (first == last) { | 5184 if (first == last) { |
| 5217 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, | 5185 DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first)); |
| 5218 Operand(first)); | |
| 5219 } else { | 5186 } else { |
| 5220 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch, | 5187 DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first)); |
| 5221 Operand(first)); | |
| 5222 // Omit check for the last type. | 5188 // Omit check for the last type. |
| 5223 if (last != LAST_TYPE) { | 5189 if (last != LAST_TYPE) { |
| 5224 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch, | 5190 DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last)); |
| 5225 Operand(last)); | |
| 5226 } | 5191 } |
| 5227 } | 5192 } |
| 5228 } else { | 5193 } else { |
| 5229 uint8_t mask; | 5194 uint8_t mask; |
| 5230 uint8_t tag; | 5195 uint8_t tag; |
| 5231 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5196 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 5232 | 5197 |
| 5233 if (base::bits::IsPowerOfTwo32(mask)) { | 5198 if (base::bits::IsPowerOfTwo32(mask)) { |
| 5234 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5199 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
| 5235 __ And(at, scratch, mask); | 5200 __ And(at, scratch, mask); |
| 5236 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType, | 5201 DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at, |
| 5237 at, Operand(zero_reg)); | 5202 Operand(zero_reg)); |
| 5238 } else { | 5203 } else { |
| 5239 __ And(scratch, scratch, Operand(mask)); | 5204 __ And(scratch, scratch, Operand(mask)); |
| 5240 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, | 5205 DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag)); |
| 5241 Operand(tag)); | |
| 5242 } | 5206 } |
| 5243 } | 5207 } |
| 5244 } | 5208 } |
| 5245 | 5209 |
| 5246 | 5210 |
| 5247 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5211 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 5248 Register reg = ToRegister(instr->value()); | 5212 Register reg = ToRegister(instr->value()); |
| 5249 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5213 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
| 5250 AllowDeferredHandleDereference smi_check; | 5214 AllowDeferredHandleDereference smi_check; |
| 5251 if (isolate()->heap()->InNewSpace(*object)) { | 5215 if (isolate()->heap()->InNewSpace(*object)) { |
| 5252 Register reg = ToRegister(instr->value()); | 5216 Register reg = ToRegister(instr->value()); |
| 5253 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5217 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 5254 __ li(at, Operand(Handle<Object>(cell))); | 5218 __ li(at, Operand(Handle<Object>(cell))); |
| 5255 __ lw(at, FieldMemOperand(at, Cell::kValueOffset)); | 5219 __ lw(at, FieldMemOperand(at, Cell::kValueOffset)); |
| 5256 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at)); | 5220 DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at)); |
| 5257 } else { | 5221 } else { |
| 5258 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object)); | 5222 DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object)); |
| 5259 } | 5223 } |
| 5260 } | 5224 } |
| 5261 | 5225 |
| 5262 | 5226 |
| 5263 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5227 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 5264 { | 5228 { |
| 5265 PushSafepointRegistersScope scope(this); | 5229 PushSafepointRegistersScope scope(this); |
| 5266 __ push(object); | 5230 __ push(object); |
| 5267 __ mov(cp, zero_reg); | 5231 __ mov(cp, zero_reg); |
| 5268 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5232 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 5269 RecordSafepointWithRegisters( | 5233 RecordSafepointWithRegisters( |
| 5270 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5234 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 5271 __ StoreToSafepointRegisterSlot(v0, scratch0()); | 5235 __ StoreToSafepointRegisterSlot(v0, scratch0()); |
| 5272 } | 5236 } |
| 5273 __ SmiTst(scratch0(), at); | 5237 __ SmiTst(scratch0(), at); |
| 5274 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at, | 5238 DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg)); |
| 5275 Operand(zero_reg)); | |
| 5276 } | 5239 } |
| 5277 | 5240 |
| 5278 | 5241 |
| 5279 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5242 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5280 class DeferredCheckMaps FINAL : public LDeferredCode { | 5243 class DeferredCheckMaps FINAL : public LDeferredCode { |
| 5281 public: | 5244 public: |
| 5282 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5245 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 5283 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5246 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 5284 SetExit(check_maps()); | 5247 SetExit(check_maps()); |
| 5285 } | 5248 } |
| (...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5319 Label success; | 5282 Label success; |
| 5320 for (int i = 0; i < maps->size() - 1; i++) { | 5283 for (int i = 0; i < maps->size() - 1; i++) { |
| 5321 Handle<Map> map = maps->at(i).handle(); | 5284 Handle<Map> map = maps->at(i).handle(); |
| 5322 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); | 5285 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); |
| 5323 } | 5286 } |
| 5324 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5287 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 5325 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). | 5288 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). |
| 5326 if (instr->hydrogen()->HasMigrationTarget()) { | 5289 if (instr->hydrogen()->HasMigrationTarget()) { |
| 5327 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); | 5290 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); |
| 5328 } else { | 5291 } else { |
| 5329 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map)); | 5292 DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map)); |
| 5330 } | 5293 } |
| 5331 | 5294 |
| 5332 __ bind(&success); | 5295 __ bind(&success); |
| 5333 } | 5296 } |
| 5334 | 5297 |
| 5335 | 5298 |
| 5336 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5299 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5337 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5300 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5338 Register result_reg = ToRegister(instr->result()); | 5301 Register result_reg = ToRegister(instr->result()); |
| 5339 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 5302 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); |
| (...skipping 17 matching lines...) Expand all Loading... |
| 5357 | 5320 |
| 5358 // Both smi and heap number cases are handled. | 5321 // Both smi and heap number cases are handled. |
| 5359 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); | 5322 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); |
| 5360 | 5323 |
| 5361 // Check for heap number | 5324 // Check for heap number |
| 5362 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5325 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 5363 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); | 5326 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); |
| 5364 | 5327 |
| 5365 // Check for undefined. Undefined is converted to zero for clamping | 5328 // Check for undefined. Undefined is converted to zero for clamping |
| 5366 // conversions. | 5329 // conversions. |
| 5367 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, | 5330 DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg, |
| 5368 Operand(factory()->undefined_value())); | 5331 Operand(factory()->undefined_value())); |
| 5369 __ mov(result_reg, zero_reg); | 5332 __ mov(result_reg, zero_reg); |
| 5370 __ jmp(&done); | 5333 __ jmp(&done); |
| 5371 | 5334 |
| 5372 // Heap number | 5335 // Heap number |
| 5373 __ bind(&heap_number); | 5336 __ bind(&heap_number); |
| 5374 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, | 5337 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, |
| 5375 HeapNumber::kValueOffset)); | 5338 HeapNumber::kValueOffset)); |
| 5376 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); | 5339 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); |
| 5377 __ jmp(&done); | 5340 __ jmp(&done); |
| (...skipping 402 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5780 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | 5743 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
| 5781 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | 5744 Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
| 5782 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | 5745 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the |
| 5783 // needed return address), even though the implementation of LAZY and EAGER is | 5746 // needed return address), even though the implementation of LAZY and EAGER is |
| 5784 // now identical. When LAZY is eventually completely folded into EAGER, remove | 5747 // now identical. When LAZY is eventually completely folded into EAGER, remove |
| 5785 // the special case below. | 5748 // the special case below. |
| 5786 if (info()->IsStub() && type == Deoptimizer::EAGER) { | 5749 if (info()->IsStub() && type == Deoptimizer::EAGER) { |
| 5787 type = Deoptimizer::LAZY; | 5750 type = Deoptimizer::LAZY; |
| 5788 } | 5751 } |
| 5789 | 5752 |
| 5790 DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg, | 5753 DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg, |
| 5791 Operand(zero_reg)); | 5754 Operand(zero_reg)); |
| 5792 } | 5755 } |
| 5793 | 5756 |
| 5794 | 5757 |
| 5795 void LCodeGen::DoDummy(LDummy* instr) { | 5758 void LCodeGen::DoDummy(LDummy* instr) { |
| 5796 // Nothing to see here, move on! | 5759 // Nothing to see here, move on! |
| 5797 } | 5760 } |
| 5798 | 5761 |
| 5799 | 5762 |
| 5800 void LCodeGen::DoDummyUse(LDummyUse* instr) { | 5763 void LCodeGen::DoDummyUse(LDummyUse* instr) { |
| (...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5871 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5834 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 5872 | 5835 |
| 5873 GenerateOsrPrologue(); | 5836 GenerateOsrPrologue(); |
| 5874 } | 5837 } |
| 5875 | 5838 |
| 5876 | 5839 |
| 5877 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5840 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| 5878 Register result = ToRegister(instr->result()); | 5841 Register result = ToRegister(instr->result()); |
| 5879 Register object = ToRegister(instr->object()); | 5842 Register object = ToRegister(instr->object()); |
| 5880 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 5843 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 5881 DeoptimizeIf(eq, instr, Deoptimizer::kUndefined, object, Operand(at)); | 5844 DeoptimizeIf(eq, instr, "undefined", object, Operand(at)); |
| 5882 | 5845 |
| 5883 Register null_value = t1; | 5846 Register null_value = t1; |
| 5884 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5847 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| 5885 DeoptimizeIf(eq, instr, Deoptimizer::kNull, object, Operand(null_value)); | 5848 DeoptimizeIf(eq, instr, "null", object, Operand(null_value)); |
| 5886 | 5849 |
| 5887 __ And(at, object, kSmiTagMask); | 5850 __ And(at, object, kSmiTagMask); |
| 5888 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 5851 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg)); |
| 5889 | 5852 |
| 5890 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5853 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| 5891 __ GetObjectType(object, a1, a1); | 5854 __ GetObjectType(object, a1, a1); |
| 5892 DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1, | 5855 DeoptimizeIf(le, instr, "not a JavaScript object", a1, |
| 5893 Operand(LAST_JS_PROXY_TYPE)); | 5856 Operand(LAST_JS_PROXY_TYPE)); |
| 5894 | 5857 |
| 5895 Label use_cache, call_runtime; | 5858 Label use_cache, call_runtime; |
| 5896 DCHECK(object.is(a0)); | 5859 DCHECK(object.is(a0)); |
| 5897 __ CheckEnumCache(null_value, &call_runtime); | 5860 __ CheckEnumCache(null_value, &call_runtime); |
| 5898 | 5861 |
| 5899 __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset)); | 5862 __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5900 __ Branch(&use_cache); | 5863 __ Branch(&use_cache); |
| 5901 | 5864 |
| 5902 // Get the set of properties to enumerate. | 5865 // Get the set of properties to enumerate. |
| 5903 __ bind(&call_runtime); | 5866 __ bind(&call_runtime); |
| 5904 __ push(object); | 5867 __ push(object); |
| 5905 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5868 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| 5906 | 5869 |
| 5907 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); | 5870 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 5908 DCHECK(result.is(v0)); | 5871 DCHECK(result.is(v0)); |
| 5909 __ LoadRoot(at, Heap::kMetaMapRootIndex); | 5872 __ LoadRoot(at, Heap::kMetaMapRootIndex); |
| 5910 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at)); | 5873 DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at)); |
| 5911 __ bind(&use_cache); | 5874 __ bind(&use_cache); |
| 5912 } | 5875 } |
| 5913 | 5876 |
| 5914 | 5877 |
| 5915 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5878 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| 5916 Register map = ToRegister(instr->map()); | 5879 Register map = ToRegister(instr->map()); |
| 5917 Register result = ToRegister(instr->result()); | 5880 Register result = ToRegister(instr->result()); |
| 5918 Label load_cache, done; | 5881 Label load_cache, done; |
| 5919 __ EnumLength(result, map); | 5882 __ EnumLength(result, map); |
| 5920 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); | 5883 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); |
| 5921 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); | 5884 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 5922 __ jmp(&done); | 5885 __ jmp(&done); |
| 5923 | 5886 |
| 5924 __ bind(&load_cache); | 5887 __ bind(&load_cache); |
| 5925 __ LoadInstanceDescriptors(map, result); | 5888 __ LoadInstanceDescriptors(map, result); |
| 5926 __ lw(result, | 5889 __ lw(result, |
| 5927 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5890 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5928 __ lw(result, | 5891 __ lw(result, |
| 5929 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5892 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5930 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg)); | 5893 DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg)); |
| 5931 | 5894 |
| 5932 __ bind(&done); | 5895 __ bind(&done); |
| 5933 } | 5896 } |
| 5934 | 5897 |
| 5935 | 5898 |
| 5936 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5899 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5937 Register object = ToRegister(instr->value()); | 5900 Register object = ToRegister(instr->value()); |
| 5938 Register map = ToRegister(instr->map()); | 5901 Register map = ToRegister(instr->map()); |
| 5939 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5902 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5940 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0())); | 5903 DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0())); |
| 5941 } | 5904 } |
| 5942 | 5905 |
| 5943 | 5906 |
| 5944 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5907 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5945 Register result, | 5908 Register result, |
| 5946 Register object, | 5909 Register object, |
| 5947 Register index) { | 5910 Register index) { |
| 5948 PushSafepointRegistersScope scope(this); | 5911 PushSafepointRegistersScope scope(this); |
| 5949 __ Push(object, index); | 5912 __ Push(object, index); |
| 5950 __ mov(cp, zero_reg); | 5913 __ mov(cp, zero_reg); |
| (...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 6027 __ li(at, scope_info); | 5990 __ li(at, scope_info); |
| 6028 __ Push(at, ToRegister(instr->function())); | 5991 __ Push(at, ToRegister(instr->function())); |
| 6029 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5992 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 6030 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5993 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 6031 } | 5994 } |
| 6032 | 5995 |
| 6033 | 5996 |
| 6034 #undef __ | 5997 #undef __ |
| 6035 | 5998 |
| 6036 } } // namespace v8::internal | 5999 } } // namespace v8::internal |
| OLD | NEW |