| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_IA32 | 7 #if V8_TARGET_ARCH_IA32 |
| 8 | 8 |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
| (...skipping 802 matching lines...) | |
| 813 int pc_offset = masm()->pc_offset(); | 813 int pc_offset = masm()->pc_offset(); |
| 814 environment->Register(deoptimization_index, | 814 environment->Register(deoptimization_index, |
| 815 translation.index(), | 815 translation.index(), |
| 816 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 816 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 817 deoptimizations_.Add(environment, zone()); | 817 deoptimizations_.Add(environment, zone()); |
| 818 } | 818 } |
| 819 } | 819 } |
| 820 | 820 |
| 821 | 821 |
| 822 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, | 822 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, |
| 823 const char* detail, | 823 Deoptimizer::DeoptReason deopt_reason, |
| 824 Deoptimizer::BailoutType bailout_type) { | 824 Deoptimizer::BailoutType bailout_type) { |
| 825 LEnvironment* environment = instr->environment(); | 825 LEnvironment* environment = instr->environment(); |
| 826 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 826 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 827 DCHECK(environment->HasBeenRegistered()); | 827 DCHECK(environment->HasBeenRegistered()); |
| 828 int id = environment->deoptimization_index(); | 828 int id = environment->deoptimization_index(); |
| 829 DCHECK(info()->IsOptimizing() || info()->IsStub()); | 829 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
| 830 Address entry = | 830 Address entry = |
| 831 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 831 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 832 if (entry == NULL) { | 832 if (entry == NULL) { |
| 833 Abort(kBailoutWasNotPrepared); | 833 Abort(kBailoutWasNotPrepared); |
| (...skipping 22 matching lines...) | |
| 856 } | 856 } |
| 857 | 857 |
| 858 if (info()->ShouldTrapOnDeopt()) { | 858 if (info()->ShouldTrapOnDeopt()) { |
| 859 Label done; | 859 Label done; |
| 860 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); | 860 if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear); |
| 861 __ int3(); | 861 __ int3(); |
| 862 __ bind(&done); | 862 __ bind(&done); |
| 863 } | 863 } |
| 864 | 864 |
| 865 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), | 865 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), |
| 866 instr->Mnemonic(), detail); | 866 instr->Mnemonic(), deopt_reason); |
| 867 DCHECK(info()->IsStub() || frame_is_built_); | 867 DCHECK(info()->IsStub() || frame_is_built_); |
| 868 if (cc == no_condition && frame_is_built_) { | 868 if (cc == no_condition && frame_is_built_) { |
| 869 DeoptComment(reason); | 869 DeoptComment(reason); |
| 870 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 870 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| 871 } else { | 871 } else { |
| 872 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, | 872 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, |
| 873 !frame_is_built_); | 873 !frame_is_built_); |
| 874 // We often have several deopts to the same entry, reuse the last | 874 // We often have several deopts to the same entry, reuse the last |
| 875 // jump entry if this is the case. | 875 // jump entry if this is the case. |
| 876 if (jump_table_.is_empty() || | 876 if (jump_table_.is_empty() || |
| 877 !table_entry.IsEquivalentTo(jump_table_.last())) { | 877 !table_entry.IsEquivalentTo(jump_table_.last())) { |
| 878 jump_table_.Add(table_entry, zone()); | 878 jump_table_.Add(table_entry, zone()); |
| 879 } | 879 } |
| 880 if (cc == no_condition) { | 880 if (cc == no_condition) { |
| 881 __ jmp(&jump_table_.last().label); | 881 __ jmp(&jump_table_.last().label); |
| 882 } else { | 882 } else { |
| 883 __ j(cc, &jump_table_.last().label); | 883 __ j(cc, &jump_table_.last().label); |
| 884 } | 884 } |
| 885 } | 885 } |
| 886 } | 886 } |
| 887 | 887 |
| 888 | 888 |
| 889 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, | 889 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, |
| 890 const char* detail) { | 890 Deoptimizer::DeoptReason deopt_reason) { |
| 891 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 891 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| 892 ? Deoptimizer::LAZY | 892 ? Deoptimizer::LAZY |
| 893 : Deoptimizer::EAGER; | 893 : Deoptimizer::EAGER; |
| 894 DeoptimizeIf(cc, instr, detail, bailout_type); | 894 DeoptimizeIf(cc, instr, deopt_reason, bailout_type); |
| 895 } | 895 } |
| 896 | 896 |
| 897 | 897 |
| 898 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 898 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 899 int length = deoptimizations_.length(); | 899 int length = deoptimizations_.length(); |
| 900 if (length == 0) return; | 900 if (length == 0) return; |
| 901 Handle<DeoptimizationInputData> data = | 901 Handle<DeoptimizationInputData> data = |
| 902 DeoptimizationInputData::New(isolate(), length, TENURED); | 902 DeoptimizationInputData::New(isolate(), length, TENURED); |
| 903 | 903 |
| 904 Handle<ByteArray> translations = | 904 Handle<ByteArray> translations = |
| (...skipping 209 matching lines...) | |
| 1114 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1114 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1115 Label dividend_is_not_negative, done; | 1115 Label dividend_is_not_negative, done; |
| 1116 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1116 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 1117 __ test(dividend, dividend); | 1117 __ test(dividend, dividend); |
| 1118 __ j(not_sign, &dividend_is_not_negative, Label::kNear); | 1118 __ j(not_sign, &dividend_is_not_negative, Label::kNear); |
| 1119 // Note that this is correct even for kMinInt operands. | 1119 // Note that this is correct even for kMinInt operands. |
| 1120 __ neg(dividend); | 1120 __ neg(dividend); |
| 1121 __ and_(dividend, mask); | 1121 __ and_(dividend, mask); |
| 1122 __ neg(dividend); | 1122 __ neg(dividend); |
| 1123 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1123 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1124 DeoptimizeIf(zero, instr, "minus zero"); | 1124 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1125 } | 1125 } |
| 1126 __ jmp(&done, Label::kNear); | 1126 __ jmp(&done, Label::kNear); |
| 1127 } | 1127 } |
| 1128 | 1128 |
| 1129 __ bind(&dividend_is_not_negative); | 1129 __ bind(&dividend_is_not_negative); |
| 1130 __ and_(dividend, mask); | 1130 __ and_(dividend, mask); |
| 1131 __ bind(&done); | 1131 __ bind(&done); |
| 1132 } | 1132 } |
| 1133 | 1133 |
| 1134 | 1134 |
| 1135 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1135 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 1136 Register dividend = ToRegister(instr->dividend()); | 1136 Register dividend = ToRegister(instr->dividend()); |
| 1137 int32_t divisor = instr->divisor(); | 1137 int32_t divisor = instr->divisor(); |
| 1138 DCHECK(ToRegister(instr->result()).is(eax)); | 1138 DCHECK(ToRegister(instr->result()).is(eax)); |
| 1139 | 1139 |
| 1140 if (divisor == 0) { | 1140 if (divisor == 0) { |
| 1141 DeoptimizeIf(no_condition, instr, "division by zero"); | 1141 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); |
| 1142 return; | 1142 return; |
| 1143 } | 1143 } |
| 1144 | 1144 |
| 1145 __ TruncatingDiv(dividend, Abs(divisor)); | 1145 __ TruncatingDiv(dividend, Abs(divisor)); |
| 1146 __ imul(edx, edx, Abs(divisor)); | 1146 __ imul(edx, edx, Abs(divisor)); |
| 1147 __ mov(eax, dividend); | 1147 __ mov(eax, dividend); |
| 1148 __ sub(eax, edx); | 1148 __ sub(eax, edx); |
| 1149 | 1149 |
| 1150 // Check for negative zero. | 1150 // Check for negative zero. |
| 1151 HMod* hmod = instr->hydrogen(); | 1151 HMod* hmod = instr->hydrogen(); |
| 1152 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1152 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1153 Label remainder_not_zero; | 1153 Label remainder_not_zero; |
| 1154 __ j(not_zero, &remainder_not_zero, Label::kNear); | 1154 __ j(not_zero, &remainder_not_zero, Label::kNear); |
| 1155 __ cmp(dividend, Immediate(0)); | 1155 __ cmp(dividend, Immediate(0)); |
| 1156 DeoptimizeIf(less, instr, "minus zero"); | 1156 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero); |
| 1157 __ bind(&remainder_not_zero); | 1157 __ bind(&remainder_not_zero); |
| 1158 } | 1158 } |
| 1159 } | 1159 } |
| 1160 | 1160 |
| 1161 | 1161 |
| 1162 void LCodeGen::DoModI(LModI* instr) { | 1162 void LCodeGen::DoModI(LModI* instr) { |
| 1163 HMod* hmod = instr->hydrogen(); | 1163 HMod* hmod = instr->hydrogen(); |
| 1164 | 1164 |
| 1165 Register left_reg = ToRegister(instr->left()); | 1165 Register left_reg = ToRegister(instr->left()); |
| 1166 DCHECK(left_reg.is(eax)); | 1166 DCHECK(left_reg.is(eax)); |
| 1167 Register right_reg = ToRegister(instr->right()); | 1167 Register right_reg = ToRegister(instr->right()); |
| 1168 DCHECK(!right_reg.is(eax)); | 1168 DCHECK(!right_reg.is(eax)); |
| 1169 DCHECK(!right_reg.is(edx)); | 1169 DCHECK(!right_reg.is(edx)); |
| 1170 Register result_reg = ToRegister(instr->result()); | 1170 Register result_reg = ToRegister(instr->result()); |
| 1171 DCHECK(result_reg.is(edx)); | 1171 DCHECK(result_reg.is(edx)); |
| 1172 | 1172 |
| 1173 Label done; | 1173 Label done; |
| 1174 // Check for x % 0, idiv would signal a divide error. We have to | 1174 // Check for x % 0, idiv would signal a divide error. We have to |
| 1175 // deopt in this case because we can't return a NaN. | 1175 // deopt in this case because we can't return a NaN. |
| 1176 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1176 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1177 __ test(right_reg, Operand(right_reg)); | 1177 __ test(right_reg, Operand(right_reg)); |
| 1178 DeoptimizeIf(zero, instr, "division by zero"); | 1178 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero); |
| 1179 } | 1179 } |
| 1180 | 1180 |
| 1181 // Check for kMinInt % -1, idiv would signal a divide error. We | 1181 // Check for kMinInt % -1, idiv would signal a divide error. We |
| 1182 // have to deopt if we care about -0, because we can't return that. | 1182 // have to deopt if we care about -0, because we can't return that. |
| 1183 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1183 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 1184 Label no_overflow_possible; | 1184 Label no_overflow_possible; |
| 1185 __ cmp(left_reg, kMinInt); | 1185 __ cmp(left_reg, kMinInt); |
| 1186 __ j(not_equal, &no_overflow_possible, Label::kNear); | 1186 __ j(not_equal, &no_overflow_possible, Label::kNear); |
| 1187 __ cmp(right_reg, -1); | 1187 __ cmp(right_reg, -1); |
| 1188 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1188 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1189 DeoptimizeIf(equal, instr, "minus zero"); | 1189 DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero); |
| 1190 } else { | 1190 } else { |
| 1191 __ j(not_equal, &no_overflow_possible, Label::kNear); | 1191 __ j(not_equal, &no_overflow_possible, Label::kNear); |
| 1192 __ Move(result_reg, Immediate(0)); | 1192 __ Move(result_reg, Immediate(0)); |
| 1193 __ jmp(&done, Label::kNear); | 1193 __ jmp(&done, Label::kNear); |
| 1194 } | 1194 } |
| 1195 __ bind(&no_overflow_possible); | 1195 __ bind(&no_overflow_possible); |
| 1196 } | 1196 } |
| 1197 | 1197 |
| 1198 // Sign extend dividend in eax into edx:eax. | 1198 // Sign extend dividend in eax into edx:eax. |
| 1199 __ cdq(); | 1199 __ cdq(); |
| 1200 | 1200 |
| 1201 // If we care about -0, test if the dividend is <0 and the result is 0. | 1201 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1202 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1202 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1203 Label positive_left; | 1203 Label positive_left; |
| 1204 __ test(left_reg, Operand(left_reg)); | 1204 __ test(left_reg, Operand(left_reg)); |
| 1205 __ j(not_sign, &positive_left, Label::kNear); | 1205 __ j(not_sign, &positive_left, Label::kNear); |
| 1206 __ idiv(right_reg); | 1206 __ idiv(right_reg); |
| 1207 __ test(result_reg, Operand(result_reg)); | 1207 __ test(result_reg, Operand(result_reg)); |
| 1208 DeoptimizeIf(zero, instr, "minus zero"); | 1208 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1209 __ jmp(&done, Label::kNear); | 1209 __ jmp(&done, Label::kNear); |
| 1210 __ bind(&positive_left); | 1210 __ bind(&positive_left); |
| 1211 } | 1211 } |
| 1212 __ idiv(right_reg); | 1212 __ idiv(right_reg); |
| 1213 __ bind(&done); | 1213 __ bind(&done); |
| 1214 } | 1214 } |
| 1215 | 1215 |
| 1216 | 1216 |
| 1217 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1217 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1218 Register dividend = ToRegister(instr->dividend()); | 1218 Register dividend = ToRegister(instr->dividend()); |
| 1219 int32_t divisor = instr->divisor(); | 1219 int32_t divisor = instr->divisor(); |
| 1220 Register result = ToRegister(instr->result()); | 1220 Register result = ToRegister(instr->result()); |
| 1221 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1221 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1222 DCHECK(!result.is(dividend)); | 1222 DCHECK(!result.is(dividend)); |
| 1223 | 1223 |
| 1224 // Check for (0 / -x) that will produce negative zero. | 1224 // Check for (0 / -x) that will produce negative zero. |
| 1225 HDiv* hdiv = instr->hydrogen(); | 1225 HDiv* hdiv = instr->hydrogen(); |
| 1226 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1226 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1227 __ test(dividend, dividend); | 1227 __ test(dividend, dividend); |
| 1228 DeoptimizeIf(zero, instr, "minus zero"); | 1228 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1229 } | 1229 } |
| 1230 // Check for (kMinInt / -1). | 1230 // Check for (kMinInt / -1). |
| 1231 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1231 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1232 __ cmp(dividend, kMinInt); | 1232 __ cmp(dividend, kMinInt); |
| 1233 DeoptimizeIf(zero, instr, "overflow"); | 1233 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow); |
| 1234 } | 1234 } |
| 1235 // Deoptimize if remainder will not be 0. | 1235 // Deoptimize if remainder will not be 0. |
| 1236 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1236 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1237 divisor != 1 && divisor != -1) { | 1237 divisor != 1 && divisor != -1) { |
| 1238 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1238 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1239 __ test(dividend, Immediate(mask)); | 1239 __ test(dividend, Immediate(mask)); |
| 1240 DeoptimizeIf(not_zero, instr, "lost precision"); | 1240 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision); |
| 1241 } | 1241 } |
| 1242 __ Move(result, dividend); | 1242 __ Move(result, dividend); |
| 1243 int32_t shift = WhichPowerOf2Abs(divisor); | 1243 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1244 if (shift > 0) { | 1244 if (shift > 0) { |
| 1245 // The arithmetic shift is always OK, the 'if' is an optimization only. | 1245 // The arithmetic shift is always OK, the 'if' is an optimization only. |
| 1246 if (shift > 1) __ sar(result, 31); | 1246 if (shift > 1) __ sar(result, 31); |
| 1247 __ shr(result, 32 - shift); | 1247 __ shr(result, 32 - shift); |
| 1248 __ add(result, dividend); | 1248 __ add(result, dividend); |
| 1249 __ sar(result, shift); | 1249 __ sar(result, shift); |
| 1250 } | 1250 } |
| 1251 if (divisor < 0) __ neg(result); | 1251 if (divisor < 0) __ neg(result); |
| 1252 } | 1252 } |
| 1253 | 1253 |
| 1254 | 1254 |
| 1255 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1255 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1256 Register dividend = ToRegister(instr->dividend()); | 1256 Register dividend = ToRegister(instr->dividend()); |
| 1257 int32_t divisor = instr->divisor(); | 1257 int32_t divisor = instr->divisor(); |
| 1258 DCHECK(ToRegister(instr->result()).is(edx)); | 1258 DCHECK(ToRegister(instr->result()).is(edx)); |
| 1259 | 1259 |
| 1260 if (divisor == 0) { | 1260 if (divisor == 0) { |
| 1261 DeoptimizeIf(no_condition, instr, "division by zero"); | 1261 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); |
| 1262 return; | 1262 return; |
| 1263 } | 1263 } |
| 1264 | 1264 |
| 1265 // Check for (0 / -x) that will produce negative zero. | 1265 // Check for (0 / -x) that will produce negative zero. |
| 1266 HDiv* hdiv = instr->hydrogen(); | 1266 HDiv* hdiv = instr->hydrogen(); |
| 1267 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1267 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1268 __ test(dividend, dividend); | 1268 __ test(dividend, dividend); |
| 1269 DeoptimizeIf(zero, instr, "minus zero"); | 1269 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1270 } | 1270 } |
| 1271 | 1271 |
| 1272 __ TruncatingDiv(dividend, Abs(divisor)); | 1272 __ TruncatingDiv(dividend, Abs(divisor)); |
| 1273 if (divisor < 0) __ neg(edx); | 1273 if (divisor < 0) __ neg(edx); |
| 1274 | 1274 |
| 1275 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1275 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1276 __ mov(eax, edx); | 1276 __ mov(eax, edx); |
| 1277 __ imul(eax, eax, divisor); | 1277 __ imul(eax, eax, divisor); |
| 1278 __ sub(eax, dividend); | 1278 __ sub(eax, dividend); |
| 1279 DeoptimizeIf(not_equal, instr, "lost precision"); | 1279 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); |
| 1280 } | 1280 } |
| 1281 } | 1281 } |
| 1282 | 1282 |
| 1283 | 1283 |
| 1284 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1284 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1285 void LCodeGen::DoDivI(LDivI* instr) { | 1285 void LCodeGen::DoDivI(LDivI* instr) { |
| 1286 HBinaryOperation* hdiv = instr->hydrogen(); | 1286 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1287 Register dividend = ToRegister(instr->dividend()); | 1287 Register dividend = ToRegister(instr->dividend()); |
| 1288 Register divisor = ToRegister(instr->divisor()); | 1288 Register divisor = ToRegister(instr->divisor()); |
| 1289 Register remainder = ToRegister(instr->temp()); | 1289 Register remainder = ToRegister(instr->temp()); |
| 1290 DCHECK(dividend.is(eax)); | 1290 DCHECK(dividend.is(eax)); |
| 1291 DCHECK(remainder.is(edx)); | 1291 DCHECK(remainder.is(edx)); |
| 1292 DCHECK(ToRegister(instr->result()).is(eax)); | 1292 DCHECK(ToRegister(instr->result()).is(eax)); |
| 1293 DCHECK(!divisor.is(eax)); | 1293 DCHECK(!divisor.is(eax)); |
| 1294 DCHECK(!divisor.is(edx)); | 1294 DCHECK(!divisor.is(edx)); |
| 1295 | 1295 |
| 1296 // Check for x / 0. | 1296 // Check for x / 0. |
| 1297 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1297 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1298 __ test(divisor, divisor); | 1298 __ test(divisor, divisor); |
| 1299 DeoptimizeIf(zero, instr, "division by zero"); | 1299 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero); |
| 1300 } | 1300 } |
| 1301 | 1301 |
| 1302 // Check for (0 / -x) that will produce negative zero. | 1302 // Check for (0 / -x) that will produce negative zero. |
| 1303 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1303 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1304 Label dividend_not_zero; | 1304 Label dividend_not_zero; |
| 1305 __ test(dividend, dividend); | 1305 __ test(dividend, dividend); |
| 1306 __ j(not_zero, &dividend_not_zero, Label::kNear); | 1306 __ j(not_zero, &dividend_not_zero, Label::kNear); |
| 1307 __ test(divisor, divisor); | 1307 __ test(divisor, divisor); |
| 1308 DeoptimizeIf(sign, instr, "minus zero"); | 1308 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
| 1309 __ bind(&dividend_not_zero); | 1309 __ bind(&dividend_not_zero); |
| 1310 } | 1310 } |
| 1311 | 1311 |
| 1312 // Check for (kMinInt / -1). | 1312 // Check for (kMinInt / -1). |
| 1313 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1313 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
| 1314 Label dividend_not_min_int; | 1314 Label dividend_not_min_int; |
| 1315 __ cmp(dividend, kMinInt); | 1315 __ cmp(dividend, kMinInt); |
| 1316 __ j(not_zero, &dividend_not_min_int, Label::kNear); | 1316 __ j(not_zero, &dividend_not_min_int, Label::kNear); |
| 1317 __ cmp(divisor, -1); | 1317 __ cmp(divisor, -1); |
| 1318 DeoptimizeIf(zero, instr, "overflow"); | 1318 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow); |
| 1319 __ bind(&dividend_not_min_int); | 1319 __ bind(&dividend_not_min_int); |
| 1320 } | 1320 } |
| 1321 | 1321 |
| 1322 // Sign extend to edx (= remainder). | 1322 // Sign extend to edx (= remainder). |
| 1323 __ cdq(); | 1323 __ cdq(); |
| 1324 __ idiv(divisor); | 1324 __ idiv(divisor); |
| 1325 | 1325 |
| 1326 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1326 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1327 // Deoptimize if remainder is not 0. | 1327 // Deoptimize if remainder is not 0. |
| 1328 __ test(remainder, remainder); | 1328 __ test(remainder, remainder); |
| 1329 DeoptimizeIf(not_zero, instr, "lost precision"); | 1329 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision); |
| 1330 } | 1330 } |
| 1331 } | 1331 } |
| 1332 | 1332 |
| 1333 | 1333 |
| 1334 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 1334 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
| 1335 Register dividend = ToRegister(instr->dividend()); | 1335 Register dividend = ToRegister(instr->dividend()); |
| 1336 int32_t divisor = instr->divisor(); | 1336 int32_t divisor = instr->divisor(); |
| 1337 DCHECK(dividend.is(ToRegister(instr->result()))); | 1337 DCHECK(dividend.is(ToRegister(instr->result()))); |
| 1338 | 1338 |
| 1339 // If the divisor is positive, things are easy: There can be no deopts and we | 1339 // If the divisor is positive, things are easy: There can be no deopts and we |
| 1340 // can simply do an arithmetic right shift. | 1340 // can simply do an arithmetic right shift. |
| 1341 if (divisor == 1) return; | 1341 if (divisor == 1) return; |
| 1342 int32_t shift = WhichPowerOf2Abs(divisor); | 1342 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1343 if (divisor > 1) { | 1343 if (divisor > 1) { |
| 1344 __ sar(dividend, shift); | 1344 __ sar(dividend, shift); |
| 1345 return; | 1345 return; |
| 1346 } | 1346 } |
| 1347 | 1347 |
| 1348 // If the divisor is negative, we have to negate and handle edge cases. | 1348 // If the divisor is negative, we have to negate and handle edge cases. |
| 1349 __ neg(dividend); | 1349 __ neg(dividend); |
| 1350 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1350 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1351 DeoptimizeIf(zero, instr, "minus zero"); | 1351 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1352 } | 1352 } |
| 1353 | 1353 |
| 1354 // Dividing by -1 is basically negation, unless we overflow. | 1354 // Dividing by -1 is basically negation, unless we overflow. |
| 1355 if (divisor == -1) { | 1355 if (divisor == -1) { |
| 1356 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1356 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1357 DeoptimizeIf(overflow, instr, "overflow"); | 1357 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1358 } | 1358 } |
| 1359 return; | 1359 return; |
| 1360 } | 1360 } |
| 1361 | 1361 |
| 1362 // If the negation could not overflow, simply shifting is OK. | 1362 // If the negation could not overflow, simply shifting is OK. |
| 1363 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1363 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1364 __ sar(dividend, shift); | 1364 __ sar(dividend, shift); |
| 1365 return; | 1365 return; |
| 1366 } | 1366 } |
| 1367 | 1367 |
| 1368 Label not_kmin_int, done; | 1368 Label not_kmin_int, done; |
| 1369 __ j(no_overflow, &not_kmin_int, Label::kNear); | 1369 __ j(no_overflow, &not_kmin_int, Label::kNear); |
| 1370 __ mov(dividend, Immediate(kMinInt / divisor)); | 1370 __ mov(dividend, Immediate(kMinInt / divisor)); |
| 1371 __ jmp(&done, Label::kNear); | 1371 __ jmp(&done, Label::kNear); |
| 1372 __ bind(&not_kmin_int); | 1372 __ bind(&not_kmin_int); |
| 1373 __ sar(dividend, shift); | 1373 __ sar(dividend, shift); |
| 1374 __ bind(&done); | 1374 __ bind(&done); |
| 1375 } | 1375 } |
| 1376 | 1376 |
| 1377 | 1377 |
| 1378 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1378 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1379 Register dividend = ToRegister(instr->dividend()); | 1379 Register dividend = ToRegister(instr->dividend()); |
| 1380 int32_t divisor = instr->divisor(); | 1380 int32_t divisor = instr->divisor(); |
| 1381 DCHECK(ToRegister(instr->result()).is(edx)); | 1381 DCHECK(ToRegister(instr->result()).is(edx)); |
| 1382 | 1382 |
| 1383 if (divisor == 0) { | 1383 if (divisor == 0) { |
| 1384 DeoptimizeIf(no_condition, instr, "division by zero"); | 1384 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); |
| 1385 return; | 1385 return; |
| 1386 } | 1386 } |
| 1387 | 1387 |
| 1388 // Check for (0 / -x) that will produce negative zero. | 1388 // Check for (0 / -x) that will produce negative zero. |
| 1389 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1389 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1390 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1390 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1391 __ test(dividend, dividend); | 1391 __ test(dividend, dividend); |
| 1392 DeoptimizeIf(zero, instr, "minus zero"); | 1392 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1393 } | 1393 } |
| 1394 | 1394 |
| 1395 // Easy case: We need no dynamic check for the dividend and the flooring | 1395 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1396 // division is the same as the truncating division. | 1396 // division is the same as the truncating division. |
| 1397 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1397 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1398 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1398 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1399 __ TruncatingDiv(dividend, Abs(divisor)); | 1399 __ TruncatingDiv(dividend, Abs(divisor)); |
| 1400 if (divisor < 0) __ neg(edx); | 1400 if (divisor < 0) __ neg(edx); |
| 1401 return; | 1401 return; |
| 1402 } | 1402 } |
| (...skipping 26 matching lines...) | |
| 1429 Register result = ToRegister(instr->result()); | 1429 Register result = ToRegister(instr->result()); |
| 1430 DCHECK(dividend.is(eax)); | 1430 DCHECK(dividend.is(eax)); |
| 1431 DCHECK(remainder.is(edx)); | 1431 DCHECK(remainder.is(edx)); |
| 1432 DCHECK(result.is(eax)); | 1432 DCHECK(result.is(eax)); |
| 1433 DCHECK(!divisor.is(eax)); | 1433 DCHECK(!divisor.is(eax)); |
| 1434 DCHECK(!divisor.is(edx)); | 1434 DCHECK(!divisor.is(edx)); |
| 1435 | 1435 |
| 1436 // Check for x / 0. | 1436 // Check for x / 0. |
| 1437 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1437 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1438 __ test(divisor, divisor); | 1438 __ test(divisor, divisor); |
| 1439 DeoptimizeIf(zero, instr, "division by zero"); | 1439 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero); |
| 1440 } | 1440 } |
| 1441 | 1441 |
| 1442 // Check for (0 / -x) that will produce negative zero. | 1442 // Check for (0 / -x) that will produce negative zero. |
| 1443 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1443 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1444 Label dividend_not_zero; | 1444 Label dividend_not_zero; |
| 1445 __ test(dividend, dividend); | 1445 __ test(dividend, dividend); |
| 1446 __ j(not_zero, &dividend_not_zero, Label::kNear); | 1446 __ j(not_zero, &dividend_not_zero, Label::kNear); |
| 1447 __ test(divisor, divisor); | 1447 __ test(divisor, divisor); |
| 1448 DeoptimizeIf(sign, instr, "minus zero"); | 1448 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
| 1449 __ bind(&dividend_not_zero); | 1449 __ bind(&dividend_not_zero); |
| 1450 } | 1450 } |
| 1451 | 1451 |
| 1452 // Check for (kMinInt / -1). | 1452 // Check for (kMinInt / -1). |
| 1453 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1453 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
| 1454 Label dividend_not_min_int; | 1454 Label dividend_not_min_int; |
| 1455 __ cmp(dividend, kMinInt); | 1455 __ cmp(dividend, kMinInt); |
| 1456 __ j(not_zero, &dividend_not_min_int, Label::kNear); | 1456 __ j(not_zero, &dividend_not_min_int, Label::kNear); |
| 1457 __ cmp(divisor, -1); | 1457 __ cmp(divisor, -1); |
| 1458 DeoptimizeIf(zero, instr, "overflow"); | 1458 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow); |
| 1459 __ bind(&dividend_not_min_int); | 1459 __ bind(&dividend_not_min_int); |
| 1460 } | 1460 } |
| 1461 | 1461 |
| 1462 // Sign extend to edx (= remainder). | 1462 // Sign extend to edx (= remainder). |
| 1463 __ cdq(); | 1463 __ cdq(); |
| 1464 __ idiv(divisor); | 1464 __ idiv(divisor); |
| 1465 | 1465 |
| 1466 Label done; | 1466 Label done; |
| 1467 __ test(remainder, remainder); | 1467 __ test(remainder, remainder); |
| 1468 __ j(zero, &done, Label::kNear); | 1468 __ j(zero, &done, Label::kNear); |
| (...skipping 57 matching lines...) | |
| 1526 __ imul(left, left, constant); | 1526 __ imul(left, left, constant); |
| 1527 } | 1527 } |
| 1528 } else { | 1528 } else { |
| 1529 if (instr->hydrogen()->representation().IsSmi()) { | 1529 if (instr->hydrogen()->representation().IsSmi()) { |
| 1530 __ SmiUntag(left); | 1530 __ SmiUntag(left); |
| 1531 } | 1531 } |
| 1532 __ imul(left, ToOperand(right)); | 1532 __ imul(left, ToOperand(right)); |
| 1533 } | 1533 } |
| 1534 | 1534 |
| 1535 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1535 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 1536 DeoptimizeIf(overflow, instr, "overflow"); | 1536 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1537 } | 1537 } |
| 1538 | 1538 |
| 1539 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1539 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1540 // Bail out if the result is supposed to be negative zero. | 1540 // Bail out if the result is supposed to be negative zero. |
| 1541 Label done; | 1541 Label done; |
| 1542 __ test(left, Operand(left)); | 1542 __ test(left, Operand(left)); |
| 1543 __ j(not_zero, &done, Label::kNear); | 1543 __ j(not_zero, &done, Label::kNear); |
| 1544 if (right->IsConstantOperand()) { | 1544 if (right->IsConstantOperand()) { |
| 1545 if (ToInteger32(LConstantOperand::cast(right)) < 0) { | 1545 if (ToInteger32(LConstantOperand::cast(right)) < 0) { |
| 1546 DeoptimizeIf(no_condition, instr, "minus zero"); | 1546 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); |
| 1547 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { | 1547 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { |
| 1548 __ cmp(ToRegister(instr->temp()), Immediate(0)); | 1548 __ cmp(ToRegister(instr->temp()), Immediate(0)); |
| 1549 DeoptimizeIf(less, instr, "minus zero"); | 1549 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero); |
| 1550 } | 1550 } |
| 1551 } else { | 1551 } else { |
| 1552 // Test the non-zero operand for negative sign. | 1552 // Test the non-zero operand for negative sign. |
| 1553 __ or_(ToRegister(instr->temp()), ToOperand(right)); | 1553 __ or_(ToRegister(instr->temp()), ToOperand(right)); |
| 1554 DeoptimizeIf(sign, instr, "minus zero"); | 1554 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
| 1555 } | 1555 } |
| 1556 __ bind(&done); | 1556 __ bind(&done); |
| 1557 } | 1557 } |
| 1558 } | 1558 } |
| 1559 | 1559 |
| 1560 | 1560 |
| 1561 void LCodeGen::DoBitI(LBitI* instr) { | 1561 void LCodeGen::DoBitI(LBitI* instr) { |
| 1562 LOperand* left = instr->left(); | 1562 LOperand* left = instr->left(); |
| 1563 LOperand* right = instr->right(); | 1563 LOperand* right = instr->right(); |
| 1564 DCHECK(left->Equals(instr->result())); | 1564 DCHECK(left->Equals(instr->result())); |
| (...skipping 52 matching lines...) | |
| 1617 case Token::ROR: | 1617 case Token::ROR: |
| 1618 __ ror_cl(ToRegister(left)); | 1618 __ ror_cl(ToRegister(left)); |
| 1619 break; | 1619 break; |
| 1620 case Token::SAR: | 1620 case Token::SAR: |
| 1621 __ sar_cl(ToRegister(left)); | 1621 __ sar_cl(ToRegister(left)); |
| 1622 break; | 1622 break; |
| 1623 case Token::SHR: | 1623 case Token::SHR: |
| 1624 __ shr_cl(ToRegister(left)); | 1624 __ shr_cl(ToRegister(left)); |
| 1625 if (instr->can_deopt()) { | 1625 if (instr->can_deopt()) { |
| 1626 __ test(ToRegister(left), ToRegister(left)); | 1626 __ test(ToRegister(left), ToRegister(left)); |
| 1627 DeoptimizeIf(sign, instr, "negative value"); | 1627 DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue); |
| 1628 } | 1628 } |
| 1629 break; | 1629 break; |
| 1630 case Token::SHL: | 1630 case Token::SHL: |
| 1631 __ shl_cl(ToRegister(left)); | 1631 __ shl_cl(ToRegister(left)); |
| 1632 break; | 1632 break; |
| 1633 default: | 1633 default: |
| 1634 UNREACHABLE(); | 1634 UNREACHABLE(); |
| 1635 break; | 1635 break; |
| 1636 } | 1636 } |
| 1637 } else { | 1637 } else { |
| 1638 int value = ToInteger32(LConstantOperand::cast(right)); | 1638 int value = ToInteger32(LConstantOperand::cast(right)); |
| 1639 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); | 1639 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); |
| 1640 switch (instr->op()) { | 1640 switch (instr->op()) { |
| 1641 case Token::ROR: | 1641 case Token::ROR: |
| 1642 if (shift_count == 0 && instr->can_deopt()) { | 1642 if (shift_count == 0 && instr->can_deopt()) { |
| 1643 __ test(ToRegister(left), ToRegister(left)); | 1643 __ test(ToRegister(left), ToRegister(left)); |
| 1644 DeoptimizeIf(sign, instr, "negative value"); | 1644 DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue); |
| 1645 } else { | 1645 } else { |
| 1646 __ ror(ToRegister(left), shift_count); | 1646 __ ror(ToRegister(left), shift_count); |
| 1647 } | 1647 } |
| 1648 break; | 1648 break; |
| 1649 case Token::SAR: | 1649 case Token::SAR: |
| 1650 if (shift_count != 0) { | 1650 if (shift_count != 0) { |
| 1651 __ sar(ToRegister(left), shift_count); | 1651 __ sar(ToRegister(left), shift_count); |
| 1652 } | 1652 } |
| 1653 break; | 1653 break; |
| 1654 case Token::SHR: | 1654 case Token::SHR: |
| 1655 if (shift_count != 0) { | 1655 if (shift_count != 0) { |
| 1656 __ shr(ToRegister(left), shift_count); | 1656 __ shr(ToRegister(left), shift_count); |
| 1657 } else if (instr->can_deopt()) { | 1657 } else if (instr->can_deopt()) { |
| 1658 __ test(ToRegister(left), ToRegister(left)); | 1658 __ test(ToRegister(left), ToRegister(left)); |
| 1659 DeoptimizeIf(sign, instr, "negative value"); | 1659 DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue); |
| 1660 } | 1660 } |
| 1661 break; | 1661 break; |
| 1662 case Token::SHL: | 1662 case Token::SHL: |
| 1663 if (shift_count != 0) { | 1663 if (shift_count != 0) { |
| 1664 if (instr->hydrogen_value()->representation().IsSmi() && | 1664 if (instr->hydrogen_value()->representation().IsSmi() && |
| 1665 instr->can_deopt()) { | 1665 instr->can_deopt()) { |
| 1666 if (shift_count != 1) { | 1666 if (shift_count != 1) { |
| 1667 __ shl(ToRegister(left), shift_count - 1); | 1667 __ shl(ToRegister(left), shift_count - 1); |
| 1668 } | 1668 } |
| 1669 __ SmiTag(ToRegister(left)); | 1669 __ SmiTag(ToRegister(left)); |
| 1670 DeoptimizeIf(overflow, instr, "overflow"); | 1670 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1671 } else { | 1671 } else { |
| 1672 __ shl(ToRegister(left), shift_count); | 1672 __ shl(ToRegister(left), shift_count); |
| 1673 } | 1673 } |
| 1674 } | 1674 } |
| 1675 break; | 1675 break; |
| 1676 default: | 1676 default: |
| 1677 UNREACHABLE(); | 1677 UNREACHABLE(); |
| 1678 break; | 1678 break; |
| 1679 } | 1679 } |
| 1680 } | 1680 } |
| 1681 } | 1681 } |
| 1682 | 1682 |
| 1683 | 1683 |
| 1684 void LCodeGen::DoSubI(LSubI* instr) { | 1684 void LCodeGen::DoSubI(LSubI* instr) { |
| 1685 LOperand* left = instr->left(); | 1685 LOperand* left = instr->left(); |
| 1686 LOperand* right = instr->right(); | 1686 LOperand* right = instr->right(); |
| 1687 DCHECK(left->Equals(instr->result())); | 1687 DCHECK(left->Equals(instr->result())); |
| 1688 | 1688 |
| 1689 if (right->IsConstantOperand()) { | 1689 if (right->IsConstantOperand()) { |
| 1690 __ sub(ToOperand(left), | 1690 __ sub(ToOperand(left), |
| 1691 ToImmediate(right, instr->hydrogen()->representation())); | 1691 ToImmediate(right, instr->hydrogen()->representation())); |
| 1692 } else { | 1692 } else { |
| 1693 __ sub(ToRegister(left), ToOperand(right)); | 1693 __ sub(ToRegister(left), ToOperand(right)); |
| 1694 } | 1694 } |
| 1695 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1695 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 1696 DeoptimizeIf(overflow, instr, "overflow"); | 1696 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1697 } | 1697 } |
| 1698 } | 1698 } |
| 1699 | 1699 |
| 1700 | 1700 |
| 1701 void LCodeGen::DoConstantI(LConstantI* instr) { | 1701 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 1702 __ Move(ToRegister(instr->result()), Immediate(instr->value())); | 1702 __ Move(ToRegister(instr->result()), Immediate(instr->value())); |
| 1703 } | 1703 } |
| 1704 | 1704 |
| 1705 | 1705 |
| 1706 void LCodeGen::DoConstantS(LConstantS* instr) { | 1706 void LCodeGen::DoConstantS(LConstantS* instr) { |
| (...skipping 62 matching lines...) | |
| 1769 void LCodeGen::DoDateField(LDateField* instr) { | 1769 void LCodeGen::DoDateField(LDateField* instr) { |
| 1770 Register object = ToRegister(instr->date()); | 1770 Register object = ToRegister(instr->date()); |
| 1771 Register result = ToRegister(instr->result()); | 1771 Register result = ToRegister(instr->result()); |
| 1772 Register scratch = ToRegister(instr->temp()); | 1772 Register scratch = ToRegister(instr->temp()); |
| 1773 Smi* index = instr->index(); | 1773 Smi* index = instr->index(); |
| 1774 Label runtime, done; | 1774 Label runtime, done; |
| 1775 DCHECK(object.is(result)); | 1775 DCHECK(object.is(result)); |
| 1776 DCHECK(object.is(eax)); | 1776 DCHECK(object.is(eax)); |
| 1777 | 1777 |
| 1778 __ test(object, Immediate(kSmiTagMask)); | 1778 __ test(object, Immediate(kSmiTagMask)); |
| 1779 DeoptimizeIf(zero, instr, "Smi"); | 1779 DeoptimizeIf(zero, instr, Deoptimizer::kSmi); |
| 1780 __ CmpObjectType(object, JS_DATE_TYPE, scratch); | 1780 __ CmpObjectType(object, JS_DATE_TYPE, scratch); |
| 1781 DeoptimizeIf(not_equal, instr, "not a date object"); | 1781 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject); |
| 1782 | 1782 |
| 1783 if (index->value() == 0) { | 1783 if (index->value() == 0) { |
| 1784 __ mov(result, FieldOperand(object, JSDate::kValueOffset)); | 1784 __ mov(result, FieldOperand(object, JSDate::kValueOffset)); |
| 1785 } else { | 1785 } else { |
| 1786 if (index->value() < JSDate::kFirstUncachedField) { | 1786 if (index->value() < JSDate::kFirstUncachedField) { |
| 1787 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1787 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| 1788 __ mov(scratch, Operand::StaticVariable(stamp)); | 1788 __ mov(scratch, Operand::StaticVariable(stamp)); |
| 1789 __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset)); | 1789 __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset)); |
| 1790 __ j(not_equal, &runtime, Label::kNear); | 1790 __ j(not_equal, &runtime, Label::kNear); |
| 1791 __ mov(result, FieldOperand(object, JSDate::kValueOffset + | 1791 __ mov(result, FieldOperand(object, JSDate::kValueOffset + |
| (...skipping 109 matching lines...) | |
| 1901 __ lea(ToRegister(instr->result()), address); | 1901 __ lea(ToRegister(instr->result()), address); |
| 1902 } | 1902 } |
| 1903 } else { | 1903 } else { |
| 1904 if (right->IsConstantOperand()) { | 1904 if (right->IsConstantOperand()) { |
| 1905 __ add(ToOperand(left), | 1905 __ add(ToOperand(left), |
| 1906 ToImmediate(right, instr->hydrogen()->representation())); | 1906 ToImmediate(right, instr->hydrogen()->representation())); |
| 1907 } else { | 1907 } else { |
| 1908 __ add(ToRegister(left), ToOperand(right)); | 1908 __ add(ToRegister(left), ToOperand(right)); |
| 1909 } | 1909 } |
| 1910 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1910 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 1911 DeoptimizeIf(overflow, instr, "overflow"); | 1911 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1912 } | 1912 } |
| 1913 } | 1913 } |
| 1914 } | 1914 } |
| 1915 | 1915 |
| 1916 | 1916 |
| 1917 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1917 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 1918 LOperand* left = instr->left(); | 1918 LOperand* left = instr->left(); |
| 1919 LOperand* right = instr->right(); | 1919 LOperand* right = instr->right(); |
| 1920 DCHECK(left->Equals(instr->result())); | 1920 DCHECK(left->Equals(instr->result())); |
| 1921 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1921 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| (...skipping 227 matching lines...) | |
| 2149 } | 2149 } |
| 2150 | 2150 |
| 2151 if (expected.Contains(ToBooleanStub::SMI)) { | 2151 if (expected.Contains(ToBooleanStub::SMI)) { |
| 2152 // Smis: 0 -> false, all other -> true. | 2152 // Smis: 0 -> false, all other -> true. |
| 2153 __ test(reg, Operand(reg)); | 2153 __ test(reg, Operand(reg)); |
| 2154 __ j(equal, instr->FalseLabel(chunk_)); | 2154 __ j(equal, instr->FalseLabel(chunk_)); |
| 2155 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2155 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2156 } else if (expected.NeedsMap()) { | 2156 } else if (expected.NeedsMap()) { |
| 2157 // If we need a map later and have a Smi -> deopt. | 2157 // If we need a map later and have a Smi -> deopt. |
| 2158 __ test(reg, Immediate(kSmiTagMask)); | 2158 __ test(reg, Immediate(kSmiTagMask)); |
| 2159 DeoptimizeIf(zero, instr, "Smi"); | 2159 DeoptimizeIf(zero, instr, Deoptimizer::kSmi); |
| 2160 } | 2160 } |
| 2161 | 2161 |
| 2162 Register map = no_reg; // Keep the compiler happy. | 2162 Register map = no_reg; // Keep the compiler happy. |
| 2163 if (expected.NeedsMap()) { | 2163 if (expected.NeedsMap()) { |
| 2164 map = ToRegister(instr->temp()); | 2164 map = ToRegister(instr->temp()); |
| 2165 DCHECK(!map.is(reg)); | 2165 DCHECK(!map.is(reg)); |
| 2166 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset)); | 2166 __ mov(map, FieldOperand(reg, HeapObject::kMapOffset)); |
| 2167 | 2167 |
| 2168 if (expected.CanBeUndetectable()) { | 2168 if (expected.CanBeUndetectable()) { |
| 2169 // Undetectable -> false. | 2169 // Undetectable -> false. |
| (...skipping 36 matching lines...) | |
| 2206 __ xorps(xmm_scratch, xmm_scratch); | 2206 __ xorps(xmm_scratch, xmm_scratch); |
| 2207 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | 2207 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); |
| 2208 __ j(zero, instr->FalseLabel(chunk_)); | 2208 __ j(zero, instr->FalseLabel(chunk_)); |
| 2209 __ jmp(instr->TrueLabel(chunk_)); | 2209 __ jmp(instr->TrueLabel(chunk_)); |
| 2210 __ bind(&not_heap_number); | 2210 __ bind(&not_heap_number); |
| 2211 } | 2211 } |
| 2212 | 2212 |
| 2213 if (!expected.IsGeneric()) { | 2213 if (!expected.IsGeneric()) { |
| 2214 // We've seen something for the first time -> deopt. | 2214 // We've seen something for the first time -> deopt. |
| 2215 // This can only happen if we are not generic already. | 2215 // This can only happen if we are not generic already. |
| 2216 DeoptimizeIf(no_condition, instr, "unexpected object"); | 2216 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject); |
| 2217 } | 2217 } |
| 2218 } | 2218 } |
| 2219 } | 2219 } |
| 2220 } | 2220 } |
| 2221 | 2221 |
| 2222 | 2222 |
| 2223 void LCodeGen::EmitGoto(int block) { | 2223 void LCodeGen::EmitGoto(int block) { |
| 2224 if (!IsNextEmittedBlock(block)) { | 2224 if (!IsNextEmittedBlock(block)) { |
| 2225 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2225 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| 2226 } | 2226 } |
| (...skipping 611 matching lines...) | |
| 2838 info()->AddNoFrameRange(no_frame_start, masm_->pc_offset()); | 2838 info()->AddNoFrameRange(no_frame_start, masm_->pc_offset()); |
| 2839 } | 2839 } |
| 2840 } | 2840 } |
| 2841 | 2841 |
| 2842 | 2842 |
| 2843 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2843 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 2844 Register result = ToRegister(instr->result()); | 2844 Register result = ToRegister(instr->result()); |
| 2845 __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle())); | 2845 __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle())); |
| 2846 if (instr->hydrogen()->RequiresHoleCheck()) { | 2846 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2847 __ cmp(result, factory()->the_hole_value()); | 2847 __ cmp(result, factory()->the_hole_value()); |
| 2848 DeoptimizeIf(equal, instr, "hole"); | 2848 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 2849 } | 2849 } |
| 2850 } | 2850 } |
| 2851 | 2851 |
| 2852 | 2852 |
| 2853 template <class T> | 2853 template <class T> |
| 2854 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 2854 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
| 2855 DCHECK(FLAG_vector_ics); | 2855 DCHECK(FLAG_vector_ics); |
| 2856 Register vector_register = ToRegister(instr->temp_vector()); | 2856 Register vector_register = ToRegister(instr->temp_vector()); |
| 2857 Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 2857 Register slot_register = VectorLoadICDescriptor::SlotRegister(); |
| 2858 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 2858 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); |
| (...skipping 28 matching lines...) | |
| 2887 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { | 2887 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { |
| 2888 Register value = ToRegister(instr->value()); | 2888 Register value = ToRegister(instr->value()); |
| 2889 Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle(); | 2889 Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle(); |
| 2890 | 2890 |
| 2891 // If the cell we are storing to contains the hole it could have | 2891 // If the cell we are storing to contains the hole it could have |
| 2892 // been deleted from the property dictionary. In that case, we need | 2892 // been deleted from the property dictionary. In that case, we need |
| 2893 // to update the property details in the property dictionary to mark | 2893 // to update the property details in the property dictionary to mark |
| 2894 // it as no longer deleted. We deoptimize in that case. | 2894 // it as no longer deleted. We deoptimize in that case. |
| 2895 if (instr->hydrogen()->RequiresHoleCheck()) { | 2895 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2896 __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value()); | 2896 __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value()); |
| 2897 DeoptimizeIf(equal, instr, "hole"); | 2897 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 2898 } | 2898 } |
| 2899 | 2899 |
| 2900 // Store the value. | 2900 // Store the value. |
| 2901 __ mov(Operand::ForCell(cell_handle), value); | 2901 __ mov(Operand::ForCell(cell_handle), value); |
| 2902 // Cells are always rescanned, so no write barrier here. | 2902 // Cells are always rescanned, so no write barrier here. |
| 2903 } | 2903 } |
| 2904 | 2904 |
| 2905 | 2905 |
| 2906 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 2906 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2907 Register context = ToRegister(instr->context()); | 2907 Register context = ToRegister(instr->context()); |
| 2908 Register result = ToRegister(instr->result()); | 2908 Register result = ToRegister(instr->result()); |
| 2909 __ mov(result, ContextOperand(context, instr->slot_index())); | 2909 __ mov(result, ContextOperand(context, instr->slot_index())); |
| 2910 | 2910 |
| 2911 if (instr->hydrogen()->RequiresHoleCheck()) { | 2911 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2912 __ cmp(result, factory()->the_hole_value()); | 2912 __ cmp(result, factory()->the_hole_value()); |
| 2913 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2913 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2914 DeoptimizeIf(equal, instr, "hole"); | 2914 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 2915 } else { | 2915 } else { |
| 2916 Label is_not_hole; | 2916 Label is_not_hole; |
| 2917 __ j(not_equal, &is_not_hole, Label::kNear); | 2917 __ j(not_equal, &is_not_hole, Label::kNear); |
| 2918 __ mov(result, factory()->undefined_value()); | 2918 __ mov(result, factory()->undefined_value()); |
| 2919 __ bind(&is_not_hole); | 2919 __ bind(&is_not_hole); |
| 2920 } | 2920 } |
| 2921 } | 2921 } |
| 2922 } | 2922 } |
| 2923 | 2923 |
| 2924 | 2924 |
| 2925 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 2925 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 2926 Register context = ToRegister(instr->context()); | 2926 Register context = ToRegister(instr->context()); |
| 2927 Register value = ToRegister(instr->value()); | 2927 Register value = ToRegister(instr->value()); |
| 2928 | 2928 |
| 2929 Label skip_assignment; | 2929 Label skip_assignment; |
| 2930 | 2930 |
| 2931 Operand target = ContextOperand(context, instr->slot_index()); | 2931 Operand target = ContextOperand(context, instr->slot_index()); |
| 2932 if (instr->hydrogen()->RequiresHoleCheck()) { | 2932 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2933 __ cmp(target, factory()->the_hole_value()); | 2933 __ cmp(target, factory()->the_hole_value()); |
| 2934 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2934 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2935 DeoptimizeIf(equal, instr, "hole"); | 2935 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 2936 } else { | 2936 } else { |
| 2937 __ j(not_equal, &skip_assignment, Label::kNear); | 2937 __ j(not_equal, &skip_assignment, Label::kNear); |
| 2938 } | 2938 } |
| 2939 } | 2939 } |
| 2940 | 2940 |
| 2941 __ mov(target, value); | 2941 __ mov(target, value); |
| 2942 if (instr->hydrogen()->NeedsWriteBarrier()) { | 2942 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 2943 SmiCheck check_needed = | 2943 SmiCheck check_needed = |
| 2944 instr->hydrogen()->value()->type().IsHeapObject() | 2944 instr->hydrogen()->value()->type().IsHeapObject() |
| 2945 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 2945 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 79 matching lines...) | |
| 3025 Register function = ToRegister(instr->function()); | 3025 Register function = ToRegister(instr->function()); |
| 3026 Register temp = ToRegister(instr->temp()); | 3026 Register temp = ToRegister(instr->temp()); |
| 3027 Register result = ToRegister(instr->result()); | 3027 Register result = ToRegister(instr->result()); |
| 3028 | 3028 |
| 3029 // Get the prototype or initial map from the function. | 3029 // Get the prototype or initial map from the function. |
| 3030 __ mov(result, | 3030 __ mov(result, |
| 3031 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3031 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 3032 | 3032 |
| 3033 // Check that the function has a prototype or an initial map. | 3033 // Check that the function has a prototype or an initial map. |
| 3034 __ cmp(Operand(result), Immediate(factory()->the_hole_value())); | 3034 __ cmp(Operand(result), Immediate(factory()->the_hole_value())); |
| 3035 DeoptimizeIf(equal, instr, "hole"); | 3035 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 3036 | 3036 |
| 3037 // If the function does not have an initial map, we're done. | 3037 // If the function does not have an initial map, we're done. |
| 3038 Label done; | 3038 Label done; |
| 3039 __ CmpObjectType(result, MAP_TYPE, temp); | 3039 __ CmpObjectType(result, MAP_TYPE, temp); |
| 3040 __ j(not_equal, &done, Label::kNear); | 3040 __ j(not_equal, &done, Label::kNear); |
| 3041 | 3041 |
| 3042 // Get the prototype from the initial map. | 3042 // Get the prototype from the initial map. |
| 3043 __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); | 3043 __ mov(result, FieldOperand(result, Map::kPrototypeOffset)); |
| 3044 | 3044 |
| 3045 // All done. | 3045 // All done. |
| (...skipping 72 matching lines...) | |
| 3118 break; | 3118 break; |
| 3119 case EXTERNAL_INT32_ELEMENTS: | 3119 case EXTERNAL_INT32_ELEMENTS: |
| 3120 case INT32_ELEMENTS: | 3120 case INT32_ELEMENTS: |
| 3121 __ mov(result, operand); | 3121 __ mov(result, operand); |
| 3122 break; | 3122 break; |
| 3123 case EXTERNAL_UINT32_ELEMENTS: | 3123 case EXTERNAL_UINT32_ELEMENTS: |
| 3124 case UINT32_ELEMENTS: | 3124 case UINT32_ELEMENTS: |
| 3125 __ mov(result, operand); | 3125 __ mov(result, operand); |
| 3126 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3126 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 3127 __ test(result, Operand(result)); | 3127 __ test(result, Operand(result)); |
| 3128 DeoptimizeIf(negative, instr, "negative value"); | 3128 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue); |
| 3129 } | 3129 } |
| 3130 break; | 3130 break; |
| 3131 case EXTERNAL_FLOAT32_ELEMENTS: | 3131 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3132 case EXTERNAL_FLOAT64_ELEMENTS: | 3132 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3133 case FLOAT32_ELEMENTS: | 3133 case FLOAT32_ELEMENTS: |
| 3134 case FLOAT64_ELEMENTS: | 3134 case FLOAT64_ELEMENTS: |
| 3135 case FAST_SMI_ELEMENTS: | 3135 case FAST_SMI_ELEMENTS: |
| 3136 case FAST_ELEMENTS: | 3136 case FAST_ELEMENTS: |
| 3137 case FAST_DOUBLE_ELEMENTS: | 3137 case FAST_DOUBLE_ELEMENTS: |
| 3138 case FAST_HOLEY_SMI_ELEMENTS: | 3138 case FAST_HOLEY_SMI_ELEMENTS: |
| 3139 case FAST_HOLEY_ELEMENTS: | 3139 case FAST_HOLEY_ELEMENTS: |
| 3140 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3140 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3141 case DICTIONARY_ELEMENTS: | 3141 case DICTIONARY_ELEMENTS: |
| 3142 case SLOPPY_ARGUMENTS_ELEMENTS: | 3142 case SLOPPY_ARGUMENTS_ELEMENTS: |
| 3143 UNREACHABLE(); | 3143 UNREACHABLE(); |
| 3144 break; | 3144 break; |
| 3145 } | 3145 } |
| 3146 } | 3146 } |
| 3147 } | 3147 } |
| 3148 | 3148 |
| 3149 | 3149 |
| 3150 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { | 3150 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { |
| 3151 if (instr->hydrogen()->RequiresHoleCheck()) { | 3151 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3152 Operand hole_check_operand = BuildFastArrayOperand( | 3152 Operand hole_check_operand = BuildFastArrayOperand( |
| 3153 instr->elements(), instr->key(), | 3153 instr->elements(), instr->key(), |
| 3154 instr->hydrogen()->key()->representation(), | 3154 instr->hydrogen()->key()->representation(), |
| 3155 FAST_DOUBLE_ELEMENTS, | 3155 FAST_DOUBLE_ELEMENTS, |
| 3156 instr->base_offset() + sizeof(kHoleNanLower32)); | 3156 instr->base_offset() + sizeof(kHoleNanLower32)); |
| 3157 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); | 3157 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); |
| 3158 DeoptimizeIf(equal, instr, "hole"); | 3158 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 3159 } | 3159 } |
| 3160 | 3160 |
| 3161 Operand double_load_operand = BuildFastArrayOperand( | 3161 Operand double_load_operand = BuildFastArrayOperand( |
| 3162 instr->elements(), | 3162 instr->elements(), |
| 3163 instr->key(), | 3163 instr->key(), |
| 3164 instr->hydrogen()->key()->representation(), | 3164 instr->hydrogen()->key()->representation(), |
| 3165 FAST_DOUBLE_ELEMENTS, | 3165 FAST_DOUBLE_ELEMENTS, |
| 3166 instr->base_offset()); | 3166 instr->base_offset()); |
| 3167 XMMRegister result = ToDoubleRegister(instr->result()); | 3167 XMMRegister result = ToDoubleRegister(instr->result()); |
| 3168 __ movsd(result, double_load_operand); | 3168 __ movsd(result, double_load_operand); |
| 3169 } | 3169 } |
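A note on the hole check above: holey double arrays encode the hole as a dedicated NaN bit pattern, so comparing only the upper 32 bits of the slot (the base offset is bumped past the lower word) against kHoleNanUpper32 is enough. A rough model, assuming plain C++ and treating the constant as supplied from elsewhere in the tree (the helper name is hypothetical):

    #include <cstdint>
    #include <cstring>

    // upper_hole_word stands in for V8's kHoleNanUpper32, defined elsewhere.
    bool IsTheHoleDouble(double element, uint32_t upper_hole_word) {
      uint64_t bits;
      std::memcpy(&bits, &element, sizeof(bits));
      // Comparing just the upper 32 bits mirrors the generated cmp.
      return static_cast<uint32_t>(bits >> 32) == upper_hole_word;
    }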
| 3170 | 3170 |
| 3171 | 3171 |
| 3172 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3172 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 3173 Register result = ToRegister(instr->result()); | 3173 Register result = ToRegister(instr->result()); |
| 3174 | 3174 |
| 3175 // Load the result. | 3175 // Load the result. |
| 3176 __ mov(result, | 3176 __ mov(result, |
| 3177 BuildFastArrayOperand(instr->elements(), instr->key(), | 3177 BuildFastArrayOperand(instr->elements(), instr->key(), |
| 3178 instr->hydrogen()->key()->representation(), | 3178 instr->hydrogen()->key()->representation(), |
| 3179 FAST_ELEMENTS, instr->base_offset())); | 3179 FAST_ELEMENTS, instr->base_offset())); |
| 3180 | 3180 |
| 3181 // Check for the hole value. | 3181 // Check for the hole value. |
| 3182 if (instr->hydrogen()->RequiresHoleCheck()) { | 3182 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3183 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3183 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 3184 __ test(result, Immediate(kSmiTagMask)); | 3184 __ test(result, Immediate(kSmiTagMask)); |
| 3185 DeoptimizeIf(not_equal, instr, "not a Smi"); | 3185 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi); |
| 3186 } else { | 3186 } else { |
| 3187 __ cmp(result, factory()->the_hole_value()); | 3187 __ cmp(result, factory()->the_hole_value()); |
| 3188 DeoptimizeIf(equal, instr, "hole"); | 3188 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 3189 } | 3189 } |
| 3190 } | 3190 } |
| 3191 } | 3191 } |
| 3192 | 3192 |
| 3193 | 3193 |
| 3194 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3194 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 3195 if (instr->is_typed_elements()) { | 3195 if (instr->is_typed_elements()) { |
| 3196 DoLoadKeyedExternalArray(instr); | 3196 DoLoadKeyedExternalArray(instr); |
| 3197 } else if (instr->hydrogen()->representation().IsDouble()) { | 3197 } else if (instr->hydrogen()->representation().IsDouble()) { |
| 3198 DoLoadKeyedFixedDoubleArray(instr); | 3198 DoLoadKeyedFixedDoubleArray(instr); |
| (...skipping 126 matching lines...) |
| 3325 } | 3325 } |
| 3326 | 3326 |
| 3327 // Normal function. Replace undefined or null with global receiver. | 3327 // Normal function. Replace undefined or null with global receiver. |
| 3328 __ cmp(receiver, factory()->null_value()); | 3328 __ cmp(receiver, factory()->null_value()); |
| 3329 __ j(equal, &global_object, Label::kNear); | 3329 __ j(equal, &global_object, Label::kNear); |
| 3330 __ cmp(receiver, factory()->undefined_value()); | 3330 __ cmp(receiver, factory()->undefined_value()); |
| 3331 __ j(equal, &global_object, Label::kNear); | 3331 __ j(equal, &global_object, Label::kNear); |
| 3332 | 3332 |
| 3333 // The receiver should be a JS object. | 3333 // The receiver should be a JS object. |
| 3334 __ test(receiver, Immediate(kSmiTagMask)); | 3334 __ test(receiver, Immediate(kSmiTagMask)); |
| 3335 DeoptimizeIf(equal, instr, "Smi"); | 3335 DeoptimizeIf(equal, instr, Deoptimizer::kSmi); |
| 3336 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch); | 3336 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch); |
| 3337 DeoptimizeIf(below, instr, "not a JavaScript object"); | 3337 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject); |
| 3338 | 3338 |
| 3339 __ jmp(&receiver_ok, Label::kNear); | 3339 __ jmp(&receiver_ok, Label::kNear); |
| 3340 __ bind(&global_object); | 3340 __ bind(&global_object); |
| 3341 __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset)); | 3341 __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset)); |
| 3342 const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); | 3342 const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); |
| 3343 __ mov(receiver, Operand(receiver, global_offset)); | 3343 __ mov(receiver, Operand(receiver, global_offset)); |
| 3344 const int proxy_offset = GlobalObject::kGlobalProxyOffset; | 3344 const int proxy_offset = GlobalObject::kGlobalProxyOffset; |
| 3345 __ mov(receiver, FieldOperand(receiver, proxy_offset)); | 3345 __ mov(receiver, FieldOperand(receiver, proxy_offset)); |
| 3346 __ bind(&receiver_ok); | 3346 __ bind(&receiver_ok); |
| 3347 } | 3347 } |
| 3348 | 3348 |
| 3349 | 3349 |
| 3350 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { | 3350 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { |
| 3351 Register receiver = ToRegister(instr->receiver()); | 3351 Register receiver = ToRegister(instr->receiver()); |
| 3352 Register function = ToRegister(instr->function()); | 3352 Register function = ToRegister(instr->function()); |
| 3353 Register length = ToRegister(instr->length()); | 3353 Register length = ToRegister(instr->length()); |
| 3354 Register elements = ToRegister(instr->elements()); | 3354 Register elements = ToRegister(instr->elements()); |
| 3355 DCHECK(receiver.is(eax)); // Used for parameter count. | 3355 DCHECK(receiver.is(eax)); // Used for parameter count. |
| 3356 DCHECK(function.is(edi)); // Required by InvokeFunction. | 3356 DCHECK(function.is(edi)); // Required by InvokeFunction. |
| 3357 DCHECK(ToRegister(instr->result()).is(eax)); | 3357 DCHECK(ToRegister(instr->result()).is(eax)); |
| 3358 | 3358 |
| 3359 // Copy the arguments to this function possibly from the | 3359 // Copy the arguments to this function possibly from the |
| 3360 // adaptor frame below it. | 3360 // adaptor frame below it. |
| 3361 const uint32_t kArgumentsLimit = 1 * KB; | 3361 const uint32_t kArgumentsLimit = 1 * KB; |
| 3362 __ cmp(length, kArgumentsLimit); | 3362 __ cmp(length, kArgumentsLimit); |
| 3363 DeoptimizeIf(above, instr, "too many arguments"); | 3363 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments); |
| 3364 | 3364 |
| 3365 __ push(receiver); | 3365 __ push(receiver); |
| 3366 __ mov(receiver, length); | 3366 __ mov(receiver, length); |
| 3367 | 3367 |
| 3368 // Loop through the arguments pushing them onto the execution | 3368 // Loop through the arguments pushing them onto the execution |
| 3369 // stack. | 3369 // stack. |
| 3370 Label invoke, loop; | 3370 Label invoke, loop; |
| 3371 // length is a small non-negative integer, due to the test above. | 3371 // length is a small non-negative integer, due to the test above. |
| 3372 __ test(length, Operand(length)); | 3372 __ test(length, Operand(length)); |
| 3373 __ j(zero, &invoke, Label::kNear); | 3373 __ j(zero, &invoke, Label::kNear); |
| (...skipping 204 matching lines...) |
| 3578 } | 3578 } |
| 3579 | 3579 |
| 3580 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | 3580 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
| 3581 } | 3581 } |
| 3582 | 3582 |
| 3583 | 3583 |
| 3584 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | 3584 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
| 3585 Register input_reg = ToRegister(instr->value()); | 3585 Register input_reg = ToRegister(instr->value()); |
| 3586 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 3586 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 3587 factory()->heap_number_map()); | 3587 factory()->heap_number_map()); |
| 3588 DeoptimizeIf(not_equal, instr, "not a heap number"); | 3588 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 3589 | 3589 |
| 3590 Label slow, allocated, done; | 3590 Label slow, allocated, done; |
| 3591 Register tmp = input_reg.is(eax) ? ecx : eax; | 3591 Register tmp = input_reg.is(eax) ? ecx : eax; |
| 3592 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx; | 3592 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx; |
| 3593 | 3593 |
| 3594 // Preserve the value of all registers. | 3594 // Preserve the value of all registers. |
| 3595 PushSafepointRegistersScope scope(this); | 3595 PushSafepointRegistersScope scope(this); |
| 3596 | 3596 |
| 3597 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); | 3597 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| 3598 // Check the sign of the argument. If the argument is positive, just | 3598 // Check the sign of the argument. If the argument is positive, just |
| (...skipping 26 matching lines...) |
| 3625 __ bind(&done); | 3625 __ bind(&done); |
| 3626 } | 3626 } |
| 3627 | 3627 |
| 3628 | 3628 |
| 3629 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3629 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3630 Register input_reg = ToRegister(instr->value()); | 3630 Register input_reg = ToRegister(instr->value()); |
| 3631 __ test(input_reg, Operand(input_reg)); | 3631 __ test(input_reg, Operand(input_reg)); |
| 3632 Label is_positive; | 3632 Label is_positive; |
| 3633 __ j(not_sign, &is_positive, Label::kNear); | 3633 __ j(not_sign, &is_positive, Label::kNear); |
| 3634 __ neg(input_reg); // Sets flags. | 3634 __ neg(input_reg); // Sets flags. |
| 3635 DeoptimizeIf(negative, instr, "overflow"); | 3635 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow); |
| 3636 __ bind(&is_positive); | 3636 __ bind(&is_positive); |
| 3637 } | 3637 } |
| 3638 | 3638 |
| 3639 | 3639 |
| 3640 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3640 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3641 // Class for deferred case. | 3641 // Class for deferred case. |
| 3642 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3642 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
| 3643 public: | 3643 public: |
| 3644 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, | 3644 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, |
| 3645 LMathAbs* instr) | 3645 LMathAbs* instr) |
| (...skipping 38 matching lines...) |
| 3684 if (CpuFeatures::IsSupported(SSE4_1)) { | 3684 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 3685 CpuFeatureScope scope(masm(), SSE4_1); | 3685 CpuFeatureScope scope(masm(), SSE4_1); |
| 3686 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3686 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3687 // Deoptimize on negative zero. | 3687 // Deoptimize on negative zero. |
| 3688 Label non_zero; | 3688 Label non_zero; |
| 3689 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | 3689 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
| 3690 __ ucomisd(input_reg, xmm_scratch); | 3690 __ ucomisd(input_reg, xmm_scratch); |
| 3691 __ j(not_equal, &non_zero, Label::kNear); | 3691 __ j(not_equal, &non_zero, Label::kNear); |
| 3692 __ movmskpd(output_reg, input_reg); | 3692 __ movmskpd(output_reg, input_reg); |
| 3693 __ test(output_reg, Immediate(1)); | 3693 __ test(output_reg, Immediate(1)); |
| 3694 DeoptimizeIf(not_zero, instr, "minus zero"); | 3694 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
| 3695 __ bind(&non_zero); | 3695 __ bind(&non_zero); |
| 3696 } | 3696 } |
| 3697 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); | 3697 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); |
| 3698 __ cvttsd2si(output_reg, Operand(xmm_scratch)); | 3698 __ cvttsd2si(output_reg, Operand(xmm_scratch)); |
| 3699 // Overflow is signalled with minint. | 3699 // Overflow is signalled with minint. |
| 3700 __ cmp(output_reg, 0x1); | 3700 __ cmp(output_reg, 0x1); |
| 3701 DeoptimizeIf(overflow, instr, "overflow"); | 3701 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3702 } else { | 3702 } else { |
| 3703 Label negative_sign, done; | 3703 Label negative_sign, done; |
| 3704 // Deoptimize on unordered. | 3704 // Deoptimize on unordered. |
| 3705 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | 3705 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
| 3706 __ ucomisd(input_reg, xmm_scratch); | 3706 __ ucomisd(input_reg, xmm_scratch); |
| 3707 DeoptimizeIf(parity_even, instr, "NaN"); | 3707 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); |
| 3708 __ j(below, &negative_sign, Label::kNear); | 3708 __ j(below, &negative_sign, Label::kNear); |
| 3709 | 3709 |
| 3710 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3710 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3711 // Check for negative zero. | 3711 // Check for negative zero. |
| 3712 Label positive_sign; | 3712 Label positive_sign; |
| 3713 __ j(above, &positive_sign, Label::kNear); | 3713 __ j(above, &positive_sign, Label::kNear); |
| 3714 __ movmskpd(output_reg, input_reg); | 3714 __ movmskpd(output_reg, input_reg); |
| 3715 __ test(output_reg, Immediate(1)); | 3715 __ test(output_reg, Immediate(1)); |
| 3716 DeoptimizeIf(not_zero, instr, "minus zero"); | 3716 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
| 3717 __ Move(output_reg, Immediate(0)); | 3717 __ Move(output_reg, Immediate(0)); |
| 3718 __ jmp(&done, Label::kNear); | 3718 __ jmp(&done, Label::kNear); |
| 3719 __ bind(&positive_sign); | 3719 __ bind(&positive_sign); |
| 3720 } | 3720 } |
| 3721 | 3721 |
| 3722 // Use truncating instruction (OK because input is positive). | 3722 // Use truncating instruction (OK because input is positive). |
| 3723 __ cvttsd2si(output_reg, Operand(input_reg)); | 3723 __ cvttsd2si(output_reg, Operand(input_reg)); |
| 3724 // Overflow is signalled with minint. | 3724 // Overflow is signalled with minint. |
| 3725 __ cmp(output_reg, 0x1); | 3725 __ cmp(output_reg, 0x1); |
| 3726 DeoptimizeIf(overflow, instr, "overflow"); | 3726 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3727 __ jmp(&done, Label::kNear); | 3727 __ jmp(&done, Label::kNear); |
| 3728 | 3728 |
| 3729 // Non-zero negative reaches here. | 3729 // Non-zero negative reaches here. |
| 3730 __ bind(&negative_sign); | 3730 __ bind(&negative_sign); |
| 3731 // Truncate, then compare and compensate. | 3731 // Truncate, then compare and compensate. |
| 3732 __ cvttsd2si(output_reg, Operand(input_reg)); | 3732 __ cvttsd2si(output_reg, Operand(input_reg)); |
| 3733 __ Cvtsi2sd(xmm_scratch, output_reg); | 3733 __ Cvtsi2sd(xmm_scratch, output_reg); |
| 3734 __ ucomisd(input_reg, xmm_scratch); | 3734 __ ucomisd(input_reg, xmm_scratch); |
| 3735 __ j(equal, &done, Label::kNear); | 3735 __ j(equal, &done, Label::kNear); |
| 3736 __ sub(output_reg, Immediate(1)); | 3736 __ sub(output_reg, Immediate(1)); |
| 3737 DeoptimizeIf(overflow, instr, "overflow"); | 3737 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3738 | 3738 |
| 3739 __ bind(&done); | 3739 __ bind(&done); |
| 3740 } | 3740 } |
| 3741 } | 3741 } |
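The fallback path above (no SSE4.1 roundsd) computes floor by truncating toward zero and then compensating when truncation rounded a negative input up; the NaN, minus-zero, and minint-overflow cases are what the deopts guard. A minimal sketch of just the rounding idea, assuming plain C++ doubles (helper name hypothetical, deopt cases omitted):

    #include <cstdint>

    int32_t FloorToInt32(double x) {
      int32_t truncated = static_cast<int32_t>(x);  // like cvttsd2si: rounds toward zero
      // Negative non-integral inputs truncate to one above their floor.
      if (static_cast<double>(truncated) != x && x < 0.0) truncated -= 1;
      return truncated;
    }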
| 3742 | 3742 |
| 3743 | 3743 |
| 3744 void LCodeGen::DoMathRound(LMathRound* instr) { | 3744 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3745 Register output_reg = ToRegister(instr->result()); | 3745 Register output_reg = ToRegister(instr->result()); |
| 3746 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3746 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3747 XMMRegister xmm_scratch = double_scratch0(); | 3747 XMMRegister xmm_scratch = double_scratch0(); |
| 3748 XMMRegister input_temp = ToDoubleRegister(instr->temp()); | 3748 XMMRegister input_temp = ToDoubleRegister(instr->temp()); |
| 3749 ExternalReference one_half = ExternalReference::address_of_one_half(); | 3749 ExternalReference one_half = ExternalReference::address_of_one_half(); |
| 3750 ExternalReference minus_one_half = | 3750 ExternalReference minus_one_half = |
| 3751 ExternalReference::address_of_minus_one_half(); | 3751 ExternalReference::address_of_minus_one_half(); |
| 3752 | 3752 |
| 3753 Label done, round_to_zero, below_one_half, do_not_compensate; | 3753 Label done, round_to_zero, below_one_half, do_not_compensate; |
| 3754 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 3754 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
| 3755 | 3755 |
| 3756 __ movsd(xmm_scratch, Operand::StaticVariable(one_half)); | 3756 __ movsd(xmm_scratch, Operand::StaticVariable(one_half)); |
| 3757 __ ucomisd(xmm_scratch, input_reg); | 3757 __ ucomisd(xmm_scratch, input_reg); |
| 3758 __ j(above, &below_one_half, Label::kNear); | 3758 __ j(above, &below_one_half, Label::kNear); |
| 3759 | 3759 |
| 3760 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). | 3760 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). |
| 3761 __ addsd(xmm_scratch, input_reg); | 3761 __ addsd(xmm_scratch, input_reg); |
| 3762 __ cvttsd2si(output_reg, Operand(xmm_scratch)); | 3762 __ cvttsd2si(output_reg, Operand(xmm_scratch)); |
| 3763 // Overflow is signalled with minint. | 3763 // Overflow is signalled with minint. |
| 3764 __ cmp(output_reg, 0x1); | 3764 __ cmp(output_reg, 0x1); |
| 3765 DeoptimizeIf(overflow, instr, "overflow"); | 3765 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3766 __ jmp(&done, dist); | 3766 __ jmp(&done, dist); |
| 3767 | 3767 |
| 3768 __ bind(&below_one_half); | 3768 __ bind(&below_one_half); |
| 3769 __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half)); | 3769 __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half)); |
| 3770 __ ucomisd(xmm_scratch, input_reg); | 3770 __ ucomisd(xmm_scratch, input_reg); |
| 3771 __ j(below_equal, &round_to_zero, Label::kNear); | 3771 __ j(below_equal, &round_to_zero, Label::kNear); |
| 3772 | 3772 |
| 3773 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then | 3773 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then |
| 3774 // compare and compensate. | 3774 // compare and compensate. |
| 3775 __ movaps(input_temp, input_reg); // Do not alter input_reg. | 3775 __ movaps(input_temp, input_reg); // Do not alter input_reg. |
| 3776 __ subsd(input_temp, xmm_scratch); | 3776 __ subsd(input_temp, xmm_scratch); |
| 3777 __ cvttsd2si(output_reg, Operand(input_temp)); | 3777 __ cvttsd2si(output_reg, Operand(input_temp)); |
| 3778 // Catch minint due to overflow, and to prevent overflow when compensating. | 3778 // Catch minint due to overflow, and to prevent overflow when compensating. |
| 3779 __ cmp(output_reg, 0x1); | 3779 __ cmp(output_reg, 0x1); |
| 3780 DeoptimizeIf(overflow, instr, "overflow"); | 3780 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3781 | 3781 |
| 3782 __ Cvtsi2sd(xmm_scratch, output_reg); | 3782 __ Cvtsi2sd(xmm_scratch, output_reg); |
| 3783 __ ucomisd(xmm_scratch, input_temp); | 3783 __ ucomisd(xmm_scratch, input_temp); |
| 3784 __ j(equal, &done, dist); | 3784 __ j(equal, &done, dist); |
| 3785 __ sub(output_reg, Immediate(1)); | 3785 __ sub(output_reg, Immediate(1)); |
| 3786 // No overflow because we already ruled out minint. | 3786 // No overflow because we already ruled out minint. |
| 3787 __ jmp(&done, dist); | 3787 __ jmp(&done, dist); |
| 3788 | 3788 |
| 3789 __ bind(&round_to_zero); | 3789 __ bind(&round_to_zero); |
| 3790 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if | 3790 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if |
| 3791 // we can ignore the difference between a result of -0 and +0. | 3791 // we can ignore the difference between a result of -0 and +0. |
| 3792 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3792 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3793 // If the sign is positive, we return +0. | 3793 // If the sign is positive, we return +0. |
| 3794 __ movmskpd(output_reg, input_reg); | 3794 __ movmskpd(output_reg, input_reg); |
| 3795 __ test(output_reg, Immediate(1)); | 3795 __ test(output_reg, Immediate(1)); |
| 3796 DeoptimizeIf(not_zero, instr, "minus zero"); | 3796 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
| 3797 } | 3797 } |
| 3798 __ Move(output_reg, Immediate(0)); | 3798 __ Move(output_reg, Immediate(0)); |
| 3799 __ bind(&done); | 3799 __ bind(&done); |
| 3800 } | 3800 } |
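Taken together, the three ranges above give Math.round's round-half-toward-plus-infinity semantics: inputs >= 0.5 truncate x + 0.5 directly, inputs in [-0.5, 0.5) collapse to zero (deopting if -0 must be preserved), and inputs below -0.5 truncate x + 0.5 and compensate downward when truncation was inexact. A compact reference model under the usual assumptions (plain C++, result assumed to fit in int32, deopt paths omitted, helper name hypothetical):

    #include <cstdint>

    int32_t RoundToInt32(double x) {
      if (x >= 0.5) return static_cast<int32_t>(x + 0.5);  // positive: truncation == floor
      if (x >= -0.5) return 0;                             // [-0.5, 0.5): rounds to zero
      double shifted = x + 0.5;                            // x < -0.5, so shifted <= 0
      int32_t truncated = static_cast<int32_t>(shifted);
      if (static_cast<double>(truncated) != shifted) truncated -= 1;  // compensate
      return truncated;
    }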
| 3801 | 3801 |
| 3802 | 3802 |
| 3803 void LCodeGen::DoMathFround(LMathFround* instr) { | 3803 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3804 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3804 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3805 XMMRegister output_reg = ToDoubleRegister(instr->result()); | 3805 XMMRegister output_reg = ToDoubleRegister(instr->result()); |
| 3806 __ cvtsd2ss(output_reg, input_reg); | 3806 __ cvtsd2ss(output_reg, input_reg); |
| (...skipping 55 matching lines...) |
| 3862 DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); | 3862 DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); |
| 3863 | 3863 |
| 3864 if (exponent_type.IsSmi()) { | 3864 if (exponent_type.IsSmi()) { |
| 3865 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3865 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3866 __ CallStub(&stub); | 3866 __ CallStub(&stub); |
| 3867 } else if (exponent_type.IsTagged()) { | 3867 } else if (exponent_type.IsTagged()) { |
| 3868 Label no_deopt; | 3868 Label no_deopt; |
| 3869 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3869 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 3870 DCHECK(!ecx.is(tagged_exponent)); | 3870 DCHECK(!ecx.is(tagged_exponent)); |
| 3871 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx); | 3871 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx); |
| 3872 DeoptimizeIf(not_equal, instr, "not a heap number"); | 3872 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 3873 __ bind(&no_deopt); | 3873 __ bind(&no_deopt); |
| 3874 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3874 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3875 __ CallStub(&stub); | 3875 __ CallStub(&stub); |
| 3876 } else if (exponent_type.IsInteger32()) { | 3876 } else if (exponent_type.IsInteger32()) { |
| 3877 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3877 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3878 __ CallStub(&stub); | 3878 __ CallStub(&stub); |
| 3879 } else { | 3879 } else { |
| 3880 DCHECK(exponent_type.IsDouble()); | 3880 DCHECK(exponent_type.IsDouble()); |
| 3881 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3881 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3882 __ CallStub(&stub); | 3882 __ CallStub(&stub); |
| (...skipping 309 matching lines...) |
| 4192 instr->hydrogen()->index()->representation())); | 4192 instr->hydrogen()->index()->representation())); |
| 4193 } else { | 4193 } else { |
| 4194 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); | 4194 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); |
| 4195 } | 4195 } |
| 4196 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4196 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 4197 Label done; | 4197 Label done; |
| 4198 __ j(NegateCondition(cc), &done, Label::kNear); | 4198 __ j(NegateCondition(cc), &done, Label::kNear); |
| 4199 __ int3(); | 4199 __ int3(); |
| 4200 __ bind(&done); | 4200 __ bind(&done); |
| 4201 } else { | 4201 } else { |
| 4202 DeoptimizeIf(cc, instr, "out of bounds"); | 4202 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); |
| 4203 } | 4203 } |
| 4204 } | 4204 } |
| 4205 | 4205 |
| 4206 | 4206 |
| 4207 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4207 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4208 ElementsKind elements_kind = instr->elements_kind(); | 4208 ElementsKind elements_kind = instr->elements_kind(); |
| 4209 LOperand* key = instr->key(); | 4209 LOperand* key = instr->key(); |
| 4210 if (!key->IsConstantOperand() && | 4210 if (!key->IsConstantOperand() && |
| 4211 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), | 4211 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), |
| 4212 elements_kind)) { | 4212 elements_kind)) { |
| (...skipping 142 matching lines...) |
| 4355 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code(); | 4355 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code(); |
| 4356 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 4356 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 4357 } | 4357 } |
| 4358 | 4358 |
| 4359 | 4359 |
| 4360 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4360 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4361 Register object = ToRegister(instr->object()); | 4361 Register object = ToRegister(instr->object()); |
| 4362 Register temp = ToRegister(instr->temp()); | 4362 Register temp = ToRegister(instr->temp()); |
| 4363 Label no_memento_found; | 4363 Label no_memento_found; |
| 4364 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4364 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
| 4365 DeoptimizeIf(equal, instr, "memento found"); | 4365 DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound); |
| 4366 __ bind(&no_memento_found); | 4366 __ bind(&no_memento_found); |
| 4367 } | 4367 } |
| 4368 | 4368 |
| 4369 | 4369 |
| 4370 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { | 4370 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
| 4371 Register object_reg = ToRegister(instr->object()); | 4371 Register object_reg = ToRegister(instr->object()); |
| 4372 | 4372 |
| 4373 Handle<Map> from_map = instr->original_map(); | 4373 Handle<Map> from_map = instr->original_map(); |
| 4374 Handle<Map> to_map = instr->transitioned_map(); | 4374 Handle<Map> to_map = instr->transitioned_map(); |
| 4375 ElementsKind from_kind = instr->from_kind(); | 4375 ElementsKind from_kind = instr->from_kind(); |
| (...skipping 325 matching lines...) |
| 4701 __ StoreToSafepointRegisterSlot(reg, eax); | 4701 __ StoreToSafepointRegisterSlot(reg, eax); |
| 4702 } | 4702 } |
| 4703 | 4703 |
| 4704 | 4704 |
| 4705 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4705 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4706 HChange* hchange = instr->hydrogen(); | 4706 HChange* hchange = instr->hydrogen(); |
| 4707 Register input = ToRegister(instr->value()); | 4707 Register input = ToRegister(instr->value()); |
| 4708 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4708 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4709 hchange->value()->CheckFlag(HValue::kUint32)) { | 4709 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4710 __ test(input, Immediate(0xc0000000)); | 4710 __ test(input, Immediate(0xc0000000)); |
| 4711 DeoptimizeIf(not_zero, instr, "overflow"); | 4711 DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow); |
| 4712 } | 4712 } |
| 4713 __ SmiTag(input); | 4713 __ SmiTag(input); |
| 4714 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4714 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4715 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4715 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4716 DeoptimizeIf(overflow, instr, "overflow"); | 4716 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 4717 } | 4717 } |
| 4718 } | 4718 } |
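Context for the overflow checks above: an ia32 Smi holds a 31-bit signed payload shifted left past the zero tag bit, so a uint32 input needs both of its top two bits clear (the 0xc0000000 test), while a signed input relies on the overflow flag set by the tagging shift itself. A small sketch of the uint32 case, assuming ordinary C++ integers (helper name hypothetical):

    #include <cstdint>

    // Returns false where the generated code would DeoptimizeIf(not_zero, ...).
    bool SmiTagUint32(uint32_t value, int32_t* tagged) {
      if (value & 0xC0000000u) return false;        // needs bit 30 or 31: not a Smi
      *tagged = static_cast<int32_t>(value << 1);   // shift in the zero tag bit
      return true;
    }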
| 4719 | 4719 |
| 4720 | 4720 |
| 4721 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4721 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4722 LOperand* input = instr->value(); | 4722 LOperand* input = instr->value(); |
| 4723 Register result = ToRegister(input); | 4723 Register result = ToRegister(input); |
| 4724 DCHECK(input->IsRegister() && input->Equals(instr->result())); | 4724 DCHECK(input->IsRegister() && input->Equals(instr->result())); |
| 4725 if (instr->needs_check()) { | 4725 if (instr->needs_check()) { |
| 4726 __ test(result, Immediate(kSmiTagMask)); | 4726 __ test(result, Immediate(kSmiTagMask)); |
| 4727 DeoptimizeIf(not_zero, instr, "not a Smi"); | 4727 DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi); |
| 4728 } else { | 4728 } else { |
| 4729 __ AssertSmi(result); | 4729 __ AssertSmi(result); |
| 4730 } | 4730 } |
| 4731 __ SmiUntag(result); | 4731 __ SmiUntag(result); |
| 4732 } | 4732 } |
| 4733 | 4733 |
| 4734 | 4734 |
| 4735 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4735 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4736 Register temp_reg, XMMRegister result_reg, | 4736 Register temp_reg, XMMRegister result_reg, |
| 4737 NumberUntagDMode mode) { | 4737 NumberUntagDMode mode) { |
| 4738 bool can_convert_undefined_to_nan = | 4738 bool can_convert_undefined_to_nan = |
| 4739 instr->hydrogen()->can_convert_undefined_to_nan(); | 4739 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4740 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4740 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4741 | 4741 |
| 4742 Label convert, load_smi, done; | 4742 Label convert, load_smi, done; |
| 4743 | 4743 |
| 4744 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4744 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4745 // Smi check. | 4745 // Smi check. |
| 4746 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); | 4746 __ JumpIfSmi(input_reg, &load_smi, Label::kNear); |
| 4747 | 4747 |
| 4748 // Heap number map check. | 4748 // Heap number map check. |
| 4749 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 4749 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 4750 factory()->heap_number_map()); | 4750 factory()->heap_number_map()); |
| 4751 if (can_convert_undefined_to_nan) { | 4751 if (can_convert_undefined_to_nan) { |
| 4752 __ j(not_equal, &convert, Label::kNear); | 4752 __ j(not_equal, &convert, Label::kNear); |
| 4753 } else { | 4753 } else { |
| 4754 DeoptimizeIf(not_equal, instr, "not a heap number"); | 4754 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 4755 } | 4755 } |
| 4756 | 4756 |
| 4757 // Heap number to XMM conversion. | 4757 // Heap number to XMM conversion. |
| 4758 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4758 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4759 | 4759 |
| 4760 if (deoptimize_on_minus_zero) { | 4760 if (deoptimize_on_minus_zero) { |
| 4761 XMMRegister xmm_scratch = double_scratch0(); | 4761 XMMRegister xmm_scratch = double_scratch0(); |
| 4762 __ xorps(xmm_scratch, xmm_scratch); | 4762 __ xorps(xmm_scratch, xmm_scratch); |
| 4763 __ ucomisd(result_reg, xmm_scratch); | 4763 __ ucomisd(result_reg, xmm_scratch); |
| 4764 __ j(not_zero, &done, Label::kNear); | 4764 __ j(not_zero, &done, Label::kNear); |
| 4765 __ movmskpd(temp_reg, result_reg); | 4765 __ movmskpd(temp_reg, result_reg); |
| 4766 __ test_b(temp_reg, 1); | 4766 __ test_b(temp_reg, 1); |
| 4767 DeoptimizeIf(not_zero, instr, "minus zero"); | 4767 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
| 4768 } | 4768 } |
| 4769 __ jmp(&done, Label::kNear); | 4769 __ jmp(&done, Label::kNear); |
| 4770 | 4770 |
| 4771 if (can_convert_undefined_to_nan) { | 4771 if (can_convert_undefined_to_nan) { |
| 4772 __ bind(&convert); | 4772 __ bind(&convert); |
| 4773 | 4773 |
| 4774 // Convert undefined to NaN. | 4774 // Convert undefined to NaN. |
| 4775 __ cmp(input_reg, factory()->undefined_value()); | 4775 __ cmp(input_reg, factory()->undefined_value()); |
| 4776 DeoptimizeIf(not_equal, instr, "not a heap number/undefined"); | 4776 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); |
| 4777 | 4777 |
| 4778 __ pcmpeqd(result_reg, result_reg); | 4778 __ pcmpeqd(result_reg, result_reg); |
| 4779 __ jmp(&done, Label::kNear); | 4779 __ jmp(&done, Label::kNear); |
| 4780 } | 4780 } |
| 4781 } else { | 4781 } else { |
| 4782 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4782 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4783 } | 4783 } |
| 4784 | 4784 |
| 4785 __ bind(&load_smi); | 4785 __ bind(&load_smi); |
| 4786 // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the | 4786 // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the |
| (...skipping 31 matching lines...) |
| 4818 __ jmp(done); | 4818 __ jmp(done); |
| 4819 | 4819 |
| 4820 __ bind(&check_bools); | 4820 __ bind(&check_bools); |
| 4821 __ cmp(input_reg, factory()->true_value()); | 4821 __ cmp(input_reg, factory()->true_value()); |
| 4822 __ j(not_equal, &check_false, Label::kNear); | 4822 __ j(not_equal, &check_false, Label::kNear); |
| 4823 __ Move(input_reg, Immediate(1)); | 4823 __ Move(input_reg, Immediate(1)); |
| 4824 __ jmp(done); | 4824 __ jmp(done); |
| 4825 | 4825 |
| 4826 __ bind(&check_false); | 4826 __ bind(&check_false); |
| 4827 __ cmp(input_reg, factory()->false_value()); | 4827 __ cmp(input_reg, factory()->false_value()); |
| 4828 DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false"); | 4828 DeoptimizeIf(not_equal, instr, |
| 4829 Deoptimizer::kNotAHeapNumberUndefinedBoolean); |
| 4829 __ Move(input_reg, Immediate(0)); | 4830 __ Move(input_reg, Immediate(0)); |
| 4830 } else { | 4831 } else { |
| 4831 XMMRegister scratch = ToDoubleRegister(instr->temp()); | 4832 XMMRegister scratch = ToDoubleRegister(instr->temp()); |
| 4832 DCHECK(!scratch.is(xmm0)); | 4833 DCHECK(!scratch.is(xmm0)); |
| 4833 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 4834 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 4834 isolate()->factory()->heap_number_map()); | 4835 isolate()->factory()->heap_number_map()); |
| 4835 DeoptimizeIf(not_equal, instr, "not a heap number"); | 4836 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 4836 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4837 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4837 __ cvttsd2si(input_reg, Operand(xmm0)); | 4838 __ cvttsd2si(input_reg, Operand(xmm0)); |
| 4838 __ Cvtsi2sd(scratch, Operand(input_reg)); | 4839 __ Cvtsi2sd(scratch, Operand(input_reg)); |
| 4839 __ ucomisd(xmm0, scratch); | 4840 __ ucomisd(xmm0, scratch); |
| 4840 DeoptimizeIf(not_equal, instr, "lost precision"); | 4841 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); |
| 4841 DeoptimizeIf(parity_even, instr, "NaN"); | 4842 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); |
| 4842 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { | 4843 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { |
| 4843 __ test(input_reg, Operand(input_reg)); | 4844 __ test(input_reg, Operand(input_reg)); |
| 4844 __ j(not_zero, done); | 4845 __ j(not_zero, done); |
| 4845 __ movmskpd(input_reg, xmm0); | 4846 __ movmskpd(input_reg, xmm0); |
| 4846 __ and_(input_reg, 1); | 4847 __ and_(input_reg, 1); |
| 4847 DeoptimizeIf(not_zero, instr, "minus zero"); | 4848 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
| 4848 } | 4849 } |
| 4849 } | 4850 } |
| 4850 } | 4851 } |
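The heap-number path above detects a lossy conversion by converting the double to int32 and back and comparing (an unordered compare flags NaN via parity_even); only an exact round trip avoids the kLostPrecision / kNaN deopts. A compact model, assuming plain C++ (helper name hypothetical, minus-zero handling omitted):

    #include <cmath>
    #include <cstdint>

    bool DoubleToInt32Exact(double value, int32_t* out) {
      if (std::isnan(value)) return false;                                    // kNaN
      if (!(value >= -2147483648.0 && value < 2147483648.0)) return false;    // out of int32 range
      int32_t truncated = static_cast<int32_t>(value);                        // cvttsd2si
      if (static_cast<double>(truncated) != value) return false;              // kLostPrecision
      *out = truncated;
      return true;
    }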
| 4851 | 4852 |
| 4852 | 4853 |
| 4853 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 4854 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 4854 class DeferredTaggedToI FINAL : public LDeferredCode { | 4855 class DeferredTaggedToI FINAL : public LDeferredCode { |
| 4855 public: | 4856 public: |
| 4856 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 4857 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| 4857 : LDeferredCode(codegen), instr_(instr) { } | 4858 : LDeferredCode(codegen), instr_(instr) { } |
| (...skipping 59 matching lines...) |
| 4917 } else { | 4918 } else { |
| 4918 Label lost_precision, is_nan, minus_zero, done; | 4919 Label lost_precision, is_nan, minus_zero, done; |
| 4919 XMMRegister input_reg = ToDoubleRegister(input); | 4920 XMMRegister input_reg = ToDoubleRegister(input); |
| 4920 XMMRegister xmm_scratch = double_scratch0(); | 4921 XMMRegister xmm_scratch = double_scratch0(); |
| 4921 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 4922 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
| 4922 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 4923 __ DoubleToI(result_reg, input_reg, xmm_scratch, |
| 4923 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, | 4924 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, |
| 4924 &is_nan, &minus_zero, dist); | 4925 &is_nan, &minus_zero, dist); |
| 4925 __ jmp(&done, dist); | 4926 __ jmp(&done, dist); |
| 4926 __ bind(&lost_precision); | 4927 __ bind(&lost_precision); |
| 4927 DeoptimizeIf(no_condition, instr, "lost precision"); | 4928 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision); |
| 4928 __ bind(&is_nan); | 4929 __ bind(&is_nan); |
| 4929 DeoptimizeIf(no_condition, instr, "NaN"); | 4930 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN); |
| 4930 __ bind(&minus_zero); | 4931 __ bind(&minus_zero); |
| 4931 DeoptimizeIf(no_condition, instr, "minus zero"); | 4932 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); |
| 4932 __ bind(&done); | 4933 __ bind(&done); |
| 4933 } | 4934 } |
| 4934 } | 4935 } |
| 4935 | 4936 |
| 4936 | 4937 |
| 4937 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 4938 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 4938 LOperand* input = instr->value(); | 4939 LOperand* input = instr->value(); |
| 4939 DCHECK(input->IsDoubleRegister()); | 4940 DCHECK(input->IsDoubleRegister()); |
| 4940 LOperand* result = instr->result(); | 4941 LOperand* result = instr->result(); |
| 4941 DCHECK(result->IsRegister()); | 4942 DCHECK(result->IsRegister()); |
| 4942 Register result_reg = ToRegister(result); | 4943 Register result_reg = ToRegister(result); |
| 4943 | 4944 |
| 4944 Label lost_precision, is_nan, minus_zero, done; | 4945 Label lost_precision, is_nan, minus_zero, done; |
| 4945 XMMRegister input_reg = ToDoubleRegister(input); | 4946 XMMRegister input_reg = ToDoubleRegister(input); |
| 4946 XMMRegister xmm_scratch = double_scratch0(); | 4947 XMMRegister xmm_scratch = double_scratch0(); |
| 4947 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 4948 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
| 4948 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 4949 __ DoubleToI(result_reg, input_reg, xmm_scratch, |
| 4949 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan, | 4950 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan, |
| 4950 &minus_zero, dist); | 4951 &minus_zero, dist); |
| 4951 __ jmp(&done, dist); | 4952 __ jmp(&done, dist); |
| 4952 __ bind(&lost_precision); | 4953 __ bind(&lost_precision); |
| 4953 DeoptimizeIf(no_condition, instr, "lost precision"); | 4954 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision); |
| 4954 __ bind(&is_nan); | 4955 __ bind(&is_nan); |
| 4955 DeoptimizeIf(no_condition, instr, "NaN"); | 4956 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN); |
| 4956 __ bind(&minus_zero); | 4957 __ bind(&minus_zero); |
| 4957 DeoptimizeIf(no_condition, instr, "minus zero"); | 4958 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); |
| 4958 __ bind(&done); | 4959 __ bind(&done); |
| 4959 __ SmiTag(result_reg); | 4960 __ SmiTag(result_reg); |
| 4960 DeoptimizeIf(overflow, instr, "overflow"); | 4961 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 4961 } | 4962 } |
| 4962 | 4963 |
| 4963 | 4964 |
| 4964 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 4965 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 4965 LOperand* input = instr->value(); | 4966 LOperand* input = instr->value(); |
| 4966 __ test(ToOperand(input), Immediate(kSmiTagMask)); | 4967 __ test(ToOperand(input), Immediate(kSmiTagMask)); |
| 4967 DeoptimizeIf(not_zero, instr, "not a Smi"); | 4968 DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi); |
| 4968 } | 4969 } |
| 4969 | 4970 |
| 4970 | 4971 |
| 4971 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 4972 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 4972 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 4973 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 4973 LOperand* input = instr->value(); | 4974 LOperand* input = instr->value(); |
| 4974 __ test(ToOperand(input), Immediate(kSmiTagMask)); | 4975 __ test(ToOperand(input), Immediate(kSmiTagMask)); |
| 4975 DeoptimizeIf(zero, instr, "Smi"); | 4976 DeoptimizeIf(zero, instr, Deoptimizer::kSmi); |
| 4976 } | 4977 } |
| 4977 } | 4978 } |
| 4978 | 4979 |
| 4979 | 4980 |
| 4980 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 4981 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 4981 Register input = ToRegister(instr->value()); | 4982 Register input = ToRegister(instr->value()); |
| 4982 Register temp = ToRegister(instr->temp()); | 4983 Register temp = ToRegister(instr->temp()); |
| 4983 | 4984 |
| 4984 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); | 4985 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); |
| 4985 | 4986 |
| 4986 if (instr->hydrogen()->is_interval_check()) { | 4987 if (instr->hydrogen()->is_interval_check()) { |
| 4987 InstanceType first; | 4988 InstanceType first; |
| 4988 InstanceType last; | 4989 InstanceType last; |
| 4989 instr->hydrogen()->GetCheckInterval(&first, &last); | 4990 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 4990 | 4991 |
| 4991 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), | 4992 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), |
| 4992 static_cast<int8_t>(first)); | 4993 static_cast<int8_t>(first)); |
| 4993 | 4994 |
| 4994 // If there is only one type in the interval check for equality. | 4995 // If there is only one type in the interval check for equality. |
| 4995 if (first == last) { | 4996 if (first == last) { |
| 4996 DeoptimizeIf(not_equal, instr, "wrong instance type"); | 4997 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType); |
| 4997 } else { | 4998 } else { |
| 4998 DeoptimizeIf(below, instr, "wrong instance type"); | 4999 DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType); |
| 4999 // Omit check for the last type. | 5000 // Omit check for the last type. |
| 5000 if (last != LAST_TYPE) { | 5001 if (last != LAST_TYPE) { |
| 5001 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), | 5002 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), |
| 5002 static_cast<int8_t>(last)); | 5003 static_cast<int8_t>(last)); |
| 5003 DeoptimizeIf(above, instr, "wrong instance type"); | 5004 DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType); |
| 5004 } | 5005 } |
| 5005 } | 5006 } |
| 5006 } else { | 5007 } else { |
| 5007 uint8_t mask; | 5008 uint8_t mask; |
| 5008 uint8_t tag; | 5009 uint8_t tag; |
| 5009 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5010 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 5010 | 5011 |
| 5011 if (base::bits::IsPowerOfTwo32(mask)) { | 5012 if (base::bits::IsPowerOfTwo32(mask)) { |
| 5012 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5013 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
| 5013 __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask); | 5014 __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask); |
| 5014 DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type"); | 5015 DeoptimizeIf(tag == 0 ? not_zero : zero, instr, |
| 5016 Deoptimizer::kWrongInstanceType); |
| 5015 } else { | 5017 } else { |
| 5016 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); | 5018 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); |
| 5017 __ and_(temp, mask); | 5019 __ and_(temp, mask); |
| 5018 __ cmp(temp, tag); | 5020 __ cmp(temp, tag); |
| 5019 DeoptimizeIf(not_equal, instr, "wrong instance type"); | 5021 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType); |
| 5020 } | 5022 } |
| 5021 } | 5023 } |
| 5022 } | 5024 } |
| 5023 | 5025 |
| 5024 | 5026 |
| 5025 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5027 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 5026 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5028 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
| 5027 if (instr->hydrogen()->object_in_new_space()) { | 5029 if (instr->hydrogen()->object_in_new_space()) { |
| 5028 Register reg = ToRegister(instr->value()); | 5030 Register reg = ToRegister(instr->value()); |
| 5029 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5031 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 5030 __ cmp(reg, Operand::ForCell(cell)); | 5032 __ cmp(reg, Operand::ForCell(cell)); |
| 5031 } else { | 5033 } else { |
| 5032 Operand operand = ToOperand(instr->value()); | 5034 Operand operand = ToOperand(instr->value()); |
| 5033 __ cmp(operand, object); | 5035 __ cmp(operand, object); |
| 5034 } | 5036 } |
| 5035 DeoptimizeIf(not_equal, instr, "value mismatch"); | 5037 DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch); |
| 5036 } | 5038 } |
| 5037 | 5039 |
| 5038 | 5040 |
| 5039 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5041 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 5040 { | 5042 { |
| 5041 PushSafepointRegistersScope scope(this); | 5043 PushSafepointRegistersScope scope(this); |
| 5042 __ push(object); | 5044 __ push(object); |
| 5043 __ xor_(esi, esi); | 5045 __ xor_(esi, esi); |
| 5044 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5046 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 5045 RecordSafepointWithRegisters( | 5047 RecordSafepointWithRegisters( |
| 5046 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5048 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 5047 | 5049 |
| 5048 __ test(eax, Immediate(kSmiTagMask)); | 5050 __ test(eax, Immediate(kSmiTagMask)); |
| 5049 } | 5051 } |
| 5050 DeoptimizeIf(zero, instr, "instance migration failed"); | 5052 DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed); |
| 5051 } | 5053 } |
| 5052 | 5054 |
| 5053 | 5055 |
| 5054 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5056 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5055 class DeferredCheckMaps FINAL : public LDeferredCode { | 5057 class DeferredCheckMaps FINAL : public LDeferredCode { |
| 5056 public: | 5058 public: |
| 5057 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5059 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 5058 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5060 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 5059 SetExit(check_maps()); | 5061 SetExit(check_maps()); |
| 5060 } | 5062 } |
| (...skipping 33 matching lines...) |
| 5094 Handle<Map> map = maps->at(i).handle(); | 5096 Handle<Map> map = maps->at(i).handle(); |
| 5095 __ CompareMap(reg, map); | 5097 __ CompareMap(reg, map); |
| 5096 __ j(equal, &success, Label::kNear); | 5098 __ j(equal, &success, Label::kNear); |
| 5097 } | 5099 } |
| 5098 | 5100 |
| 5099 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5101 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 5100 __ CompareMap(reg, map); | 5102 __ CompareMap(reg, map); |
| 5101 if (instr->hydrogen()->HasMigrationTarget()) { | 5103 if (instr->hydrogen()->HasMigrationTarget()) { |
| 5102 __ j(not_equal, deferred->entry()); | 5104 __ j(not_equal, deferred->entry()); |
| 5103 } else { | 5105 } else { |
| 5104 DeoptimizeIf(not_equal, instr, "wrong map"); | 5106 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); |
| 5105 } | 5107 } |
| 5106 | 5108 |
| 5107 __ bind(&success); | 5109 __ bind(&success); |
| 5108 } | 5110 } |
| 5109 | 5111 |
| 5110 | 5112 |
| 5111 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5113 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5112 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5114 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5113 XMMRegister xmm_scratch = double_scratch0(); | 5115 XMMRegister xmm_scratch = double_scratch0(); |
| 5114 Register result_reg = ToRegister(instr->result()); | 5116 Register result_reg = ToRegister(instr->result()); |
| (...skipping 18 matching lines...) |
| 5133 __ JumpIfSmi(input_reg, &is_smi); | 5135 __ JumpIfSmi(input_reg, &is_smi); |
| 5134 | 5136 |
| 5135 // Check for heap number | 5137 // Check for heap number |
| 5136 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 5138 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 5137 factory()->heap_number_map()); | 5139 factory()->heap_number_map()); |
| 5138 __ j(equal, &heap_number, Label::kNear); | 5140 __ j(equal, &heap_number, Label::kNear); |
| 5139 | 5141 |
| 5140 // Check for undefined. Undefined is converted to zero for clamping | 5142 // Check for undefined. Undefined is converted to zero for clamping |
| 5141 // conversions. | 5143 // conversions. |
| 5142 __ cmp(input_reg, factory()->undefined_value()); | 5144 __ cmp(input_reg, factory()->undefined_value()); |
| 5143 DeoptimizeIf(not_equal, instr, "not a heap number/undefined"); | 5145 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); |
| 5144 __ mov(input_reg, 0); | 5146 __ mov(input_reg, 0); |
| 5145 __ jmp(&done, Label::kNear); | 5147 __ jmp(&done, Label::kNear); |
| 5146 | 5148 |
| 5147 // Heap number | 5149 // Heap number |
| 5148 __ bind(&heap_number); | 5150 __ bind(&heap_number); |
| 5149 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5151 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 5150 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); | 5152 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); |
| 5151 __ jmp(&done, Label::kNear); | 5153 __ jmp(&done, Label::kNear); |
| 5152 | 5154 |
| 5153 // smi | 5155 // smi |
| (...skipping 467 matching lines...) |
| 5621 DCHECK(!environment->HasBeenRegistered()); | 5623 DCHECK(!environment->HasBeenRegistered()); |
| 5622 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5624 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 5623 | 5625 |
| 5624 GenerateOsrPrologue(); | 5626 GenerateOsrPrologue(); |
| 5625 } | 5627 } |
| 5626 | 5628 |
| 5627 | 5629 |
| 5628 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5630 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| 5629 DCHECK(ToRegister(instr->context()).is(esi)); | 5631 DCHECK(ToRegister(instr->context()).is(esi)); |
| 5630 __ cmp(eax, isolate()->factory()->undefined_value()); | 5632 __ cmp(eax, isolate()->factory()->undefined_value()); |
| 5631 DeoptimizeIf(equal, instr, "undefined"); | 5633 DeoptimizeIf(equal, instr, Deoptimizer::kUndefined); |
| 5632 | 5634 |
| 5633 __ cmp(eax, isolate()->factory()->null_value()); | 5635 __ cmp(eax, isolate()->factory()->null_value()); |
| 5634 DeoptimizeIf(equal, instr, "null"); | 5636 DeoptimizeIf(equal, instr, Deoptimizer::kNull); |
| 5635 | 5637 |
| 5636 __ test(eax, Immediate(kSmiTagMask)); | 5638 __ test(eax, Immediate(kSmiTagMask)); |
| 5637 DeoptimizeIf(zero, instr, "Smi"); | 5639 DeoptimizeIf(zero, instr, Deoptimizer::kSmi); |
| 5638 | 5640 |
| 5639 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5641 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| 5640 __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx); | 5642 __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx); |
| 5641 DeoptimizeIf(below_equal, instr, "wrong instance type"); | 5643 DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType); |
| 5642 | 5644 |
| 5643 Label use_cache, call_runtime; | 5645 Label use_cache, call_runtime; |
| 5644 __ CheckEnumCache(&call_runtime); | 5646 __ CheckEnumCache(&call_runtime); |
| 5645 | 5647 |
| 5646 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); | 5648 __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); |
| 5647 __ jmp(&use_cache, Label::kNear); | 5649 __ jmp(&use_cache, Label::kNear); |
| 5648 | 5650 |
| 5649 // Get the set of properties to enumerate. | 5651 // Get the set of properties to enumerate. |
| 5650 __ bind(&call_runtime); | 5652 __ bind(&call_runtime); |
| 5651 __ push(eax); | 5653 __ push(eax); |
| 5652 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5654 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| 5653 | 5655 |
| 5654 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), | 5656 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), |
| 5655 isolate()->factory()->meta_map()); | 5657 isolate()->factory()->meta_map()); |
| 5656 DeoptimizeIf(not_equal, instr, "wrong map"); | 5658 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); |
| 5657 __ bind(&use_cache); | 5659 __ bind(&use_cache); |
| 5658 } | 5660 } |
| 5659 | 5661 |
| 5660 | 5662 |
| 5661 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5663 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| 5662 Register map = ToRegister(instr->map()); | 5664 Register map = ToRegister(instr->map()); |
| 5663 Register result = ToRegister(instr->result()); | 5665 Register result = ToRegister(instr->result()); |
| 5664 Label load_cache, done; | 5666 Label load_cache, done; |
| 5665 __ EnumLength(result, map); | 5667 __ EnumLength(result, map); |
| 5666 __ cmp(result, Immediate(Smi::FromInt(0))); | 5668 __ cmp(result, Immediate(Smi::FromInt(0))); |
| 5667 __ j(not_equal, &load_cache, Label::kNear); | 5669 __ j(not_equal, &load_cache, Label::kNear); |
| 5668 __ mov(result, isolate()->factory()->empty_fixed_array()); | 5670 __ mov(result, isolate()->factory()->empty_fixed_array()); |
| 5669 __ jmp(&done, Label::kNear); | 5671 __ jmp(&done, Label::kNear); |
| 5670 | 5672 |
| 5671 __ bind(&load_cache); | 5673 __ bind(&load_cache); |
| 5672 __ LoadInstanceDescriptors(map, result); | 5674 __ LoadInstanceDescriptors(map, result); |
| 5673 __ mov(result, | 5675 __ mov(result, |
| 5674 FieldOperand(result, DescriptorArray::kEnumCacheOffset)); | 5676 FieldOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5675 __ mov(result, | 5677 __ mov(result, |
| 5676 FieldOperand(result, FixedArray::SizeFor(instr->idx()))); | 5678 FieldOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5677 __ bind(&done); | 5679 __ bind(&done); |
| 5678 __ test(result, result); | 5680 __ test(result, result); |
| 5679 DeoptimizeIf(equal, instr, "no cache"); | 5681 DeoptimizeIf(equal, instr, Deoptimizer::kNoCache); |
| 5680 } | 5682 } |
| 5681 | 5683 |
| 5682 | 5684 |
| 5683 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5685 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5684 Register object = ToRegister(instr->value()); | 5686 Register object = ToRegister(instr->value()); |
| 5685 __ cmp(ToRegister(instr->map()), | 5687 __ cmp(ToRegister(instr->map()), |
| 5686 FieldOperand(object, HeapObject::kMapOffset)); | 5688 FieldOperand(object, HeapObject::kMapOffset)); |
| 5687 DeoptimizeIf(not_equal, instr, "wrong map"); | 5689 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); |
| 5688 } | 5690 } |
| 5689 | 5691 |
| 5690 | 5692 |
| 5691 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5693 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5692 Register object, | 5694 Register object, |
| 5693 Register index) { | 5695 Register index) { |
| 5694 PushSafepointRegistersScope scope(this); | 5696 PushSafepointRegistersScope scope(this); |
| 5695 __ push(object); | 5697 __ push(object); |
| 5696 __ push(index); | 5698 __ push(index); |
| 5697 __ xor_(esi, esi); | 5699 __ xor_(esi, esi); |
| (...skipping 74 matching lines...) |
| 5772 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5774 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 5773 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5775 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 5774 } | 5776 } |
| 5775 | 5777 |
| 5776 | 5778 |
| 5777 #undef __ | 5779 #undef __ |
| 5778 | 5780 |
| 5779 } } // namespace v8::internal | 5781 } } // namespace v8::internal |
| 5780 | 5782 |
| 5781 #endif // V8_TARGET_ARCH_IA32 | 5783 #endif // V8_TARGET_ARCH_IA32 |