| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/crankshaft/arm/lithium-codegen-arm.h" | 5 #include "src/crankshaft/arm/lithium-codegen-arm.h" |
| 6 | 6 |
| 7 #include "src/base/bits.h" | 7 #include "src/base/bits.h" |
| 8 #include "src/code-factory.h" | 8 #include "src/code-factory.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/crankshaft/arm/lithium-gap-resolver-arm.h" | 10 #include "src/crankshaft/arm/lithium-gap-resolver-arm.h" |
| (...skipping 751 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 762 WriteTranslation(environment, &translation); | 762 WriteTranslation(environment, &translation); |
| 763 int deoptimization_index = deoptimizations_.length(); | 763 int deoptimization_index = deoptimizations_.length(); |
| 764 int pc_offset = masm()->pc_offset(); | 764 int pc_offset = masm()->pc_offset(); |
| 765 environment->Register(deoptimization_index, | 765 environment->Register(deoptimization_index, |
| 766 translation.index(), | 766 translation.index(), |
| 767 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 767 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 768 deoptimizations_.Add(environment, zone()); | 768 deoptimizations_.Add(environment, zone()); |
| 769 } | 769 } |
| 770 } | 770 } |
| 771 | 771 |
| 772 | |
| 773 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 772 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 774 Deoptimizer::DeoptReason deopt_reason, | 773 DeoptimizeReason deopt_reason, |
| 775 Deoptimizer::BailoutType bailout_type) { | 774 Deoptimizer::BailoutType bailout_type) { |
| 776 LEnvironment* environment = instr->environment(); | 775 LEnvironment* environment = instr->environment(); |
| 777 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 776 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 778 DCHECK(environment->HasBeenRegistered()); | 777 DCHECK(environment->HasBeenRegistered()); |
| 779 int id = environment->deoptimization_index(); | 778 int id = environment->deoptimization_index(); |
| 780 Address entry = | 779 Address entry = |
| 781 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 780 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 782 if (entry == NULL) { | 781 if (entry == NULL) { |
| 783 Abort(kBailoutWasNotPrepared); | 782 Abort(kBailoutWasNotPrepared); |
| 784 return; | 783 return; |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 839 // jump entry if this is the case. | 838 // jump entry if this is the case. |
| 840 if (FLAG_trace_deopt || isolate()->is_profiling() || | 839 if (FLAG_trace_deopt || isolate()->is_profiling() || |
| 841 jump_table_.is_empty() || | 840 jump_table_.is_empty() || |
| 842 !table_entry.IsEquivalentTo(jump_table_.last())) { | 841 !table_entry.IsEquivalentTo(jump_table_.last())) { |
| 843 jump_table_.Add(table_entry, zone()); | 842 jump_table_.Add(table_entry, zone()); |
| 844 } | 843 } |
| 845 __ b(condition, &jump_table_.last().label); | 844 __ b(condition, &jump_table_.last().label); |
| 846 } | 845 } |
| 847 } | 846 } |
| 848 | 847 |
| 849 | |
| 850 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 848 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 851 Deoptimizer::DeoptReason deopt_reason) { | 849 DeoptimizeReason deopt_reason) { |
| 852 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 850 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| 853 ? Deoptimizer::LAZY | 851 ? Deoptimizer::LAZY |
| 854 : Deoptimizer::EAGER; | 852 : Deoptimizer::EAGER; |
| 855 DeoptimizeIf(condition, instr, deopt_reason, bailout_type); | 853 DeoptimizeIf(condition, instr, deopt_reason, bailout_type); |
| 856 } | 854 } |
| 857 | 855 |
| 858 | 856 |
| 859 void LCodeGen::RecordSafepointWithLazyDeopt( | 857 void LCodeGen::RecordSafepointWithLazyDeopt( |
| 860 LInstruction* instr, SafepointMode safepoint_mode) { | 858 LInstruction* instr, SafepointMode safepoint_mode) { |
| 861 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { | 859 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { |
| (...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 974 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 972 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 975 Label dividend_is_not_negative, done; | 973 Label dividend_is_not_negative, done; |
| 976 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 974 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 977 __ cmp(dividend, Operand::Zero()); | 975 __ cmp(dividend, Operand::Zero()); |
| 978 __ b(pl, ÷nd_is_not_negative); | 976 __ b(pl, ÷nd_is_not_negative); |
| 979 // Note that this is correct even for kMinInt operands. | 977 // Note that this is correct even for kMinInt operands. |
| 980 __ rsb(dividend, dividend, Operand::Zero()); | 978 __ rsb(dividend, dividend, Operand::Zero()); |
| 981 __ and_(dividend, dividend, Operand(mask)); | 979 __ and_(dividend, dividend, Operand(mask)); |
| 982 __ rsb(dividend, dividend, Operand::Zero(), SetCC); | 980 __ rsb(dividend, dividend, Operand::Zero(), SetCC); |
| 983 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 981 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 984 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 982 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 985 } | 983 } |
| 986 __ b(&done); | 984 __ b(&done); |
| 987 } | 985 } |
| 988 | 986 |
| 989 __ bind(÷nd_is_not_negative); | 987 __ bind(÷nd_is_not_negative); |
| 990 __ and_(dividend, dividend, Operand(mask)); | 988 __ and_(dividend, dividend, Operand(mask)); |
| 991 __ bind(&done); | 989 __ bind(&done); |
| 992 } | 990 } |
| 993 | 991 |
| 994 | 992 |
| 995 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 993 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 996 Register dividend = ToRegister(instr->dividend()); | 994 Register dividend = ToRegister(instr->dividend()); |
| 997 int32_t divisor = instr->divisor(); | 995 int32_t divisor = instr->divisor(); |
| 998 Register result = ToRegister(instr->result()); | 996 Register result = ToRegister(instr->result()); |
| 999 DCHECK(!dividend.is(result)); | 997 DCHECK(!dividend.is(result)); |
| 1000 | 998 |
| 1001 if (divisor == 0) { | 999 if (divisor == 0) { |
| 1002 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1000 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); |
| 1003 return; | 1001 return; |
| 1004 } | 1002 } |
| 1005 | 1003 |
| 1006 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1004 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1007 __ mov(ip, Operand(Abs(divisor))); | 1005 __ mov(ip, Operand(Abs(divisor))); |
| 1008 __ smull(result, ip, result, ip); | 1006 __ smull(result, ip, result, ip); |
| 1009 __ sub(result, dividend, result, SetCC); | 1007 __ sub(result, dividend, result, SetCC); |
| 1010 | 1008 |
| 1011 // Check for negative zero. | 1009 // Check for negative zero. |
| 1012 HMod* hmod = instr->hydrogen(); | 1010 HMod* hmod = instr->hydrogen(); |
| 1013 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1011 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1014 Label remainder_not_zero; | 1012 Label remainder_not_zero; |
| 1015 __ b(ne, &remainder_not_zero); | 1013 __ b(ne, &remainder_not_zero); |
| 1016 __ cmp(dividend, Operand::Zero()); | 1014 __ cmp(dividend, Operand::Zero()); |
| 1017 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 1015 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 1018 __ bind(&remainder_not_zero); | 1016 __ bind(&remainder_not_zero); |
| 1019 } | 1017 } |
| 1020 } | 1018 } |
| 1021 | 1019 |
| 1022 | 1020 |
| 1023 void LCodeGen::DoModI(LModI* instr) { | 1021 void LCodeGen::DoModI(LModI* instr) { |
| 1024 HMod* hmod = instr->hydrogen(); | 1022 HMod* hmod = instr->hydrogen(); |
| 1025 if (CpuFeatures::IsSupported(SUDIV)) { | 1023 if (CpuFeatures::IsSupported(SUDIV)) { |
| 1026 CpuFeatureScope scope(masm(), SUDIV); | 1024 CpuFeatureScope scope(masm(), SUDIV); |
| 1027 | 1025 |
| 1028 Register left_reg = ToRegister(instr->left()); | 1026 Register left_reg = ToRegister(instr->left()); |
| 1029 Register right_reg = ToRegister(instr->right()); | 1027 Register right_reg = ToRegister(instr->right()); |
| 1030 Register result_reg = ToRegister(instr->result()); | 1028 Register result_reg = ToRegister(instr->result()); |
| 1031 | 1029 |
| 1032 Label done; | 1030 Label done; |
| 1033 // Check for x % 0, sdiv might signal an exception. We have to deopt in this | 1031 // Check for x % 0, sdiv might signal an exception. We have to deopt in this |
| 1034 // case because we can't return a NaN. | 1032 // case because we can't return a NaN. |
| 1035 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1033 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1036 __ cmp(right_reg, Operand::Zero()); | 1034 __ cmp(right_reg, Operand::Zero()); |
| 1037 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1035 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); |
| 1038 } | 1036 } |
| 1039 | 1037 |
| 1040 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we | 1038 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we |
| 1041 // want. We have to deopt if we care about -0, because we can't return that. | 1039 // want. We have to deopt if we care about -0, because we can't return that. |
| 1042 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1040 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 1043 Label no_overflow_possible; | 1041 Label no_overflow_possible; |
| 1044 __ cmp(left_reg, Operand(kMinInt)); | 1042 __ cmp(left_reg, Operand(kMinInt)); |
| 1045 __ b(ne, &no_overflow_possible); | 1043 __ b(ne, &no_overflow_possible); |
| 1046 __ cmp(right_reg, Operand(-1)); | 1044 __ cmp(right_reg, Operand(-1)); |
| 1047 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1045 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1048 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1046 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1049 } else { | 1047 } else { |
| 1050 __ b(ne, &no_overflow_possible); | 1048 __ b(ne, &no_overflow_possible); |
| 1051 __ mov(result_reg, Operand::Zero()); | 1049 __ mov(result_reg, Operand::Zero()); |
| 1052 __ jmp(&done); | 1050 __ jmp(&done); |
| 1053 } | 1051 } |
| 1054 __ bind(&no_overflow_possible); | 1052 __ bind(&no_overflow_possible); |
| 1055 } | 1053 } |
| 1056 | 1054 |
| 1057 // For 'r3 = r1 % r2' we can have the following ARM code: | 1055 // For 'r3 = r1 % r2' we can have the following ARM code: |
| 1058 // sdiv r3, r1, r2 | 1056 // sdiv r3, r1, r2 |
| 1059 // mls r3, r3, r2, r1 | 1057 // mls r3, r3, r2, r1 |
| 1060 | 1058 |
| 1061 __ sdiv(result_reg, left_reg, right_reg); | 1059 __ sdiv(result_reg, left_reg, right_reg); |
| 1062 __ Mls(result_reg, result_reg, right_reg, left_reg); | 1060 __ Mls(result_reg, result_reg, right_reg, left_reg); |
| 1063 | 1061 |
| 1064 // If we care about -0, test if the dividend is <0 and the result is 0. | 1062 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1065 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1063 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1066 __ cmp(result_reg, Operand::Zero()); | 1064 __ cmp(result_reg, Operand::Zero()); |
| 1067 __ b(ne, &done); | 1065 __ b(ne, &done); |
| 1068 __ cmp(left_reg, Operand::Zero()); | 1066 __ cmp(left_reg, Operand::Zero()); |
| 1069 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 1067 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); |
| 1070 } | 1068 } |
| 1071 __ bind(&done); | 1069 __ bind(&done); |
| 1072 | 1070 |
| 1073 } else { | 1071 } else { |
| 1074 // General case, without any SDIV support. | 1072 // General case, without any SDIV support. |
| 1075 Register left_reg = ToRegister(instr->left()); | 1073 Register left_reg = ToRegister(instr->left()); |
| 1076 Register right_reg = ToRegister(instr->right()); | 1074 Register right_reg = ToRegister(instr->right()); |
| 1077 Register result_reg = ToRegister(instr->result()); | 1075 Register result_reg = ToRegister(instr->result()); |
| 1078 Register scratch = scratch0(); | 1076 Register scratch = scratch0(); |
| 1079 DCHECK(!scratch.is(left_reg)); | 1077 DCHECK(!scratch.is(left_reg)); |
| 1080 DCHECK(!scratch.is(right_reg)); | 1078 DCHECK(!scratch.is(right_reg)); |
| 1081 DCHECK(!scratch.is(result_reg)); | 1079 DCHECK(!scratch.is(result_reg)); |
| 1082 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); | 1080 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); |
| 1083 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); | 1081 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); |
| 1084 DCHECK(!divisor.is(dividend)); | 1082 DCHECK(!divisor.is(dividend)); |
| 1085 LowDwVfpRegister quotient = double_scratch0(); | 1083 LowDwVfpRegister quotient = double_scratch0(); |
| 1086 DCHECK(!quotient.is(dividend)); | 1084 DCHECK(!quotient.is(dividend)); |
| 1087 DCHECK(!quotient.is(divisor)); | 1085 DCHECK(!quotient.is(divisor)); |
| 1088 | 1086 |
| 1089 Label done; | 1087 Label done; |
| 1090 // Check for x % 0, we have to deopt in this case because we can't return a | 1088 // Check for x % 0, we have to deopt in this case because we can't return a |
| 1091 // NaN. | 1089 // NaN. |
| 1092 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1090 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1093 __ cmp(right_reg, Operand::Zero()); | 1091 __ cmp(right_reg, Operand::Zero()); |
| 1094 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1092 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); |
| 1095 } | 1093 } |
| 1096 | 1094 |
| 1097 __ Move(result_reg, left_reg); | 1095 __ Move(result_reg, left_reg); |
| 1098 // Load the arguments in VFP registers. The divisor value is preloaded | 1096 // Load the arguments in VFP registers. The divisor value is preloaded |
| 1099 // before. Be careful that 'right_reg' is only live on entry. | 1097 // before. Be careful that 'right_reg' is only live on entry. |
| 1100 // TODO(svenpanne) The last comments seems to be wrong nowadays. | 1098 // TODO(svenpanne) The last comments seems to be wrong nowadays. |
| 1101 __ vmov(double_scratch0().low(), left_reg); | 1099 __ vmov(double_scratch0().low(), left_reg); |
| 1102 __ vcvt_f64_s32(dividend, double_scratch0().low()); | 1100 __ vcvt_f64_s32(dividend, double_scratch0().low()); |
| 1103 __ vmov(double_scratch0().low(), right_reg); | 1101 __ vmov(double_scratch0().low(), right_reg); |
| 1104 __ vcvt_f64_s32(divisor, double_scratch0().low()); | 1102 __ vcvt_f64_s32(divisor, double_scratch0().low()); |
| 1105 | 1103 |
| 1106 // We do not care about the sign of the divisor. Note that we still handle | 1104 // We do not care about the sign of the divisor. Note that we still handle |
| 1107 // the kMinInt % -1 case correctly, though. | 1105 // the kMinInt % -1 case correctly, though. |
| 1108 __ vabs(divisor, divisor); | 1106 __ vabs(divisor, divisor); |
| 1109 // Compute the quotient and round it to a 32bit integer. | 1107 // Compute the quotient and round it to a 32bit integer. |
| 1110 __ vdiv(quotient, dividend, divisor); | 1108 __ vdiv(quotient, dividend, divisor); |
| 1111 __ vcvt_s32_f64(quotient.low(), quotient); | 1109 __ vcvt_s32_f64(quotient.low(), quotient); |
| 1112 __ vcvt_f64_s32(quotient, quotient.low()); | 1110 __ vcvt_f64_s32(quotient, quotient.low()); |
| 1113 | 1111 |
| 1114 // Compute the remainder in result. | 1112 // Compute the remainder in result. |
| 1115 __ vmul(double_scratch0(), divisor, quotient); | 1113 __ vmul(double_scratch0(), divisor, quotient); |
| 1116 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); | 1114 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); |
| 1117 __ vmov(scratch, double_scratch0().low()); | 1115 __ vmov(scratch, double_scratch0().low()); |
| 1118 __ sub(result_reg, left_reg, scratch, SetCC); | 1116 __ sub(result_reg, left_reg, scratch, SetCC); |
| 1119 | 1117 |
| 1120 // If we care about -0, test if the dividend is <0 and the result is 0. | 1118 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1121 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1119 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1122 __ b(ne, &done); | 1120 __ b(ne, &done); |
| 1123 __ cmp(left_reg, Operand::Zero()); | 1121 __ cmp(left_reg, Operand::Zero()); |
| 1124 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); | 1122 DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); |
| 1125 } | 1123 } |
| 1126 __ bind(&done); | 1124 __ bind(&done); |
| 1127 } | 1125 } |
| 1128 } | 1126 } |
| 1129 | 1127 |
| 1130 | 1128 |
| 1131 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1129 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1132 Register dividend = ToRegister(instr->dividend()); | 1130 Register dividend = ToRegister(instr->dividend()); |
| 1133 int32_t divisor = instr->divisor(); | 1131 int32_t divisor = instr->divisor(); |
| 1134 Register result = ToRegister(instr->result()); | 1132 Register result = ToRegister(instr->result()); |
| 1135 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1133 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1136 DCHECK(!result.is(dividend)); | 1134 DCHECK(!result.is(dividend)); |
| 1137 | 1135 |
| 1138 // Check for (0 / -x) that will produce negative zero. | 1136 // Check for (0 / -x) that will produce negative zero. |
| 1139 HDiv* hdiv = instr->hydrogen(); | 1137 HDiv* hdiv = instr->hydrogen(); |
| 1140 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1138 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1141 __ cmp(dividend, Operand::Zero()); | 1139 __ cmp(dividend, Operand::Zero()); |
| 1142 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1140 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1143 } | 1141 } |
| 1144 // Check for (kMinInt / -1). | 1142 // Check for (kMinInt / -1). |
| 1145 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1143 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1146 __ cmp(dividend, Operand(kMinInt)); | 1144 __ cmp(dividend, Operand(kMinInt)); |
| 1147 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); | 1145 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); |
| 1148 } | 1146 } |
| 1149 // Deoptimize if remainder will not be 0. | 1147 // Deoptimize if remainder will not be 0. |
| 1150 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1148 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1151 divisor != 1 && divisor != -1) { | 1149 divisor != 1 && divisor != -1) { |
| 1152 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1150 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1153 __ tst(dividend, Operand(mask)); | 1151 __ tst(dividend, Operand(mask)); |
| 1154 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); | 1152 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); |
| 1155 } | 1153 } |
| 1156 | 1154 |
| 1157 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1155 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 1158 __ rsb(result, dividend, Operand(0)); | 1156 __ rsb(result, dividend, Operand(0)); |
| 1159 return; | 1157 return; |
| 1160 } | 1158 } |
| 1161 int32_t shift = WhichPowerOf2Abs(divisor); | 1159 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1162 if (shift == 0) { | 1160 if (shift == 0) { |
| 1163 __ mov(result, dividend); | 1161 __ mov(result, dividend); |
| 1164 } else if (shift == 1) { | 1162 } else if (shift == 1) { |
| 1165 __ add(result, dividend, Operand(dividend, LSR, 31)); | 1163 __ add(result, dividend, Operand(dividend, LSR, 31)); |
| 1166 } else { | 1164 } else { |
| 1167 __ mov(result, Operand(dividend, ASR, 31)); | 1165 __ mov(result, Operand(dividend, ASR, 31)); |
| 1168 __ add(result, dividend, Operand(result, LSR, 32 - shift)); | 1166 __ add(result, dividend, Operand(result, LSR, 32 - shift)); |
| 1169 } | 1167 } |
| 1170 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); | 1168 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); |
| 1171 if (divisor < 0) __ rsb(result, result, Operand(0)); | 1169 if (divisor < 0) __ rsb(result, result, Operand(0)); |
| 1172 } | 1170 } |
| 1173 | 1171 |
| 1174 | 1172 |
| 1175 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1173 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1176 Register dividend = ToRegister(instr->dividend()); | 1174 Register dividend = ToRegister(instr->dividend()); |
| 1177 int32_t divisor = instr->divisor(); | 1175 int32_t divisor = instr->divisor(); |
| 1178 Register result = ToRegister(instr->result()); | 1176 Register result = ToRegister(instr->result()); |
| 1179 DCHECK(!dividend.is(result)); | 1177 DCHECK(!dividend.is(result)); |
| 1180 | 1178 |
| 1181 if (divisor == 0) { | 1179 if (divisor == 0) { |
| 1182 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1180 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); |
| 1183 return; | 1181 return; |
| 1184 } | 1182 } |
| 1185 | 1183 |
| 1186 // Check for (0 / -x) that will produce negative zero. | 1184 // Check for (0 / -x) that will produce negative zero. |
| 1187 HDiv* hdiv = instr->hydrogen(); | 1185 HDiv* hdiv = instr->hydrogen(); |
| 1188 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1186 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1189 __ cmp(dividend, Operand::Zero()); | 1187 __ cmp(dividend, Operand::Zero()); |
| 1190 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1188 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1191 } | 1189 } |
| 1192 | 1190 |
| 1193 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1191 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1194 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1192 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
| 1195 | 1193 |
| 1196 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1194 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1197 __ mov(ip, Operand(divisor)); | 1195 __ mov(ip, Operand(divisor)); |
| 1198 __ smull(scratch0(), ip, result, ip); | 1196 __ smull(scratch0(), ip, result, ip); |
| 1199 __ sub(scratch0(), scratch0(), dividend, SetCC); | 1197 __ sub(scratch0(), scratch0(), dividend, SetCC); |
| 1200 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); | 1198 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); |
| 1201 } | 1199 } |
| 1202 } | 1200 } |
| 1203 | 1201 |
| 1204 | 1202 |
| 1205 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1203 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1206 void LCodeGen::DoDivI(LDivI* instr) { | 1204 void LCodeGen::DoDivI(LDivI* instr) { |
| 1207 HBinaryOperation* hdiv = instr->hydrogen(); | 1205 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1208 Register dividend = ToRegister(instr->dividend()); | 1206 Register dividend = ToRegister(instr->dividend()); |
| 1209 Register divisor = ToRegister(instr->divisor()); | 1207 Register divisor = ToRegister(instr->divisor()); |
| 1210 Register result = ToRegister(instr->result()); | 1208 Register result = ToRegister(instr->result()); |
| 1211 | 1209 |
| 1212 // Check for x / 0. | 1210 // Check for x / 0. |
| 1213 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1211 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1214 __ cmp(divisor, Operand::Zero()); | 1212 __ cmp(divisor, Operand::Zero()); |
| 1215 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1213 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); |
| 1216 } | 1214 } |
| 1217 | 1215 |
| 1218 // Check for (0 / -x) that will produce negative zero. | 1216 // Check for (0 / -x) that will produce negative zero. |
| 1219 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1217 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1220 Label positive; | 1218 Label positive; |
| 1221 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1219 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1222 // Do the test only if it hadn't be done above. | 1220 // Do the test only if it hadn't be done above. |
| 1223 __ cmp(divisor, Operand::Zero()); | 1221 __ cmp(divisor, Operand::Zero()); |
| 1224 } | 1222 } |
| 1225 __ b(pl, &positive); | 1223 __ b(pl, &positive); |
| 1226 __ cmp(dividend, Operand::Zero()); | 1224 __ cmp(dividend, Operand::Zero()); |
| 1227 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1225 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1228 __ bind(&positive); | 1226 __ bind(&positive); |
| 1229 } | 1227 } |
| 1230 | 1228 |
| 1231 // Check for (kMinInt / -1). | 1229 // Check for (kMinInt / -1). |
| 1232 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1230 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1233 (!CpuFeatures::IsSupported(SUDIV) || | 1231 (!CpuFeatures::IsSupported(SUDIV) || |
| 1234 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1232 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
| 1235 // We don't need to check for overflow when truncating with sdiv | 1233 // We don't need to check for overflow when truncating with sdiv |
| 1236 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1234 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
| 1237 __ cmp(dividend, Operand(kMinInt)); | 1235 __ cmp(dividend, Operand(kMinInt)); |
| 1238 __ cmp(divisor, Operand(-1), eq); | 1236 __ cmp(divisor, Operand(-1), eq); |
| 1239 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); | 1237 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); |
| 1240 } | 1238 } |
| 1241 | 1239 |
| 1242 if (CpuFeatures::IsSupported(SUDIV)) { | 1240 if (CpuFeatures::IsSupported(SUDIV)) { |
| 1243 CpuFeatureScope scope(masm(), SUDIV); | 1241 CpuFeatureScope scope(masm(), SUDIV); |
| 1244 __ sdiv(result, dividend, divisor); | 1242 __ sdiv(result, dividend, divisor); |
| 1245 } else { | 1243 } else { |
| 1246 DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1244 DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
| 1247 DoubleRegister vright = double_scratch0(); | 1245 DoubleRegister vright = double_scratch0(); |
| 1248 __ vmov(double_scratch0().low(), dividend); | 1246 __ vmov(double_scratch0().low(), dividend); |
| 1249 __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1247 __ vcvt_f64_s32(vleft, double_scratch0().low()); |
| 1250 __ vmov(double_scratch0().low(), divisor); | 1248 __ vmov(double_scratch0().low(), divisor); |
| 1251 __ vcvt_f64_s32(vright, double_scratch0().low()); | 1249 __ vcvt_f64_s32(vright, double_scratch0().low()); |
| 1252 __ vdiv(vleft, vleft, vright); // vleft now contains the result. | 1250 __ vdiv(vleft, vleft, vright); // vleft now contains the result. |
| 1253 __ vcvt_s32_f64(double_scratch0().low(), vleft); | 1251 __ vcvt_s32_f64(double_scratch0().low(), vleft); |
| 1254 __ vmov(result, double_scratch0().low()); | 1252 __ vmov(result, double_scratch0().low()); |
| 1255 } | 1253 } |
| 1256 | 1254 |
| 1257 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1255 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1258 // Compute remainder and deopt if it's not zero. | 1256 // Compute remainder and deopt if it's not zero. |
| 1259 Register remainder = scratch0(); | 1257 Register remainder = scratch0(); |
| 1260 __ Mls(remainder, result, divisor, dividend); | 1258 __ Mls(remainder, result, divisor, dividend); |
| 1261 __ cmp(remainder, Operand::Zero()); | 1259 __ cmp(remainder, Operand::Zero()); |
| 1262 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); | 1260 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision); |
| 1263 } | 1261 } |
| 1264 } | 1262 } |
| 1265 | 1263 |
| 1266 | 1264 |
| 1267 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1265 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
| 1268 DwVfpRegister addend = ToDoubleRegister(instr->addend()); | 1266 DwVfpRegister addend = ToDoubleRegister(instr->addend()); |
| 1269 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1267 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); |
| 1270 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1268 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
| 1271 | 1269 |
| 1272 // This is computed in-place. | 1270 // This is computed in-place. |
| (...skipping 30 matching lines...) Expand all Loading... |
| 1303 // can simply do an arithmetic right shift. | 1301 // can simply do an arithmetic right shift. |
| 1304 int32_t shift = WhichPowerOf2Abs(divisor); | 1302 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1305 if (divisor > 1) { | 1303 if (divisor > 1) { |
| 1306 __ mov(result, Operand(dividend, ASR, shift)); | 1304 __ mov(result, Operand(dividend, ASR, shift)); |
| 1307 return; | 1305 return; |
| 1308 } | 1306 } |
| 1309 | 1307 |
| 1310 // If the divisor is negative, we have to negate and handle edge cases. | 1308 // If the divisor is negative, we have to negate and handle edge cases. |
| 1311 __ rsb(result, dividend, Operand::Zero(), SetCC); | 1309 __ rsb(result, dividend, Operand::Zero(), SetCC); |
| 1312 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1310 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1313 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1311 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1314 } | 1312 } |
| 1315 | 1313 |
| 1316 // Dividing by -1 is basically negation, unless we overflow. | 1314 // Dividing by -1 is basically negation, unless we overflow. |
| 1317 if (divisor == -1) { | 1315 if (divisor == -1) { |
| 1318 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1316 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1319 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1317 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); |
| 1320 } | 1318 } |
| 1321 return; | 1319 return; |
| 1322 } | 1320 } |
| 1323 | 1321 |
| 1324 // If the negation could not overflow, simply shifting is OK. | 1322 // If the negation could not overflow, simply shifting is OK. |
| 1325 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1323 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1326 __ mov(result, Operand(result, ASR, shift)); | 1324 __ mov(result, Operand(result, ASR, shift)); |
| 1327 return; | 1325 return; |
| 1328 } | 1326 } |
| 1329 | 1327 |
| 1330 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); | 1328 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); |
| 1331 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); | 1329 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); |
| 1332 } | 1330 } |
| 1333 | 1331 |
| 1334 | 1332 |
| 1335 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1333 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1336 Register dividend = ToRegister(instr->dividend()); | 1334 Register dividend = ToRegister(instr->dividend()); |
| 1337 int32_t divisor = instr->divisor(); | 1335 int32_t divisor = instr->divisor(); |
| 1338 Register result = ToRegister(instr->result()); | 1336 Register result = ToRegister(instr->result()); |
| 1339 DCHECK(!dividend.is(result)); | 1337 DCHECK(!dividend.is(result)); |
| 1340 | 1338 |
| 1341 if (divisor == 0) { | 1339 if (divisor == 0) { |
| 1342 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1340 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); |
| 1343 return; | 1341 return; |
| 1344 } | 1342 } |
| 1345 | 1343 |
| 1346 // Check for (0 / -x) that will produce negative zero. | 1344 // Check for (0 / -x) that will produce negative zero. |
| 1347 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1345 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1348 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1346 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1349 __ cmp(dividend, Operand::Zero()); | 1347 __ cmp(dividend, Operand::Zero()); |
| 1350 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1348 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1351 } | 1349 } |
| 1352 | 1350 |
| 1353 // Easy case: We need no dynamic check for the dividend and the flooring | 1351 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1354 // division is the same as the truncating division. | 1352 // division is the same as the truncating division. |
| 1355 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1353 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1356 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1354 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1357 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1355 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1358 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1356 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
| 1359 return; | 1357 return; |
| 1360 } | 1358 } |
| (...skipping 20 matching lines...) Expand all Loading... |
| 1381 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. | 1379 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. |
| 1382 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { | 1380 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { |
| 1383 HBinaryOperation* hdiv = instr->hydrogen(); | 1381 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1384 Register left = ToRegister(instr->dividend()); | 1382 Register left = ToRegister(instr->dividend()); |
| 1385 Register right = ToRegister(instr->divisor()); | 1383 Register right = ToRegister(instr->divisor()); |
| 1386 Register result = ToRegister(instr->result()); | 1384 Register result = ToRegister(instr->result()); |
| 1387 | 1385 |
| 1388 // Check for x / 0. | 1386 // Check for x / 0. |
| 1389 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1387 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1390 __ cmp(right, Operand::Zero()); | 1388 __ cmp(right, Operand::Zero()); |
| 1391 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1389 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero); |
| 1392 } | 1390 } |
| 1393 | 1391 |
| 1394 // Check for (0 / -x) that will produce negative zero. | 1392 // Check for (0 / -x) that will produce negative zero. |
| 1395 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1393 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1396 Label positive; | 1394 Label positive; |
| 1397 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1395 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1398 // Do the test only if it hadn't be done above. | 1396 // Do the test only if it hadn't be done above. |
| 1399 __ cmp(right, Operand::Zero()); | 1397 __ cmp(right, Operand::Zero()); |
| 1400 } | 1398 } |
| 1401 __ b(pl, &positive); | 1399 __ b(pl, &positive); |
| 1402 __ cmp(left, Operand::Zero()); | 1400 __ cmp(left, Operand::Zero()); |
| 1403 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1401 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1404 __ bind(&positive); | 1402 __ bind(&positive); |
| 1405 } | 1403 } |
| 1406 | 1404 |
| 1407 // Check for (kMinInt / -1). | 1405 // Check for (kMinInt / -1). |
| 1408 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1406 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1409 (!CpuFeatures::IsSupported(SUDIV) || | 1407 (!CpuFeatures::IsSupported(SUDIV) || |
| 1410 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1408 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
| 1411 // We don't need to check for overflow when truncating with sdiv | 1409 // We don't need to check for overflow when truncating with sdiv |
| 1412 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1410 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
| 1413 __ cmp(left, Operand(kMinInt)); | 1411 __ cmp(left, Operand(kMinInt)); |
| 1414 __ cmp(right, Operand(-1), eq); | 1412 __ cmp(right, Operand(-1), eq); |
| 1415 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); | 1413 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); |
| 1416 } | 1414 } |
| 1417 | 1415 |
| 1418 if (CpuFeatures::IsSupported(SUDIV)) { | 1416 if (CpuFeatures::IsSupported(SUDIV)) { |
| 1419 CpuFeatureScope scope(masm(), SUDIV); | 1417 CpuFeatureScope scope(masm(), SUDIV); |
| 1420 __ sdiv(result, left, right); | 1418 __ sdiv(result, left, right); |
| 1421 } else { | 1419 } else { |
| 1422 DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1420 DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
| 1423 DoubleRegister vright = double_scratch0(); | 1421 DoubleRegister vright = double_scratch0(); |
| 1424 __ vmov(double_scratch0().low(), left); | 1422 __ vmov(double_scratch0().low(), left); |
| 1425 __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1423 __ vcvt_f64_s32(vleft, double_scratch0().low()); |
| (...skipping 25 matching lines...) Expand all Loading... |
| 1451 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1449 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 1452 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1450 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1453 | 1451 |
| 1454 if (right_op->IsConstantOperand()) { | 1452 if (right_op->IsConstantOperand()) { |
| 1455 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1453 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 1456 | 1454 |
| 1457 if (bailout_on_minus_zero && (constant < 0)) { | 1455 if (bailout_on_minus_zero && (constant < 0)) { |
| 1458 // The case of a null constant will be handled separately. | 1456 // The case of a null constant will be handled separately. |
| 1459 // If constant is negative and left is null, the result should be -0. | 1457 // If constant is negative and left is null, the result should be -0. |
| 1460 __ cmp(left, Operand::Zero()); | 1458 __ cmp(left, Operand::Zero()); |
| 1461 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1459 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1462 } | 1460 } |
| 1463 | 1461 |
| 1464 switch (constant) { | 1462 switch (constant) { |
| 1465 case -1: | 1463 case -1: |
| 1466 if (overflow) { | 1464 if (overflow) { |
| 1467 __ rsb(result, left, Operand::Zero(), SetCC); | 1465 __ rsb(result, left, Operand::Zero(), SetCC); |
| 1468 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1466 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); |
| 1469 } else { | 1467 } else { |
| 1470 __ rsb(result, left, Operand::Zero()); | 1468 __ rsb(result, left, Operand::Zero()); |
| 1471 } | 1469 } |
| 1472 break; | 1470 break; |
| 1473 case 0: | 1471 case 0: |
| 1474 if (bailout_on_minus_zero) { | 1472 if (bailout_on_minus_zero) { |
| 1475 // If left is strictly negative and the constant is null, the | 1473 // If left is strictly negative and the constant is null, the |
| 1476 // result is -0. Deoptimize if required, otherwise return 0. | 1474 // result is -0. Deoptimize if required, otherwise return 0. |
| 1477 __ cmp(left, Operand::Zero()); | 1475 __ cmp(left, Operand::Zero()); |
| 1478 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); | 1476 DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); |
| 1479 } | 1477 } |
| 1480 __ mov(result, Operand::Zero()); | 1478 __ mov(result, Operand::Zero()); |
| 1481 break; | 1479 break; |
| 1482 case 1: | 1480 case 1: |
| 1483 __ Move(result, left); | 1481 __ Move(result, left); |
| 1484 break; | 1482 break; |
| 1485 default: | 1483 default: |
| 1486 // Multiplying by powers of two and powers of two plus or minus | 1484 // Multiplying by powers of two and powers of two plus or minus |
| 1487 // one can be done faster with shifted operands. | 1485 // one can be done faster with shifted operands. |
| 1488 // For other constants we emit standard code. | 1486 // For other constants we emit standard code. |
| (...skipping 29 matching lines...) Expand all Loading... |
| 1518 if (overflow) { | 1516 if (overflow) { |
| 1519 Register scratch = scratch0(); | 1517 Register scratch = scratch0(); |
| 1520 // scratch:result = left * right. | 1518 // scratch:result = left * right. |
| 1521 if (instr->hydrogen()->representation().IsSmi()) { | 1519 if (instr->hydrogen()->representation().IsSmi()) { |
| 1522 __ SmiUntag(result, left); | 1520 __ SmiUntag(result, left); |
| 1523 __ smull(result, scratch, result, right); | 1521 __ smull(result, scratch, result, right); |
| 1524 } else { | 1522 } else { |
| 1525 __ smull(result, scratch, left, right); | 1523 __ smull(result, scratch, left, right); |
| 1526 } | 1524 } |
| 1527 __ cmp(scratch, Operand(result, ASR, 31)); | 1525 __ cmp(scratch, Operand(result, ASR, 31)); |
| 1528 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | 1526 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); |
| 1529 } else { | 1527 } else { |
| 1530 if (instr->hydrogen()->representation().IsSmi()) { | 1528 if (instr->hydrogen()->representation().IsSmi()) { |
| 1531 __ SmiUntag(result, left); | 1529 __ SmiUntag(result, left); |
| 1532 __ mul(result, result, right); | 1530 __ mul(result, result, right); |
| 1533 } else { | 1531 } else { |
| 1534 __ mul(result, left, right); | 1532 __ mul(result, left, right); |
| 1535 } | 1533 } |
| 1536 } | 1534 } |
| 1537 | 1535 |
| 1538 if (bailout_on_minus_zero) { | 1536 if (bailout_on_minus_zero) { |
| 1539 Label done; | 1537 Label done; |
| 1540 __ teq(left, Operand(right)); | 1538 __ teq(left, Operand(right)); |
| 1541 __ b(pl, &done); | 1539 __ b(pl, &done); |
| 1542 // Bail out if the result is minus zero. | 1540 // Bail out if the result is minus zero. |
| 1543 __ cmp(result, Operand::Zero()); | 1541 __ cmp(result, Operand::Zero()); |
| 1544 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1542 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 1545 __ bind(&done); | 1543 __ bind(&done); |
| 1546 } | 1544 } |
| 1547 } | 1545 } |
| 1548 } | 1546 } |
| 1549 | 1547 |
| 1550 | 1548 |
| 1551 void LCodeGen::DoBitI(LBitI* instr) { | 1549 void LCodeGen::DoBitI(LBitI* instr) { |
| 1552 LOperand* left_op = instr->left(); | 1550 LOperand* left_op = instr->left(); |
| 1553 LOperand* right_op = instr->right(); | 1551 LOperand* right_op = instr->right(); |
| 1554 DCHECK(left_op->IsRegister()); | 1552 DCHECK(left_op->IsRegister()); |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1597 switch (instr->op()) { | 1595 switch (instr->op()) { |
| 1598 case Token::ROR: | 1596 case Token::ROR: |
| 1599 __ mov(result, Operand(left, ROR, scratch)); | 1597 __ mov(result, Operand(left, ROR, scratch)); |
| 1600 break; | 1598 break; |
| 1601 case Token::SAR: | 1599 case Token::SAR: |
| 1602 __ mov(result, Operand(left, ASR, scratch)); | 1600 __ mov(result, Operand(left, ASR, scratch)); |
| 1603 break; | 1601 break; |
| 1604 case Token::SHR: | 1602 case Token::SHR: |
| 1605 if (instr->can_deopt()) { | 1603 if (instr->can_deopt()) { |
| 1606 __ mov(result, Operand(left, LSR, scratch), SetCC); | 1604 __ mov(result, Operand(left, LSR, scratch), SetCC); |
| 1607 DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue); | 1605 DeoptimizeIf(mi, instr, DeoptimizeReason::kNegativeValue); |
| 1608 } else { | 1606 } else { |
| 1609 __ mov(result, Operand(left, LSR, scratch)); | 1607 __ mov(result, Operand(left, LSR, scratch)); |
| 1610 } | 1608 } |
| 1611 break; | 1609 break; |
| 1612 case Token::SHL: | 1610 case Token::SHL: |
| 1613 __ mov(result, Operand(left, LSL, scratch)); | 1611 __ mov(result, Operand(left, LSL, scratch)); |
| 1614 break; | 1612 break; |
| 1615 default: | 1613 default: |
| 1616 UNREACHABLE(); | 1614 UNREACHABLE(); |
| 1617 break; | 1615 break; |
| (...skipping 16 matching lines...) Expand all Loading... |
| 1634 } else { | 1632 } else { |
| 1635 __ Move(result, left); | 1633 __ Move(result, left); |
| 1636 } | 1634 } |
| 1637 break; | 1635 break; |
| 1638 case Token::SHR: | 1636 case Token::SHR: |
| 1639 if (shift_count != 0) { | 1637 if (shift_count != 0) { |
| 1640 __ mov(result, Operand(left, LSR, shift_count)); | 1638 __ mov(result, Operand(left, LSR, shift_count)); |
| 1641 } else { | 1639 } else { |
| 1642 if (instr->can_deopt()) { | 1640 if (instr->can_deopt()) { |
| 1643 __ tst(left, Operand(0x80000000)); | 1641 __ tst(left, Operand(0x80000000)); |
| 1644 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue); | 1642 DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue); |
| 1645 } | 1643 } |
| 1646 __ Move(result, left); | 1644 __ Move(result, left); |
| 1647 } | 1645 } |
| 1648 break; | 1646 break; |
| 1649 case Token::SHL: | 1647 case Token::SHL: |
| 1650 if (shift_count != 0) { | 1648 if (shift_count != 0) { |
| 1651 if (instr->hydrogen_value()->representation().IsSmi() && | 1649 if (instr->hydrogen_value()->representation().IsSmi() && |
| 1652 instr->can_deopt()) { | 1650 instr->can_deopt()) { |
| 1653 if (shift_count != 1) { | 1651 if (shift_count != 1) { |
| 1654 __ mov(result, Operand(left, LSL, shift_count - 1)); | 1652 __ mov(result, Operand(left, LSL, shift_count - 1)); |
| 1655 __ SmiTag(result, result, SetCC); | 1653 __ SmiTag(result, result, SetCC); |
| 1656 } else { | 1654 } else { |
| 1657 __ SmiTag(result, left, SetCC); | 1655 __ SmiTag(result, left, SetCC); |
| 1658 } | 1656 } |
| 1659 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1657 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); |
| 1660 } else { | 1658 } else { |
| 1661 __ mov(result, Operand(left, LSL, shift_count)); | 1659 __ mov(result, Operand(left, LSL, shift_count)); |
| 1662 } | 1660 } |
| 1663 } else { | 1661 } else { |
| 1664 __ Move(result, left); | 1662 __ Move(result, left); |
| 1665 } | 1663 } |
| 1666 break; | 1664 break; |
| 1667 default: | 1665 default: |
| 1668 UNREACHABLE(); | 1666 UNREACHABLE(); |
| 1669 break; | 1667 break; |
| (...skipping 11 matching lines...) Expand all Loading... |
| 1681 | 1679 |
| 1682 if (right->IsStackSlot()) { | 1680 if (right->IsStackSlot()) { |
| 1683 Register right_reg = EmitLoadRegister(right, ip); | 1681 Register right_reg = EmitLoadRegister(right, ip); |
| 1684 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1682 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
| 1685 } else { | 1683 } else { |
| 1686 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1684 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
| 1687 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1685 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
| 1688 } | 1686 } |
| 1689 | 1687 |
| 1690 if (can_overflow) { | 1688 if (can_overflow) { |
| 1691 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1689 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); |
| 1692 } | 1690 } |
| 1693 } | 1691 } |
| 1694 | 1692 |
| 1695 | 1693 |
| 1696 void LCodeGen::DoRSubI(LRSubI* instr) { | 1694 void LCodeGen::DoRSubI(LRSubI* instr) { |
| 1697 LOperand* left = instr->left(); | 1695 LOperand* left = instr->left(); |
| 1698 LOperand* right = instr->right(); | 1696 LOperand* right = instr->right(); |
| 1699 LOperand* result = instr->result(); | 1697 LOperand* result = instr->result(); |
| 1700 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1698 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1701 SBit set_cond = can_overflow ? SetCC : LeaveCC; | 1699 SBit set_cond = can_overflow ? SetCC : LeaveCC; |
| 1702 | 1700 |
| 1703 if (right->IsStackSlot()) { | 1701 if (right->IsStackSlot()) { |
| 1704 Register right_reg = EmitLoadRegister(right, ip); | 1702 Register right_reg = EmitLoadRegister(right, ip); |
| 1705 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1703 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
| 1706 } else { | 1704 } else { |
| 1707 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1705 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
| 1708 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1706 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
| 1709 } | 1707 } |
| 1710 | 1708 |
| 1711 if (can_overflow) { | 1709 if (can_overflow) { |
| 1712 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1710 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); |
| 1713 } | 1711 } |
| 1714 } | 1712 } |
| 1715 | 1713 |
| 1716 | 1714 |
| 1717 void LCodeGen::DoConstantI(LConstantI* instr) { | 1715 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 1718 __ mov(ToRegister(instr->result()), Operand(instr->value())); | 1716 __ mov(ToRegister(instr->result()), Operand(instr->value())); |
| 1719 } | 1717 } |
| 1720 | 1718 |
| 1721 | 1719 |
| 1722 void LCodeGen::DoConstantS(LConstantS* instr) { | 1720 void LCodeGen::DoConstantS(LConstantS* instr) { |
| (...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1843 | 1841 |
| 1844 if (right->IsStackSlot()) { | 1842 if (right->IsStackSlot()) { |
| 1845 Register right_reg = EmitLoadRegister(right, ip); | 1843 Register right_reg = EmitLoadRegister(right, ip); |
| 1846 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1844 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
| 1847 } else { | 1845 } else { |
| 1848 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1846 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
| 1849 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1847 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
| 1850 } | 1848 } |
| 1851 | 1849 |
| 1852 if (can_overflow) { | 1850 if (can_overflow) { |
| 1853 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1851 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); |
| 1854 } | 1852 } |
| 1855 } | 1853 } |
| 1856 | 1854 |
| 1857 | 1855 |
| 1858 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1856 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 1859 LOperand* left = instr->left(); | 1857 LOperand* left = instr->left(); |
| 1860 LOperand* right = instr->right(); | 1858 LOperand* right = instr->right(); |
| 1861 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1859 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| 1862 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 1860 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
| 1863 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 1861 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; |
| (...skipping 218 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2082 } | 2080 } |
| 2083 | 2081 |
| 2084 if (expected.Contains(ToBooleanICStub::SMI)) { | 2082 if (expected.Contains(ToBooleanICStub::SMI)) { |
| 2085 // Smis: 0 -> false, all other -> true. | 2083 // Smis: 0 -> false, all other -> true. |
| 2086 __ cmp(reg, Operand::Zero()); | 2084 __ cmp(reg, Operand::Zero()); |
| 2087 __ b(eq, instr->FalseLabel(chunk_)); | 2085 __ b(eq, instr->FalseLabel(chunk_)); |
| 2088 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2086 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2089 } else if (expected.NeedsMap()) { | 2087 } else if (expected.NeedsMap()) { |
| 2090 // If we need a map later and have a Smi -> deopt. | 2088 // If we need a map later and have a Smi -> deopt. |
| 2091 __ SmiTst(reg); | 2089 __ SmiTst(reg); |
| 2092 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); | 2090 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi); |
| 2093 } | 2091 } |
| 2094 | 2092 |
| 2095 const Register map = scratch0(); | 2093 const Register map = scratch0(); |
| 2096 if (expected.NeedsMap()) { | 2094 if (expected.NeedsMap()) { |
| 2097 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2095 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 2098 | 2096 |
| 2099 if (expected.CanBeUndetectable()) { | 2097 if (expected.CanBeUndetectable()) { |
| 2100 // Undetectable -> false. | 2098 // Undetectable -> false. |
| 2101 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | 2099 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 2102 __ tst(ip, Operand(1 << Map::kIsUndetectable)); | 2100 __ tst(ip, Operand(1 << Map::kIsUndetectable)); |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2144 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 2142 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); |
| 2145 __ cmp(r0, r0, vs); // NaN -> false. | 2143 __ cmp(r0, r0, vs); // NaN -> false. |
| 2146 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. | 2144 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. |
| 2147 __ b(instr->TrueLabel(chunk_)); | 2145 __ b(instr->TrueLabel(chunk_)); |
| 2148 __ bind(¬_heap_number); | 2146 __ bind(¬_heap_number); |
| 2149 } | 2147 } |
| 2150 | 2148 |
| 2151 if (!expected.IsGeneric()) { | 2149 if (!expected.IsGeneric()) { |
| 2152 // We've seen something for the first time -> deopt. | 2150 // We've seen something for the first time -> deopt. |
| 2153 // This can only happen if we are not generic already. | 2151 // This can only happen if we are not generic already. |
| 2154 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject); | 2152 DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject); |
| 2155 } | 2153 } |
| 2156 } | 2154 } |
| 2157 } | 2155 } |
| 2158 } | 2156 } |
| 2159 | 2157 |
| 2160 | 2158 |
| 2161 void LCodeGen::EmitGoto(int block) { | 2159 void LCodeGen::EmitGoto(int block) { |
| 2162 if (!IsNextEmittedBlock(block)) { | 2160 if (!IsNextEmittedBlock(block)) { |
| 2163 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2161 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| 2164 } | 2162 } |
| (...skipping 343 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2508 | 2506 |
| 2509 // Loop through the {object}s prototype chain looking for the {prototype}. | 2507 // Loop through the {object}s prototype chain looking for the {prototype}. |
| 2510 __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); | 2508 __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2511 Label loop; | 2509 Label loop; |
| 2512 __ bind(&loop); | 2510 __ bind(&loop); |
| 2513 | 2511 |
| 2514 // Deoptimize if the object needs to be access checked. | 2512 // Deoptimize if the object needs to be access checked. |
| 2515 __ ldrb(object_instance_type, | 2513 __ ldrb(object_instance_type, |
| 2516 FieldMemOperand(object_map, Map::kBitFieldOffset)); | 2514 FieldMemOperand(object_map, Map::kBitFieldOffset)); |
| 2517 __ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded)); | 2515 __ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded)); |
| 2518 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck); | 2516 DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck); |
| 2519 // Deoptimize for proxies. | 2517 // Deoptimize for proxies. |
| 2520 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); | 2518 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); |
| 2521 DeoptimizeIf(eq, instr, Deoptimizer::kProxy); | 2519 DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy); |
| 2522 | 2520 |
| 2523 __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); | 2521 __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); |
| 2524 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); | 2522 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); |
| 2525 EmitFalseBranch(instr, eq); | 2523 EmitFalseBranch(instr, eq); |
| 2526 __ cmp(object_prototype, prototype); | 2524 __ cmp(object_prototype, prototype); |
| 2527 EmitTrueBranch(instr, eq); | 2525 EmitTrueBranch(instr, eq); |
| 2528 __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); | 2526 __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); |
| 2529 __ b(&loop); | 2527 __ b(&loop); |
| 2530 } | 2528 } |
| 2531 | 2529 |
| (...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2629 | 2627 |
| 2630 | 2628 |
| 2631 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 2629 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2632 Register context = ToRegister(instr->context()); | 2630 Register context = ToRegister(instr->context()); |
| 2633 Register result = ToRegister(instr->result()); | 2631 Register result = ToRegister(instr->result()); |
| 2634 __ ldr(result, ContextMemOperand(context, instr->slot_index())); | 2632 __ ldr(result, ContextMemOperand(context, instr->slot_index())); |
| 2635 if (instr->hydrogen()->RequiresHoleCheck()) { | 2633 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2636 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2634 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2637 __ cmp(result, ip); | 2635 __ cmp(result, ip); |
| 2638 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2636 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2639 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 2637 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 2640 } else { | 2638 } else { |
| 2641 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); | 2639 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); |
| 2642 } | 2640 } |
| 2643 } | 2641 } |
| 2644 } | 2642 } |
| 2645 | 2643 |
| 2646 | 2644 |
| 2647 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 2645 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 2648 Register context = ToRegister(instr->context()); | 2646 Register context = ToRegister(instr->context()); |
| 2649 Register value = ToRegister(instr->value()); | 2647 Register value = ToRegister(instr->value()); |
| 2650 Register scratch = scratch0(); | 2648 Register scratch = scratch0(); |
| 2651 MemOperand target = ContextMemOperand(context, instr->slot_index()); | 2649 MemOperand target = ContextMemOperand(context, instr->slot_index()); |
| 2652 | 2650 |
| 2653 Label skip_assignment; | 2651 Label skip_assignment; |
| 2654 | 2652 |
| 2655 if (instr->hydrogen()->RequiresHoleCheck()) { | 2653 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2656 __ ldr(scratch, target); | 2654 __ ldr(scratch, target); |
| 2657 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2655 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2658 __ cmp(scratch, ip); | 2656 __ cmp(scratch, ip); |
| 2659 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2657 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2660 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 2658 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 2661 } else { | 2659 } else { |
| 2662 __ b(ne, &skip_assignment); | 2660 __ b(ne, &skip_assignment); |
| 2663 } | 2661 } |
| 2664 } | 2662 } |
| 2665 | 2663 |
| 2666 __ str(value, target); | 2664 __ str(value, target); |
| 2667 if (instr->hydrogen()->NeedsWriteBarrier()) { | 2665 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 2668 SmiCheck check_needed = | 2666 SmiCheck check_needed = |
| 2669 instr->hydrogen()->value()->type().IsHeapObject() | 2667 instr->hydrogen()->value()->type().IsHeapObject() |
| 2670 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 2668 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2728 Register function = ToRegister(instr->function()); | 2726 Register function = ToRegister(instr->function()); |
| 2729 Register result = ToRegister(instr->result()); | 2727 Register result = ToRegister(instr->result()); |
| 2730 | 2728 |
| 2731 // Get the prototype or initial map from the function. | 2729 // Get the prototype or initial map from the function. |
| 2732 __ ldr(result, | 2730 __ ldr(result, |
| 2733 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2731 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2734 | 2732 |
| 2735 // Check that the function has a prototype or an initial map. | 2733 // Check that the function has a prototype or an initial map. |
| 2736 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2734 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2737 __ cmp(result, ip); | 2735 __ cmp(result, ip); |
| 2738 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 2736 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 2739 | 2737 |
| 2740 // If the function does not have an initial map, we're done. | 2738 // If the function does not have an initial map, we're done. |
| 2741 Label done; | 2739 Label done; |
| 2742 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); | 2740 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); |
| 2743 __ b(ne, &done); | 2741 __ b(ne, &done); |
| 2744 | 2742 |
| 2745 // Get the prototype from the initial map. | 2743 // Get the prototype from the initial map. |
| 2746 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 2744 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 2747 | 2745 |
| 2748 // All done. | 2746 // All done. |
| (...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2842 case UINT16_ELEMENTS: | 2840 case UINT16_ELEMENTS: |
| 2843 __ ldrh(result, mem_operand); | 2841 __ ldrh(result, mem_operand); |
| 2844 break; | 2842 break; |
| 2845 case INT32_ELEMENTS: | 2843 case INT32_ELEMENTS: |
| 2846 __ ldr(result, mem_operand); | 2844 __ ldr(result, mem_operand); |
| 2847 break; | 2845 break; |
| 2848 case UINT32_ELEMENTS: | 2846 case UINT32_ELEMENTS: |
| 2849 __ ldr(result, mem_operand); | 2847 __ ldr(result, mem_operand); |
| 2850 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 2848 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 2851 __ cmp(result, Operand(0x80000000)); | 2849 __ cmp(result, Operand(0x80000000)); |
| 2852 DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue); | 2850 DeoptimizeIf(cs, instr, DeoptimizeReason::kNegativeValue); |
| 2853 } | 2851 } |
| 2854 break; | 2852 break; |
| 2855 case FLOAT32_ELEMENTS: | 2853 case FLOAT32_ELEMENTS: |
| 2856 case FLOAT64_ELEMENTS: | 2854 case FLOAT64_ELEMENTS: |
| 2857 case FAST_HOLEY_DOUBLE_ELEMENTS: | 2855 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 2858 case FAST_HOLEY_ELEMENTS: | 2856 case FAST_HOLEY_ELEMENTS: |
| 2859 case FAST_HOLEY_SMI_ELEMENTS: | 2857 case FAST_HOLEY_SMI_ELEMENTS: |
| 2860 case FAST_DOUBLE_ELEMENTS: | 2858 case FAST_DOUBLE_ELEMENTS: |
| 2861 case FAST_ELEMENTS: | 2859 case FAST_ELEMENTS: |
| 2862 case FAST_SMI_ELEMENTS: | 2860 case FAST_SMI_ELEMENTS: |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2897 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 2895 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
| 2898 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 2896 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
| 2899 __ add(scratch, scratch, Operand(key, LSL, shift_size)); | 2897 __ add(scratch, scratch, Operand(key, LSL, shift_size)); |
| 2900 } | 2898 } |
| 2901 | 2899 |
| 2902 __ vldr(result, scratch, 0); | 2900 __ vldr(result, scratch, 0); |
| 2903 | 2901 |
| 2904 if (instr->hydrogen()->RequiresHoleCheck()) { | 2902 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2905 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); | 2903 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); |
| 2906 __ cmp(scratch, Operand(kHoleNanUpper32)); | 2904 __ cmp(scratch, Operand(kHoleNanUpper32)); |
| 2907 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 2905 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 2908 } | 2906 } |
| 2909 } | 2907 } |
| 2910 | 2908 |
| 2911 | 2909 |
| 2912 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 2910 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 2913 Register elements = ToRegister(instr->elements()); | 2911 Register elements = ToRegister(instr->elements()); |
| 2914 Register result = ToRegister(instr->result()); | 2912 Register result = ToRegister(instr->result()); |
| 2915 Register scratch = scratch0(); | 2913 Register scratch = scratch0(); |
| 2916 Register store_base = scratch; | 2914 Register store_base = scratch; |
| 2917 int offset = instr->base_offset(); | 2915 int offset = instr->base_offset(); |
| (...skipping 13 matching lines...) Expand all Loading... |
| 2931 } else { | 2929 } else { |
| 2932 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); | 2930 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); |
| 2933 } | 2931 } |
| 2934 } | 2932 } |
| 2935 __ ldr(result, MemOperand(store_base, offset)); | 2933 __ ldr(result, MemOperand(store_base, offset)); |
| 2936 | 2934 |
| 2937 // Check for the hole value. | 2935 // Check for the hole value. |
| 2938 if (instr->hydrogen()->RequiresHoleCheck()) { | 2936 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2939 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 2937 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 2940 __ SmiTst(result); | 2938 __ SmiTst(result); |
| 2941 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi); | 2939 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi); |
| 2942 } else { | 2940 } else { |
| 2943 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 2941 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 2944 __ cmp(result, scratch); | 2942 __ cmp(result, scratch); |
| 2945 DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 2943 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); |
| 2946 } | 2944 } |
| 2947 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { | 2945 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { |
| 2948 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); | 2946 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); |
| 2949 Label done; | 2947 Label done; |
| 2950 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 2948 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 2951 __ cmp(result, scratch); | 2949 __ cmp(result, scratch); |
| 2952 __ b(ne, &done); | 2950 __ b(ne, &done); |
| 2953 if (info()->IsStub()) { | 2951 if (info()->IsStub()) { |
| 2954 // A stub can safely convert the hole to undefined only if the array | 2952 // A stub can safely convert the hole to undefined only if the array |
| 2955 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise | 2953 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise |
| 2956 // it needs to bail out. | 2954 // it needs to bail out. |
| 2957 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); | 2955 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); |
| 2958 __ ldr(result, FieldMemOperand(result, Cell::kValueOffset)); | 2956 __ ldr(result, FieldMemOperand(result, Cell::kValueOffset)); |
| 2959 __ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid))); | 2957 __ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid))); |
| 2960 DeoptimizeIf(ne, instr, Deoptimizer::kHole); | 2958 DeoptimizeIf(ne, instr, DeoptimizeReason::kHole); |
| 2961 } | 2959 } |
| 2962 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 2960 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 2963 __ bind(&done); | 2961 __ bind(&done); |
| 2964 } | 2962 } |
| 2965 } | 2963 } |
| 2966 | 2964 |
| 2967 | 2965 |
| 2968 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 2966 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 2969 if (instr->is_fixed_typed_array()) { | 2967 if (instr->is_fixed_typed_array()) { |
| 2970 DoLoadKeyedExternalArray(instr); | 2968 DoLoadKeyedExternalArray(instr); |
| (...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3095 // Normal function. Replace undefined or null with global receiver. | 3093 // Normal function. Replace undefined or null with global receiver. |
| 3096 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3094 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 3097 __ cmp(receiver, scratch); | 3095 __ cmp(receiver, scratch); |
| 3098 __ b(eq, &global_object); | 3096 __ b(eq, &global_object); |
| 3099 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3097 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3100 __ cmp(receiver, scratch); | 3098 __ cmp(receiver, scratch); |
| 3101 __ b(eq, &global_object); | 3099 __ b(eq, &global_object); |
| 3102 | 3100 |
| 3103 // Deoptimize if the receiver is not a JS object. | 3101 // Deoptimize if the receiver is not a JS object. |
| 3104 __ SmiTst(receiver); | 3102 __ SmiTst(receiver); |
| 3105 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); | 3103 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi); |
| 3106 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); | 3104 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); |
| 3107 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject); | 3105 DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject); |
| 3108 | 3106 |
| 3109 __ b(&result_in_receiver); | 3107 __ b(&result_in_receiver); |
| 3110 __ bind(&global_object); | 3108 __ bind(&global_object); |
| 3111 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3109 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 3112 __ ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); | 3110 __ ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); |
| 3113 __ ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); | 3111 __ ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); |
| 3114 | 3112 |
| 3115 if (result.is(receiver)) { | 3113 if (result.is(receiver)) { |
| 3116 __ bind(&result_in_receiver); | 3114 __ bind(&result_in_receiver); |
| 3117 } else { | 3115 } else { |
| (...skipping 13 matching lines...) Expand all Loading... |
| 3131 Register elements = ToRegister(instr->elements()); | 3129 Register elements = ToRegister(instr->elements()); |
| 3132 Register scratch = scratch0(); | 3130 Register scratch = scratch0(); |
| 3133 DCHECK(receiver.is(r0)); // Used for parameter count. | 3131 DCHECK(receiver.is(r0)); // Used for parameter count. |
| 3134 DCHECK(function.is(r1)); // Required by InvokeFunction. | 3132 DCHECK(function.is(r1)); // Required by InvokeFunction. |
| 3135 DCHECK(ToRegister(instr->result()).is(r0)); | 3133 DCHECK(ToRegister(instr->result()).is(r0)); |
| 3136 | 3134 |
| 3137 // Copy the arguments to this function possibly from the | 3135 // Copy the arguments to this function possibly from the |
| 3138 // adaptor frame below it. | 3136 // adaptor frame below it. |
| 3139 const uint32_t kArgumentsLimit = 1 * KB; | 3137 const uint32_t kArgumentsLimit = 1 * KB; |
| 3140 __ cmp(length, Operand(kArgumentsLimit)); | 3138 __ cmp(length, Operand(kArgumentsLimit)); |
| 3141 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments); | 3139 DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments); |
| 3142 | 3140 |
| 3143 // Push the receiver and use the register to keep the original | 3141 // Push the receiver and use the register to keep the original |
| 3144 // number of arguments. | 3142 // number of arguments. |
| 3145 __ push(receiver); | 3143 __ push(receiver); |
| 3146 __ mov(receiver, length); | 3144 __ mov(receiver, length); |
| 3147 // The arguments are at a one pointer size offset from elements. | 3145 // The arguments are at a one pointer size offset from elements. |
| 3148 __ add(elements, elements, Operand(1 * kPointerSize)); | 3146 __ add(elements, elements, Operand(1 * kPointerSize)); |
| 3149 | 3147 |
| 3150 // Loop through the arguments pushing them onto the execution | 3148 // Loop through the arguments pushing them onto the execution |
| 3151 // stack. | 3149 // stack. |
| (...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3285 DCHECK(instr->context() != NULL); | 3283 DCHECK(instr->context() != NULL); |
| 3286 DCHECK(ToRegister(instr->context()).is(cp)); | 3284 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3287 Register input = ToRegister(instr->value()); | 3285 Register input = ToRegister(instr->value()); |
| 3288 Register result = ToRegister(instr->result()); | 3286 Register result = ToRegister(instr->result()); |
| 3289 Register scratch = scratch0(); | 3287 Register scratch = scratch0(); |
| 3290 | 3288 |
| 3291 // Deoptimize if not a heap number. | 3289 // Deoptimize if not a heap number. |
| 3292 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3290 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3293 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3291 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 3294 __ cmp(scratch, Operand(ip)); | 3292 __ cmp(scratch, Operand(ip)); |
| 3295 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 3293 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); |
| 3296 | 3294 |
| 3297 Label done; | 3295 Label done; |
| 3298 Register exponent = scratch0(); | 3296 Register exponent = scratch0(); |
| 3299 scratch = no_reg; | 3297 scratch = no_reg; |
| 3300 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3298 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 3301 // Check the sign of the argument. If the argument is positive, just | 3299 // Check the sign of the argument. If the argument is positive, just |
| 3302 // return it. | 3300 // return it. |
| 3303 __ tst(exponent, Operand(HeapNumber::kSignMask)); | 3301 __ tst(exponent, Operand(HeapNumber::kSignMask)); |
| 3304 // Move the input to the result if necessary. | 3302 // Move the input to the result if necessary. |
| 3305 __ Move(result, input); | 3303 __ Move(result, input); |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3353 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3351 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3354 Register input = ToRegister(instr->value()); | 3352 Register input = ToRegister(instr->value()); |
| 3355 Register result = ToRegister(instr->result()); | 3353 Register result = ToRegister(instr->result()); |
| 3356 __ cmp(input, Operand::Zero()); | 3354 __ cmp(input, Operand::Zero()); |
| 3357 __ Move(result, input, pl); | 3355 __ Move(result, input, pl); |
| 3358 // We can make rsb conditional because the previous cmp instruction | 3356 // We can make rsb conditional because the previous cmp instruction |
| 3359 // will clear the V (overflow) flag and rsb won't set this flag | 3357 // will clear the V (overflow) flag and rsb won't set this flag |
| 3360 // if input is positive. | 3358 // if input is positive. |
| 3361 __ rsb(result, input, Operand::Zero(), SetCC, mi); | 3359 __ rsb(result, input, Operand::Zero(), SetCC, mi); |
| 3362 // Deoptimize on overflow. | 3360 // Deoptimize on overflow. |
| 3363 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 3361 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); |
| 3364 } | 3362 } |
| 3365 | 3363 |
| 3366 | 3364 |
| 3367 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3365 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3368 // Class for deferred case. | 3366 // Class for deferred case. |
| 3369 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { | 3367 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { |
| 3370 public: | 3368 public: |
| 3371 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3369 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
| 3372 : LDeferredCode(codegen), instr_(instr) { } | 3370 : LDeferredCode(codegen), instr_(instr) { } |
| 3373 void Generate() override { | 3371 void Generate() override { |
| (...skipping 26 matching lines...) Expand all Loading... |
| 3400 } | 3398 } |
| 3401 | 3399 |
| 3402 | 3400 |
| 3403 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3401 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
| 3404 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3402 DwVfpRegister input = ToDoubleRegister(instr->value()); |
| 3405 Register result = ToRegister(instr->result()); | 3403 Register result = ToRegister(instr->result()); |
| 3406 Register input_high = scratch0(); | 3404 Register input_high = scratch0(); |
| 3407 Label done, exact; | 3405 Label done, exact; |
| 3408 | 3406 |
| 3409 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); | 3407 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); |
| 3410 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); | 3408 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 3411 | 3409 |
| 3412 __ bind(&exact); | 3410 __ bind(&exact); |
| 3413 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3411 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3414 // Test for -0. | 3412 // Test for -0. |
| 3415 __ cmp(result, Operand::Zero()); | 3413 __ cmp(result, Operand::Zero()); |
| 3416 __ b(ne, &done); | 3414 __ b(ne, &done); |
| 3417 __ cmp(input_high, Operand::Zero()); | 3415 __ cmp(input_high, Operand::Zero()); |
| 3418 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); | 3416 DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); |
| 3419 } | 3417 } |
| 3420 __ bind(&done); | 3418 __ bind(&done); |
| 3421 } | 3419 } |
| 3422 | 3420 |
| 3423 | 3421 |
| 3424 void LCodeGen::DoMathRound(LMathRound* instr) { | 3422 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3425 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3423 DwVfpRegister input = ToDoubleRegister(instr->value()); |
| 3426 Register result = ToRegister(instr->result()); | 3424 Register result = ToRegister(instr->result()); |
| 3427 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3425 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
| 3428 DwVfpRegister input_plus_dot_five = double_scratch1; | 3426 DwVfpRegister input_plus_dot_five = double_scratch1; |
| 3429 Register input_high = scratch0(); | 3427 Register input_high = scratch0(); |
| 3430 DwVfpRegister dot_five = double_scratch0(); | 3428 DwVfpRegister dot_five = double_scratch0(); |
| 3431 Label convert, done; | 3429 Label convert, done; |
| 3432 | 3430 |
| 3433 __ Vmov(dot_five, 0.5, scratch0()); | 3431 __ Vmov(dot_five, 0.5, scratch0()); |
| 3434 __ vabs(double_scratch1, input); | 3432 __ vabs(double_scratch1, input); |
| 3435 __ VFPCompareAndSetFlags(double_scratch1, dot_five); | 3433 __ VFPCompareAndSetFlags(double_scratch1, dot_five); |
| 3436 // If input is in [-0.5, -0], the result is -0. | 3434 // If input is in [-0.5, -0], the result is -0. |
| 3437 // If input is in [+0, +0.5[, the result is +0. | 3435 // If input is in [+0, +0.5[, the result is +0. |
| 3438 // If the input is +0.5, the result is 1. | 3436 // If the input is +0.5, the result is 1. |
| 3439 __ b(hi, &convert); // Out of [-0.5, +0.5]. | 3437 __ b(hi, &convert); // Out of [-0.5, +0.5]. |
| 3440 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3438 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3441 __ VmovHigh(input_high, input); | 3439 __ VmovHigh(input_high, input); |
| 3442 __ cmp(input_high, Operand::Zero()); | 3440 __ cmp(input_high, Operand::Zero()); |
| 3443 // [-0.5, -0]. | 3441 // [-0.5, -0]. |
| 3444 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); | 3442 DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero); |
| 3445 } | 3443 } |
| 3446 __ VFPCompareAndSetFlags(input, dot_five); | 3444 __ VFPCompareAndSetFlags(input, dot_five); |
| 3447 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. | 3445 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. |
| 3448 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on | 3446 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on |
| 3449 // flag kBailoutOnMinusZero. | 3447 // flag kBailoutOnMinusZero. |
| 3450 __ mov(result, Operand::Zero(), LeaveCC, ne); | 3448 __ mov(result, Operand::Zero(), LeaveCC, ne); |
| 3451 __ b(&done); | 3449 __ b(&done); |
| 3452 | 3450 |
| 3453 __ bind(&convert); | 3451 __ bind(&convert); |
| 3454 __ vadd(input_plus_dot_five, input, dot_five); | 3452 __ vadd(input_plus_dot_five, input, dot_five); |
| 3455 // Reuse dot_five (double_scratch0) as we no longer need this value. | 3453 // Reuse dot_five (double_scratch0) as we no longer need this value. |
| 3456 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), | 3454 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), |
| 3457 &done, &done); | 3455 &done, &done); |
| 3458 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); | 3456 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 3459 __ bind(&done); | 3457 __ bind(&done); |
| 3460 } | 3458 } |
| 3461 | 3459 |
| 3462 | 3460 |
| 3463 void LCodeGen::DoMathFround(LMathFround* instr) { | 3461 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3464 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); | 3462 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
| 3465 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); | 3463 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); |
| 3466 LowDwVfpRegister scratch = double_scratch0(); | 3464 LowDwVfpRegister scratch = double_scratch0(); |
| 3467 __ vcvt_f32_f64(scratch.low(), input_reg); | 3465 __ vcvt_f32_f64(scratch.low(), input_reg); |
| 3468 __ vcvt_f64_f32(output_reg, scratch.low()); | 3466 __ vcvt_f64_f32(output_reg, scratch.low()); |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3512 if (exponent_type.IsSmi()) { | 3510 if (exponent_type.IsSmi()) { |
| 3513 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3511 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3514 __ CallStub(&stub); | 3512 __ CallStub(&stub); |
| 3515 } else if (exponent_type.IsTagged()) { | 3513 } else if (exponent_type.IsTagged()) { |
| 3516 Label no_deopt; | 3514 Label no_deopt; |
| 3517 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3515 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 3518 DCHECK(!r6.is(tagged_exponent)); | 3516 DCHECK(!r6.is(tagged_exponent)); |
| 3519 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3517 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
| 3520 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3518 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 3521 __ cmp(r6, Operand(ip)); | 3519 __ cmp(r6, Operand(ip)); |
| 3522 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 3520 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); |
| 3523 __ bind(&no_deopt); | 3521 __ bind(&no_deopt); |
| 3524 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3522 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3525 __ CallStub(&stub); | 3523 __ CallStub(&stub); |
| 3526 } else if (exponent_type.IsInteger32()) { | 3524 } else if (exponent_type.IsInteger32()) { |
| 3527 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3525 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3528 __ CallStub(&stub); | 3526 __ CallStub(&stub); |
| 3529 } else { | 3527 } else { |
| 3530 DCHECK(exponent_type.IsDouble()); | 3528 DCHECK(exponent_type.IsDouble()); |
| 3531 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3529 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3532 __ CallStub(&stub); | 3530 __ CallStub(&stub); |
| (...skipping 353 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3886 Register index = ToRegister(instr->index()); | 3884 Register index = ToRegister(instr->index()); |
| 3887 Operand length = ToOperand(instr->length()); | 3885 Operand length = ToOperand(instr->length()); |
| 3888 __ cmp(index, length); | 3886 __ cmp(index, length); |
| 3889 } | 3887 } |
| 3890 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 3888 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 3891 Label done; | 3889 Label done; |
| 3892 __ b(NegateCondition(cc), &done); | 3890 __ b(NegateCondition(cc), &done); |
| 3893 __ stop("eliminated bounds check failed"); | 3891 __ stop("eliminated bounds check failed"); |
| 3894 __ bind(&done); | 3892 __ bind(&done); |
| 3895 } else { | 3893 } else { |
| 3896 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); | 3894 DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); |
| 3897 } | 3895 } |
| 3898 } | 3896 } |
| 3899 | 3897 |
| 3900 | 3898 |
| 3901 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 3899 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 3902 Register external_pointer = ToRegister(instr->elements()); | 3900 Register external_pointer = ToRegister(instr->elements()); |
| 3903 Register key = no_reg; | 3901 Register key = no_reg; |
| 3904 ElementsKind elements_kind = instr->elements_kind(); | 3902 ElementsKind elements_kind = instr->elements_kind(); |
| 3905 bool key_is_constant = instr->key()->IsConstantOperand(); | 3903 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 3906 int constant_key = 0; | 3904 int constant_key = 0; |
| (...skipping 270 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4177 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(), | 4175 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(), |
| 4178 instr->hydrogen()->kind()); | 4176 instr->hydrogen()->kind()); |
| 4179 __ CallStub(&stub); | 4177 __ CallStub(&stub); |
| 4180 RecordSafepointWithLazyDeopt( | 4178 RecordSafepointWithLazyDeopt( |
| 4181 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 4179 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
| 4182 __ StoreToSafepointRegisterSlot(result, result); | 4180 __ StoreToSafepointRegisterSlot(result, result); |
| 4183 } | 4181 } |
| 4184 | 4182 |
| 4185 // Deopt on smi, which means the elements array changed to dictionary mode. | 4183 // Deopt on smi, which means the elements array changed to dictionary mode. |
| 4186 __ SmiTst(result); | 4184 __ SmiTst(result); |
| 4187 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); | 4185 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi); |
| 4188 } | 4186 } |
| 4189 | 4187 |
| 4190 | 4188 |
| 4191 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { | 4189 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
| 4192 Register object_reg = ToRegister(instr->object()); | 4190 Register object_reg = ToRegister(instr->object()); |
| 4193 Register scratch = scratch0(); | 4191 Register scratch = scratch0(); |
| 4194 | 4192 |
| 4195 Handle<Map> from_map = instr->original_map(); | 4193 Handle<Map> from_map = instr->original_map(); |
| 4196 Handle<Map> to_map = instr->transitioned_map(); | 4194 Handle<Map> to_map = instr->transitioned_map(); |
| 4197 ElementsKind from_kind = instr->from_kind(); | 4195 ElementsKind from_kind = instr->from_kind(); |
| (...skipping 26 matching lines...) Expand all Loading... |
| 4224 } | 4222 } |
| 4225 __ bind(¬_applicable); | 4223 __ bind(¬_applicable); |
| 4226 } | 4224 } |
| 4227 | 4225 |
| 4228 | 4226 |
| 4229 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4227 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4230 Register object = ToRegister(instr->object()); | 4228 Register object = ToRegister(instr->object()); |
| 4231 Register temp = ToRegister(instr->temp()); | 4229 Register temp = ToRegister(instr->temp()); |
| 4232 Label no_memento_found; | 4230 Label no_memento_found; |
| 4233 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4231 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
| 4234 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); | 4232 DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound); |
| 4235 __ bind(&no_memento_found); | 4233 __ bind(&no_memento_found); |
| 4236 } | 4234 } |
| 4237 | 4235 |
| 4238 | 4236 |
| 4239 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4237 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4240 DCHECK(ToRegister(instr->context()).is(cp)); | 4238 DCHECK(ToRegister(instr->context()).is(cp)); |
| 4241 DCHECK(ToRegister(instr->left()).is(r1)); | 4239 DCHECK(ToRegister(instr->left()).is(r1)); |
| 4242 DCHECK(ToRegister(instr->right()).is(r0)); | 4240 DCHECK(ToRegister(instr->right()).is(r0)); |
| 4243 StringAddStub stub(isolate(), | 4241 StringAddStub stub(isolate(), |
| 4244 instr->hydrogen()->flags(), | 4242 instr->hydrogen()->flags(), |
| (...skipping 303 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4548 } | 4546 } |
| 4549 | 4547 |
| 4550 | 4548 |
| 4551 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4549 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4552 HChange* hchange = instr->hydrogen(); | 4550 HChange* hchange = instr->hydrogen(); |
| 4553 Register input = ToRegister(instr->value()); | 4551 Register input = ToRegister(instr->value()); |
| 4554 Register output = ToRegister(instr->result()); | 4552 Register output = ToRegister(instr->result()); |
| 4555 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4553 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4556 hchange->value()->CheckFlag(HValue::kUint32)) { | 4554 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4557 __ tst(input, Operand(0xc0000000)); | 4555 __ tst(input, Operand(0xc0000000)); |
| 4558 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | 4556 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow); |
| 4559 } | 4557 } |
| 4560 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4558 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4561 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4559 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4562 __ SmiTag(output, input, SetCC); | 4560 __ SmiTag(output, input, SetCC); |
| 4563 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 4561 DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow); |
| 4564 } else { | 4562 } else { |
| 4565 __ SmiTag(output, input); | 4563 __ SmiTag(output, input); |
| 4566 } | 4564 } |
| 4567 } | 4565 } |
| 4568 | 4566 |
| 4569 | 4567 |
| 4570 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4568 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4571 Register input = ToRegister(instr->value()); | 4569 Register input = ToRegister(instr->value()); |
| 4572 Register result = ToRegister(instr->result()); | 4570 Register result = ToRegister(instr->result()); |
| 4573 if (instr->needs_check()) { | 4571 if (instr->needs_check()) { |
| 4574 STATIC_ASSERT(kHeapObjectTag == 1); | 4572 STATIC_ASSERT(kHeapObjectTag == 1); |
| 4575 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4573 // If the input is a HeapObject, SmiUntag will set the carry flag. |
| 4576 __ SmiUntag(result, input, SetCC); | 4574 __ SmiUntag(result, input, SetCC); |
| 4577 DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi); | 4575 DeoptimizeIf(cs, instr, DeoptimizeReason::kNotASmi); |
| 4578 } else { | 4576 } else { |
| 4579 __ SmiUntag(result, input); | 4577 __ SmiUntag(result, input); |
| 4580 } | 4578 } |
| 4581 } | 4579 } |
| 4582 | 4580 |
| 4583 | 4581 |
| 4584 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4582 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4585 DwVfpRegister result_reg, | 4583 DwVfpRegister result_reg, |
| 4586 NumberUntagDMode mode) { | 4584 NumberUntagDMode mode) { |
| 4587 bool can_convert_undefined_to_nan = | 4585 bool can_convert_undefined_to_nan = |
| 4588 instr->hydrogen()->can_convert_undefined_to_nan(); | 4586 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4589 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4587 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4590 | 4588 |
| 4591 Register scratch = scratch0(); | 4589 Register scratch = scratch0(); |
| 4592 SwVfpRegister flt_scratch = double_scratch0().low(); | 4590 SwVfpRegister flt_scratch = double_scratch0().low(); |
| 4593 DCHECK(!result_reg.is(double_scratch0())); | 4591 DCHECK(!result_reg.is(double_scratch0())); |
| 4594 Label convert, load_smi, done; | 4592 Label convert, load_smi, done; |
| 4595 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4593 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4596 // Smi check. | 4594 // Smi check. |
| 4597 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4595 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
| 4598 // Heap number map check. | 4596 // Heap number map check. |
| 4599 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4597 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 4600 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4598 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 4601 __ cmp(scratch, Operand(ip)); | 4599 __ cmp(scratch, Operand(ip)); |
| 4602 if (can_convert_undefined_to_nan) { | 4600 if (can_convert_undefined_to_nan) { |
| 4603 __ b(ne, &convert); | 4601 __ b(ne, &convert); |
| 4604 } else { | 4602 } else { |
| 4605 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 4603 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); |
| 4606 } | 4604 } |
| 4607 // load heap number | 4605 // load heap number |
| 4608 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); | 4606 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); |
| 4609 if (deoptimize_on_minus_zero) { | 4607 if (deoptimize_on_minus_zero) { |
| 4610 __ VmovLow(scratch, result_reg); | 4608 __ VmovLow(scratch, result_reg); |
| 4611 __ cmp(scratch, Operand::Zero()); | 4609 __ cmp(scratch, Operand::Zero()); |
| 4612 __ b(ne, &done); | 4610 __ b(ne, &done); |
| 4613 __ VmovHigh(scratch, result_reg); | 4611 __ VmovHigh(scratch, result_reg); |
| 4614 __ cmp(scratch, Operand(HeapNumber::kSignMask)); | 4612 __ cmp(scratch, Operand(HeapNumber::kSignMask)); |
| 4615 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 4613 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); |
| 4616 } | 4614 } |
| 4617 __ jmp(&done); | 4615 __ jmp(&done); |
| 4618 if (can_convert_undefined_to_nan) { | 4616 if (can_convert_undefined_to_nan) { |
| 4619 __ bind(&convert); | 4617 __ bind(&convert); |
| 4620 // Convert undefined (and hole) to NaN. | 4618 // Convert undefined (and hole) to NaN. |
| 4621 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 4619 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 4622 __ cmp(input_reg, Operand(ip)); | 4620 __ cmp(input_reg, Operand(ip)); |
| 4623 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); | 4621 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); |
| 4624 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4622 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4625 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); | 4623 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); |
| 4626 __ jmp(&done); | 4624 __ jmp(&done); |
| 4627 } | 4625 } |
| 4628 } else { | 4626 } else { |
| 4629 __ SmiUntag(scratch, input_reg); | 4627 __ SmiUntag(scratch, input_reg); |
| 4630 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4628 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4631 } | 4629 } |
| 4632 // Smi to double register conversion | 4630 // Smi to double register conversion |
| 4633 __ bind(&load_smi); | 4631 __ bind(&load_smi); |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4681 __ bind(&check_bools); | 4679 __ bind(&check_bools); |
| 4682 __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 4680 __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
| 4683 __ cmp(scratch2, Operand(ip)); | 4681 __ cmp(scratch2, Operand(ip)); |
| 4684 __ b(ne, &check_false); | 4682 __ b(ne, &check_false); |
| 4685 __ mov(input_reg, Operand(1)); | 4683 __ mov(input_reg, Operand(1)); |
| 4686 __ b(&done); | 4684 __ b(&done); |
| 4687 | 4685 |
| 4688 __ bind(&check_false); | 4686 __ bind(&check_false); |
| 4689 __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 4687 __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
| 4690 __ cmp(scratch2, Operand(ip)); | 4688 __ cmp(scratch2, Operand(ip)); |
| 4691 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean); | 4689 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean); |
| 4692 __ mov(input_reg, Operand::Zero()); | 4690 __ mov(input_reg, Operand::Zero()); |
| 4693 } else { | 4691 } else { |
| 4694 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 4692 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); |
| 4695 | 4693 |
| 4696 __ sub(ip, scratch2, Operand(kHeapObjectTag)); | 4694 __ sub(ip, scratch2, Operand(kHeapObjectTag)); |
| 4697 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); | 4695 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); |
| 4698 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); | 4696 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); |
| 4699 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); | 4697 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 4700 | 4698 |
| 4701 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4699 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4702 __ cmp(input_reg, Operand::Zero()); | 4700 __ cmp(input_reg, Operand::Zero()); |
| 4703 __ b(ne, &done); | 4701 __ b(ne, &done); |
| 4704 __ VmovHigh(scratch1, double_scratch2); | 4702 __ VmovHigh(scratch1, double_scratch2); |
| 4705 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 4703 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
| 4706 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); | 4704 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero); |
| 4707 } | 4705 } |
| 4708 } | 4706 } |
| 4709 __ bind(&done); | 4707 __ bind(&done); |
| 4710 } | 4708 } |
| 4711 | 4709 |
| 4712 | 4710 |
| 4713 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 4711 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 4714 class DeferredTaggedToI final : public LDeferredCode { | 4712 class DeferredTaggedToI final : public LDeferredCode { |
| 4715 public: | 4713 public: |
| 4716 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 4714 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4765 Register result_reg = ToRegister(instr->result()); | 4763 Register result_reg = ToRegister(instr->result()); |
| 4766 Register scratch1 = scratch0(); | 4764 Register scratch1 = scratch0(); |
| 4767 DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 4765 DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
| 4768 LowDwVfpRegister double_scratch = double_scratch0(); | 4766 LowDwVfpRegister double_scratch = double_scratch0(); |
| 4769 | 4767 |
| 4770 if (instr->truncating()) { | 4768 if (instr->truncating()) { |
| 4771 __ TruncateDoubleToI(result_reg, double_input); | 4769 __ TruncateDoubleToI(result_reg, double_input); |
| 4772 } else { | 4770 } else { |
| 4773 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 4771 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); |
| 4774 // Deoptimize if the input wasn't a int32 (inside a double). | 4772 // Deoptimize if the input wasn't a int32 (inside a double). |
| 4775 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); | 4773 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN); |
| 4776 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4774 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4777 Label done; | 4775 Label done; |
| 4778 __ cmp(result_reg, Operand::Zero()); | 4776 __ cmp(result_reg, Operand::Zero()); |
| 4779 __ b(ne, &done); | 4777 __ b(ne, &done); |
| 4780 __ VmovHigh(scratch1, double_input); | 4778 __ VmovHigh(scratch1, double_input); |
| 4781 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 4779 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
| 4782 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); | 4780 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero); |
| 4783 __ bind(&done); | 4781 __ bind(&done); |
| 4784 } | 4782 } |
| 4785 } | 4783 } |
| 4786 } | 4784 } |
| 4787 | 4785 |
| 4788 | 4786 |
// Converts an untagged double into a Smi, deoptimizing whenever the value
// cannot be represented exactly as a tagged small integer.
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    // Truncating mode: any double is acceptable; JS ToInt32 truncation.
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't a int32 (inside a double).
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      // An exact result of 0 may still be -0.0: inspect the sign bit of the
      // original double (high word) and deopt if it is set.
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
      __ bind(&done);
    }
  }
  // Tag the int32 result; SetCC makes the shift set the overflow flag so a
  // value outside Smi range triggers a deopt.
  __ SmiTag(result_reg, SetCC);
  DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
| 4814 | 4812 |
| 4815 | 4813 |
| 4816 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 4814 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 4817 LOperand* input = instr->value(); | 4815 LOperand* input = instr->value(); |
| 4818 __ SmiTst(ToRegister(input)); | 4816 __ SmiTst(ToRegister(input)); |
| 4819 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi); | 4817 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi); |
| 4820 } | 4818 } |
| 4821 | 4819 |
| 4822 | 4820 |
| 4823 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 4821 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 4824 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 4822 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 4825 LOperand* input = instr->value(); | 4823 LOperand* input = instr->value(); |
| 4826 __ SmiTst(ToRegister(input)); | 4824 __ SmiTst(ToRegister(input)); |
| 4827 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); | 4825 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi); |
| 4828 } | 4826 } |
| 4829 } | 4827 } |
| 4830 | 4828 |
| 4831 | 4829 |
| 4832 void LCodeGen::DoCheckArrayBufferNotNeutered( | 4830 void LCodeGen::DoCheckArrayBufferNotNeutered( |
| 4833 LCheckArrayBufferNotNeutered* instr) { | 4831 LCheckArrayBufferNotNeutered* instr) { |
| 4834 Register view = ToRegister(instr->view()); | 4832 Register view = ToRegister(instr->view()); |
| 4835 Register scratch = scratch0(); | 4833 Register scratch = scratch0(); |
| 4836 | 4834 |
| 4837 __ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); | 4835 __ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); |
| 4838 __ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); | 4836 __ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); |
| 4839 __ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); | 4837 __ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift)); |
| 4840 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds); | 4838 DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds); |
| 4841 } | 4839 } |
| 4842 | 4840 |
| 4843 | 4841 |
// Deoptimizes unless the value's instance type satisfies the hydrogen check.
// Two modes: an [first, last] interval test, or a (mask, tag) bit test on the
// instance-type byte.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  // Load the instance-type byte from the value's map.
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
    } else {
      // Unsigned compare: below the interval start => wrong type.
      DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmp(scratch, Operand(last));
        DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      // Single-bit mask: tst sets Z when the bit is clear, so for tag == 0 we
      // deopt when the bit is set (ne) and otherwise when it is clear (eq).
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr,
                   DeoptimizeReason::kWrongInstanceType);
    } else {
      // General case: mask out the relevant bits and compare against the tag.
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
    }
  }
}
| 4885 | 4884 |
| 4886 | 4885 |
// Deoptimizes unless the value is the exact expected heap object.
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    // New-space objects move under GC, so compare through a cell that the GC
    // keeps up to date instead of embedding the address directly.
    // NOTE(review): this inner `reg` redundantly shadows the outer one with
    // the identical value — candidate for cleanup.
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(cell));
    __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    // Old-space object: its address is stable and can be compared directly.
    __ cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
| 4902 | 4901 |
| 4903 | 4902 |
// Deferred path for CheckMaps with a migration target: calls the runtime to
// try migrating the instance to a newer map, and deoptimizes if migration
// failed (the runtime returns a Smi on failure).
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    // Registers must be saved across the runtime call; the scope pushes and
    // pops them around this block.
    PushSafepointRegistersScope scope(this);
    __ push(object);
    // Clear cp: the runtime call does not need a JS context here.
    __ mov(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    // Stash the runtime result (r0) where it survives the register pop.
    __ StoreToSafepointRegisterSlot(r0, scratch0());
  }
  // A Smi result signals that migration failed.
  __ tst(scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
| 4917 | 4916 |
| 4918 | 4917 |
| 4919 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 4918 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 4920 class DeferredCheckMaps final : public LDeferredCode { | 4919 class DeferredCheckMaps final : public LDeferredCode { |
| 4921 public: | 4920 public: |
| 4922 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 4921 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 4923 : LDeferredCode(codegen), instr_(instr), object_(object) { | 4922 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 4924 SetExit(check_maps()); | 4923 SetExit(check_maps()); |
| 4925 } | 4924 } |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4963 Handle<Map> map = maps->at(i).handle(); | 4962 Handle<Map> map = maps->at(i).handle(); |
| 4964 __ CompareMap(map_reg, map, &success); | 4963 __ CompareMap(map_reg, map, &success); |
| 4965 __ b(eq, &success); | 4964 __ b(eq, &success); |
| 4966 } | 4965 } |
| 4967 | 4966 |
| 4968 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 4967 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 4969 __ CompareMap(map_reg, map, &success); | 4968 __ CompareMap(map_reg, map, &success); |
| 4970 if (instr->hydrogen()->HasMigrationTarget()) { | 4969 if (instr->hydrogen()->HasMigrationTarget()) { |
| 4971 __ b(ne, deferred->entry()); | 4970 __ b(ne, deferred->entry()); |
| 4972 } else { | 4971 } else { |
| 4973 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); | 4972 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); |
| 4974 } | 4973 } |
| 4975 | 4974 |
| 4976 __ bind(&success); | 4975 __ bind(&success); |
| 4977 } | 4976 } |
| 4978 | 4977 |
| 4979 | 4978 |
| 4980 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 4979 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 4981 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); | 4980 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 4982 Register result_reg = ToRegister(instr->result()); | 4981 Register result_reg = ToRegister(instr->result()); |
| 4983 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); | 4982 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); |
| (...skipping 18 matching lines...) Expand all Loading... |
| 5002 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 5001 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); |
| 5003 | 5002 |
| 5004 // Check for heap number | 5003 // Check for heap number |
| 5005 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5004 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 5006 __ cmp(scratch, Operand(factory()->heap_number_map())); | 5005 __ cmp(scratch, Operand(factory()->heap_number_map())); |
| 5007 __ b(eq, &heap_number); | 5006 __ b(eq, &heap_number); |
| 5008 | 5007 |
| 5009 // Check for undefined. Undefined is converted to zero for clamping | 5008 // Check for undefined. Undefined is converted to zero for clamping |
| 5010 // conversions. | 5009 // conversions. |
| 5011 __ cmp(input_reg, Operand(factory()->undefined_value())); | 5010 __ cmp(input_reg, Operand(factory()->undefined_value())); |
| 5012 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); | 5011 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); |
| 5013 __ mov(result_reg, Operand::Zero()); | 5012 __ mov(result_reg, Operand::Zero()); |
| 5014 __ jmp(&done); | 5013 __ jmp(&done); |
| 5015 | 5014 |
| 5016 // Heap number | 5015 // Heap number |
| 5017 __ bind(&heap_number); | 5016 __ bind(&heap_number); |
| 5018 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5017 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 5019 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); | 5018 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); |
| 5020 __ jmp(&done); | 5019 __ jmp(&done); |
| 5021 | 5020 |
| 5022 // smi | 5021 // smi |
| (...skipping 412 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5435 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 5434 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 5436 __ jmp(&done); | 5435 __ jmp(&done); |
| 5437 | 5436 |
| 5438 __ bind(&load_cache); | 5437 __ bind(&load_cache); |
| 5439 __ LoadInstanceDescriptors(map, result); | 5438 __ LoadInstanceDescriptors(map, result); |
| 5440 __ ldr(result, | 5439 __ ldr(result, |
| 5441 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5440 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5442 __ ldr(result, | 5441 __ ldr(result, |
| 5443 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5442 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5444 __ cmp(result, Operand::Zero()); | 5443 __ cmp(result, Operand::Zero()); |
| 5445 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache); | 5444 DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache); |
| 5446 | 5445 |
| 5447 __ bind(&done); | 5446 __ bind(&done); |
| 5448 } | 5447 } |
| 5449 | 5448 |
| 5450 | 5449 |
| 5451 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5450 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5452 Register object = ToRegister(instr->value()); | 5451 Register object = ToRegister(instr->value()); |
| 5453 Register map = ToRegister(instr->map()); | 5452 Register map = ToRegister(instr->map()); |
| 5454 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5453 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5455 __ cmp(map, scratch0()); | 5454 __ cmp(map, scratch0()); |
| 5456 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); | 5455 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap); |
| 5457 } | 5456 } |
| 5458 | 5457 |
| 5459 | 5458 |
| 5460 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5459 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5461 Register result, | 5460 Register result, |
| 5462 Register object, | 5461 Register object, |
| 5463 Register index) { | 5462 Register index) { |
| 5464 PushSafepointRegistersScope scope(this); | 5463 PushSafepointRegistersScope scope(this); |
| 5465 __ Push(object); | 5464 __ Push(object); |
| 5466 __ Push(index); | 5465 __ Push(index); |
| (...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5529 __ ldr(result, FieldMemOperand(scratch, | 5528 __ ldr(result, FieldMemOperand(scratch, |
| 5530 FixedArray::kHeaderSize - kPointerSize)); | 5529 FixedArray::kHeaderSize - kPointerSize)); |
| 5531 __ bind(deferred->exit()); | 5530 __ bind(deferred->exit()); |
| 5532 __ bind(&done); | 5531 __ bind(&done); |
| 5533 } | 5532 } |
| 5534 | 5533 |
| 5535 #undef __ | 5534 #undef __ |
| 5536 | 5535 |
| 5537 } // namespace internal | 5536 } // namespace internal |
| 5538 } // namespace v8 | 5537 } // namespace v8 |
| OLD | NEW |