| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/crankshaft/mips64/lithium-codegen-mips64.h" | 5 #include "src/crankshaft/mips64/lithium-codegen-mips64.h" |
| 6 | 6 |
| 7 #include "src/code-factory.h" | 7 #include "src/code-factory.h" |
| 8 #include "src/code-stubs.h" | 8 #include "src/code-stubs.h" |
| 9 #include "src/crankshaft/hydrogen-osr.h" | 9 #include "src/crankshaft/hydrogen-osr.h" |
| 10 #include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h" | 10 #include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h" |
| (...skipping 714 matching lines...) | |
| 725 WriteTranslation(environment, &translation); | 725 WriteTranslation(environment, &translation); |
| 726 int deoptimization_index = deoptimizations_.length(); | 726 int deoptimization_index = deoptimizations_.length(); |
| 727 int pc_offset = masm()->pc_offset(); | 727 int pc_offset = masm()->pc_offset(); |
| 728 environment->Register(deoptimization_index, | 728 environment->Register(deoptimization_index, |
| 729 translation.index(), | 729 translation.index(), |
| 730 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 730 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 731 deoptimizations_.Add(environment, zone()); | 731 deoptimizations_.Add(environment, zone()); |
| 732 } | 732 } |
| 733 } | 733 } |
| 734 | 734 |
| 735 | |
| 736 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 735 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 737 Deoptimizer::DeoptReason deopt_reason, | 736 DeoptimizeReason deopt_reason, |
| 738 Deoptimizer::BailoutType bailout_type, | 737 Deoptimizer::BailoutType bailout_type, |
| 739 Register src1, const Operand& src2) { | 738 Register src1, const Operand& src2) { |
| 740 LEnvironment* environment = instr->environment(); | 739 LEnvironment* environment = instr->environment(); |
| 741 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 740 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 742 DCHECK(environment->HasBeenRegistered()); | 741 DCHECK(environment->HasBeenRegistered()); |
| 743 int id = environment->deoptimization_index(); | 742 int id = environment->deoptimization_index(); |
| 744 Address entry = | 743 Address entry = |
| 745 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 744 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 746 if (entry == NULL) { | 745 if (entry == NULL) { |
| 747 Abort(kBailoutWasNotPrepared); | 746 Abort(kBailoutWasNotPrepared); |
| (...skipping 45 matching lines...) | |
| 793 // jump entry if this is the case. | 792 // jump entry if this is the case. |
| 794 if (FLAG_trace_deopt || isolate()->is_profiling() || | 793 if (FLAG_trace_deopt || isolate()->is_profiling() || |
| 795 jump_table_.is_empty() || | 794 jump_table_.is_empty() || |
| 796 !table_entry->IsEquivalentTo(*jump_table_.last())) { | 795 !table_entry->IsEquivalentTo(*jump_table_.last())) { |
| 797 jump_table_.Add(table_entry, zone()); | 796 jump_table_.Add(table_entry, zone()); |
| 798 } | 797 } |
| 799 __ Branch(&jump_table_.last()->label, condition, src1, src2); | 798 __ Branch(&jump_table_.last()->label, condition, src1, src2); |
| 800 } | 799 } |
| 801 } | 800 } |
| 802 | 801 |
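Note on the jump-table branch above: a new Deoptimizer::JumpTableEntry is only appended when --trace-deopt or the profiler needs each deopt site to be distinct, the table is still empty, or the new entry is not equivalent to the previous one; otherwise the branch reuses the label of the last entry. A minimal C++ sketch of that sharing decision (the entry type and its fields are illustrative stand-ins, not V8's real declarations):

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Illustrative stand-in for Deoptimizer::JumpTableEntry.
struct JumpTableEntry {
  std::string reason;
  int bailout_type;  // EAGER / LAZY / SOFT
  bool IsEquivalentTo(const JumpTableEntry& other) const {
    return reason == other.reason && bailout_type == other.bailout_type;
  }
};

// Returns the index of the entry whose label the emitted branch targets.
// Consecutive equivalent deopts share one entry unless tracing or profiling
// needs every deopt site to be distinguishable.
size_t EntryForBranch(std::vector<JumpTableEntry>& table,
                      const JumpTableEntry& entry,
                      bool trace_deopt, bool is_profiling) {
  if (trace_deopt || is_profiling || table.empty() ||
      !table.back().IsEquivalentTo(entry)) {
    table.push_back(entry);
  }
  return table.size() - 1;  // __ Branch(&table[index].label, ...)
}
```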
| 803 | |
| 804 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 802 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 805 Deoptimizer::DeoptReason deopt_reason, | 803 DeoptimizeReason deopt_reason, Register src1, |
| 806 Register src1, const Operand& src2) { | 804 const Operand& src2) { |
| 807 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 805 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| 808 ? Deoptimizer::LAZY | 806 ? Deoptimizer::LAZY |
| 809 : Deoptimizer::EAGER; | 807 : Deoptimizer::EAGER; |
| 810 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2); | 808 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2); |
| 811 } | 809 } |
| 812 | 810 |
| 813 | 811 |
| 814 void LCodeGen::RecordSafepointWithLazyDeopt( | 812 void LCodeGen::RecordSafepointWithLazyDeopt( |
| 815 LInstruction* instr, SafepointMode safepoint_mode) { | 813 LInstruction* instr, SafepointMode safepoint_mode) { |
| 816 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { | 814 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { |
| (...skipping 111 matching lines...) | |
| 928 HMod* hmod = instr->hydrogen(); | 926 HMod* hmod = instr->hydrogen(); |
| 929 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 927 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 930 Label dividend_is_not_negative, done; | 928 Label dividend_is_not_negative, done; |
| 931 | 929 |
| 932 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 930 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 933 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); | 931 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); |
| 934 // Note: The code below even works when right contains kMinInt. | 932 // Note: The code below even works when right contains kMinInt. |
| 935 __ dsubu(dividend, zero_reg, dividend); | 933 __ dsubu(dividend, zero_reg, dividend); |
| 936 __ And(dividend, dividend, Operand(mask)); | 934 __ And(dividend, dividend, Operand(mask)); |
| 937 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 935 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 938 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 936 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, |
| 939 Operand(zero_reg)); | 937 Operand(zero_reg)); |
| 940 } | 938 } |
| 941 __ Branch(USE_DELAY_SLOT, &done); | 939 __ Branch(USE_DELAY_SLOT, &done); |
| 942 __ dsubu(dividend, zero_reg, dividend); | 940 __ dsubu(dividend, zero_reg, dividend); |
| 943 } | 941 } |
| 944 | 942 |
| 945 __ bind(&dividend_is_not_negative); | 943 __ bind(&dividend_is_not_negative); |
| 946 __ And(dividend, dividend, Operand(mask)); | 944 __ And(dividend, dividend, Operand(mask)); |
| 947 __ bind(&done); | 945 __ bind(&done); |
| 948 } | 946 } |
| 949 | 947 |
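The power-of-two modulus above masks the magnitude and restores the dividend's sign, deoptimizing when a negative dividend would produce -0. A minimal C++ sketch of the same arithmetic (the throwing helper merely stands in for DeoptimizeIf):

```cpp
#include <cstdint>
#include <stdexcept>

// Same arithmetic as DoModByPowerOf2I.
int32_t ModByPowerOf2(int32_t dividend, int32_t divisor,
                      bool bailout_on_minus_zero) {
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  if (dividend < 0) {
    // dsubu + And: correct even for kMinInt, because only the low bits
    // survive the mask.
    uint32_t magnitude =
        (0u - static_cast<uint32_t>(dividend)) & static_cast<uint32_t>(mask);
    if (magnitude == 0 && bailout_on_minus_zero) {
      throw std::runtime_error("deopt: kMinusZero");  // JS result would be -0
    }
    return -static_cast<int32_t>(magnitude);
  }
  return dividend & mask;
}
```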
| 950 | 948 |
| 951 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 949 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 952 Register dividend = ToRegister(instr->dividend()); | 950 Register dividend = ToRegister(instr->dividend()); |
| 953 int32_t divisor = instr->divisor(); | 951 int32_t divisor = instr->divisor(); |
| 954 Register result = ToRegister(instr->result()); | 952 Register result = ToRegister(instr->result()); |
| 955 DCHECK(!dividend.is(result)); | 953 DCHECK(!dividend.is(result)); |
| 956 | 954 |
| 957 if (divisor == 0) { | 955 if (divisor == 0) { |
| 958 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 956 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); |
| 959 return; | 957 return; |
| 960 } | 958 } |
| 961 | 959 |
| 962 __ TruncatingDiv(result, dividend, Abs(divisor)); | 960 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 963 __ Dmul(result, result, Operand(Abs(divisor))); | 961 __ Dmul(result, result, Operand(Abs(divisor))); |
| 964 __ Dsubu(result, dividend, Operand(result)); | 962 __ Dsubu(result, dividend, Operand(result)); |
| 965 | 963 |
| 966 // Check for negative zero. | 964 // Check for negative zero. |
| 967 HMod* hmod = instr->hydrogen(); | 965 HMod* hmod = instr->hydrogen(); |
| 968 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 966 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 969 Label remainder_not_zero; | 967 Label remainder_not_zero; |
| 970 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); | 968 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); |
| 971 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend, | 969 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend, |
| 972 Operand(zero_reg)); | 970 Operand(zero_reg)); |
| 973 __ bind(&remainder_not_zero); | 971 __ bind(&remainder_not_zero); |
| 974 } | 972 } |
| 975 } | 973 } |
| 976 | 974 |
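DoModByConstI reduces the modulus to `dividend - TruncatingDiv(dividend, |divisor|) * |divisor|` and deopts when a zero remainder comes from a negative dividend (a -0 in JS terms). A sketch under the assumption that a plain truncating 64-bit division may stand in for V8's magic-number TruncatingDiv:

```cpp
#include <cstdint>
#include <stdexcept>

// remainder = dividend - TruncatingDiv(dividend, |divisor|) * |divisor|.
int32_t ModByConst(int32_t dividend, int32_t divisor,
                   bool bailout_on_minus_zero) {
  if (divisor == 0) throw std::runtime_error("deopt: kDivisionByZero");
  int64_t abs_divisor = divisor < 0 ? -static_cast<int64_t>(divisor)
                                    : static_cast<int64_t>(divisor);
  int64_t quotient = dividend / abs_divisor;                      // TruncatingDiv
  int32_t remainder =
      static_cast<int32_t>(dividend - quotient * abs_divisor);    // Dmul + Dsubu
  if (remainder == 0 && dividend < 0 && bailout_on_minus_zero) {
    throw std::runtime_error("deopt: kMinusZero");  // -0 in JS terms
  }
  return remainder;
}
```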
| 977 | 975 |
| 978 void LCodeGen::DoModI(LModI* instr) { | 976 void LCodeGen::DoModI(LModI* instr) { |
| 979 HMod* hmod = instr->hydrogen(); | 977 HMod* hmod = instr->hydrogen(); |
| 980 const Register left_reg = ToRegister(instr->left()); | 978 const Register left_reg = ToRegister(instr->left()); |
| 981 const Register right_reg = ToRegister(instr->right()); | 979 const Register right_reg = ToRegister(instr->right()); |
| 982 const Register result_reg = ToRegister(instr->result()); | 980 const Register result_reg = ToRegister(instr->result()); |
| 983 | 981 |
| 984 // div runs in the background while we check for special cases. | 982 // div runs in the background while we check for special cases. |
| 985 __ Dmod(result_reg, left_reg, right_reg); | 983 __ Dmod(result_reg, left_reg, right_reg); |
| 986 | 984 |
| 987 Label done; | 985 Label done; |
| 988 // Check for x % 0, we have to deopt in this case because we can't return a | 986 // Check for x % 0, we have to deopt in this case because we can't return a |
| 989 // NaN. | 987 // NaN. |
| 990 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 988 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 991 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg, | 989 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg, |
| 992 Operand(zero_reg)); | 990 Operand(zero_reg)); |
| 993 } | 991 } |
| 994 | 992 |
| 995 // Check for kMinInt % -1, div will return kMinInt, which is not what we | 993 // Check for kMinInt % -1, div will return kMinInt, which is not what we |
| 996 // want. We have to deopt if we care about -0, because we can't return that. | 994 // want. We have to deopt if we care about -0, because we can't return that. |
| 997 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 995 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 998 Label no_overflow_possible; | 996 Label no_overflow_possible; |
| 999 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); | 997 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); |
| 1000 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 998 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1001 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1)); | 999 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg, |
| 1000 Operand(-1)); |
| 1002 } else { | 1001 } else { |
| 1003 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); | 1002 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); |
| 1004 __ Branch(USE_DELAY_SLOT, &done); | 1003 __ Branch(USE_DELAY_SLOT, &done); |
| 1005 __ mov(result_reg, zero_reg); | 1004 __ mov(result_reg, zero_reg); |
| 1006 } | 1005 } |
| 1007 __ bind(&no_overflow_possible); | 1006 __ bind(&no_overflow_possible); |
| 1008 } | 1007 } |
| 1009 | 1008 |
| 1010 // If we care about -0, test if the dividend is <0 and the result is 0. | 1009 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1011 __ Branch(&done, ge, left_reg, Operand(zero_reg)); | 1010 __ Branch(&done, ge, left_reg, Operand(zero_reg)); |
| 1012 | 1011 |
| 1013 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1012 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1014 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg, | 1013 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg, |
| 1015 Operand(zero_reg)); | 1014 Operand(zero_reg)); |
| 1016 } | 1015 } |
| 1017 __ bind(&done); | 1016 __ bind(&done); |
| 1018 } | 1017 } |
| 1019 | 1018 |
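The generic DoModI path lets the hardware Dmod run while it screens the operands: x % 0 must deopt (JS would yield NaN), kMinInt % -1 is forced to 0 (or deopts, since the JS result is -0), and a zero result from a negative dividend is likewise -0. A condensed C++ sketch, assuming the relevant CheckFlag conditions are all requested:

```cpp
#include <cstdint>
#include <limits>
#include <stdexcept>

// Condenses the special-case screening DoModI performs around the hardware
// Dmod instruction.
int32_t ModChecked(int32_t left, int32_t right, bool bailout_on_minus_zero) {
  if (right == 0) throw std::runtime_error("deopt: kDivisionByZero");
  if (left == std::numeric_limits<int32_t>::min() && right == -1) {
    // JS says kMinInt % -1 is -0 (the sign follows the dividend), so either
    // deopt or return plain 0.
    if (bailout_on_minus_zero) throw std::runtime_error("deopt: kMinusZero");
    return 0;
  }
  int32_t result = left % right;  // Dmod; sign follows the dividend
  if (result == 0 && left < 0 && bailout_on_minus_zero) {
    throw std::runtime_error("deopt: kMinusZero");
  }
  return result;
}
```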
| 1020 | 1019 |
| 1021 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1020 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1022 Register dividend = ToRegister(instr->dividend()); | 1021 Register dividend = ToRegister(instr->dividend()); |
| 1023 int32_t divisor = instr->divisor(); | 1022 int32_t divisor = instr->divisor(); |
| 1024 Register result = ToRegister(instr->result()); | 1023 Register result = ToRegister(instr->result()); |
| 1025 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1024 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1026 DCHECK(!result.is(dividend)); | 1025 DCHECK(!result.is(dividend)); |
| 1027 | 1026 |
| 1028 // Check for (0 / -x) that will produce negative zero. | 1027 // Check for (0 / -x) that will produce negative zero. |
| 1029 HDiv* hdiv = instr->hydrogen(); | 1028 HDiv* hdiv = instr->hydrogen(); |
| 1030 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1029 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1031 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1030 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, |
| 1032 Operand(zero_reg)); | 1031 Operand(zero_reg)); |
| 1033 } | 1032 } |
| 1034 // Check for (kMinInt / -1). | 1033 // Check for (kMinInt / -1). |
| 1035 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1034 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1036 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt)); | 1035 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend, |
| 1036 Operand(kMinInt)); |
| 1037 } | 1037 } |
| 1038 // Deoptimize if remainder will not be 0. | 1038 // Deoptimize if remainder will not be 0. |
| 1039 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1039 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1040 divisor != 1 && divisor != -1) { | 1040 divisor != 1 && divisor != -1) { |
| 1041 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1041 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1042 __ And(at, dividend, Operand(mask)); | 1042 __ And(at, dividend, Operand(mask)); |
| 1043 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg)); | 1043 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at, |
| 1044 Operand(zero_reg)); |
| 1044 } | 1045 } |
| 1045 | 1046 |
| 1046 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1047 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 1047 __ Dsubu(result, zero_reg, dividend); | 1048 __ Dsubu(result, zero_reg, dividend); |
| 1048 return; | 1049 return; |
| 1049 } | 1050 } |
| 1050 uint16_t shift = WhichPowerOf2Abs(divisor); | 1051 uint16_t shift = WhichPowerOf2Abs(divisor); |
| 1051 if (shift == 0) { | 1052 if (shift == 0) { |
| 1052 __ Move(result, dividend); | 1053 __ Move(result, dividend); |
| 1053 } else if (shift == 1) { | 1054 } else if (shift == 1) { |
| 1054 __ dsrl32(result, dividend, 31); | 1055 __ dsrl32(result, dividend, 31); |
| 1055 __ Daddu(result, dividend, Operand(result)); | 1056 __ Daddu(result, dividend, Operand(result)); |
| 1056 } else { | 1057 } else { |
| 1057 __ dsra32(result, dividend, 31); | 1058 __ dsra32(result, dividend, 31); |
| 1058 __ dsrl32(result, result, 32 - shift); | 1059 __ dsrl32(result, result, 32 - shift); |
| 1059 __ Daddu(result, dividend, Operand(result)); | 1060 __ Daddu(result, dividend, Operand(result)); |
| 1060 } | 1061 } |
| 1061 if (shift > 0) __ dsra(result, result, shift); | 1062 if (shift > 0) __ dsra(result, result, shift); |
| 1062 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1063 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
| 1063 } | 1064 } |
| 1064 | 1065 |
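The shift sequence above implements truncating division by ±2^shift: negative dividends are biased by 2^shift − 1 (extracted from the sign via dsra32/dsrl32) so the arithmetic shift rounds toward zero instead of toward −∞, and a negative divisor negates the result. A sketch of just that arithmetic, with the deopt checks already shown above omitted:

```cpp
#include <cstdint>

// Truncating division by divisor = +/- 2^shift. A negative dividend is
// biased by (2^shift - 1) so the arithmetic shift rounds toward zero.
int32_t DivByPowerOf2(int32_t dividend, int shift, bool negative_divisor) {
  uint32_t sign = static_cast<uint32_t>(dividend >> 31);  // 0 or 0xFFFFFFFF
  int32_t bias =
      (shift == 0) ? 0 : static_cast<int32_t>(sign >> (32 - shift));
  int32_t result = (dividend + bias) >> shift;  // arithmetic shift (dsra)
  return negative_divisor ? -result : result;
}
```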
| 1065 | 1066 |
| 1066 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1067 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1067 Register dividend = ToRegister(instr->dividend()); | 1068 Register dividend = ToRegister(instr->dividend()); |
| 1068 int32_t divisor = instr->divisor(); | 1069 int32_t divisor = instr->divisor(); |
| 1069 Register result = ToRegister(instr->result()); | 1070 Register result = ToRegister(instr->result()); |
| 1070 DCHECK(!dividend.is(result)); | 1071 DCHECK(!dividend.is(result)); |
| 1071 | 1072 |
| 1072 if (divisor == 0) { | 1073 if (divisor == 0) { |
| 1073 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1074 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); |
| 1074 return; | 1075 return; |
| 1075 } | 1076 } |
| 1076 | 1077 |
| 1077 // Check for (0 / -x) that will produce negative zero. | 1078 // Check for (0 / -x) that will produce negative zero. |
| 1078 HDiv* hdiv = instr->hydrogen(); | 1079 HDiv* hdiv = instr->hydrogen(); |
| 1079 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1080 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1080 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1081 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, |
| 1081 Operand(zero_reg)); | 1082 Operand(zero_reg)); |
| 1082 } | 1083 } |
| 1083 | 1084 |
| 1084 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1085 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1085 if (divisor < 0) __ Subu(result, zero_reg, result); | 1086 if (divisor < 0) __ Subu(result, zero_reg, result); |
| 1086 | 1087 |
| 1087 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1088 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1088 __ Dmul(scratch0(), result, Operand(divisor)); | 1089 __ Dmul(scratch0(), result, Operand(divisor)); |
| 1089 __ Dsubu(scratch0(), scratch0(), dividend); | 1090 __ Dsubu(scratch0(), scratch0(), dividend); |
| 1090 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(), | 1091 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(), |
| 1091 Operand(zero_reg)); | 1092 Operand(zero_reg)); |
| 1092 } | 1093 } |
| 1093 } | 1094 } |
| 1094 | 1095 |
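DoDivByConstI computes the quotient with TruncatingDiv (a multiply by a precomputed magic reciprocal), flips the sign for a negative divisor, and, when the uses are not truncating, multiplies back and deopts unless `result * divisor == dividend`. A sketch of that structure, with plain 64-bit division standing in for the magic-number multiply and the 0/-x minus-zero deopt omitted:

```cpp
#include <cstdint>
#include <stdexcept>

// Quotient, sign fix-up, and the optional exactness check.
int32_t DivByConst(int32_t dividend, int32_t divisor,
                   bool all_uses_truncating) {
  if (divisor == 0) throw std::runtime_error("deopt: kDivisionByZero");
  int64_t abs_divisor = divisor < 0 ? -static_cast<int64_t>(divisor)
                                    : static_cast<int64_t>(divisor);
  int64_t quotient = dividend / abs_divisor;  // TruncatingDiv
  if (divisor < 0) quotient = -quotient;
  // Dmul + Dsubu: multiplying back must reproduce the dividend exactly.
  if (!all_uses_truncating && quotient * divisor != dividend) {
    throw std::runtime_error("deopt: kLostPrecision");
  }
  return static_cast<int32_t>(quotient);
}
```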
| 1095 | 1096 |
| 1096 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1097 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1097 void LCodeGen::DoDivI(LDivI* instr) { | 1098 void LCodeGen::DoDivI(LDivI* instr) { |
| 1098 HBinaryOperation* hdiv = instr->hydrogen(); | 1099 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1099 Register dividend = ToRegister(instr->dividend()); | 1100 Register dividend = ToRegister(instr->dividend()); |
| 1100 Register divisor = ToRegister(instr->divisor()); | 1101 Register divisor = ToRegister(instr->divisor()); |
| 1101 const Register result = ToRegister(instr->result()); | 1102 const Register result = ToRegister(instr->result()); |
| 1102 | 1103 |
| 1103 // On MIPS div is asynchronous - it will run in the background while we | 1104 // On MIPS div is asynchronous - it will run in the background while we |
| 1104 // check for special cases. | 1105 // check for special cases. |
| 1105 __ Div(result, dividend, divisor); | 1106 __ Div(result, dividend, divisor); |
| 1106 | 1107 |
| 1107 // Check for x / 0. | 1108 // Check for x / 0. |
| 1108 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1109 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1109 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor, | 1110 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor, |
| 1110 Operand(zero_reg)); | 1111 Operand(zero_reg)); |
| 1111 } | 1112 } |
| 1112 | 1113 |
| 1113 // Check for (0 / -x) that will produce negative zero. | 1114 // Check for (0 / -x) that will produce negative zero. |
| 1114 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1115 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1115 Label left_not_zero; | 1116 Label left_not_zero; |
| 1116 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); | 1117 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); |
| 1117 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor, | 1118 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor, |
| 1118 Operand(zero_reg)); | 1119 Operand(zero_reg)); |
| 1119 __ bind(&left_not_zero); | 1120 __ bind(&left_not_zero); |
| 1120 } | 1121 } |
| 1121 | 1122 |
| 1122 // Check for (kMinInt / -1). | 1123 // Check for (kMinInt / -1). |
| 1123 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1124 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1124 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1125 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1125 Label left_not_min_int; | 1126 Label left_not_min_int; |
| 1126 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); | 1127 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); |
| 1127 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1)); | 1128 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1)); |
| 1128 __ bind(&left_not_min_int); | 1129 __ bind(&left_not_min_int); |
| 1129 } | 1130 } |
| 1130 | 1131 |
| 1131 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1132 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1132 // Calculate remainder. | 1133 // Calculate remainder. |
| 1133 Register remainder = ToRegister(instr->temp()); | 1134 Register remainder = ToRegister(instr->temp()); |
| 1134 if (kArchVariant != kMips64r6) { | 1135 if (kArchVariant != kMips64r6) { |
| 1135 __ mfhi(remainder); | 1136 __ mfhi(remainder); |
| 1136 } else { | 1137 } else { |
| 1137 __ dmod(remainder, dividend, divisor); | 1138 __ dmod(remainder, dividend, divisor); |
| 1138 } | 1139 } |
| 1139 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder, | 1140 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder, |
| 1140 Operand(zero_reg)); | 1141 Operand(zero_reg)); |
| 1141 } | 1142 } |
| 1142 } | 1143 } |
| 1143 | 1144 |
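DoDivI follows the same pattern with a dynamic divisor: the hardware division starts first, the special cases (x / 0, 0 / -x, kMinInt / -1) are screened while it runs, and a non-truncating use additionally requires the remainder (mfhi or dmod) to be zero. A condensed sketch, assuming all the bailout flags are requested:

```cpp
#include <cstdint>
#include <limits>
#include <stdexcept>

// DoDivI with every check requested.
int32_t DivChecked(int32_t dividend, int32_t divisor,
                   bool all_uses_truncating) {
  if (divisor == 0) throw std::runtime_error("deopt: kDivisionByZero");
  if (dividend == 0 && divisor < 0) {
    throw std::runtime_error("deopt: kMinusZero");  // 0 / -x is -0 in JS
  }
  if (dividend == std::numeric_limits<int32_t>::min() && divisor == -1) {
    throw std::runtime_error("deopt: kOverflow");
  }
  int32_t result = dividend / divisor;  // Div / Ddiv
  if (!all_uses_truncating && dividend % divisor != 0) {
    throw std::runtime_error("deopt: kLostPrecision");  // remainder: mfhi/dmod
  }
  return result;
}
```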
| 1144 | 1145 |
| 1145 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1146 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
| 1146 DoubleRegister addend = ToDoubleRegister(instr->addend()); | 1147 DoubleRegister addend = ToDoubleRegister(instr->addend()); |
| 1147 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1148 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); |
| 1148 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1149 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
| 1149 | 1150 |
| (...skipping 25 matching lines...) | |
| 1175 return; | 1176 return; |
| 1176 } | 1177 } |
| 1177 | 1178 |
| 1178 // If the divisor is negative, we have to negate and handle edge cases. | 1179 // If the divisor is negative, we have to negate and handle edge cases. |
| 1179 // Dividend can be the same register as result so save the value of it | 1180 // Dividend can be the same register as result so save the value of it |
| 1180 // for checking overflow. | 1181 // for checking overflow. |
| 1181 __ Move(scratch, dividend); | 1182 __ Move(scratch, dividend); |
| 1182 | 1183 |
| 1183 __ Dsubu(result, zero_reg, dividend); | 1184 __ Dsubu(result, zero_reg, dividend); |
| 1184 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1185 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1185 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg)); | 1186 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result, |
| 1187 Operand(zero_reg)); |
| 1186 } | 1188 } |
| 1187 | 1189 |
| 1188 __ Xor(scratch, scratch, result); | 1190 __ Xor(scratch, scratch, result); |
| 1189 // Dividing by -1 is basically negation, unless we overflow. | 1191 // Dividing by -1 is basically negation, unless we overflow. |
| 1190 if (divisor == -1) { | 1192 if (divisor == -1) { |
| 1191 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1193 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1192 DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt)); | 1194 DeoptimizeIf(gt, instr, DeoptimizeReason::kOverflow, result, |
| 1195 Operand(kMaxInt)); |
| 1193 } | 1196 } |
| 1194 return; | 1197 return; |
| 1195 } | 1198 } |
| 1196 | 1199 |
| 1197 // If the negation could not overflow, simply shifting is OK. | 1200 // If the negation could not overflow, simply shifting is OK. |
| 1198 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1201 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1199 __ dsra(result, result, shift); | 1202 __ dsra(result, result, shift); |
| 1200 return; | 1203 return; |
| 1201 } | 1204 } |
| 1202 | 1205 |
| 1203 Label no_overflow, done; | 1206 Label no_overflow, done; |
| 1204 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); | 1207 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); |
| 1205 __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE); | 1208 __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE); |
| 1206 __ Branch(&done); | 1209 __ Branch(&done); |
| 1207 __ bind(&no_overflow); | 1210 __ bind(&no_overflow); |
| 1208 __ dsra(result, result, shift); | 1211 __ dsra(result, result, shift); |
| 1209 __ bind(&done); | 1212 __ bind(&done); |
| 1210 } | 1213 } |
| 1211 | 1214 |
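For the flooring division by ±2^shift, an arithmetic right shift already floors, so only a negative divisor needs the negate-then-shift path shown above, with kMinInt handled through the precomputed `kMinInt / divisor` constant. A sketch reconstructed from the visible tail of DoFlooringDivByPowerOf2I (the positive-divisor prologue is in the elided lines and is assumed here; the minus-zero bailout is assumed requested):

```cpp
#include <cstdint>
#include <limits>
#include <stdexcept>

// Flooring division by divisor = +/- 2^shift.
int32_t FlooringDivByPowerOf2(int32_t dividend, int32_t divisor, int shift) {
  if (divisor > 0) return dividend >> shift;  // dsra floors directly
  if (dividend == 0) {
    throw std::runtime_error("deopt: kMinusZero");  // 0 / -x is -0 in JS
  }
  if (dividend == std::numeric_limits<int32_t>::min()) {
    if (divisor == -1) throw std::runtime_error("deopt: kOverflow");
    return std::numeric_limits<int32_t>::min() / divisor;  // li(..., kMinInt / divisor)
  }
  return (-dividend) >> shift;  // negate, then floor
}
```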
| 1212 | 1215 |
| 1213 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1216 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1214 Register dividend = ToRegister(instr->dividend()); | 1217 Register dividend = ToRegister(instr->dividend()); |
| 1215 int32_t divisor = instr->divisor(); | 1218 int32_t divisor = instr->divisor(); |
| 1216 Register result = ToRegister(instr->result()); | 1219 Register result = ToRegister(instr->result()); |
| 1217 DCHECK(!dividend.is(result)); | 1220 DCHECK(!dividend.is(result)); |
| 1218 | 1221 |
| 1219 if (divisor == 0) { | 1222 if (divisor == 0) { |
| 1220 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1223 DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero); |
| 1221 return; | 1224 return; |
| 1222 } | 1225 } |
| 1223 | 1226 |
| 1224 // Check for (0 / -x) that will produce negative zero. | 1227 // Check for (0 / -x) that will produce negative zero. |
| 1225 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1228 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1226 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1229 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1227 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, | 1230 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend, |
| 1228 Operand(zero_reg)); | 1231 Operand(zero_reg)); |
| 1229 } | 1232 } |
| 1230 | 1233 |
| 1231 // Easy case: We need no dynamic check for the dividend and the flooring | 1234 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1232 // division is the same as the truncating division. | 1235 // division is the same as the truncating division. |
| 1233 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1236 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1234 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1237 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1235 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1238 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1236 if (divisor < 0) __ Dsubu(result, zero_reg, result); | 1239 if (divisor < 0) __ Dsubu(result, zero_reg, result); |
| 1237 return; | 1240 return; |
| (...skipping 24 matching lines...) | |
| 1262 Register dividend = ToRegister(instr->dividend()); | 1265 Register dividend = ToRegister(instr->dividend()); |
| 1263 Register divisor = ToRegister(instr->divisor()); | 1266 Register divisor = ToRegister(instr->divisor()); |
| 1264 const Register result = ToRegister(instr->result()); | 1267 const Register result = ToRegister(instr->result()); |
| 1265 | 1268 |
| 1266 // On MIPS div is asynchronous - it will run in the background while we | 1269 // On MIPS div is asynchronous - it will run in the background while we |
| 1267 // check for special cases. | 1270 // check for special cases. |
| 1268 __ Ddiv(result, dividend, divisor); | 1271 __ Ddiv(result, dividend, divisor); |
| 1269 | 1272 |
| 1270 // Check for x / 0. | 1273 // Check for x / 0. |
| 1271 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1274 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1272 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor, | 1275 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor, |
| 1273 Operand(zero_reg)); | 1276 Operand(zero_reg)); |
| 1274 } | 1277 } |
| 1275 | 1278 |
| 1276 // Check for (0 / -x) that will produce negative zero. | 1279 // Check for (0 / -x) that will produce negative zero. |
| 1277 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1280 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1278 Label left_not_zero; | 1281 Label left_not_zero; |
| 1279 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); | 1282 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); |
| 1280 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor, | 1283 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor, |
| 1281 Operand(zero_reg)); | 1284 Operand(zero_reg)); |
| 1282 __ bind(&left_not_zero); | 1285 __ bind(&left_not_zero); |
| 1283 } | 1286 } |
| 1284 | 1287 |
| 1285 // Check for (kMinInt / -1). | 1288 // Check for (kMinInt / -1). |
| 1286 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1289 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1287 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1290 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1288 Label left_not_min_int; | 1291 Label left_not_min_int; |
| 1289 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); | 1292 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); |
| 1290 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1)); | 1293 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1)); |
| 1291 __ bind(&left_not_min_int); | 1294 __ bind(&left_not_min_int); |
| 1292 } | 1295 } |
| 1293 | 1296 |
| 1294 // We performed a truncating division. Correct the result if necessary. | 1297 // We performed a truncating division. Correct the result if necessary. |
| 1295 Label done; | 1298 Label done; |
| 1296 Register remainder = scratch0(); | 1299 Register remainder = scratch0(); |
| 1297 if (kArchVariant != kMips64r6) { | 1300 if (kArchVariant != kMips64r6) { |
| 1298 __ mfhi(remainder); | 1301 __ mfhi(remainder); |
| 1299 } else { | 1302 } else { |
| 1300 __ dmod(remainder, dividend, divisor); | 1303 __ dmod(remainder, dividend, divisor); |
| (...skipping 16 matching lines...) | |
| 1317 bool bailout_on_minus_zero = | 1320 bool bailout_on_minus_zero = |
| 1318 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1321 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 1319 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1322 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1320 | 1323 |
| 1321 if (right_op->IsConstantOperand()) { | 1324 if (right_op->IsConstantOperand()) { |
| 1322 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1325 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 1323 | 1326 |
| 1324 if (bailout_on_minus_zero && (constant < 0)) { | 1327 if (bailout_on_minus_zero && (constant < 0)) { |
| 1325 // The case of a null constant will be handled separately. | 1328 // The case of a null constant will be handled separately. |
| 1326 // If constant is negative and left is null, the result should be -0. | 1329 // If constant is negative and left is null, the result should be -0. |
| 1327 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg)); | 1330 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left, |
| 1331 Operand(zero_reg)); |
| 1328 } | 1332 } |
| 1329 | 1333 |
| 1330 switch (constant) { | 1334 switch (constant) { |
| 1331 case -1: | 1335 case -1: |
| 1332 if (overflow) { | 1336 if (overflow) { |
| 1333 Label no_overflow; | 1337 Label no_overflow; |
| 1334 __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow); | 1338 __ DsubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow); |
| 1335 DeoptimizeIf(al, instr); | 1339 DeoptimizeIf(al, instr); |
| 1336 __ bind(&no_overflow); | 1340 __ bind(&no_overflow); |
| 1337 } else { | 1341 } else { |
| 1338 __ Dsubu(result, zero_reg, left); | 1342 __ Dsubu(result, zero_reg, left); |
| 1339 } | 1343 } |
| 1340 break; | 1344 break; |
| 1341 case 0: | 1345 case 0: |
| 1342 if (bailout_on_minus_zero) { | 1346 if (bailout_on_minus_zero) { |
| 1343 // If left is strictly negative and the constant is null, the | 1347 // If left is strictly negative and the constant is null, the |
| 1344 // result is -0. Deoptimize if required, otherwise return 0. | 1348 // result is -0. Deoptimize if required, otherwise return 0. |
| 1345 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left, | 1349 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left, |
| 1346 Operand(zero_reg)); | 1350 Operand(zero_reg)); |
| 1347 } | 1351 } |
| 1348 __ mov(result, zero_reg); | 1352 __ mov(result, zero_reg); |
| 1349 break; | 1353 break; |
| 1350 case 1: | 1354 case 1: |
| 1351 // Nothing to do. | 1355 // Nothing to do. |
| 1352 __ Move(result, left); | 1356 __ Move(result, left); |
| 1353 break; | 1357 break; |
| 1354 default: | 1358 default: |
| 1355 // Multiplying by powers of two and powers of two plus or minus | 1359 // Multiplying by powers of two and powers of two plus or minus |
| (...skipping 27 matching lines...) | |
| 1383 } else { | 1387 } else { |
| 1384 DCHECK(right_op->IsRegister()); | 1388 DCHECK(right_op->IsRegister()); |
| 1385 Register right = ToRegister(right_op); | 1389 Register right = ToRegister(right_op); |
| 1386 | 1390 |
| 1387 if (overflow) { | 1391 if (overflow) { |
| 1388 // hi:lo = left * right. | 1392 // hi:lo = left * right. |
| 1389 __ Dmulh(result, left, right); | 1393 __ Dmulh(result, left, right); |
| 1390 __ dsra32(scratch, result, 0); | 1394 __ dsra32(scratch, result, 0); |
| 1391 __ sra(at, result, 31); | 1395 __ sra(at, result, 31); |
| 1392 __ SmiTag(result); | 1396 __ SmiTag(result); |
| 1393 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at)); | 1397 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch, |
| 1398 Operand(at)); |
| 1394 } else { | 1399 } else { |
| 1395 __ SmiUntag(result, left); | 1400 __ SmiUntag(result, left); |
| 1396 __ dmul(result, result, right); | 1401 __ dmul(result, result, right); |
| 1397 } | 1402 } |
| 1398 | 1403 |
| 1399 if (bailout_on_minus_zero) { | 1404 if (bailout_on_minus_zero) { |
| 1400 Label done; | 1405 Label done; |
| 1401 __ Xor(at, left, right); | 1406 __ Xor(at, left, right); |
| 1402 __ Branch(&done, ge, at, Operand(zero_reg)); | 1407 __ Branch(&done, ge, at, Operand(zero_reg)); |
| 1403 // Bail out if the result is minus zero. | 1408 // Bail out if the result is minus zero. |
| 1404 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, | 1409 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result, |
| 1405 Operand(zero_reg)); | 1410 Operand(zero_reg)); |
| 1406 __ bind(&done); | 1411 __ bind(&done); |
| 1407 } | 1412 } |
| 1408 } | 1413 } |
| 1409 } | 1414 } |
| 1410 | 1415 |
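Both multiply paths (this Smi variant and DoMulI below) detect int32 overflow by forming the full 64-bit product and checking that bits 63..32 equal the sign-extension of bit 31, and they deopt on a -0 result, recognized as a zero product of operands with opposite signs. A sketch of those two checks, ignoring Smi tagging:

```cpp
#include <cstdint>
#include <stdexcept>

// Overflow and minus-zero screening used by the multiply paths: the 64-bit
// product fits in int32 iff bits 63..32 equal the sign-extension of bit 31.
int32_t MulChecked(int32_t left, int32_t right, bool bailout_on_minus_zero) {
  int64_t product = static_cast<int64_t>(left) * right;  // Dmul / Dmulh
  int32_t hi = static_cast<int32_t>(product >> 32);      // dsra32(scratch, ...)
  int32_t lo = static_cast<int32_t>(product);
  if (hi != (lo >> 31)) {                                 // sra(at, result, 31)
    throw std::runtime_error("deopt: kOverflow");
  }
  if (bailout_on_minus_zero && lo == 0 && (left ^ right) < 0) {
    // Zero product of oppositely-signed operands is -0 in JS.
    throw std::runtime_error("deopt: kMinusZero");
  }
  return lo;
}
```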
| 1411 | 1416 |
| 1412 void LCodeGen::DoMulI(LMulI* instr) { | 1417 void LCodeGen::DoMulI(LMulI* instr) { |
| 1413 Register scratch = scratch0(); | 1418 Register scratch = scratch0(); |
| 1414 Register result = ToRegister(instr->result()); | 1419 Register result = ToRegister(instr->result()); |
| 1415 // Note that result may alias left. | 1420 // Note that result may alias left. |
| 1416 Register left = ToRegister(instr->left()); | 1421 Register left = ToRegister(instr->left()); |
| 1417 LOperand* right_op = instr->right(); | 1422 LOperand* right_op = instr->right(); |
| 1418 | 1423 |
| 1419 bool bailout_on_minus_zero = | 1424 bool bailout_on_minus_zero = |
| 1420 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1425 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 1421 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1426 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1422 | 1427 |
| 1423 if (right_op->IsConstantOperand()) { | 1428 if (right_op->IsConstantOperand()) { |
| 1424 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1429 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 1425 | 1430 |
| 1426 if (bailout_on_minus_zero && (constant < 0)) { | 1431 if (bailout_on_minus_zero && (constant < 0)) { |
| 1427 // The case of a null constant will be handled separately. | 1432 // The case of a null constant will be handled separately. |
| 1428 // If constant is negative and left is null, the result should be -0. | 1433 // If constant is negative and left is null, the result should be -0. |
| 1429 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg)); | 1434 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left, |
| 1435 Operand(zero_reg)); |
| 1430 } | 1436 } |
| 1431 | 1437 |
| 1432 switch (constant) { | 1438 switch (constant) { |
| 1433 case -1: | 1439 case -1: |
| 1434 if (overflow) { | 1440 if (overflow) { |
| 1435 Label no_overflow; | 1441 Label no_overflow; |
| 1436 __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow); | 1442 __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow); |
| 1437 DeoptimizeIf(al, instr); | 1443 DeoptimizeIf(al, instr); |
| 1438 __ bind(&no_overflow); | 1444 __ bind(&no_overflow); |
| 1439 } else { | 1445 } else { |
| 1440 __ Subu(result, zero_reg, left); | 1446 __ Subu(result, zero_reg, left); |
| 1441 } | 1447 } |
| 1442 break; | 1448 break; |
| 1443 case 0: | 1449 case 0: |
| 1444 if (bailout_on_minus_zero) { | 1450 if (bailout_on_minus_zero) { |
| 1445 // If left is strictly negative and the constant is null, the | 1451 // If left is strictly negative and the constant is null, the |
| 1446 // result is -0. Deoptimize if required, otherwise return 0. | 1452 // result is -0. Deoptimize if required, otherwise return 0. |
| 1447 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left, | 1453 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left, |
| 1448 Operand(zero_reg)); | 1454 Operand(zero_reg)); |
| 1449 } | 1455 } |
| 1450 __ mov(result, zero_reg); | 1456 __ mov(result, zero_reg); |
| 1451 break; | 1457 break; |
| 1452 case 1: | 1458 case 1: |
| 1453 // Nothing to do. | 1459 // Nothing to do. |
| 1454 __ Move(result, left); | 1460 __ Move(result, left); |
| 1455 break; | 1461 break; |
| 1456 default: | 1462 default: |
| 1457 // Multiplying by powers of two and powers of two plus or minus | 1463 // Multiplying by powers of two and powers of two plus or minus |
| (...skipping 28 matching lines...) | |
| 1486 } else { | 1492 } else { |
| 1487 DCHECK(right_op->IsRegister()); | 1493 DCHECK(right_op->IsRegister()); |
| 1488 Register right = ToRegister(right_op); | 1494 Register right = ToRegister(right_op); |
| 1489 | 1495 |
| 1490 if (overflow) { | 1496 if (overflow) { |
| 1491 // hi:lo = left * right. | 1497 // hi:lo = left * right. |
| 1492 __ Dmul(result, left, right); | 1498 __ Dmul(result, left, right); |
| 1493 __ dsra32(scratch, result, 0); | 1499 __ dsra32(scratch, result, 0); |
| 1494 __ sra(at, result, 31); | 1500 __ sra(at, result, 31); |
| 1495 | 1501 |
| 1496 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at)); | 1502 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch, |
| 1503 Operand(at)); |
| 1497 } else { | 1504 } else { |
| 1498 __ mul(result, left, right); | 1505 __ mul(result, left, right); |
| 1499 } | 1506 } |
| 1500 | 1507 |
| 1501 if (bailout_on_minus_zero) { | 1508 if (bailout_on_minus_zero) { |
| 1502 Label done; | 1509 Label done; |
| 1503 __ Xor(at, left, right); | 1510 __ Xor(at, left, right); |
| 1504 __ Branch(&done, ge, at, Operand(zero_reg)); | 1511 __ Branch(&done, ge, at, Operand(zero_reg)); |
| 1505 // Bail out if the result is minus zero. | 1512 // Bail out if the result is minus zero. |
| 1506 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, | 1513 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result, |
| 1507 Operand(zero_reg)); | 1514 Operand(zero_reg)); |
| 1508 __ bind(&done); | 1515 __ bind(&done); |
| 1509 } | 1516 } |
| 1510 } | 1517 } |
| 1511 } | 1518 } |
| 1512 | 1519 |
| 1513 | 1520 |
| 1514 void LCodeGen::DoBitI(LBitI* instr) { | 1521 void LCodeGen::DoBitI(LBitI* instr) { |
| 1515 LOperand* left_op = instr->left(); | 1522 LOperand* left_op = instr->left(); |
| 1516 LOperand* right_op = instr->right(); | 1523 LOperand* right_op = instr->right(); |
| (...skipping 44 matching lines...) | |
| 1561 case Token::ROR: | 1568 case Token::ROR: |
| 1562 __ Ror(result, left, Operand(ToRegister(right_op))); | 1569 __ Ror(result, left, Operand(ToRegister(right_op))); |
| 1563 break; | 1570 break; |
| 1564 case Token::SAR: | 1571 case Token::SAR: |
| 1565 __ srav(result, left, ToRegister(right_op)); | 1572 __ srav(result, left, ToRegister(right_op)); |
| 1566 break; | 1573 break; |
| 1567 case Token::SHR: | 1574 case Token::SHR: |
| 1568 __ srlv(result, left, ToRegister(right_op)); | 1575 __ srlv(result, left, ToRegister(right_op)); |
| 1569 if (instr->can_deopt()) { | 1576 if (instr->can_deopt()) { |
| 1570 // TODO(yy): (-1) >>> 0. anything else? | 1577 // TODO(yy): (-1) >>> 0. anything else? |
| 1571 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result, | 1578 DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result, |
| 1572 Operand(zero_reg)); | 1579 Operand(zero_reg)); |
| 1573 DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result, | 1580 DeoptimizeIf(gt, instr, DeoptimizeReason::kNegativeValue, result, |
| 1574 Operand(kMaxInt)); | 1581 Operand(kMaxInt)); |
| 1575 } | 1582 } |
| 1576 break; | 1583 break; |
| 1577 case Token::SHL: | 1584 case Token::SHL: |
| 1578 __ sllv(result, left, ToRegister(right_op)); | 1585 __ sllv(result, left, ToRegister(right_op)); |
| 1579 break; | 1586 break; |
| 1580 default: | 1587 default: |
| 1581 UNREACHABLE(); | 1588 UNREACHABLE(); |
| 1582 break; | 1589 break; |
| 1583 } | 1590 } |
| 1584 } else { | 1591 } else { |
| (...skipping 14 matching lines...) | |
| 1599 } else { | 1606 } else { |
| 1600 __ Move(result, left); | 1607 __ Move(result, left); |
| 1601 } | 1608 } |
| 1602 break; | 1609 break; |
| 1603 case Token::SHR: | 1610 case Token::SHR: |
| 1604 if (shift_count != 0) { | 1611 if (shift_count != 0) { |
| 1605 __ srl(result, left, shift_count); | 1612 __ srl(result, left, shift_count); |
| 1606 } else { | 1613 } else { |
| 1607 if (instr->can_deopt()) { | 1614 if (instr->can_deopt()) { |
| 1608 __ And(at, left, Operand(0x80000000)); | 1615 __ And(at, left, Operand(0x80000000)); |
| 1609 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at, | 1616 DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at, |
| 1610 Operand(zero_reg)); | 1617 Operand(zero_reg)); |
| 1611 } | 1618 } |
| 1612 __ Move(result, left); | 1619 __ Move(result, left); |
| 1613 } | 1620 } |
| 1614 break; | 1621 break; |
| 1615 case Token::SHL: | 1622 case Token::SHL: |
| 1616 if (shift_count != 0) { | 1623 if (shift_count != 0) { |
| 1617 if (instr->hydrogen_value()->representation().IsSmi()) { | 1624 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1618 __ dsll(result, left, shift_count); | 1625 __ dsll(result, left, shift_count); |
| 1619 } else { | 1626 } else { |
| (...skipping 451 matching lines...) | |
| 2071 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); | 2078 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); |
| 2072 } | 2079 } |
| 2073 | 2080 |
| 2074 if (expected.Contains(ToBooleanICStub::SMI)) { | 2081 if (expected.Contains(ToBooleanICStub::SMI)) { |
| 2075 // Smis: 0 -> false, all other -> true. | 2082 // Smis: 0 -> false, all other -> true. |
| 2076 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); | 2083 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); |
| 2077 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2084 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2078 } else if (expected.NeedsMap()) { | 2085 } else if (expected.NeedsMap()) { |
| 2079 // If we need a map later and have a Smi -> deopt. | 2086 // If we need a map later and have a Smi -> deopt. |
| 2080 __ SmiTst(reg, at); | 2087 __ SmiTst(reg, at); |
| 2081 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 2088 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg)); |
| 2082 } | 2089 } |
| 2083 | 2090 |
| 2084 const Register map = scratch0(); | 2091 const Register map = scratch0(); |
| 2085 if (expected.NeedsMap()) { | 2092 if (expected.NeedsMap()) { |
| 2086 __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2093 __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 2087 if (expected.CanBeUndetectable()) { | 2094 if (expected.CanBeUndetectable()) { |
| 2088 // Undetectable -> false. | 2095 // Undetectable -> false. |
| 2089 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); | 2096 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 2090 __ And(at, at, Operand(1 << Map::kIsUndetectable)); | 2097 __ And(at, at, Operand(1 << Map::kIsUndetectable)); |
| 2091 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); | 2098 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); |
| (...skipping 43 matching lines...) | |
| 2135 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), | 2142 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), |
| 2136 ne, dbl_scratch, kDoubleRegZero); | 2143 ne, dbl_scratch, kDoubleRegZero); |
| 2137 // Falls through if dbl_scratch == 0. | 2144 // Falls through if dbl_scratch == 0. |
| 2138 __ Branch(instr->FalseLabel(chunk_)); | 2145 __ Branch(instr->FalseLabel(chunk_)); |
| 2139 __ bind(&not_heap_number); | 2146 __ bind(&not_heap_number); |
| 2140 } | 2147 } |
| 2141 | 2148 |
| 2142 if (!expected.IsGeneric()) { | 2149 if (!expected.IsGeneric()) { |
| 2143 // We've seen something for the first time -> deopt. | 2150 // We've seen something for the first time -> deopt. |
| 2144 // This can only happen if we are not generic already. | 2151 // This can only happen if we are not generic already. |
| 2145 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg, | 2152 DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg, |
| 2146 Operand(zero_reg)); | 2153 Operand(zero_reg)); |
| 2147 } | 2154 } |
| 2148 } | 2155 } |
| 2149 } | 2156 } |
| 2150 } | 2157 } |
| 2151 | 2158 |
| 2152 | 2159 |
| 2153 void LCodeGen::EmitGoto(int block) { | 2160 void LCodeGen::EmitGoto(int block) { |
| 2154 if (!IsNextEmittedBlock(block)) { | 2161 if (!IsNextEmittedBlock(block)) { |
| 2155 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2162 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| (...skipping 360 matching lines...) | |
| 2516 // Loop through the {object}s prototype chain looking for the {prototype}. | 2523 // Loop through the {object}s prototype chain looking for the {prototype}. |
| 2517 __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); | 2524 __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 2518 Label loop; | 2525 Label loop; |
| 2519 __ bind(&loop); | 2526 __ bind(&loop); |
| 2520 | 2527 |
| 2521 // Deoptimize if the object needs to be access checked. | 2528 // Deoptimize if the object needs to be access checked. |
| 2522 __ lbu(object_instance_type, | 2529 __ lbu(object_instance_type, |
| 2523 FieldMemOperand(object_map, Map::kBitFieldOffset)); | 2530 FieldMemOperand(object_map, Map::kBitFieldOffset)); |
| 2524 __ And(object_instance_type, object_instance_type, | 2531 __ And(object_instance_type, object_instance_type, |
| 2525 Operand(1 << Map::kIsAccessCheckNeeded)); | 2532 Operand(1 << Map::kIsAccessCheckNeeded)); |
| 2526 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type, | 2533 DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type, |
| 2527 Operand(zero_reg)); | 2534 Operand(zero_reg)); |
| 2528 __ lbu(object_instance_type, | 2535 __ lbu(object_instance_type, |
| 2529 FieldMemOperand(object_map, Map::kInstanceTypeOffset)); | 2536 FieldMemOperand(object_map, Map::kInstanceTypeOffset)); |
| 2530 DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type, | 2537 DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type, |
| 2531 Operand(JS_PROXY_TYPE)); | 2538 Operand(JS_PROXY_TYPE)); |
| 2532 | 2539 |
| 2533 __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); | 2540 __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); |
| 2534 __ LoadRoot(at, Heap::kNullValueRootIndex); | 2541 __ LoadRoot(at, Heap::kNullValueRootIndex); |
| 2535 EmitFalseBranch(instr, eq, object_prototype, Operand(at)); | 2542 EmitFalseBranch(instr, eq, object_prototype, Operand(at)); |
| 2536 EmitTrueBranch(instr, eq, object_prototype, Operand(prototype)); | 2543 EmitTrueBranch(instr, eq, object_prototype, Operand(prototype)); |
| 2537 __ Branch(&loop, USE_DELAY_SLOT); | 2544 __ Branch(&loop, USE_DELAY_SLOT); |
| 2538 __ ld(object_map, FieldMemOperand(object_prototype, | 2545 __ ld(object_map, FieldMemOperand(object_prototype, |
| 2539 HeapObject::kMapOffset)); // In delay slot. | 2546 HeapObject::kMapOffset)); // In delay slot. |
| 2540 } | 2547 } |
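The loop above walks the receiver's prototype chain: each step deopts if the current map needs an access check or belongs to a proxy, answers false on reaching null, and true on reaching the searched-for prototype. A sketch of the control flow with illustrative stand-in types (not V8's real object layout):

```cpp
#include <stdexcept>

// Illustrative stand-ins for the fields the generated code reads.
struct Map;
struct HeapObject {
  Map* map;
};
struct Map {
  bool is_access_check_needed;
  bool is_js_proxy;
  HeapObject* prototype;  // nullptr plays the role of the null sentinel
};

// Mirrors the loop emitted by DoHasInPrototypeChainAndBranch.
bool HasInPrototypeChain(HeapObject* object, HeapObject* prototype) {
  Map* map = object->map;
  while (true) {
    if (map->is_access_check_needed) {
      throw std::runtime_error("deopt: kAccessCheck");
    }
    if (map->is_js_proxy) throw std::runtime_error("deopt: kProxy");
    HeapObject* object_prototype = map->prototype;
    if (object_prototype == nullptr) return false;   // reached null: false
    if (object_prototype == prototype) return true;  // found it: true
    map = object_prototype->map;  // next map is loaded in the delay slot
  }
}
```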
| (...skipping 101 matching lines...) | |
| 2642 | 2649 |
| 2643 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 2650 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2644 Register context = ToRegister(instr->context()); | 2651 Register context = ToRegister(instr->context()); |
| 2645 Register result = ToRegister(instr->result()); | 2652 Register result = ToRegister(instr->result()); |
| 2646 | 2653 |
| 2647 __ ld(result, ContextMemOperand(context, instr->slot_index())); | 2654 __ ld(result, ContextMemOperand(context, instr->slot_index())); |
| 2648 if (instr->hydrogen()->RequiresHoleCheck()) { | 2655 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2649 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2656 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2650 | 2657 |
| 2651 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2658 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2652 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | 2659 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at)); |
| 2653 } else { | 2660 } else { |
| 2654 Label is_not_hole; | 2661 Label is_not_hole; |
| 2655 __ Branch(&is_not_hole, ne, result, Operand(at)); | 2662 __ Branch(&is_not_hole, ne, result, Operand(at)); |
| 2656 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 2663 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 2657 __ bind(&is_not_hole); | 2664 __ bind(&is_not_hole); |
| 2658 } | 2665 } |
| 2659 } | 2666 } |
| 2660 } | 2667 } |
| 2661 | 2668 |
| 2662 | 2669 |
| 2663 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 2670 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 2664 Register context = ToRegister(instr->context()); | 2671 Register context = ToRegister(instr->context()); |
| 2665 Register value = ToRegister(instr->value()); | 2672 Register value = ToRegister(instr->value()); |
| 2666 Register scratch = scratch0(); | 2673 Register scratch = scratch0(); |
| 2667 MemOperand target = ContextMemOperand(context, instr->slot_index()); | 2674 MemOperand target = ContextMemOperand(context, instr->slot_index()); |
| 2668 | 2675 |
| 2669 Label skip_assignment; | 2676 Label skip_assignment; |
| 2670 | 2677 |
| 2671 if (instr->hydrogen()->RequiresHoleCheck()) { | 2678 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2672 __ ld(scratch, target); | 2679 __ ld(scratch, target); |
| 2673 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2680 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2674 | 2681 |
| 2675 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2682 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2676 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at)); | 2683 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at)); |
| 2677 } else { | 2684 } else { |
| 2678 __ Branch(&skip_assignment, ne, scratch, Operand(at)); | 2685 __ Branch(&skip_assignment, ne, scratch, Operand(at)); |
| 2679 } | 2686 } |
| 2680 } | 2687 } |
| 2681 | 2688 |
| 2682 __ sd(value, target); | 2689 __ sd(value, target); |
| 2683 if (instr->hydrogen()->NeedsWriteBarrier()) { | 2690 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 2684 SmiCheck check_needed = | 2691 SmiCheck check_needed = |
| 2685 instr->hydrogen()->value()->type().IsHeapObject() | 2692 instr->hydrogen()->value()->type().IsHeapObject() |
| 2686 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 2693 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 71 matching lines...) | |
| 2758 Register scratch = scratch0(); | 2765 Register scratch = scratch0(); |
| 2759 Register function = ToRegister(instr->function()); | 2766 Register function = ToRegister(instr->function()); |
| 2760 Register result = ToRegister(instr->result()); | 2767 Register result = ToRegister(instr->result()); |
| 2761 | 2768 |
| 2762 // Get the prototype or initial map from the function. | 2769 // Get the prototype or initial map from the function. |
| 2763 __ ld(result, | 2770 __ ld(result, |
| 2764 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2771 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 2765 | 2772 |
| 2766 // Check that the function has a prototype or an initial map. | 2773 // Check that the function has a prototype or an initial map. |
| 2767 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2774 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
| 2768 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); | 2775 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at)); |
| 2769 | 2776 |
| 2770 // If the function does not have an initial map, we're done. | 2777 // If the function does not have an initial map, we're done. |
| 2771 Label done; | 2778 Label done; |
| 2772 __ GetObjectType(result, scratch, scratch); | 2779 __ GetObjectType(result, scratch, scratch); |
| 2773 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); | 2780 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); |
| 2774 | 2781 |
| 2775 // Get the prototype from the initial map. | 2782 // Get the prototype from the initial map. |
| 2776 __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 2783 __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 2777 | 2784 |
| 2778 // All done. | 2785 // All done. |
| (...skipping 109 matching lines...) | |
| 2888 break; | 2895 break; |
| 2889 case UINT16_ELEMENTS: | 2896 case UINT16_ELEMENTS: |
| 2890 __ lhu(result, mem_operand); | 2897 __ lhu(result, mem_operand); |
| 2891 break; | 2898 break; |
| 2892 case INT32_ELEMENTS: | 2899 case INT32_ELEMENTS: |
| 2893 __ lw(result, mem_operand); | 2900 __ lw(result, mem_operand); |
| 2894 break; | 2901 break; |
| 2895 case UINT32_ELEMENTS: | 2902 case UINT32_ELEMENTS: |
| 2896 __ lw(result, mem_operand); | 2903 __ lw(result, mem_operand); |
| 2897 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 2904 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 2898 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue, | 2905 DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue, |
| 2899 result, Operand(0x80000000)); | 2906 result, Operand(0x80000000)); |
| 2900 } | 2907 } |
| 2901 break; | 2908 break; |
| 2902 case FLOAT32_ELEMENTS: | 2909 case FLOAT32_ELEMENTS: |
| 2903 case FLOAT64_ELEMENTS: | 2910 case FLOAT64_ELEMENTS: |
| 2904 case FAST_DOUBLE_ELEMENTS: | 2911 case FAST_DOUBLE_ELEMENTS: |
| 2905 case FAST_ELEMENTS: | 2912 case FAST_ELEMENTS: |
| 2906 case FAST_SMI_ELEMENTS: | 2913 case FAST_SMI_ELEMENTS: |
| 2907 case FAST_HOLEY_DOUBLE_ELEMENTS: | 2914 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 2908 case FAST_HOLEY_ELEMENTS: | 2915 case FAST_HOLEY_ELEMENTS: |
| (...skipping 42 matching lines...) |
| 2951 } else { | 2958 } else { |
| 2952 __ dsra(at, key, -shift_size); | 2959 __ dsra(at, key, -shift_size); |
| 2953 } | 2960 } |
| 2954 __ Daddu(scratch, scratch, at); | 2961 __ Daddu(scratch, scratch, at); |
| 2955 } | 2962 } |
| 2956 | 2963 |
| 2957 __ ldc1(result, MemOperand(scratch)); | 2964 __ ldc1(result, MemOperand(scratch)); |
| 2958 | 2965 |
| 2959 if (instr->hydrogen()->RequiresHoleCheck()) { | 2966 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2960 __ FmoveHigh(scratch, result); | 2967 __ FmoveHigh(scratch, result); |
| 2961 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, | 2968 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, |
| 2962 Operand(static_cast<int32_t>(kHoleNanUpper32))); | 2969 Operand(static_cast<int32_t>(kHoleNanUpper32))); |
| 2963 } | 2970 } |
| 2964 } | 2971 } |
| 2965 | 2972 |
| 2966 | 2973 |
| 2967 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 2974 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 2968 HLoadKeyed* hinstr = instr->hydrogen(); | 2975 HLoadKeyed* hinstr = instr->hydrogen(); |
| 2969 Register elements = ToRegister(instr->elements()); | 2976 Register elements = ToRegister(instr->elements()); |
| 2970 Register result = ToRegister(instr->result()); | 2977 Register result = ToRegister(instr->result()); |
| 2971 Register scratch = scratch0(); | 2978 Register scratch = scratch0(); |
| (...skipping 33 matching lines...) |
| 3005 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); | 3012 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
| 3006 offset = SmiWordOffset(offset); | 3013 offset = SmiWordOffset(offset); |
| 3007 } | 3014 } |
| 3008 | 3015 |
| 3009 __ Load(result, MemOperand(store_base, offset), representation); | 3016 __ Load(result, MemOperand(store_base, offset), representation); |
| 3010 | 3017 |
| 3011 // Check for the hole value. | 3018 // Check for the hole value. |
| 3012 if (hinstr->RequiresHoleCheck()) { | 3019 if (hinstr->RequiresHoleCheck()) { |
| 3013 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3020 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 3014 __ SmiTst(result, scratch); | 3021 __ SmiTst(result, scratch); |
| 3015 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, | 3022 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch, |
| 3016 Operand(zero_reg)); | 3023 Operand(zero_reg)); |
| 3017 } else { | 3024 } else { |
| 3018 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3025 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 3019 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch)); | 3026 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, |
| 3027 Operand(scratch)); |
| 3020 } | 3028 } |
| 3021 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { | 3029 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { |
| 3022 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); | 3030 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); |
| 3023 Label done; | 3031 Label done; |
| 3024 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3032 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 3025 __ Branch(&done, ne, result, Operand(scratch)); | 3033 __ Branch(&done, ne, result, Operand(scratch)); |
| 3026 if (info()->IsStub()) { | 3034 if (info()->IsStub()) { |
| 3027 // A stub can safely convert the hole to undefined only if the array | 3035 // A stub can safely convert the hole to undefined only if the array |
| 3028 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise | 3036 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise |
| 3029 // it needs to bail out. | 3037 // it needs to bail out. |
| 3030 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); | 3038 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); |
| 3031 // The comparison only needs LS bits of value, which is a smi. | 3039 // The comparison only needs LS bits of value, which is a smi. |
| 3032 __ ld(result, FieldMemOperand(result, Cell::kValueOffset)); | 3040 __ ld(result, FieldMemOperand(result, Cell::kValueOffset)); |
| 3033 DeoptimizeIf(ne, instr, Deoptimizer::kHole, result, | 3041 DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result, |
| 3034 Operand(Smi::FromInt(Isolate::kArrayProtectorValid))); | 3042 Operand(Smi::FromInt(Isolate::kArrayProtectorValid))); |
| 3035 } | 3043 } |
| 3036 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 3044 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 3037 __ bind(&done); | 3045 __ bind(&done); |
| 3038 } | 3046 } |
| 3039 } | 3047 } |
| 3040 | 3048 |
| 3041 | 3049 |
| 3042 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3050 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 3043 if (instr->is_fixed_typed_array()) { | 3051 if (instr->is_fixed_typed_array()) { |
| (...skipping 139 matching lines...) |
| 3183 } | 3191 } |
| 3184 | 3192 |
| 3185 // Normal function. Replace undefined or null with global receiver. | 3193 // Normal function. Replace undefined or null with global receiver. |
| 3186 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3194 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 3187 __ Branch(&global_object, eq, receiver, Operand(scratch)); | 3195 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 3188 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3196 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3189 __ Branch(&global_object, eq, receiver, Operand(scratch)); | 3197 __ Branch(&global_object, eq, receiver, Operand(scratch)); |
| 3190 | 3198 |
| 3191 // Deoptimize if the receiver is not a JS object. | 3199 // Deoptimize if the receiver is not a JS object. |
| 3192 __ SmiTst(receiver, scratch); | 3200 __ SmiTst(receiver, scratch); |
| 3193 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg)); | 3201 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg)); |
| 3194 | 3202 |
| 3195 __ GetObjectType(receiver, scratch, scratch); | 3203 __ GetObjectType(receiver, scratch, scratch); |
| 3196 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch, | 3204 DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch, |
| 3197 Operand(FIRST_JS_RECEIVER_TYPE)); | 3205 Operand(FIRST_JS_RECEIVER_TYPE)); |
| 3198 __ Branch(&result_in_receiver); | 3206 __ Branch(&result_in_receiver); |
| 3199 | 3207 |
| 3200 __ bind(&global_object); | 3208 __ bind(&global_object); |
| 3201 __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3209 __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 3202 __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); | 3210 __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); |
| 3203 __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); | 3211 __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); |
| 3204 | 3212 |
| 3205 if (result.is(receiver)) { | 3213 if (result.is(receiver)) { |
| 3206 __ bind(&result_in_receiver); | 3214 __ bind(&result_in_receiver); |
| (...skipping 13 matching lines...) |
| 3220 Register length = ToRegister(instr->length()); | 3228 Register length = ToRegister(instr->length()); |
| 3221 Register elements = ToRegister(instr->elements()); | 3229 Register elements = ToRegister(instr->elements()); |
| 3222 Register scratch = scratch0(); | 3230 Register scratch = scratch0(); |
| 3223 DCHECK(receiver.is(a0)); // Used for parameter count. | 3231 DCHECK(receiver.is(a0)); // Used for parameter count. |
| 3224 DCHECK(function.is(a1)); // Required by InvokeFunction. | 3232 DCHECK(function.is(a1)); // Required by InvokeFunction. |
| 3225 DCHECK(ToRegister(instr->result()).is(v0)); | 3233 DCHECK(ToRegister(instr->result()).is(v0)); |
| 3226 | 3234 |
| 3227 // Copy the arguments to this function possibly from the | 3235 // Copy the arguments to this function possibly from the |
| 3228 // adaptor frame below it. | 3236 // adaptor frame below it. |
| 3229 const uint32_t kArgumentsLimit = 1 * KB; | 3237 const uint32_t kArgumentsLimit = 1 * KB; |
| 3230 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length, | 3238 DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length, |
| 3231 Operand(kArgumentsLimit)); | 3239 Operand(kArgumentsLimit)); |
| 3232 | 3240 |
| 3233 // Push the receiver and use the register to keep the original | 3241 // Push the receiver and use the register to keep the original |
| 3234 // number of arguments. | 3242 // number of arguments. |
| 3235 __ push(receiver); | 3243 __ push(receiver); |
| 3236 __ Move(receiver, length); | 3244 __ Move(receiver, length); |
| 3237 // The arguments are at a one pointer size offset from elements. | 3245 // The arguments are at a one pointer size offset from elements. |
| 3238 __ Daddu(elements, elements, Operand(1 * kPointerSize)); | 3246 __ Daddu(elements, elements, Operand(1 * kPointerSize)); |
| 3239 | 3247 |
| 3240 // Loop through the arguments pushing them onto the execution | 3248 // Loop through the arguments pushing them onto the execution |
| (...skipping 132 matching lines...) |
| 3373 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | 3381 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
| 3374 DCHECK(instr->context() != NULL); | 3382 DCHECK(instr->context() != NULL); |
| 3375 DCHECK(ToRegister(instr->context()).is(cp)); | 3383 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3376 Register input = ToRegister(instr->value()); | 3384 Register input = ToRegister(instr->value()); |
| 3377 Register result = ToRegister(instr->result()); | 3385 Register result = ToRegister(instr->result()); |
| 3378 Register scratch = scratch0(); | 3386 Register scratch = scratch0(); |
| 3379 | 3387 |
| 3380 // Deoptimize if not a heap number. | 3388 // Deoptimize if not a heap number. |
| 3381 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3389 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3382 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3390 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3383 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at)); | 3391 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch, |
| 3392 Operand(at)); |
| 3384 | 3393 |
| 3385 Label done; | 3394 Label done; |
| 3386 Register exponent = scratch0(); | 3395 Register exponent = scratch0(); |
| 3387 scratch = no_reg; | 3396 scratch = no_reg; |
| 3388 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3397 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 3389 // Check the sign of the argument. If the argument is positive, just | 3398 // Check the sign of the argument. If the argument is positive, just |
| 3390 // return it. | 3399 // return it. |
| 3391 __ Move(result, input); | 3400 __ Move(result, input); |
| 3392 __ And(at, exponent, Operand(HeapNumber::kSignMask)); | 3401 __ And(at, exponent, Operand(HeapNumber::kSignMask)); |
| 3393 __ Branch(&done, eq, at, Operand(zero_reg)); | 3402 __ Branch(&done, eq, at, Operand(zero_reg)); |
| (...skipping 46 matching lines...) |
| 3440 | 3449 |
| 3441 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3450 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3442 Register input = ToRegister(instr->value()); | 3451 Register input = ToRegister(instr->value()); |
| 3443 Register result = ToRegister(instr->result()); | 3452 Register result = ToRegister(instr->result()); |
| 3444 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | 3453 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
| 3445 Label done; | 3454 Label done; |
| 3446 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); | 3455 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); |
| 3447 __ mov(result, input); | 3456 __ mov(result, input); |
| 3448 __ subu(result, zero_reg, input); | 3457 __ subu(result, zero_reg, input); |
| 3449 // Overflow if result is still negative, i.e. 0x80000000. | 3458 // Overflow if result is still negative, i.e. 0x80000000. |
| 3450 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg)); | 3459 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result, |
| 3460 Operand(zero_reg)); |
| 3451 __ bind(&done); | 3461 __ bind(&done); |
| 3452 } | 3462 } |
| 3453 | 3463 |
| 3454 | 3464 |
| 3455 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { | 3465 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { |
| 3456 Register input = ToRegister(instr->value()); | 3466 Register input = ToRegister(instr->value()); |
| 3457 Register result = ToRegister(instr->result()); | 3467 Register result = ToRegister(instr->result()); |
| 3458 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); | 3468 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
| 3459 Label done; | 3469 Label done; |
| 3460 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); | 3470 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); |
| 3461 __ mov(result, input); | 3471 __ mov(result, input); |
| 3462 __ dsubu(result, zero_reg, input); | 3472 __ dsubu(result, zero_reg, input); |
| 3463 // Overflow if result is still negative, i.e. 0x80000000 00000000. | 3473 // Overflow if result is still negative, i.e. 0x80000000 00000000. |
| 3464 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg)); | 3474 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result, |
| 3475 Operand(zero_reg)); |
| 3465 __ bind(&done); | 3476 __ bind(&done); |
| 3466 } | 3477 } |
| 3467 | 3478 |
| 3468 | 3479 |
| 3469 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3480 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3470 // Class for deferred case. | 3481 // Class for deferred case. |
| 3471 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { | 3482 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { |
| 3472 public: | 3483 public: |
| 3473 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3484 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
| 3474 : LDeferredCode(codegen), instr_(instr) { } | 3485 : LDeferredCode(codegen), instr_(instr) { } |
| (...skipping 36 matching lines...) |
| 3511 Register except_flag = ToRegister(instr->temp()); | 3522 Register except_flag = ToRegister(instr->temp()); |
| 3512 | 3523 |
| 3513 __ EmitFPUTruncate(kRoundToMinusInf, | 3524 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3514 result, | 3525 result, |
| 3515 input, | 3526 input, |
| 3516 scratch1, | 3527 scratch1, |
| 3517 double_scratch0(), | 3528 double_scratch0(), |
| 3518 except_flag); | 3529 except_flag); |
| 3519 | 3530 |
| 3520 // Deopt if the operation did not succeed. | 3531 // Deopt if the operation did not succeed. |
| 3521 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 3532 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, |
| 3522 Operand(zero_reg)); | 3533 Operand(zero_reg)); |
| 3523 | 3534 |
| 3524 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3535 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3525 // Test for -0. | 3536 // Test for -0. |
| 3526 Label done; | 3537 Label done; |
| 3527 __ Branch(&done, ne, result, Operand(zero_reg)); | 3538 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 3528 __ mfhc1(scratch1, input); // Get exponent/sign bits. | 3539 __ mfhc1(scratch1, input); // Get exponent/sign bits. |
| 3529 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 3540 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 3530 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 3541 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, |
| 3531 Operand(zero_reg)); | 3542 Operand(zero_reg)); |
| 3532 __ bind(&done); | 3543 __ bind(&done); |
| 3533 } | 3544 } |
| 3534 } | 3545 } |
| 3535 | 3546 |
| 3536 | 3547 |
| 3537 void LCodeGen::DoMathRound(LMathRound* instr) { | 3548 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3538 DoubleRegister input = ToDoubleRegister(instr->value()); | 3549 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3539 Register result = ToRegister(instr->result()); | 3550 Register result = ToRegister(instr->result()); |
| 3540 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3551 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
| (...skipping 13 matching lines...) |
| 3554 __ mov(result, zero_reg); | 3565 __ mov(result, zero_reg); |
| 3555 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3566 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3556 __ Branch(&check_sign_on_zero); | 3567 __ Branch(&check_sign_on_zero); |
| 3557 } else { | 3568 } else { |
| 3558 __ Branch(&done); | 3569 __ Branch(&done); |
| 3559 } | 3570 } |
| 3560 __ bind(&skip1); | 3571 __ bind(&skip1); |
| 3561 | 3572 |
| 3562 // The following conversion will not work with numbers | 3573 // The following conversion will not work with numbers |
| 3563 // outside of ]-2^32, 2^32[. | 3574 // outside of ]-2^32, 2^32[. |
| 3564 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch, | 3575 DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch, |
| 3565 Operand(HeapNumber::kExponentBias + 32)); | 3576 Operand(HeapNumber::kExponentBias + 32)); |
| 3566 | 3577 |
| 3567 // Save the original sign for later comparison. | 3578 // Save the original sign for later comparison. |
| 3568 __ And(scratch, result, Operand(HeapNumber::kSignMask)); | 3579 __ And(scratch, result, Operand(HeapNumber::kSignMask)); |
| 3569 | 3580 |
| 3570 __ Move(double_scratch0(), 0.5); | 3581 __ Move(double_scratch0(), 0.5); |
| 3571 __ add_d(double_scratch0(), input, double_scratch0()); | 3582 __ add_d(double_scratch0(), input, double_scratch0()); |
| 3572 | 3583 |
| 3573 // Check sign of the result: if the sign changed, the input | 3584 // Check sign of the result: if the sign changed, the input |
| 3574 // value was in ]0.5, 0[ and the result should be -0. | 3585 // value was in ]0.5, 0[ and the result should be -0. |
| 3575 __ mfhc1(result, double_scratch0()); | 3586 __ mfhc1(result, double_scratch0()); |
| 3576 // mfhc1 sign-extends, clear the upper bits. | 3587 // mfhc1 sign-extends, clear the upper bits. |
| 3577 __ dsll32(result, result, 0); | 3588 __ dsll32(result, result, 0); |
| 3578 __ dsrl32(result, result, 0); | 3589 __ dsrl32(result, result, 0); |
| 3579 __ Xor(result, result, Operand(scratch)); | 3590 __ Xor(result, result, Operand(scratch)); |
| 3580 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3591 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3581 // ARM uses 'mi' here, which is 'lt' | 3592 // ARM uses 'mi' here, which is 'lt' |
| 3582 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg)); | 3593 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result, |
| 3594 Operand(zero_reg)); |
| 3583 } else { | 3595 } else { |
| 3584 Label skip2; | 3596 Label skip2; |
| 3585 // ARM uses 'mi' here, which is 'lt' | 3597 // ARM uses 'mi' here, which is 'lt' |
| 3586 // Negating it results in 'ge' | 3598 // Negating it results in 'ge' |
| 3587 __ Branch(&skip2, ge, result, Operand(zero_reg)); | 3599 __ Branch(&skip2, ge, result, Operand(zero_reg)); |
| 3588 __ mov(result, zero_reg); | 3600 __ mov(result, zero_reg); |
| 3589 __ Branch(&done); | 3601 __ Branch(&done); |
| 3590 __ bind(&skip2); | 3602 __ bind(&skip2); |
| 3591 } | 3603 } |
| 3592 | 3604 |
| 3593 Register except_flag = scratch; | 3605 Register except_flag = scratch; |
| 3594 __ EmitFPUTruncate(kRoundToMinusInf, | 3606 __ EmitFPUTruncate(kRoundToMinusInf, |
| 3595 result, | 3607 result, |
| 3596 double_scratch0(), | 3608 double_scratch0(), |
| 3597 at, | 3609 at, |
| 3598 double_scratch1, | 3610 double_scratch1, |
| 3599 except_flag); | 3611 except_flag); |
| 3600 | 3612 |
| 3601 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 3613 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, |
| 3602 Operand(zero_reg)); | 3614 Operand(zero_reg)); |
| 3603 | 3615 |
| 3604 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3616 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3605 // Test for -0. | 3617 // Test for -0. |
| 3606 __ Branch(&done, ne, result, Operand(zero_reg)); | 3618 __ Branch(&done, ne, result, Operand(zero_reg)); |
| 3607 __ bind(&check_sign_on_zero); | 3619 __ bind(&check_sign_on_zero); |
| 3608 __ mfhc1(scratch, input); // Get exponent/sign bits. | 3620 __ mfhc1(scratch, input); // Get exponent/sign bits. |
| 3609 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); | 3621 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); |
| 3610 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch, | 3622 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch, |
| 3611 Operand(zero_reg)); | 3623 Operand(zero_reg)); |
| 3612 } | 3624 } |
| 3613 __ bind(&done); | 3625 __ bind(&done); |
| 3614 } | 3626 } |
| 3615 | 3627 |
| 3616 | 3628 |
| 3617 void LCodeGen::DoMathFround(LMathFround* instr) { | 3629 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3618 DoubleRegister input = ToDoubleRegister(instr->value()); | 3630 DoubleRegister input = ToDoubleRegister(instr->value()); |
| 3619 DoubleRegister result = ToDoubleRegister(instr->result()); | 3631 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3620 __ cvt_s_d(result, input); | 3632 __ cvt_s_d(result, input); |
| (...skipping 46 matching lines...) |
| 3667 | 3679 |
| 3668 if (exponent_type.IsSmi()) { | 3680 if (exponent_type.IsSmi()) { |
| 3669 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3681 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3670 __ CallStub(&stub); | 3682 __ CallStub(&stub); |
| 3671 } else if (exponent_type.IsTagged()) { | 3683 } else if (exponent_type.IsTagged()) { |
| 3672 Label no_deopt; | 3684 Label no_deopt; |
| 3673 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3685 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 3674 DCHECK(!a7.is(tagged_exponent)); | 3686 DCHECK(!a7.is(tagged_exponent)); |
| 3675 __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3687 __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
| 3676 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 3688 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 3677 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at)); | 3689 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, a7, Operand(at)); |
| 3678 __ bind(&no_deopt); | 3690 __ bind(&no_deopt); |
| 3679 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3691 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3680 __ CallStub(&stub); | 3692 __ CallStub(&stub); |
| 3681 } else if (exponent_type.IsInteger32()) { | 3693 } else if (exponent_type.IsInteger32()) { |
| 3682 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3694 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3683 __ CallStub(&stub); | 3695 __ CallStub(&stub); |
| 3684 } else { | 3696 } else { |
| 3685 DCHECK(exponent_type.IsDouble()); | 3697 DCHECK(exponent_type.IsDouble()); |
| 3686 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3698 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3687 __ CallStub(&stub); | 3699 __ CallStub(&stub); |
| (...skipping 339 matching lines...) |
| 4027 } else { | 4039 } else { |
| 4028 reg = ToRegister(instr->index()); | 4040 reg = ToRegister(instr->index()); |
| 4029 operand = ToOperand(instr->length()); | 4041 operand = ToOperand(instr->length()); |
| 4030 } | 4042 } |
| 4031 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4043 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 4032 Label done; | 4044 Label done; |
| 4033 __ Branch(&done, NegateCondition(cc), reg, operand); | 4045 __ Branch(&done, NegateCondition(cc), reg, operand); |
| 4034 __ stop("eliminated bounds check failed"); | 4046 __ stop("eliminated bounds check failed"); |
| 4035 __ bind(&done); | 4047 __ bind(&done); |
| 4036 } else { | 4048 } else { |
| 4037 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand); | 4049 DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand); |
| 4038 } | 4050 } |
| 4039 } | 4051 } |
| 4040 | 4052 |
| 4041 | 4053 |
| 4042 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4054 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4043 Register external_pointer = ToRegister(instr->elements()); | 4055 Register external_pointer = ToRegister(instr->elements()); |
| 4044 Register key = no_reg; | 4056 Register key = no_reg; |
| 4045 ElementsKind elements_kind = instr->elements_kind(); | 4057 ElementsKind elements_kind = instr->elements_kind(); |
| 4046 bool key_is_constant = instr->key()->IsConstantOperand(); | 4058 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4047 int constant_key = 0; | 4059 int constant_key = 0; |
| (...skipping 307 matching lines...) |
| 4355 instr->hydrogen()->kind()); | 4367 instr->hydrogen()->kind()); |
| 4356 __ mov(a0, result); | 4368 __ mov(a0, result); |
| 4357 __ CallStub(&stub); | 4369 __ CallStub(&stub); |
| 4358 RecordSafepointWithLazyDeopt( | 4370 RecordSafepointWithLazyDeopt( |
| 4359 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | 4371 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
| 4360 __ StoreToSafepointRegisterSlot(result, result); | 4372 __ StoreToSafepointRegisterSlot(result, result); |
| 4361 } | 4373 } |
| 4362 | 4374 |
| 4363 // Deopt on smi, which means the elements array changed to dictionary mode. | 4375 // Deopt on smi, which means the elements array changed to dictionary mode. |
| 4364 __ SmiTst(result, at); | 4376 __ SmiTst(result, at); |
| 4365 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 4377 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg)); |
| 4366 } | 4378 } |
| 4367 | 4379 |
| 4368 | 4380 |
| 4369 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { | 4381 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
| 4370 Register object_reg = ToRegister(instr->object()); | 4382 Register object_reg = ToRegister(instr->object()); |
| 4371 Register scratch = scratch0(); | 4383 Register scratch = scratch0(); |
| 4372 | 4384 |
| 4373 Handle<Map> from_map = instr->original_map(); | 4385 Handle<Map> from_map = instr->original_map(); |
| 4374 Handle<Map> to_map = instr->transitioned_map(); | 4386 Handle<Map> to_map = instr->transitioned_map(); |
| 4375 ElementsKind from_kind = instr->from_kind(); | 4387 ElementsKind from_kind = instr->from_kind(); |
| (...skipping 25 matching lines...) |
| 4401 } | 4413 } |
| 4402 __ bind(¬_applicable); | 4414 __ bind(¬_applicable); |
| 4403 } | 4415 } |
| 4404 | 4416 |
| 4405 | 4417 |
| 4406 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4418 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4407 Register object = ToRegister(instr->object()); | 4419 Register object = ToRegister(instr->object()); |
| 4408 Register temp = ToRegister(instr->temp()); | 4420 Register temp = ToRegister(instr->temp()); |
| 4409 Label no_memento_found; | 4421 Label no_memento_found; |
| 4410 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4422 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
| 4411 DeoptimizeIf(al, instr, Deoptimizer::kMementoFound); | 4423 DeoptimizeIf(al, instr, DeoptimizeReason::kMementoFound); |
| 4412 __ bind(&no_memento_found); | 4424 __ bind(&no_memento_found); |
| 4413 } | 4425 } |
| 4414 | 4426 |
| 4415 | 4427 |
| 4416 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4428 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4417 DCHECK(ToRegister(instr->context()).is(cp)); | 4429 DCHECK(ToRegister(instr->context()).is(cp)); |
| 4418 DCHECK(ToRegister(instr->left()).is(a1)); | 4430 DCHECK(ToRegister(instr->left()).is(a1)); |
| 4419 DCHECK(ToRegister(instr->right()).is(a0)); | 4431 DCHECK(ToRegister(instr->right()).is(a0)); |
| 4420 StringAddStub stub(isolate(), | 4432 StringAddStub stub(isolate(), |
| 4421 instr->hydrogen()->flags(), | 4433 instr->hydrogen()->flags(), |
| (...skipping 273 matching lines...) |
| 4695 } | 4707 } |
| 4696 | 4708 |
| 4697 | 4709 |
| 4698 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4710 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4699 HChange* hchange = instr->hydrogen(); | 4711 HChange* hchange = instr->hydrogen(); |
| 4700 Register input = ToRegister(instr->value()); | 4712 Register input = ToRegister(instr->value()); |
| 4701 Register output = ToRegister(instr->result()); | 4713 Register output = ToRegister(instr->result()); |
| 4702 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4714 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4703 hchange->value()->CheckFlag(HValue::kUint32)) { | 4715 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4704 __ And(at, input, Operand(0x80000000)); | 4716 __ And(at, input, Operand(0x80000000)); |
| 4705 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); | 4717 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg)); |
| 4706 } | 4718 } |
| 4707 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4719 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4708 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4720 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4709 __ SmiTagCheckOverflow(output, input, at); | 4721 __ SmiTagCheckOverflow(output, input, at); |
| 4710 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); | 4722 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg)); |
| 4711 } else { | 4723 } else { |
| 4712 __ SmiTag(output, input); | 4724 __ SmiTag(output, input); |
| 4713 } | 4725 } |
| 4714 } | 4726 } |
| 4715 | 4727 |
| 4716 | 4728 |
| 4717 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4729 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4718 Register scratch = scratch0(); | 4730 Register scratch = scratch0(); |
| 4719 Register input = ToRegister(instr->value()); | 4731 Register input = ToRegister(instr->value()); |
| 4720 Register result = ToRegister(instr->result()); | 4732 Register result = ToRegister(instr->result()); |
| 4721 if (instr->needs_check()) { | 4733 if (instr->needs_check()) { |
| 4722 STATIC_ASSERT(kHeapObjectTag == 1); | 4734 STATIC_ASSERT(kHeapObjectTag == 1); |
| 4723 // If the input is a HeapObject, value of scratch won't be zero. | 4735 // If the input is a HeapObject, value of scratch won't be zero. |
| 4724 __ And(scratch, input, Operand(kHeapObjectTag)); | 4736 __ And(scratch, input, Operand(kHeapObjectTag)); |
| 4725 __ SmiUntag(result, input); | 4737 __ SmiUntag(result, input); |
| 4726 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg)); | 4738 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch, |
| 4739 Operand(zero_reg)); |
| 4727 } else { | 4740 } else { |
| 4728 __ SmiUntag(result, input); | 4741 __ SmiUntag(result, input); |
| 4729 } | 4742 } |
| 4730 } | 4743 } |
| 4731 | 4744 |
| 4732 | 4745 |
| 4733 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4746 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4734 DoubleRegister result_reg, | 4747 DoubleRegister result_reg, |
| 4735 NumberUntagDMode mode) { | 4748 NumberUntagDMode mode) { |
| 4736 bool can_convert_undefined_to_nan = | 4749 bool can_convert_undefined_to_nan = |
| 4737 instr->hydrogen()->can_convert_undefined_to_nan(); | 4750 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4738 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4751 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4739 | 4752 |
| 4740 Register scratch = scratch0(); | 4753 Register scratch = scratch0(); |
| 4741 Label convert, load_smi, done; | 4754 Label convert, load_smi, done; |
| 4742 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4755 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4743 // Smi check. | 4756 // Smi check. |
| 4744 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4757 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
| 4745 // Heap number map check. | 4758 // Heap number map check. |
| 4746 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4759 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 4747 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 4760 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 4748 if (can_convert_undefined_to_nan) { | 4761 if (can_convert_undefined_to_nan) { |
| 4749 __ Branch(&convert, ne, scratch, Operand(at)); | 4762 __ Branch(&convert, ne, scratch, Operand(at)); |
| 4750 } else { | 4763 } else { |
| 4751 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, | 4764 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch, |
| 4752 Operand(at)); | 4765 Operand(at)); |
| 4753 } | 4766 } |
| 4754 // Load heap number. | 4767 // Load heap number. |
| 4755 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 4768 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 4756 if (deoptimize_on_minus_zero) { | 4769 if (deoptimize_on_minus_zero) { |
| 4757 __ mfc1(at, result_reg); | 4770 __ mfc1(at, result_reg); |
| 4758 __ Branch(&done, ne, at, Operand(zero_reg)); | 4771 __ Branch(&done, ne, at, Operand(zero_reg)); |
| 4759 __ mfhc1(scratch, result_reg); // Get exponent/sign bits. | 4772 __ mfhc1(scratch, result_reg); // Get exponent/sign bits. |
| 4760 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch, | 4773 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch, |
| 4761 Operand(HeapNumber::kSignMask)); | 4774 Operand(HeapNumber::kSignMask)); |
| 4762 } | 4775 } |
| 4763 __ Branch(&done); | 4776 __ Branch(&done); |
| 4764 if (can_convert_undefined_to_nan) { | 4777 if (can_convert_undefined_to_nan) { |
| 4765 __ bind(&convert); | 4778 __ bind(&convert); |
| 4766 // Convert undefined (and hole) to NaN. | 4779 // Convert undefined (and hole) to NaN. |
| 4767 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 4780 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 4768 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, | 4781 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, |
| 4769 Operand(at)); | 4782 input_reg, Operand(at)); |
| 4770 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4783 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4771 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); | 4784 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
| 4772 __ Branch(&done); | 4785 __ Branch(&done); |
| 4773 } | 4786 } |
| 4774 } else { | 4787 } else { |
| 4775 __ SmiUntag(scratch, input_reg); | 4788 __ SmiUntag(scratch, input_reg); |
| 4776 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4789 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4777 } | 4790 } |
| 4778 // Smi to double register conversion | 4791 // Smi to double register conversion |
| 4779 __ bind(&load_smi); | 4792 __ bind(&load_smi); |
| (...skipping 43 matching lines...) |
| 4823 __ mov(input_reg, zero_reg); // In delay slot. | 4836 __ mov(input_reg, zero_reg); // In delay slot. |
| 4824 | 4837 |
| 4825 __ bind(&check_bools); | 4838 __ bind(&check_bools); |
| 4826 __ LoadRoot(at, Heap::kTrueValueRootIndex); | 4839 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
| 4827 __ Branch(&check_false, ne, scratch2, Operand(at)); | 4840 __ Branch(&check_false, ne, scratch2, Operand(at)); |
| 4828 __ Branch(USE_DELAY_SLOT, &done); | 4841 __ Branch(USE_DELAY_SLOT, &done); |
| 4829 __ li(input_reg, Operand(1)); // In delay slot. | 4842 __ li(input_reg, Operand(1)); // In delay slot. |
| 4830 | 4843 |
| 4831 __ bind(&check_false); | 4844 __ bind(&check_false); |
| 4832 __ LoadRoot(at, Heap::kFalseValueRootIndex); | 4845 __ LoadRoot(at, Heap::kFalseValueRootIndex); |
| 4833 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean, | 4846 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean, |
| 4834 scratch2, Operand(at)); | 4847 scratch2, Operand(at)); |
| 4835 __ Branch(USE_DELAY_SLOT, &done); | 4848 __ Branch(USE_DELAY_SLOT, &done); |
| 4836 __ mov(input_reg, zero_reg); // In delay slot. | 4849 __ mov(input_reg, zero_reg); // In delay slot. |
| 4837 } else { | 4850 } else { |
| 4838 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1, | 4851 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1, |
| 4839 Operand(at)); | 4852 Operand(at)); |
| 4840 | 4853 |
| 4841 // Load the double value. | 4854 // Load the double value. |
| 4842 __ ldc1(double_scratch, | 4855 __ ldc1(double_scratch, |
| 4843 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 4856 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 4844 | 4857 |
| 4845 Register except_flag = scratch2; | 4858 Register except_flag = scratch2; |
| 4846 __ EmitFPUTruncate(kRoundToZero, | 4859 __ EmitFPUTruncate(kRoundToZero, |
| 4847 input_reg, | 4860 input_reg, |
| 4848 double_scratch, | 4861 double_scratch, |
| 4849 scratch1, | 4862 scratch1, |
| 4850 double_scratch2, | 4863 double_scratch2, |
| 4851 except_flag, | 4864 except_flag, |
| 4852 kCheckForInexactConversion); | 4865 kCheckForInexactConversion); |
| 4853 | 4866 |
| 4854 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 4867 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, |
| 4855 Operand(zero_reg)); | 4868 Operand(zero_reg)); |
| 4856 | 4869 |
| 4857 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4870 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4858 __ Branch(&done, ne, input_reg, Operand(zero_reg)); | 4871 __ Branch(&done, ne, input_reg, Operand(zero_reg)); |
| 4859 | 4872 |
| 4860 __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. | 4873 __ mfhc1(scratch1, double_scratch); // Get exponent/sign bits. |
| 4861 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 4874 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 4862 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 4875 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, |
| 4863 Operand(zero_reg)); | 4876 Operand(zero_reg)); |
| 4864 } | 4877 } |
| 4865 } | 4878 } |
| 4866 __ bind(&done); | 4879 __ bind(&done); |
| 4867 } | 4880 } |
| 4868 | 4881 |
| 4869 | 4882 |
| 4870 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 4883 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 4871 class DeferredTaggedToI final : public LDeferredCode { | 4884 class DeferredTaggedToI final : public LDeferredCode { |
| 4872 public: | 4885 public: |
| (...skipping 56 matching lines...) |
| 4929 | 4942 |
| 4930 __ EmitFPUTruncate(kRoundToMinusInf, | 4943 __ EmitFPUTruncate(kRoundToMinusInf, |
| 4931 result_reg, | 4944 result_reg, |
| 4932 double_input, | 4945 double_input, |
| 4933 scratch1, | 4946 scratch1, |
| 4934 double_scratch0(), | 4947 double_scratch0(), |
| 4935 except_flag, | 4948 except_flag, |
| 4936 kCheckForInexactConversion); | 4949 kCheckForInexactConversion); |
| 4937 | 4950 |
| 4938 // Deopt if the operation did not succeed (except_flag != 0). | 4951 // Deopt if the operation did not succeed (except_flag != 0). |
| 4939 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 4952 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, |
| 4940 Operand(zero_reg)); | 4953 Operand(zero_reg)); |
| 4941 | 4954 |
| 4942 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4955 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4943 Label done; | 4956 Label done; |
| 4944 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | 4957 __ Branch(&done, ne, result_reg, Operand(zero_reg)); |
| 4945 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. | 4958 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. |
| 4946 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 4959 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 4947 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 4960 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, |
| 4948 Operand(zero_reg)); | 4961 Operand(zero_reg)); |
| 4949 __ bind(&done); | 4962 __ bind(&done); |
| 4950 } | 4963 } |
| 4951 } | 4964 } |
| 4952 } | 4965 } |
| 4953 | 4966 |
| 4954 | 4967 |
| 4955 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 4968 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 4956 Register result_reg = ToRegister(instr->result()); | 4969 Register result_reg = ToRegister(instr->result()); |
| 4957 Register scratch1 = LCodeGen::scratch0(); | 4970 Register scratch1 = LCodeGen::scratch0(); |
| 4958 DoubleRegister double_input = ToDoubleRegister(instr->value()); | 4971 DoubleRegister double_input = ToDoubleRegister(instr->value()); |
| 4959 | 4972 |
| 4960 if (instr->truncating()) { | 4973 if (instr->truncating()) { |
| 4961 __ TruncateDoubleToI(result_reg, double_input); | 4974 __ TruncateDoubleToI(result_reg, double_input); |
| 4962 } else { | 4975 } else { |
| 4963 Register except_flag = LCodeGen::scratch1(); | 4976 Register except_flag = LCodeGen::scratch1(); |
| 4964 | 4977 |
| 4965 __ EmitFPUTruncate(kRoundToMinusInf, | 4978 __ EmitFPUTruncate(kRoundToMinusInf, |
| 4966 result_reg, | 4979 result_reg, |
| 4967 double_input, | 4980 double_input, |
| 4968 scratch1, | 4981 scratch1, |
| 4969 double_scratch0(), | 4982 double_scratch0(), |
| 4970 except_flag, | 4983 except_flag, |
| 4971 kCheckForInexactConversion); | 4984 kCheckForInexactConversion); |
| 4972 | 4985 |
| 4973 // Deopt if the operation did not succeed (except_flag != 0). | 4986 // Deopt if the operation did not succeed (except_flag != 0). |
| 4974 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, | 4987 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag, |
| 4975 Operand(zero_reg)); | 4988 Operand(zero_reg)); |
| 4976 | 4989 |
| 4977 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4990 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4978 Label done; | 4991 Label done; |
| 4979 __ Branch(&done, ne, result_reg, Operand(zero_reg)); | 4992 __ Branch(&done, ne, result_reg, Operand(zero_reg)); |
| 4980 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. | 4993 __ mfhc1(scratch1, double_input); // Get exponent/sign bits. |
| 4981 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 4994 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
| 4982 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, | 4995 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1, |
| 4983 Operand(zero_reg)); | 4996 Operand(zero_reg)); |
| 4984 __ bind(&done); | 4997 __ bind(&done); |
| 4985 } | 4998 } |
| 4986 } | 4999 } |
| 4987 __ SmiTag(result_reg, result_reg); | 5000 __ SmiTag(result_reg, result_reg); |
| 4988 } | 5001 } |
| 4989 | 5002 |
| 4990 | 5003 |
| 4991 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5004 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 4992 LOperand* input = instr->value(); | 5005 LOperand* input = instr->value(); |
| 4993 __ SmiTst(ToRegister(input), at); | 5006 __ SmiTst(ToRegister(input), at); |
| 4994 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg)); | 5007 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg)); |
| 4995 } | 5008 } |
| 4996 | 5009 |
| 4997 | 5010 |
| 4998 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5011 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 4999 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5012 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5000 LOperand* input = instr->value(); | 5013 LOperand* input = instr->value(); |
| 5001 __ SmiTst(ToRegister(input), at); | 5014 __ SmiTst(ToRegister(input), at); |
| 5002 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); | 5015 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg)); |
| 5003 } | 5016 } |
| 5004 } | 5017 } |
| 5005 | 5018 |
| 5006 | 5019 |
| 5007 void LCodeGen::DoCheckArrayBufferNotNeutered( | 5020 void LCodeGen::DoCheckArrayBufferNotNeutered( |
| 5008 LCheckArrayBufferNotNeutered* instr) { | 5021 LCheckArrayBufferNotNeutered* instr) { |
| 5009 Register view = ToRegister(instr->view()); | 5022 Register view = ToRegister(instr->view()); |
| 5010 Register scratch = scratch0(); | 5023 Register scratch = scratch0(); |
| 5011 | 5024 |
| 5012 __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); | 5025 __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); |
| 5013 __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); | 5026 __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); |
| 5014 __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift); | 5027 __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift); |
| 5015 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg)); | 5028 DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at, |
| 5029 Operand(zero_reg)); |
| 5016 } | 5030 } |
| 5017 | 5031 |
| 5018 | 5032 |
| 5019 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5033 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 5020 Register input = ToRegister(instr->value()); | 5034 Register input = ToRegister(instr->value()); |
| 5021 Register scratch = scratch0(); | 5035 Register scratch = scratch0(); |
| 5022 | 5036 |
| 5023 __ GetObjectType(input, scratch, scratch); | 5037 __ GetObjectType(input, scratch, scratch); |
| 5024 | 5038 |
| 5025 if (instr->hydrogen()->is_interval_check()) { | 5039 if (instr->hydrogen()->is_interval_check()) { |
| 5026 InstanceType first; | 5040 InstanceType first; |
| 5027 InstanceType last; | 5041 InstanceType last; |
| 5028 instr->hydrogen()->GetCheckInterval(&first, &last); | 5042 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 5029 | 5043 |
| 5030 // If there is only one type in the interval check for equality. | 5044 // If there is only one type in the interval check for equality. |
| 5031 if (first == last) { | 5045 if (first == last) { |
| 5032 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, | 5046 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch, |
| 5033 Operand(first)); | 5047 Operand(first)); |
| 5034 } else { | 5048 } else { |
| 5035 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch, | 5049 DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch, |
| 5036 Operand(first)); | 5050 Operand(first)); |
| 5037 // Omit check for the last type. | 5051 // Omit check for the last type. |
| 5038 if (last != LAST_TYPE) { | 5052 if (last != LAST_TYPE) { |
| 5039 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch, | 5053 DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch, |
| 5040 Operand(last)); | 5054 Operand(last)); |
| 5041 } | 5055 } |
| 5042 } | 5056 } |
| 5043 } else { | 5057 } else { |
| 5044 uint8_t mask; | 5058 uint8_t mask; |
| 5045 uint8_t tag; | 5059 uint8_t tag; |
| 5046 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5060 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 5047 | 5061 |
| 5048 if (base::bits::IsPowerOfTwo32(mask)) { | 5062 if (base::bits::IsPowerOfTwo32(mask)) { |
| 5049 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5063 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
| 5050 __ And(at, scratch, mask); | 5064 __ And(at, scratch, mask); |
| 5051 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType, | 5065 DeoptimizeIf(tag == 0 ? ne : eq, instr, |
| 5052 at, Operand(zero_reg)); | 5066 DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg)); |
| 5053 } else { | 5067 } else { |
| 5054 __ And(scratch, scratch, Operand(mask)); | 5068 __ And(scratch, scratch, Operand(mask)); |
| 5055 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, | 5069 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch, |
| 5056 Operand(tag)); | 5070 Operand(tag)); |
| 5057 } | 5071 } |
| 5058 } | 5072 } |
| 5059 } | 5073 } |
| 5060 | 5074 |
| 5061 | 5075 |
| 5062 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5076 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 5063 Register reg = ToRegister(instr->value()); | 5077 Register reg = ToRegister(instr->value()); |
| 5064 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5078 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
| 5065 AllowDeferredHandleDereference smi_check; | 5079 AllowDeferredHandleDereference smi_check; |
| 5066 if (isolate()->heap()->InNewSpace(*object)) { | 5080 if (isolate()->heap()->InNewSpace(*object)) { |
| 5067 Register reg = ToRegister(instr->value()); | 5081 Register reg = ToRegister(instr->value()); |
| 5068 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5082 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 5069 __ li(at, Operand(cell)); | 5083 __ li(at, Operand(cell)); |
| 5070 __ ld(at, FieldMemOperand(at, Cell::kValueOffset)); | 5084 __ ld(at, FieldMemOperand(at, Cell::kValueOffset)); |
| 5071 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at)); | 5085 DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at)); |
| 5072 } else { | 5086 } else { |
| 5073 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object)); | 5087 DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, |
| 5088 Operand(object)); |
| 5074 } | 5089 } |
| 5075 } | 5090 } |
| 5076 | 5091 |
| 5077 | 5092 |
| 5078 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5093 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 5079 { | 5094 { |
| 5080 PushSafepointRegistersScope scope(this); | 5095 PushSafepointRegistersScope scope(this); |
| 5081 __ push(object); | 5096 __ push(object); |
| 5082 __ mov(cp, zero_reg); | 5097 __ mov(cp, zero_reg); |
| 5083 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5098 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 5084 RecordSafepointWithRegisters( | 5099 RecordSafepointWithRegisters( |
| 5085 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5100 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 5086 __ StoreToSafepointRegisterSlot(v0, scratch0()); | 5101 __ StoreToSafepointRegisterSlot(v0, scratch0()); |
| 5087 } | 5102 } |
| 5088 __ SmiTst(scratch0(), at); | 5103 __ SmiTst(scratch0(), at); |
| 5089 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at, | 5104 DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at, |
| 5090 Operand(zero_reg)); | 5105 Operand(zero_reg)); |
| 5091 } | 5106 } |
| 5092 | 5107 |
| 5093 | 5108 |
| 5094 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5109 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5095 class DeferredCheckMaps final : public LDeferredCode { | 5110 class DeferredCheckMaps final : public LDeferredCode { |
| 5096 public: | 5111 public: |
| 5097 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5112 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 5098 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5113 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 5099 SetExit(check_maps()); | 5114 SetExit(check_maps()); |
| (...skipping 34 matching lines...) |
| 5134 Label success; | 5149 Label success; |
| 5135 for (int i = 0; i < maps->size() - 1; i++) { | 5150 for (int i = 0; i < maps->size() - 1; i++) { |
| 5136 Handle<Map> map = maps->at(i).handle(); | 5151 Handle<Map> map = maps->at(i).handle(); |
| 5137 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); | 5152 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); |
| 5138 } | 5153 } |
| 5139 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5154 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 5140 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). | 5155 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). |
| 5141 if (instr->hydrogen()->HasMigrationTarget()) { | 5156 if (instr->hydrogen()->HasMigrationTarget()) { |
| 5142 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); | 5157 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); |
| 5143 } else { | 5158 } else { |
| 5144 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map)); | 5159 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map)); |
| 5145 } | 5160 } |
| 5146 | 5161 |
| 5147 __ bind(&success); | 5162 __ bind(&success); |
| 5148 } | 5163 } |
| 5149 | 5164 |
| 5150 | 5165 |
| 5151 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5166 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5152 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5167 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5153 Register result_reg = ToRegister(instr->result()); | 5168 Register result_reg = ToRegister(instr->result()); |
| 5154 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 5169 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); |
| (...skipping 17 matching lines...) |
| 5172 | 5187 |
| 5173 // Both smi and heap number cases are handled. | 5188 // Both smi and heap number cases are handled. |
| 5174 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); | 5189 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); |
| 5175 | 5190 |
| 5176 // Check for heap number | 5191 // Check for heap number |
| 5177 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5192 __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 5178 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); | 5193 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); |
| 5179 | 5194 |
| 5180 // Check for undefined. Undefined is converted to zero for clamping | 5195 // Check for undefined. Undefined is converted to zero for clamping |
| 5181 // conversions. | 5196 // conversions. |
| 5182 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, | 5197 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg, |
| 5183 Operand(factory()->undefined_value())); | 5198 Operand(factory()->undefined_value())); |
| 5184 __ mov(result_reg, zero_reg); | 5199 __ mov(result_reg, zero_reg); |
| 5185 __ jmp(&done); | 5200 __ jmp(&done); |
| 5186 | 5201 |
| 5187 // Heap number | 5202 // Heap number |
| 5188 __ bind(&heap_number); | 5203 __ bind(&heap_number); |
| 5189 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, | 5204 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, |
| 5190 HeapNumber::kValueOffset)); | 5205 HeapNumber::kValueOffset)); |
| 5191 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); | 5206 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); |
| 5192 __ jmp(&done); | 5207 __ jmp(&done); |
| (...skipping 448 matching lines...) |
| 5641 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); | 5656 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); |
| 5642 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); | 5657 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 5643 __ jmp(&done); | 5658 __ jmp(&done); |
| 5644 | 5659 |
| 5645 __ bind(&load_cache); | 5660 __ bind(&load_cache); |
| 5646 __ LoadInstanceDescriptors(map, result); | 5661 __ LoadInstanceDescriptors(map, result); |
| 5647 __ ld(result, | 5662 __ ld(result, |
| 5648 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5663 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5649 __ ld(result, | 5664 __ ld(result, |
| 5650 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5665 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5651 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg)); | 5666 DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result, |
| 5667 Operand(zero_reg)); |
| 5652 | 5668 |
| 5653 __ bind(&done); | 5669 __ bind(&done); |
| 5654 } | 5670 } |
| 5655 | 5671 |
| 5656 | 5672 |
| 5657 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5673 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5658 Register object = ToRegister(instr->value()); | 5674 Register object = ToRegister(instr->value()); |
| 5659 Register map = ToRegister(instr->map()); | 5675 Register map = ToRegister(instr->map()); |
| 5660 __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5676 __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5661 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0())); | 5677 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map, |
| 5678 Operand(scratch0())); |
| 5662 } | 5679 } |
| 5663 | 5680 |
| 5664 | 5681 |
| 5665 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5682 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5666 Register result, | 5683 Register result, |
| 5667 Register object, | 5684 Register object, |
| 5668 Register index) { | 5685 Register index) { |
| 5669 PushSafepointRegistersScope scope(this); | 5686 PushSafepointRegistersScope scope(this); |
| 5670 __ Push(object, index); | 5687 __ Push(object, index); |
| 5671 __ mov(cp, zero_reg); | 5688 __ mov(cp, zero_reg); |
| (...skipping 59 matching lines...) |
| 5731 __ ld(result, FieldMemOperand(scratch, | 5748 __ ld(result, FieldMemOperand(scratch, |
| 5732 FixedArray::kHeaderSize - kPointerSize)); | 5749 FixedArray::kHeaderSize - kPointerSize)); |
| 5733 __ bind(deferred->exit()); | 5750 __ bind(deferred->exit()); |
| 5734 __ bind(&done); | 5751 __ bind(&done); |
| 5735 } | 5752 } |
| 5736 | 5753 |
| 5737 #undef __ | 5754 #undef __ |
| 5738 | 5755 |
| 5739 } // namespace internal | 5756 } // namespace internal |
| 5740 } // namespace v8 | 5757 } // namespace v8 |