Chromium Code Reviews

Side by Side Diff: src/crankshaft/mips/lithium-codegen-mips.cc

Issue 2161543002: [turbofan] Add support for eager/soft deoptimization reasons. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Do the ports properly (created 4 years, 5 months ago)
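The substance of this patch, as visible in the hunks below, is a mechanical switch of the deopt-reason argument in LCodeGen::DeoptimizeIf from the Crankshaft-local Deoptimizer::DeoptReason enum to DeoptimizeReason (per the issue title, presumably the enum shared with TurboFan's eager/soft deoptimization support). A minimal before/after sketch of the two overloads and a typical call site, reconstructed from this diff; the names below are taken from the file itself, nothing here is new API:

    // Before: reason typed with the Crankshaft-local enum.
    void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason,
                                Deoptimizer::BailoutType bailout_type,
                                Register src1, const Operand& src2);

    // After: reason typed as DeoptimizeReason; the short overload still
    // picks LAZY vs. EAGER based on info()->IsStub(), exactly as before.
    void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                                DeoptimizeReason deopt_reason,
                                Deoptimizer::BailoutType bailout_type,
                                Register src1, const Operand& src2);
    void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                                DeoptimizeReason deopt_reason, Register src1,
                                const Operand& src2);

    // Typical call-site update (from DoModByPowerOf2I below):
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
                 Operand(zero_reg));

The remaining hunks shown in this file are the same rename (Deoptimizer::kFoo to DeoptimizeReason::kFoo), plus re-wrapping to the line-length limit and removal of a couple of blank separator lines.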
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 726 matching lines...)
737 WriteTranslation(environment, &translation); 737 WriteTranslation(environment, &translation);
738 int deoptimization_index = deoptimizations_.length(); 738 int deoptimization_index = deoptimizations_.length();
739 int pc_offset = masm()->pc_offset(); 739 int pc_offset = masm()->pc_offset();
740 environment->Register(deoptimization_index, 740 environment->Register(deoptimization_index,
741 translation.index(), 741 translation.index(),
742 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 742 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
743 deoptimizations_.Add(environment, zone()); 743 deoptimizations_.Add(environment, zone());
744 } 744 }
745 } 745 }
746 746
747
748 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, 747 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
749 Deoptimizer::DeoptReason deopt_reason, 748 DeoptimizeReason deopt_reason,
750 Deoptimizer::BailoutType bailout_type, 749 Deoptimizer::BailoutType bailout_type,
751 Register src1, const Operand& src2) { 750 Register src1, const Operand& src2) {
752 LEnvironment* environment = instr->environment(); 751 LEnvironment* environment = instr->environment();
753 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 752 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
754 DCHECK(environment->HasBeenRegistered()); 753 DCHECK(environment->HasBeenRegistered());
755 int id = environment->deoptimization_index(); 754 int id = environment->deoptimization_index();
756 Address entry = 755 Address entry =
757 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 756 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
758 if (entry == NULL) { 757 if (entry == NULL) {
759 Abort(kBailoutWasNotPrepared); 758 Abort(kBailoutWasNotPrepared);
(...skipping 44 matching lines...)
804 // jump entry if this is the case. 803 // jump entry if this is the case.
805 if (FLAG_trace_deopt || isolate()->is_profiling() || 804 if (FLAG_trace_deopt || isolate()->is_profiling() ||
806 jump_table_.is_empty() || 805 jump_table_.is_empty() ||
807 !table_entry.IsEquivalentTo(jump_table_.last())) { 806 !table_entry.IsEquivalentTo(jump_table_.last())) {
808 jump_table_.Add(table_entry, zone()); 807 jump_table_.Add(table_entry, zone());
809 } 808 }
810 __ Branch(&jump_table_.last().label, condition, src1, src2); 809 __ Branch(&jump_table_.last().label, condition, src1, src2);
811 } 810 }
812 } 811 }
813 812
814
815 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, 813 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
816 Deoptimizer::DeoptReason deopt_reason, 814 DeoptimizeReason deopt_reason, Register src1,
817 Register src1, const Operand& src2) { 815 const Operand& src2) {
818 Deoptimizer::BailoutType bailout_type = info()->IsStub() 816 Deoptimizer::BailoutType bailout_type = info()->IsStub()
819 ? Deoptimizer::LAZY 817 ? Deoptimizer::LAZY
820 : Deoptimizer::EAGER; 818 : Deoptimizer::EAGER;
821 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2); 819 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
822 } 820 }
823 821
824 822
825 void LCodeGen::RecordSafepointWithLazyDeopt( 823 void LCodeGen::RecordSafepointWithLazyDeopt(
826 LInstruction* instr, SafepointMode safepoint_mode) { 824 LInstruction* instr, SafepointMode safepoint_mode) {
827 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { 825 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
(...skipping 111 matching lines...)
939 HMod* hmod = instr->hydrogen(); 937 HMod* hmod = instr->hydrogen();
940 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 938 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
941 Label dividend_is_not_negative, done; 939 Label dividend_is_not_negative, done;
942 940
943 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 941 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
944 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); 942 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
945 // Note: The code below even works when right contains kMinInt. 943 // Note: The code below even works when right contains kMinInt.
946 __ subu(dividend, zero_reg, dividend); 944 __ subu(dividend, zero_reg, dividend);
947 __ And(dividend, dividend, Operand(mask)); 945 __ And(dividend, dividend, Operand(mask));
948 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 946 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
949 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, 947 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
950 Operand(zero_reg)); 948 Operand(zero_reg));
951 } 949 }
952 __ Branch(USE_DELAY_SLOT, &done); 950 __ Branch(USE_DELAY_SLOT, &done);
953 __ subu(dividend, zero_reg, dividend); 951 __ subu(dividend, zero_reg, dividend);
954 } 952 }
955 953
956 __ bind(&dividend_is_not_negative); 954 __ bind(&dividend_is_not_negative);
957 __ And(dividend, dividend, Operand(mask)); 955 __ And(dividend, dividend, Operand(mask));
958 __ bind(&done); 956 __ bind(&done);
959 } 957 }
(...skipping 12 matching lines...)
972 970
973 __ TruncatingDiv(result, dividend, Abs(divisor)); 971 __ TruncatingDiv(result, dividend, Abs(divisor));
974 __ Mul(result, result, Operand(Abs(divisor))); 972 __ Mul(result, result, Operand(Abs(divisor)));
975 __ Subu(result, dividend, Operand(result)); 973 __ Subu(result, dividend, Operand(result));
976 974
977 // Check for negative zero. 975 // Check for negative zero.
978 HMod* hmod = instr->hydrogen(); 976 HMod* hmod = instr->hydrogen();
979 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 977 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
980 Label remainder_not_zero; 978 Label remainder_not_zero;
981 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); 979 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
982 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend, 980 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
983 Operand(zero_reg)); 981 Operand(zero_reg));
984 __ bind(&remainder_not_zero); 982 __ bind(&remainder_not_zero);
985 } 983 }
986 } 984 }
987 985
988 986
989 void LCodeGen::DoModI(LModI* instr) { 987 void LCodeGen::DoModI(LModI* instr) {
990 HMod* hmod = instr->hydrogen(); 988 HMod* hmod = instr->hydrogen();
991 const Register left_reg = ToRegister(instr->left()); 989 const Register left_reg = ToRegister(instr->left());
992 const Register right_reg = ToRegister(instr->right()); 990 const Register right_reg = ToRegister(instr->right());
993 const Register result_reg = ToRegister(instr->result()); 991 const Register result_reg = ToRegister(instr->result());
994 992
995 // div runs in the background while we check for special cases. 993 // div runs in the background while we check for special cases.
996 __ Mod(result_reg, left_reg, right_reg); 994 __ Mod(result_reg, left_reg, right_reg);
997 995
998 Label done; 996 Label done;
999 // Check for x % 0, we have to deopt in this case because we can't return a 997 // Check for x % 0, we have to deopt in this case because we can't return a
1000 // NaN. 998 // NaN.
1001 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { 999 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1002 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg, 1000 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
1003 Operand(zero_reg)); 1001 Operand(zero_reg));
1004 } 1002 }
1005 1003
1006 // Check for kMinInt % -1, div will return kMinInt, which is not what we 1004 // Check for kMinInt % -1, div will return kMinInt, which is not what we
1007 // want. We have to deopt if we care about -0, because we can't return that. 1005 // want. We have to deopt if we care about -0, because we can't return that.
1008 if (hmod->CheckFlag(HValue::kCanOverflow)) { 1006 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1009 Label no_overflow_possible; 1007 Label no_overflow_possible;
1010 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); 1008 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
1011 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1009 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1012 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1)); 1010 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
1011 Operand(-1));
1013 } else { 1012 } else {
1014 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); 1013 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
1015 __ Branch(USE_DELAY_SLOT, &done); 1014 __ Branch(USE_DELAY_SLOT, &done);
1016 __ mov(result_reg, zero_reg); 1015 __ mov(result_reg, zero_reg);
1017 } 1016 }
1018 __ bind(&no_overflow_possible); 1017 __ bind(&no_overflow_possible);
1019 } 1018 }
1020 1019
1021 // If we care about -0, test if the dividend is <0 and the result is 0. 1020 // If we care about -0, test if the dividend is <0 and the result is 0.
1022 __ Branch(&done, ge, left_reg, Operand(zero_reg)); 1021 __ Branch(&done, ge, left_reg, Operand(zero_reg));
1023 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1022 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1024 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg, 1023 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
1025 Operand(zero_reg)); 1024 Operand(zero_reg));
1026 } 1025 }
1027 __ bind(&done); 1026 __ bind(&done);
1028 } 1027 }
1029 1028
1030 1029
1031 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 1030 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1032 Register dividend = ToRegister(instr->dividend()); 1031 Register dividend = ToRegister(instr->dividend());
1033 int32_t divisor = instr->divisor(); 1032 int32_t divisor = instr->divisor();
1034 Register result = ToRegister(instr->result()); 1033 Register result = ToRegister(instr->result());
1035 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 1034 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1036 DCHECK(!result.is(dividend)); 1035 DCHECK(!result.is(dividend));
1037 1036
1038 // Check for (0 / -x) that will produce negative zero. 1037 // Check for (0 / -x) that will produce negative zero.
1039 HDiv* hdiv = instr->hydrogen(); 1038 HDiv* hdiv = instr->hydrogen();
1040 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1039 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1041 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, 1040 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
1042 Operand(zero_reg)); 1041 Operand(zero_reg));
1043 } 1042 }
1044 // Check for (kMinInt / -1). 1043 // Check for (kMinInt / -1).
1045 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 1044 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1046 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt)); 1045 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
1046 Operand(kMinInt));
1047 } 1047 }
1048 // Deoptimize if remainder will not be 0. 1048 // Deoptimize if remainder will not be 0.
1049 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 1049 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1050 divisor != 1 && divisor != -1) { 1050 divisor != 1 && divisor != -1) {
1051 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1051 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1052 __ And(at, dividend, Operand(mask)); 1052 __ And(at, dividend, Operand(mask));
1053 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg)); 1053 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
1054 Operand(zero_reg));
1054 } 1055 }
1055 1056
1056 if (divisor == -1) { // Nice shortcut, not needed for correctness. 1057 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1057 __ Subu(result, zero_reg, dividend); 1058 __ Subu(result, zero_reg, dividend);
1058 return; 1059 return;
1059 } 1060 }
1060 uint16_t shift = WhichPowerOf2Abs(divisor); 1061 uint16_t shift = WhichPowerOf2Abs(divisor);
1061 if (shift == 0) { 1062 if (shift == 0) {
1062 __ Move(result, dividend); 1063 __ Move(result, dividend);
1063 } else if (shift == 1) { 1064 } else if (shift == 1) {
(...skipping 16 matching lines...)
1080 DCHECK(!dividend.is(result)); 1081 DCHECK(!dividend.is(result));
1081 1082
1082 if (divisor == 0) { 1083 if (divisor == 0) {
1083 DeoptimizeIf(al, instr); 1084 DeoptimizeIf(al, instr);
1084 return; 1085 return;
1085 } 1086 }
1086 1087
1087 // Check for (0 / -x) that will produce negative zero. 1088 // Check for (0 / -x) that will produce negative zero.
1088 HDiv* hdiv = instr->hydrogen(); 1089 HDiv* hdiv = instr->hydrogen();
1089 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1090 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1090 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, 1091 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
1091 Operand(zero_reg)); 1092 Operand(zero_reg));
1092 } 1093 }
1093 1094
1094 __ TruncatingDiv(result, dividend, Abs(divisor)); 1095 __ TruncatingDiv(result, dividend, Abs(divisor));
1095 if (divisor < 0) __ Subu(result, zero_reg, result); 1096 if (divisor < 0) __ Subu(result, zero_reg, result);
1096 1097
1097 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1098 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1098 __ Mul(scratch0(), result, Operand(divisor)); 1099 __ Mul(scratch0(), result, Operand(divisor));
1099 __ Subu(scratch0(), scratch0(), dividend); 1100 __ Subu(scratch0(), scratch0(), dividend);
1100 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(), 1101 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
1101 Operand(zero_reg)); 1102 Operand(zero_reg));
1102 } 1103 }
1103 } 1104 }
1104 1105
1105 1106
1106 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 1107 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1107 void LCodeGen::DoDivI(LDivI* instr) { 1108 void LCodeGen::DoDivI(LDivI* instr) {
1108 HBinaryOperation* hdiv = instr->hydrogen(); 1109 HBinaryOperation* hdiv = instr->hydrogen();
1109 Register dividend = ToRegister(instr->dividend()); 1110 Register dividend = ToRegister(instr->dividend());
1110 Register divisor = ToRegister(instr->divisor()); 1111 Register divisor = ToRegister(instr->divisor());
1111 const Register result = ToRegister(instr->result()); 1112 const Register result = ToRegister(instr->result());
1112 Register remainder = ToRegister(instr->temp()); 1113 Register remainder = ToRegister(instr->temp());
1113 1114
1114 // On MIPS div is asynchronous - it will run in the background while we 1115 // On MIPS div is asynchronous - it will run in the background while we
1115 // check for special cases. 1116 // check for special cases.
1116 __ Div(remainder, result, dividend, divisor); 1117 __ Div(remainder, result, dividend, divisor);
1117 1118
1118 // Check for x / 0. 1119 // Check for x / 0.
1119 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1120 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1120 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor, 1121 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
1121 Operand(zero_reg)); 1122 Operand(zero_reg));
1122 } 1123 }
1123 1124
1124 // Check for (0 / -x) that will produce negative zero. 1125 // Check for (0 / -x) that will produce negative zero.
1125 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1126 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1126 Label left_not_zero; 1127 Label left_not_zero;
1127 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); 1128 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1128 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor, 1129 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
1129 Operand(zero_reg)); 1130 Operand(zero_reg));
1130 __ bind(&left_not_zero); 1131 __ bind(&left_not_zero);
1131 } 1132 }
1132 1133
1133 // Check for (kMinInt / -1). 1134 // Check for (kMinInt / -1).
1134 if (hdiv->CheckFlag(HValue::kCanOverflow) && 1135 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1135 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1136 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1136 Label left_not_min_int; 1137 Label left_not_min_int;
1137 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); 1138 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1138 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1)); 1139 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
1139 __ bind(&left_not_min_int); 1140 __ bind(&left_not_min_int);
1140 } 1141 }
1141 1142
1142 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1143 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1143 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder, 1144 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
1144 Operand(zero_reg)); 1145 Operand(zero_reg));
1145 } 1146 }
1146 } 1147 }
1147 1148
1148 1149
1149 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { 1150 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1150 DoubleRegister addend = ToDoubleRegister(instr->addend()); 1151 DoubleRegister addend = ToDoubleRegister(instr->addend());
1151 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); 1152 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1152 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); 1153 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1153 1154
(...skipping 26 matching lines...)
1180 } 1181 }
1181 1182
1182 // If the divisor is negative, we have to negate and handle edge cases. 1183 // If the divisor is negative, we have to negate and handle edge cases.
1183 1184
1184 // dividend can be the same register as result so save the value of it 1185 // dividend can be the same register as result so save the value of it
1185 // for checking overflow. 1186 // for checking overflow.
1186 __ Move(scratch, dividend); 1187 __ Move(scratch, dividend);
1187 1188
1188 __ Subu(result, zero_reg, dividend); 1189 __ Subu(result, zero_reg, dividend);
1189 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1190 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1190 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg)); 1191 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
1192 Operand(zero_reg));
1191 } 1193 }
1192 1194
1193 // Dividing by -1 is basically negation, unless we overflow. 1195 // Dividing by -1 is basically negation, unless we overflow.
1194 __ Xor(scratch, scratch, result); 1196 __ Xor(scratch, scratch, result);
1195 if (divisor == -1) { 1197 if (divisor == -1) {
1196 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1198 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1197 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch, 1199 DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
1198 Operand(zero_reg)); 1200 Operand(zero_reg));
1199 } 1201 }
1200 return; 1202 return;
1201 } 1203 }
1202 1204
1203 // If the negation could not overflow, simply shifting is OK. 1205 // If the negation could not overflow, simply shifting is OK.
1204 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1206 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1205 __ sra(result, result, shift); 1207 __ sra(result, result, shift);
1206 return; 1208 return;
1207 } 1209 }
(...skipping 15 matching lines...)
1223 DCHECK(!dividend.is(result)); 1225 DCHECK(!dividend.is(result));
1224 1226
1225 if (divisor == 0) { 1227 if (divisor == 0) {
1226 DeoptimizeIf(al, instr); 1228 DeoptimizeIf(al, instr);
1227 return; 1229 return;
1228 } 1230 }
1229 1231
1230 // Check for (0 / -x) that will produce negative zero. 1232 // Check for (0 / -x) that will produce negative zero.
1231 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1233 HMathFloorOfDiv* hdiv = instr->hydrogen();
1232 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1234 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1233 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend, 1235 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
1234 Operand(zero_reg)); 1236 Operand(zero_reg));
1235 } 1237 }
1236 1238
1237 // Easy case: We need no dynamic check for the dividend and the flooring 1239 // Easy case: We need no dynamic check for the dividend and the flooring
1238 // division is the same as the truncating division. 1240 // division is the same as the truncating division.
1239 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1241 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1240 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1242 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1241 __ TruncatingDiv(result, dividend, Abs(divisor)); 1243 __ TruncatingDiv(result, dividend, Abs(divisor));
1242 if (divisor < 0) __ Subu(result, zero_reg, result); 1244 if (divisor < 0) __ Subu(result, zero_reg, result);
1243 return; 1245 return;
(...skipping 24 matching lines...)
1268 Register dividend = ToRegister(instr->dividend()); 1270 Register dividend = ToRegister(instr->dividend());
1269 Register divisor = ToRegister(instr->divisor()); 1271 Register divisor = ToRegister(instr->divisor());
1270 const Register result = ToRegister(instr->result()); 1272 const Register result = ToRegister(instr->result());
1271 Register remainder = scratch0(); 1273 Register remainder = scratch0();
1272 // On MIPS div is asynchronous - it will run in the background while we 1274 // On MIPS div is asynchronous - it will run in the background while we
1273 // check for special cases. 1275 // check for special cases.
1274 __ Div(remainder, result, dividend, divisor); 1276 __ Div(remainder, result, dividend, divisor);
1275 1277
1276 // Check for x / 0. 1278 // Check for x / 0.
1277 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1279 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1278 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor, 1280 DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
1279 Operand(zero_reg)); 1281 Operand(zero_reg));
1280 } 1282 }
1281 1283
1282 // Check for (0 / -x) that will produce negative zero. 1284 // Check for (0 / -x) that will produce negative zero.
1283 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1285 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1284 Label left_not_zero; 1286 Label left_not_zero;
1285 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); 1287 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1286 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor, 1288 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
1287 Operand(zero_reg)); 1289 Operand(zero_reg));
1288 __ bind(&left_not_zero); 1290 __ bind(&left_not_zero);
1289 } 1291 }
1290 1292
1291 // Check for (kMinInt / -1). 1293 // Check for (kMinInt / -1).
1292 if (hdiv->CheckFlag(HValue::kCanOverflow) && 1294 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1293 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1295 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1294 Label left_not_min_int; 1296 Label left_not_min_int;
1295 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); 1297 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1296 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1)); 1298 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
1297 __ bind(&left_not_min_int); 1299 __ bind(&left_not_min_int);
1298 } 1300 }
1299 1301
1300 // We performed a truncating division. Correct the result if necessary. 1302 // We performed a truncating division. Correct the result if necessary.
1301 Label done; 1303 Label done;
1302 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); 1304 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1303 __ Xor(remainder, remainder, Operand(divisor)); 1305 __ Xor(remainder, remainder, Operand(divisor));
1304 __ Branch(&done, ge, remainder, Operand(zero_reg)); 1306 __ Branch(&done, ge, remainder, Operand(zero_reg));
1305 __ Subu(result, result, Operand(1)); 1307 __ Subu(result, result, Operand(1));
1306 __ bind(&done); 1308 __ bind(&done);
(...skipping 10 matching lines...)
1317 bool bailout_on_minus_zero = 1319 bool bailout_on_minus_zero =
1318 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1320 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1319 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1321 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1320 1322
1321 if (right_op->IsConstantOperand()) { 1323 if (right_op->IsConstantOperand()) {
1322 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); 1324 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1323 1325
1324 if (bailout_on_minus_zero && (constant < 0)) { 1326 if (bailout_on_minus_zero && (constant < 0)) {
1325 // The case of a null constant will be handled separately. 1327 // The case of a null constant will be handled separately.
1326 // If constant is negative and left is null, the result should be -0. 1328 // If constant is negative and left is null, the result should be -0.
1327 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg)); 1329 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
1330 Operand(zero_reg));
1328 } 1331 }
1329 1332
1330 switch (constant) { 1333 switch (constant) {
1331 case -1: 1334 case -1:
1332 if (overflow) { 1335 if (overflow) {
1333 Label no_overflow; 1336 Label no_overflow;
1334 __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow); 1337 __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
1335 DeoptimizeIf(al, instr); 1338 DeoptimizeIf(al, instr);
1336 __ bind(&no_overflow); 1339 __ bind(&no_overflow);
1337 } else { 1340 } else {
1338 __ Subu(result, zero_reg, left); 1341 __ Subu(result, zero_reg, left);
1339 } 1342 }
1340 break; 1343 break;
1341 case 0: 1344 case 0:
1342 if (bailout_on_minus_zero) { 1345 if (bailout_on_minus_zero) {
1343 // If left is strictly negative and the constant is null, the 1346 // If left is strictly negative and the constant is null, the
1344 // result is -0. Deoptimize if required, otherwise return 0. 1347 // result is -0. Deoptimize if required, otherwise return 0.
1345 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left, 1348 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
1346 Operand(zero_reg)); 1349 Operand(zero_reg));
1347 } 1350 }
1348 __ mov(result, zero_reg); 1351 __ mov(result, zero_reg);
1349 break; 1352 break;
1350 case 1: 1353 case 1:
1351 // Nothing to do. 1354 // Nothing to do.
1352 __ Move(result, left); 1355 __ Move(result, left);
1353 break; 1356 break;
1354 default: 1357 default:
1355 // Multiplying by powers of two and powers of two plus or minus 1358 // Multiplying by powers of two and powers of two plus or minus
(...skipping 31 matching lines...)
1387 1390
1388 if (overflow) { 1391 if (overflow) {
1389 // hi:lo = left * right. 1392 // hi:lo = left * right.
1390 if (instr->hydrogen()->representation().IsSmi()) { 1393 if (instr->hydrogen()->representation().IsSmi()) {
1391 __ SmiUntag(result, left); 1394 __ SmiUntag(result, left);
1392 __ Mul(scratch, result, result, right); 1395 __ Mul(scratch, result, result, right);
1393 } else { 1396 } else {
1394 __ Mul(scratch, result, left, right); 1397 __ Mul(scratch, result, left, right);
1395 } 1398 }
1396 __ sra(at, result, 31); 1399 __ sra(at, result, 31);
1397 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at)); 1400 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
1401 Operand(at));
1398 } else { 1402 } else {
1399 if (instr->hydrogen()->representation().IsSmi()) { 1403 if (instr->hydrogen()->representation().IsSmi()) {
1400 __ SmiUntag(result, left); 1404 __ SmiUntag(result, left);
1401 __ Mul(result, result, right); 1405 __ Mul(result, result, right);
1402 } else { 1406 } else {
1403 __ Mul(result, left, right); 1407 __ Mul(result, left, right);
1404 } 1408 }
1405 } 1409 }
1406 1410
1407 if (bailout_on_minus_zero) { 1411 if (bailout_on_minus_zero) {
1408 Label done; 1412 Label done;
1409 __ Xor(at, left, right); 1413 __ Xor(at, left, right);
1410 __ Branch(&done, ge, at, Operand(zero_reg)); 1414 __ Branch(&done, ge, at, Operand(zero_reg));
1411 // Bail out if the result is minus zero. 1415 // Bail out if the result is minus zero.
1412 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, 1416 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
1413 Operand(zero_reg)); 1417 Operand(zero_reg));
1414 __ bind(&done); 1418 __ bind(&done);
1415 } 1419 }
1416 } 1420 }
1417 } 1421 }
1418 1422
1419 1423
1420 void LCodeGen::DoBitI(LBitI* instr) { 1424 void LCodeGen::DoBitI(LBitI* instr) {
1421 LOperand* left_op = instr->left(); 1425 LOperand* left_op = instr->left();
1422 LOperand* right_op = instr->right(); 1426 LOperand* right_op = instr->right();
(...skipping 44 matching lines...)
1467 switch (instr->op()) { 1471 switch (instr->op()) {
1468 case Token::ROR: 1472 case Token::ROR:
1469 __ Ror(result, left, Operand(ToRegister(right_op))); 1473 __ Ror(result, left, Operand(ToRegister(right_op)));
1470 break; 1474 break;
1471 case Token::SAR: 1475 case Token::SAR:
1472 __ srav(result, left, ToRegister(right_op)); 1476 __ srav(result, left, ToRegister(right_op));
1473 break; 1477 break;
1474 case Token::SHR: 1478 case Token::SHR:
1475 __ srlv(result, left, ToRegister(right_op)); 1479 __ srlv(result, left, ToRegister(right_op));
1476 if (instr->can_deopt()) { 1480 if (instr->can_deopt()) {
1477 DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result, 1481 DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
1478 Operand(zero_reg)); 1482 Operand(zero_reg));
1479 } 1483 }
1480 break; 1484 break;
1481 case Token::SHL: 1485 case Token::SHL:
1482 __ sllv(result, left, ToRegister(right_op)); 1486 __ sllv(result, left, ToRegister(right_op));
1483 break; 1487 break;
1484 default: 1488 default:
1485 UNREACHABLE(); 1489 UNREACHABLE();
1486 break; 1490 break;
1487 } 1491 }
(...skipping 15 matching lines...)
1503 } else { 1507 } else {
1504 __ Move(result, left); 1508 __ Move(result, left);
1505 } 1509 }
1506 break; 1510 break;
1507 case Token::SHR: 1511 case Token::SHR:
1508 if (shift_count != 0) { 1512 if (shift_count != 0) {
1509 __ srl(result, left, shift_count); 1513 __ srl(result, left, shift_count);
1510 } else { 1514 } else {
1511 if (instr->can_deopt()) { 1515 if (instr->can_deopt()) {
1512 __ And(at, left, Operand(0x80000000)); 1516 __ And(at, left, Operand(0x80000000));
1513 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at, 1517 DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
1514 Operand(zero_reg)); 1518 Operand(zero_reg));
1515 } 1519 }
1516 __ Move(result, left); 1520 __ Move(result, left);
1517 } 1521 }
1518 break; 1522 break;
1519 case Token::SHL: 1523 case Token::SHL:
1520 if (shift_count != 0) { 1524 if (shift_count != 0) {
1521 if (instr->hydrogen_value()->representation().IsSmi() && 1525 if (instr->hydrogen_value()->representation().IsSmi() &&
1522 instr->can_deopt()) { 1526 instr->can_deopt()) {
1523 if (shift_count != 1) { 1527 if (shift_count != 1) {
1524 __ sll(result, left, shift_count - 1); 1528 __ sll(result, left, shift_count - 1);
1525 __ SmiTagCheckOverflow(result, result, scratch); 1529 __ SmiTagCheckOverflow(result, result, scratch);
1526 } else { 1530 } else {
1527 __ SmiTagCheckOverflow(result, left, scratch); 1531 __ SmiTagCheckOverflow(result, left, scratch);
1528 } 1532 }
1529 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch, 1533 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch,
1530 Operand(zero_reg)); 1534 Operand(zero_reg));
1531 } else { 1535 } else {
1532 __ sll(result, left, shift_count); 1536 __ sll(result, left, shift_count);
1533 } 1537 }
1534 } else { 1538 } else {
1535 __ Move(result, left); 1539 __ Move(result, left);
1536 } 1540 }
1537 break; 1541 break;
1538 default: 1542 default:
1539 UNREACHABLE(); 1543 UNREACHABLE();
(...skipping 412 matching lines...)
1952 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); 1956 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
1953 } 1957 }
1954 1958
1955 if (expected.Contains(ToBooleanICStub::SMI)) { 1959 if (expected.Contains(ToBooleanICStub::SMI)) {
1956 // Smis: 0 -> false, all other -> true. 1960 // Smis: 0 -> false, all other -> true.
1957 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); 1961 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
1958 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 1962 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
1959 } else if (expected.NeedsMap()) { 1963 } else if (expected.NeedsMap()) {
1960 // If we need a map later and have a Smi -> deopt. 1964 // If we need a map later and have a Smi -> deopt.
1961 __ SmiTst(reg, at); 1965 __ SmiTst(reg, at);
1962 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); 1966 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
1963 } 1967 }
1964 1968
1965 const Register map = scratch0(); 1969 const Register map = scratch0();
1966 if (expected.NeedsMap()) { 1970 if (expected.NeedsMap()) {
1967 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset)); 1971 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1968 if (expected.CanBeUndetectable()) { 1972 if (expected.CanBeUndetectable()) {
1969 // Undetectable -> false. 1973 // Undetectable -> false.
1970 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); 1974 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1971 __ And(at, at, Operand(1 << Map::kIsUndetectable)); 1975 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1972 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); 1976 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
(...skipping 43 matching lines...)
2016 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), 2020 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2017 ne, dbl_scratch, kDoubleRegZero); 2021 ne, dbl_scratch, kDoubleRegZero);
2018 // Falls through if dbl_scratch == 0. 2022 // Falls through if dbl_scratch == 0.
2019 __ Branch(instr->FalseLabel(chunk_)); 2023 __ Branch(instr->FalseLabel(chunk_));
2020 __ bind(&not_heap_number); 2024 __ bind(&not_heap_number);
2021 } 2025 }
2022 2026
2023 if (!expected.IsGeneric()) { 2027 if (!expected.IsGeneric()) {
2024 // We've seen something for the first time -> deopt. 2028 // We've seen something for the first time -> deopt.
2025 // This can only happen if we are not generic already. 2029 // This can only happen if we are not generic already.
2026 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg, 2030 DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
2027 Operand(zero_reg)); 2031 Operand(zero_reg));
2028 } 2032 }
2029 } 2033 }
2030 } 2034 }
2031 } 2035 }
2032 2036
2033 2037
2034 void LCodeGen::EmitGoto(int block) { 2038 void LCodeGen::EmitGoto(int block) {
2035 if (!IsNextEmittedBlock(block)) { 2039 if (!IsNextEmittedBlock(block)) {
2036 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); 2040 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
(...skipping 358 matching lines...) Expand 10 before | Expand all | Expand 10 after
2395 // Loop through the {object}s prototype chain looking for the {prototype}. 2399 // Loop through the {object}s prototype chain looking for the {prototype}.
2396 __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); 2400 __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2397 Label loop; 2401 Label loop;
2398 __ bind(&loop); 2402 __ bind(&loop);
2399 2403
2400 // Deoptimize if the object needs to be access checked. 2404 // Deoptimize if the object needs to be access checked.
2401 __ lbu(object_instance_type, 2405 __ lbu(object_instance_type,
2402 FieldMemOperand(object_map, Map::kBitFieldOffset)); 2406 FieldMemOperand(object_map, Map::kBitFieldOffset));
2403 __ And(object_instance_type, object_instance_type, 2407 __ And(object_instance_type, object_instance_type,
2404 Operand(1 << Map::kIsAccessCheckNeeded)); 2408 Operand(1 << Map::kIsAccessCheckNeeded));
2405 DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type, 2409 DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
2406 Operand(zero_reg)); 2410 Operand(zero_reg));
2407 // Deoptimize for proxies. 2411 // Deoptimize for proxies.
2408 __ lbu(object_instance_type, 2412 __ lbu(object_instance_type,
2409 FieldMemOperand(object_map, Map::kInstanceTypeOffset)); 2413 FieldMemOperand(object_map, Map::kInstanceTypeOffset));
2410 DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type, 2414 DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
2411 Operand(JS_PROXY_TYPE)); 2415 Operand(JS_PROXY_TYPE));
2412 2416
2413 __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset)); 2417 __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
2414 __ LoadRoot(at, Heap::kNullValueRootIndex); 2418 __ LoadRoot(at, Heap::kNullValueRootIndex);
2415 EmitFalseBranch(instr, eq, object_prototype, Operand(at)); 2419 EmitFalseBranch(instr, eq, object_prototype, Operand(at));
2416 EmitTrueBranch(instr, eq, object_prototype, Operand(prototype)); 2420 EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
2417 __ Branch(USE_DELAY_SLOT, &loop); 2421 __ Branch(USE_DELAY_SLOT, &loop);
2418 __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset)); 2422 __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2419 } 2423 }
2420 2424
(...skipping 100 matching lines...)
2521 2525
2522 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2526 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2523 Register context = ToRegister(instr->context()); 2527 Register context = ToRegister(instr->context());
2524 Register result = ToRegister(instr->result()); 2528 Register result = ToRegister(instr->result());
2525 2529
2526 __ lw(result, ContextMemOperand(context, instr->slot_index())); 2530 __ lw(result, ContextMemOperand(context, instr->slot_index()));
2527 if (instr->hydrogen()->RequiresHoleCheck()) { 2531 if (instr->hydrogen()->RequiresHoleCheck()) {
2528 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2532 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2529 2533
2530 if (instr->hydrogen()->DeoptimizesOnHole()) { 2534 if (instr->hydrogen()->DeoptimizesOnHole()) {
2531 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); 2535 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
2532 } else { 2536 } else {
2533 Label is_not_hole; 2537 Label is_not_hole;
2534 __ Branch(&is_not_hole, ne, result, Operand(at)); 2538 __ Branch(&is_not_hole, ne, result, Operand(at));
2535 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 2539 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2536 __ bind(&is_not_hole); 2540 __ bind(&is_not_hole);
2537 } 2541 }
2538 } 2542 }
2539 } 2543 }
2540 2544
2541 2545
2542 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2546 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2543 Register context = ToRegister(instr->context()); 2547 Register context = ToRegister(instr->context());
2544 Register value = ToRegister(instr->value()); 2548 Register value = ToRegister(instr->value());
2545 Register scratch = scratch0(); 2549 Register scratch = scratch0();
2546 MemOperand target = ContextMemOperand(context, instr->slot_index()); 2550 MemOperand target = ContextMemOperand(context, instr->slot_index());
2547 2551
2548 Label skip_assignment; 2552 Label skip_assignment;
2549 2553
2550 if (instr->hydrogen()->RequiresHoleCheck()) { 2554 if (instr->hydrogen()->RequiresHoleCheck()) {
2551 __ lw(scratch, target); 2555 __ lw(scratch, target);
2552 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2556 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2553 2557
2554 if (instr->hydrogen()->DeoptimizesOnHole()) { 2558 if (instr->hydrogen()->DeoptimizesOnHole()) {
2555 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at)); 2559 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
2556 } else { 2560 } else {
2557 __ Branch(&skip_assignment, ne, scratch, Operand(at)); 2561 __ Branch(&skip_assignment, ne, scratch, Operand(at));
2558 } 2562 }
2559 } 2563 }
2560 2564
2561 __ sw(value, target); 2565 __ sw(value, target);
2562 if (instr->hydrogen()->NeedsWriteBarrier()) { 2566 if (instr->hydrogen()->NeedsWriteBarrier()) {
2563 SmiCheck check_needed = 2567 SmiCheck check_needed =
2564 instr->hydrogen()->value()->type().IsHeapObject() 2568 instr->hydrogen()->value()->type().IsHeapObject()
2565 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2569 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
(...skipping 56 matching lines...)
2622 Register scratch = scratch0(); 2626 Register scratch = scratch0();
2623 Register function = ToRegister(instr->function()); 2627 Register function = ToRegister(instr->function());
2624 Register result = ToRegister(instr->result()); 2628 Register result = ToRegister(instr->result());
2625 2629
2626 // Get the prototype or initial map from the function. 2630 // Get the prototype or initial map from the function.
2627 __ lw(result, 2631 __ lw(result,
2628 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2632 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2629 2633
2630 // Check that the function has a prototype or an initial map. 2634 // Check that the function has a prototype or an initial map.
2631 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2635 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2632 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at)); 2636 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
2633 2637
2634 // If the function does not have an initial map, we're done. 2638 // If the function does not have an initial map, we're done.
2635 Label done; 2639 Label done;
2636 __ GetObjectType(result, scratch, scratch); 2640 __ GetObjectType(result, scratch, scratch);
2637 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); 2641 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2638 2642
2639 // Get the prototype from the initial map. 2643 // Get the prototype from the initial map.
2640 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); 2644 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2641 2645
2642 // All done. 2646 // All done.
(...skipping 99 matching lines...)
2742 break; 2746 break;
2743 case UINT16_ELEMENTS: 2747 case UINT16_ELEMENTS:
2744 __ lhu(result, mem_operand); 2748 __ lhu(result, mem_operand);
2745 break; 2749 break;
2746 case INT32_ELEMENTS: 2750 case INT32_ELEMENTS:
2747 __ lw(result, mem_operand); 2751 __ lw(result, mem_operand);
2748 break; 2752 break;
2749 case UINT32_ELEMENTS: 2753 case UINT32_ELEMENTS:
2750 __ lw(result, mem_operand); 2754 __ lw(result, mem_operand);
2751 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 2755 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2752 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue, 2756 DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
2753 result, Operand(0x80000000)); 2757 result, Operand(0x80000000));
2754 } 2758 }
2755 break; 2759 break;
2756 case FLOAT32_ELEMENTS: 2760 case FLOAT32_ELEMENTS:
2757 case FLOAT64_ELEMENTS: 2761 case FLOAT64_ELEMENTS:
2758 case FAST_DOUBLE_ELEMENTS: 2762 case FAST_DOUBLE_ELEMENTS:
2759 case FAST_ELEMENTS: 2763 case FAST_ELEMENTS:
2760 case FAST_SMI_ELEMENTS: 2764 case FAST_SMI_ELEMENTS:
2761 case FAST_HOLEY_DOUBLE_ELEMENTS: 2765 case FAST_HOLEY_DOUBLE_ELEMENTS:
2762 case FAST_HOLEY_ELEMENTS: 2766 case FAST_HOLEY_ELEMENTS:
(...skipping 34 matching lines...)
2797 key = ToRegister(instr->key()); 2801 key = ToRegister(instr->key());
2798 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 2802 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
2799 ? (element_size_shift - kSmiTagSize) : element_size_shift; 2803 ? (element_size_shift - kSmiTagSize) : element_size_shift;
2800 __ Lsa(scratch, scratch, key, shift_size); 2804 __ Lsa(scratch, scratch, key, shift_size);
2801 } 2805 }
2802 2806
2803 __ ldc1(result, MemOperand(scratch)); 2807 __ ldc1(result, MemOperand(scratch));
2804 2808
2805 if (instr->hydrogen()->RequiresHoleCheck()) { 2809 if (instr->hydrogen()->RequiresHoleCheck()) {
2806 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset)); 2810 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
2807 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, 2811 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
2808 Operand(kHoleNanUpper32)); 2812 Operand(kHoleNanUpper32));
2809 } 2813 }
2810 } 2814 }
2811 2815
2812 2816
2813 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 2817 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2814 Register elements = ToRegister(instr->elements()); 2818 Register elements = ToRegister(instr->elements());
2815 Register result = ToRegister(instr->result()); 2819 Register result = ToRegister(instr->result());
2816 Register scratch = scratch0(); 2820 Register scratch = scratch0();
2817 Register store_base = scratch; 2821 Register store_base = scratch;
(...skipping 14 matching lines...)
2832 } else { 2836 } else {
2833 __ Lsa(scratch, elements, key, kPointerSizeLog2); 2837 __ Lsa(scratch, elements, key, kPointerSizeLog2);
2834 } 2838 }
2835 } 2839 }
2836 __ lw(result, MemOperand(store_base, offset)); 2840 __ lw(result, MemOperand(store_base, offset));
2837 2841
2838 // Check for the hole value. 2842 // Check for the hole value.
2839 if (instr->hydrogen()->RequiresHoleCheck()) { 2843 if (instr->hydrogen()->RequiresHoleCheck()) {
2840 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 2844 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
2841 __ SmiTst(result, scratch); 2845 __ SmiTst(result, scratch);
2842 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, 2846 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
2843 Operand(zero_reg)); 2847 Operand(zero_reg));
2844 } else { 2848 } else {
2845 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 2849 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2846 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch)); 2850 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
2851 Operand(scratch));
2847 } 2852 }
2848 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { 2853 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
2849 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); 2854 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
2850 Label done; 2855 Label done;
2851 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 2856 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2852 __ Branch(&done, ne, result, Operand(scratch)); 2857 __ Branch(&done, ne, result, Operand(scratch));
2853 if (info()->IsStub()) { 2858 if (info()->IsStub()) {
2854 // A stub can safely convert the hole to undefined only if the array 2859 // A stub can safely convert the hole to undefined only if the array
2855 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise 2860 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
2856 // it needs to bail out. 2861 // it needs to bail out.
2857 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); 2862 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
2858 __ lw(result, FieldMemOperand(result, Cell::kValueOffset)); 2863 __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
2859 DeoptimizeIf(ne, instr, Deoptimizer::kHole, result, 2864 DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
2860 Operand(Smi::FromInt(Isolate::kArrayProtectorValid))); 2865 Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
2861 } 2866 }
2862 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 2867 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2863 __ bind(&done); 2868 __ bind(&done);
2864 } 2869 }
2865 } 2870 }
2866 2871
2867 2872
2868 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 2873 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
2869 if (instr->is_fixed_typed_array()) { 2874 if (instr->is_fixed_typed_array()) {
(...skipping 129 matching lines...)
2999 } 3004 }
3000 3005
3001 // Normal function. Replace undefined or null with global receiver. 3006 // Normal function. Replace undefined or null with global receiver.
3002 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3007 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3003 __ Branch(&global_object, eq, receiver, Operand(scratch)); 3008 __ Branch(&global_object, eq, receiver, Operand(scratch));
3004 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3009 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3005 __ Branch(&global_object, eq, receiver, Operand(scratch)); 3010 __ Branch(&global_object, eq, receiver, Operand(scratch));
3006 3011
3007 // Deoptimize if the receiver is not a JS object. 3012 // Deoptimize if the receiver is not a JS object.
3008 __ SmiTst(receiver, scratch); 3013 __ SmiTst(receiver, scratch);
3009 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg)); 3014 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));
3010 3015
3011 __ GetObjectType(receiver, scratch, scratch); 3016 __ GetObjectType(receiver, scratch, scratch);
3012 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch, 3017 DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
3013 Operand(FIRST_JS_RECEIVER_TYPE)); 3018 Operand(FIRST_JS_RECEIVER_TYPE));
3014 3019
3015 __ Branch(&result_in_receiver); 3020 __ Branch(&result_in_receiver);
3016 __ bind(&global_object); 3021 __ bind(&global_object);
3017 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3022 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
3018 __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); 3023 __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
3019 __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); 3024 __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
3020 3025
3021 if (result.is(receiver)) { 3026 if (result.is(receiver)) {
3022 __ bind(&result_in_receiver); 3027 __ bind(&result_in_receiver);
(...skipping 13 matching lines...)
3036 Register length = ToRegister(instr->length()); 3041 Register length = ToRegister(instr->length());
3037 Register elements = ToRegister(instr->elements()); 3042 Register elements = ToRegister(instr->elements());
3038 Register scratch = scratch0(); 3043 Register scratch = scratch0();
3039 DCHECK(receiver.is(a0)); // Used for parameter count. 3044 DCHECK(receiver.is(a0)); // Used for parameter count.
3040 DCHECK(function.is(a1)); // Required by InvokeFunction. 3045 DCHECK(function.is(a1)); // Required by InvokeFunction.
3041 DCHECK(ToRegister(instr->result()).is(v0)); 3046 DCHECK(ToRegister(instr->result()).is(v0));
3042 3047
3043 // Copy the arguments to this function possibly from the 3048 // Copy the arguments to this function possibly from the
3044 // adaptor frame below it. 3049 // adaptor frame below it.
3045 const uint32_t kArgumentsLimit = 1 * KB; 3050 const uint32_t kArgumentsLimit = 1 * KB;
3046 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length, 3051 DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
3047 Operand(kArgumentsLimit)); 3052 Operand(kArgumentsLimit));
3048 3053
3049 // Push the receiver and use the register to keep the original 3054 // Push the receiver and use the register to keep the original
3050 // number of arguments. 3055 // number of arguments.
3051 __ push(receiver); 3056 __ push(receiver);
3052 __ Move(receiver, length); 3057 __ Move(receiver, length);
3053 // The arguments are at a one pointer size offset from elements. 3058 // The arguments are at a one pointer size offset from elements.
3054 __ Addu(elements, elements, Operand(1 * kPointerSize)); 3059 __ Addu(elements, elements, Operand(1 * kPointerSize));
3055 3060
3056 // Loop through the arguments pushing them onto the execution 3061 // Loop through the arguments pushing them onto the execution
(...skipping 132 matching lines...)
3189 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3194 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3190 DCHECK(instr->context() != NULL); 3195 DCHECK(instr->context() != NULL);
3191 DCHECK(ToRegister(instr->context()).is(cp)); 3196 DCHECK(ToRegister(instr->context()).is(cp));
3192 Register input = ToRegister(instr->value()); 3197 Register input = ToRegister(instr->value());
3193 Register result = ToRegister(instr->result()); 3198 Register result = ToRegister(instr->result());
3194 Register scratch = scratch0(); 3199 Register scratch = scratch0();
3195 3200
3196 // Deoptimize if not a heap number. 3201 // Deoptimize if not a heap number.
3197 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3202 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3198 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 3203 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3199 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at)); 3204 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
3205 Operand(at));
3200 3206
3201 Label done; 3207 Label done;
3202 Register exponent = scratch0(); 3208 Register exponent = scratch0();
3203 scratch = no_reg; 3209 scratch = no_reg;
3204 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3210 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3205 // Check the sign of the argument. If the argument is positive, just 3211 // Check the sign of the argument. If the argument is positive, just
3206 // return it. 3212 // return it.
3207 __ Move(result, input); 3213 __ Move(result, input);
3208 __ And(at, exponent, Operand(HeapNumber::kSignMask)); 3214 __ And(at, exponent, Operand(HeapNumber::kSignMask));
3209 __ Branch(&done, eq, at, Operand(zero_reg)); 3215 __ Branch(&done, eq, at, Operand(zero_reg));
(...skipping 46 matching lines...)
3256 3262
3257 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { 3263 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3258 Register input = ToRegister(instr->value()); 3264 Register input = ToRegister(instr->value());
3259 Register result = ToRegister(instr->result()); 3265 Register result = ToRegister(instr->result());
3260 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); 3266 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3261 Label done; 3267 Label done;
3262 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); 3268 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3263 __ mov(result, input); 3269 __ mov(result, input);
3264 __ subu(result, zero_reg, input); 3270 __ subu(result, zero_reg, input);
3265 // Overflow if result is still negative, i.e. 0x80000000. 3271 // Overflow if result is still negative, i.e. 0x80000000.
3266 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg)); 3272 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
3273 Operand(zero_reg));
3267 __ bind(&done); 3274 __ bind(&done);
3268 } 3275 }
3269 3276
3270 3277
3271 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3278 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3272 // Class for deferred case. 3279 // Class for deferred case.
3273 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { 3280 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3274 public: 3281 public:
3275 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) 3282 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3276 : LDeferredCode(codegen), instr_(instr) { } 3283 : LDeferredCode(codegen), instr_(instr) { }
(...skipping 34 matching lines...)
3311 Register except_flag = ToRegister(instr->temp()); 3318 Register except_flag = ToRegister(instr->temp());
3312 3319
3313 __ EmitFPUTruncate(kRoundToMinusInf, 3320 __ EmitFPUTruncate(kRoundToMinusInf,
3314 result, 3321 result,
3315 input, 3322 input,
3316 scratch1, 3323 scratch1,
3317 double_scratch0(), 3324 double_scratch0(),
3318 except_flag); 3325 except_flag);
3319 3326
3320 // Deopt if the operation did not succeed. 3327 // Deopt if the operation did not succeed.
3321 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, 3328 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
3322 Operand(zero_reg)); 3329 Operand(zero_reg));
3323 3330
3324 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3331 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3325 // Test for -0. 3332 // Test for -0.
3326 Label done; 3333 Label done;
3327 __ Branch(&done, ne, result, Operand(zero_reg)); 3334 __ Branch(&done, ne, result, Operand(zero_reg));
3328 __ Mfhc1(scratch1, input); 3335 __ Mfhc1(scratch1, input);
3329 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); 3336 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3330 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, 3337 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
3331 Operand(zero_reg)); 3338 Operand(zero_reg));
3332 __ bind(&done); 3339 __ bind(&done);
3333 } 3340 }
3334 } 3341 }
3335 3342
3336 3343
3337 void LCodeGen::DoMathRound(LMathRound* instr) { 3344 void LCodeGen::DoMathRound(LMathRound* instr) {
3338 DoubleRegister input = ToDoubleRegister(instr->value()); 3345 DoubleRegister input = ToDoubleRegister(instr->value());
3339 Register result = ToRegister(instr->result()); 3346 Register result = ToRegister(instr->result());
3340 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3347 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
(...skipping 13 matching lines...)
3354 __ mov(result, zero_reg); 3361 __ mov(result, zero_reg);
3355 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3362 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3356 __ Branch(&check_sign_on_zero); 3363 __ Branch(&check_sign_on_zero);
3357 } else { 3364 } else {
3358 __ Branch(&done); 3365 __ Branch(&done);
3359 } 3366 }
3360 __ bind(&skip1); 3367 __ bind(&skip1);
3361 3368
3362 // The following conversion will not work with numbers 3369 // The following conversion will not work with numbers
3363 // outside of ]-2^32, 2^32[. 3370 // outside of ]-2^32, 2^32[.
3364 DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch, 3371 DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
3365 Operand(HeapNumber::kExponentBias + 32)); 3372 Operand(HeapNumber::kExponentBias + 32));
3366 3373
3367 // Save the original sign for later comparison. 3374 // Save the original sign for later comparison.
3368 __ And(scratch, result, Operand(HeapNumber::kSignMask)); 3375 __ And(scratch, result, Operand(HeapNumber::kSignMask));
3369 3376
3370 __ Move(double_scratch0(), 0.5); 3377 __ Move(double_scratch0(), 0.5);
3371 __ add_d(double_scratch0(), input, double_scratch0()); 3378 __ add_d(double_scratch0(), input, double_scratch0());
3372 3379
3373 // Check sign of the result: if the sign changed, the input 3380 // Check sign of the result: if the sign changed, the input
3374 // value was in ]0.5, 0[ and the result should be -0. 3381 // value was in ]0.5, 0[ and the result should be -0.
3375 __ Mfhc1(result, double_scratch0()); 3382 __ Mfhc1(result, double_scratch0());
3376 __ Xor(result, result, Operand(scratch)); 3383 __ Xor(result, result, Operand(scratch));
3377 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3384 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3378 // ARM uses 'mi' here, which is 'lt' 3385 // ARM uses 'mi' here, which is 'lt'
3379 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg)); 3386 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
3387 Operand(zero_reg));
3380 } else { 3388 } else {
3381 Label skip2; 3389 Label skip2;
3382 // ARM uses 'mi' here, which is 'lt' 3390 // ARM uses 'mi' here, which is 'lt'
3383 // Negating it results in 'ge' 3391 // Negating it results in 'ge'
3384 __ Branch(&skip2, ge, result, Operand(zero_reg)); 3392 __ Branch(&skip2, ge, result, Operand(zero_reg));
3385 __ mov(result, zero_reg); 3393 __ mov(result, zero_reg);
3386 __ Branch(&done); 3394 __ Branch(&done);
3387 __ bind(&skip2); 3395 __ bind(&skip2);
3388 } 3396 }
3389 3397
3390 Register except_flag = scratch; 3398 Register except_flag = scratch;
3391 __ EmitFPUTruncate(kRoundToMinusInf, 3399 __ EmitFPUTruncate(kRoundToMinusInf,
3392 result, 3400 result,
3393 double_scratch0(), 3401 double_scratch0(),
3394 at, 3402 at,
3395 double_scratch1, 3403 double_scratch1,
3396 except_flag); 3404 except_flag);
3397 3405
3398 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, 3406 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
3399 Operand(zero_reg)); 3407 Operand(zero_reg));
3400 3408
3401 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3409 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3402 // Test for -0. 3410 // Test for -0.
3403 __ Branch(&done, ne, result, Operand(zero_reg)); 3411 __ Branch(&done, ne, result, Operand(zero_reg));
3404 __ bind(&check_sign_on_zero); 3412 __ bind(&check_sign_on_zero);
3405 __ Mfhc1(scratch, input); 3413 __ Mfhc1(scratch, input);
3406 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); 3414 __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3407 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch, 3415 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
3408 Operand(zero_reg)); 3416 Operand(zero_reg));
3409 } 3417 }
3410 __ bind(&done); 3418 __ bind(&done);
3411 } 3419 }
3412 3420
3413 3421
3414 void LCodeGen::DoMathFround(LMathFround* instr) { 3422 void LCodeGen::DoMathFround(LMathFround* instr) {
3415 DoubleRegister input = ToDoubleRegister(instr->value()); 3423 DoubleRegister input = ToDoubleRegister(instr->value());
3416 DoubleRegister result = ToDoubleRegister(instr->result()); 3424 DoubleRegister result = ToDoubleRegister(instr->result());
3417 __ cvt_s_d(result.low(), input); 3425 __ cvt_s_d(result.low(), input);
(...skipping 46 matching lines...)
3464 3472
3465 if (exponent_type.IsSmi()) { 3473 if (exponent_type.IsSmi()) {
3466 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3474 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3467 __ CallStub(&stub); 3475 __ CallStub(&stub);
3468 } else if (exponent_type.IsTagged()) { 3476 } else if (exponent_type.IsTagged()) {
3469 Label no_deopt; 3477 Label no_deopt;
3470 __ JumpIfSmi(tagged_exponent, &no_deopt); 3478 __ JumpIfSmi(tagged_exponent, &no_deopt);
3471 DCHECK(!t3.is(tagged_exponent)); 3479 DCHECK(!t3.is(tagged_exponent));
3472 __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); 3480 __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3473 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 3481 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3474 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at)); 3482 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, t3, Operand(at));
3475 __ bind(&no_deopt); 3483 __ bind(&no_deopt);
3476 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3484 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3477 __ CallStub(&stub); 3485 __ CallStub(&stub);
3478 } else if (exponent_type.IsInteger32()) { 3486 } else if (exponent_type.IsInteger32()) {
3479 MathPowStub stub(isolate(), MathPowStub::INTEGER); 3487 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3480 __ CallStub(&stub); 3488 __ CallStub(&stub);
3481 } else { 3489 } else {
3482 DCHECK(exponent_type.IsDouble()); 3490 DCHECK(exponent_type.IsDouble());
3483 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 3491 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3484 __ CallStub(&stub); 3492 __ CallStub(&stub);
(...skipping 333 matching lines...)
3818 } else { 3826 } else {
3819 reg = ToRegister(instr->index()); 3827 reg = ToRegister(instr->index());
3820 operand = ToOperand(instr->length()); 3828 operand = ToOperand(instr->length());
3821 } 3829 }
3822 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 3830 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
3823 Label done; 3831 Label done;
3824 __ Branch(&done, NegateCondition(cc), reg, operand); 3832 __ Branch(&done, NegateCondition(cc), reg, operand);
3825 __ stop("eliminated bounds check failed"); 3833 __ stop("eliminated bounds check failed");
3826 __ bind(&done); 3834 __ bind(&done);
3827 } else { 3835 } else {
3828 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand); 3836 DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
3829 } 3837 }
3830 } 3838 }
3831 3839
3832 3840
3833 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 3841 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
3834 Register external_pointer = ToRegister(instr->elements()); 3842 Register external_pointer = ToRegister(instr->elements());
3835 Register key = no_reg; 3843 Register key = no_reg;
3836 ElementsKind elements_kind = instr->elements_kind(); 3844 ElementsKind elements_kind = instr->elements_kind();
3837 bool key_is_constant = instr->key()->IsConstantOperand(); 3845 bool key_is_constant = instr->key()->IsConstantOperand();
3838 int constant_key = 0; 3846 int constant_key = 0;
(...skipping 285 matching lines...)
4124 instr->hydrogen()->kind()); 4132 instr->hydrogen()->kind());
4125 __ mov(a0, result); 4133 __ mov(a0, result);
4126 __ CallStub(&stub); 4134 __ CallStub(&stub);
4127 RecordSafepointWithLazyDeopt( 4135 RecordSafepointWithLazyDeopt(
4128 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 4136 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4129 __ StoreToSafepointRegisterSlot(result, result); 4137 __ StoreToSafepointRegisterSlot(result, result);
4130 } 4138 }
4131 4139
4132 // Deopt on smi, which means the elements array changed to dictionary mode. 4140 // Deopt on smi, which means the elements array changed to dictionary mode.
4133 __ SmiTst(result, at); 4141 __ SmiTst(result, at);
4134 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); 4142 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
4135 } 4143 }
4136 4144
4137 4145
4138 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4146 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4139 Register object_reg = ToRegister(instr->object()); 4147 Register object_reg = ToRegister(instr->object());
4140 Register scratch = scratch0(); 4148 Register scratch = scratch0();
4141 4149
4142 Handle<Map> from_map = instr->original_map(); 4150 Handle<Map> from_map = instr->original_map();
4143 Handle<Map> to_map = instr->transitioned_map(); 4151 Handle<Map> to_map = instr->transitioned_map();
4144 ElementsKind from_kind = instr->from_kind(); 4152 ElementsKind from_kind = instr->from_kind();
(...skipping 346 matching lines...)
4491 } 4499 }
4492 4500
4493 4501
4494 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4502 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4495 HChange* hchange = instr->hydrogen(); 4503 HChange* hchange = instr->hydrogen();
4496 Register input = ToRegister(instr->value()); 4504 Register input = ToRegister(instr->value());
4497 Register output = ToRegister(instr->result()); 4505 Register output = ToRegister(instr->result());
4498 if (hchange->CheckFlag(HValue::kCanOverflow) && 4506 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4499 hchange->value()->CheckFlag(HValue::kUint32)) { 4507 hchange->value()->CheckFlag(HValue::kUint32)) {
4500 __ And(at, input, Operand(0xc0000000)); 4508 __ And(at, input, Operand(0xc0000000));
4501 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); 4509 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
4502 } 4510 }
4503 if (hchange->CheckFlag(HValue::kCanOverflow) && 4511 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4504 !hchange->value()->CheckFlag(HValue::kUint32)) { 4512 !hchange->value()->CheckFlag(HValue::kUint32)) {
4505 __ SmiTagCheckOverflow(output, input, at); 4513 __ SmiTagCheckOverflow(output, input, at);
4506 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg)); 4514 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
4507 } else { 4515 } else {
4508 __ SmiTag(output, input); 4516 __ SmiTag(output, input);
4509 } 4517 }
4510 } 4518 }
4511 4519
4512 4520
4513 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4521 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4514 Register scratch = scratch0(); 4522 Register scratch = scratch0();
4515 Register input = ToRegister(instr->value()); 4523 Register input = ToRegister(instr->value());
4516 Register result = ToRegister(instr->result()); 4524 Register result = ToRegister(instr->result());
4517 if (instr->needs_check()) { 4525 if (instr->needs_check()) {
4518 STATIC_ASSERT(kHeapObjectTag == 1); 4526 STATIC_ASSERT(kHeapObjectTag == 1);
4519 // If the input is a HeapObject, value of scratch won't be zero. 4527 // If the input is a HeapObject, value of scratch won't be zero.
4520 __ And(scratch, input, Operand(kHeapObjectTag)); 4528 __ And(scratch, input, Operand(kHeapObjectTag));
4521 __ SmiUntag(result, input); 4529 __ SmiUntag(result, input);
4522 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg)); 4530 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
4531 Operand(zero_reg));
4523 } else { 4532 } else {
4524 __ SmiUntag(result, input); 4533 __ SmiUntag(result, input);
4525 } 4534 }
4526 } 4535 }
4527 4536
4528 4537
4529 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, 4538 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4530 DoubleRegister result_reg, 4539 DoubleRegister result_reg,
4531 NumberUntagDMode mode) { 4540 NumberUntagDMode mode) {
4532 bool can_convert_undefined_to_nan = 4541 bool can_convert_undefined_to_nan =
4533 instr->hydrogen()->can_convert_undefined_to_nan(); 4542 instr->hydrogen()->can_convert_undefined_to_nan();
4534 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); 4543 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4535 4544
4536 Register scratch = scratch0(); 4545 Register scratch = scratch0();
4537 Label convert, load_smi, done; 4546 Label convert, load_smi, done;
4538 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4547 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4539 // Smi check. 4548 // Smi check.
4540 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 4549 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4541 // Heap number map check. 4550 // Heap number map check.
4542 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4551 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4543 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 4552 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4544 if (can_convert_undefined_to_nan) { 4553 if (can_convert_undefined_to_nan) {
4545 __ Branch(&convert, ne, scratch, Operand(at)); 4554 __ Branch(&convert, ne, scratch, Operand(at));
4546 } else { 4555 } else {
4547 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, 4556 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
4548 Operand(at)); 4557 Operand(at));
4549 } 4558 }
4550 // Load heap number. 4559 // Load heap number.
4551 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 4560 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4552 if (deoptimize_on_minus_zero) { 4561 if (deoptimize_on_minus_zero) {
4553 __ mfc1(at, result_reg.low()); 4562 __ mfc1(at, result_reg.low());
4554 __ Branch(&done, ne, at, Operand(zero_reg)); 4563 __ Branch(&done, ne, at, Operand(zero_reg));
4555 __ Mfhc1(scratch, result_reg); 4564 __ Mfhc1(scratch, result_reg);
4556 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch, 4565 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
4557 Operand(HeapNumber::kSignMask)); 4566 Operand(HeapNumber::kSignMask));
4558 } 4567 }
4559 __ Branch(&done); 4568 __ Branch(&done);
4560 if (can_convert_undefined_to_nan) { 4569 if (can_convert_undefined_to_nan) {
4561 __ bind(&convert); 4570 __ bind(&convert);
4562 // Convert undefined (and hole) to NaN. 4571 // Convert undefined (and hole) to NaN.
4563 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); 4572 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4564 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, 4573 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
4565 Operand(at)); 4574 input_reg, Operand(at));
4566 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4575 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4567 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4576 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4568 __ Branch(&done); 4577 __ Branch(&done);
4569 } 4578 }
4570 } else { 4579 } else {
4571 __ SmiUntag(scratch, input_reg); 4580 __ SmiUntag(scratch, input_reg);
4572 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4581 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4573 } 4582 }
4574 // Smi to double register conversion 4583 // Smi to double register conversion
4575 __ bind(&load_smi); 4584 __ bind(&load_smi);
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
4619 __ mov(input_reg, zero_reg); // In delay slot. 4628 __ mov(input_reg, zero_reg); // In delay slot.
4620 4629
4621 __ bind(&check_bools); 4630 __ bind(&check_bools);
4622 __ LoadRoot(at, Heap::kTrueValueRootIndex); 4631 __ LoadRoot(at, Heap::kTrueValueRootIndex);
4623 __ Branch(&check_false, ne, scratch2, Operand(at)); 4632 __ Branch(&check_false, ne, scratch2, Operand(at));
4624 __ Branch(USE_DELAY_SLOT, &done); 4633 __ Branch(USE_DELAY_SLOT, &done);
4625 __ li(input_reg, Operand(1)); // In delay slot. 4634 __ li(input_reg, Operand(1)); // In delay slot.
4626 4635
4627 __ bind(&check_false); 4636 __ bind(&check_false);
4628 __ LoadRoot(at, Heap::kFalseValueRootIndex); 4637 __ LoadRoot(at, Heap::kFalseValueRootIndex);
4629 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean, 4638 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean,
4630 scratch2, Operand(at)); 4639 scratch2, Operand(at));
4631 __ Branch(USE_DELAY_SLOT, &done); 4640 __ Branch(USE_DELAY_SLOT, &done);
4632 __ mov(input_reg, zero_reg); // In delay slot. 4641 __ mov(input_reg, zero_reg); // In delay slot.
4633 } else { 4642 } else {
4634 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1, 4643 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
4635 Operand(at)); 4644 Operand(at));
4636 4645
4637 // Load the double value. 4646 // Load the double value.
4638 __ ldc1(double_scratch, 4647 __ ldc1(double_scratch,
4639 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 4648 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4640 4649
4641 Register except_flag = scratch2; 4650 Register except_flag = scratch2;
4642 __ EmitFPUTruncate(kRoundToZero, 4651 __ EmitFPUTruncate(kRoundToZero,
4643 input_reg, 4652 input_reg,
4644 double_scratch, 4653 double_scratch,
4645 scratch1, 4654 scratch1,
4646 double_scratch2, 4655 double_scratch2,
4647 except_flag, 4656 except_flag,
4648 kCheckForInexactConversion); 4657 kCheckForInexactConversion);
4649 4658
4650 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, 4659 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
4651 Operand(zero_reg)); 4660 Operand(zero_reg));
4652 4661
4653 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4662 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4654 __ Branch(&done, ne, input_reg, Operand(zero_reg)); 4663 __ Branch(&done, ne, input_reg, Operand(zero_reg));
4655 4664
4656 __ Mfhc1(scratch1, double_scratch); 4665 __ Mfhc1(scratch1, double_scratch);
4657 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); 4666 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4658 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, 4667 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
4659 Operand(zero_reg)); 4668 Operand(zero_reg));
4660 } 4669 }
4661 } 4670 }
4662 __ bind(&done); 4671 __ bind(&done);
4663 } 4672 }
4664 4673
4665 4674
4666 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4675 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4667 class DeferredTaggedToI final : public LDeferredCode { 4676 class DeferredTaggedToI final : public LDeferredCode {
4668 public: 4677 public:
(...skipping 56 matching lines...)
4725 4734
4726 __ EmitFPUTruncate(kRoundToMinusInf, 4735 __ EmitFPUTruncate(kRoundToMinusInf,
4727 result_reg, 4736 result_reg,
4728 double_input, 4737 double_input,
4729 scratch1, 4738 scratch1,
4730 double_scratch0(), 4739 double_scratch0(),
4731 except_flag, 4740 except_flag,
4732 kCheckForInexactConversion); 4741 kCheckForInexactConversion);
4733 4742
4734 // Deopt if the operation did not succeed (except_flag != 0). 4743 // Deopt if the operation did not succeed (except_flag != 0).
4735 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, 4744 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
4736 Operand(zero_reg)); 4745 Operand(zero_reg));
4737 4746
4738 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4747 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4739 Label done; 4748 Label done;
4740 __ Branch(&done, ne, result_reg, Operand(zero_reg)); 4749 __ Branch(&done, ne, result_reg, Operand(zero_reg));
4741 __ Mfhc1(scratch1, double_input); 4750 __ Mfhc1(scratch1, double_input);
4742 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); 4751 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4743 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, 4752 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
4744 Operand(zero_reg)); 4753 Operand(zero_reg));
4745 __ bind(&done); 4754 __ bind(&done);
4746 } 4755 }
4747 } 4756 }
4748 } 4757 }
4749 4758
4750 4759
4751 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 4760 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4752 Register result_reg = ToRegister(instr->result()); 4761 Register result_reg = ToRegister(instr->result());
4753 Register scratch1 = LCodeGen::scratch0(); 4762 Register scratch1 = LCodeGen::scratch0();
4754 DoubleRegister double_input = ToDoubleRegister(instr->value()); 4763 DoubleRegister double_input = ToDoubleRegister(instr->value());
4755 4764
4756 if (instr->truncating()) { 4765 if (instr->truncating()) {
4757 __ TruncateDoubleToI(result_reg, double_input); 4766 __ TruncateDoubleToI(result_reg, double_input);
4758 } else { 4767 } else {
4759 Register except_flag = LCodeGen::scratch1(); 4768 Register except_flag = LCodeGen::scratch1();
4760 4769
4761 __ EmitFPUTruncate(kRoundToMinusInf, 4770 __ EmitFPUTruncate(kRoundToMinusInf,
4762 result_reg, 4771 result_reg,
4763 double_input, 4772 double_input,
4764 scratch1, 4773 scratch1,
4765 double_scratch0(), 4774 double_scratch0(),
4766 except_flag, 4775 except_flag,
4767 kCheckForInexactConversion); 4776 kCheckForInexactConversion);
4768 4777
4769 // Deopt if the operation did not succeed (except_flag != 0). 4778 // Deopt if the operation did not succeed (except_flag != 0).
4770 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag, 4779 DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
4771 Operand(zero_reg)); 4780 Operand(zero_reg));
4772 4781
4773 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4782 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4774 Label done; 4783 Label done;
4775 __ Branch(&done, ne, result_reg, Operand(zero_reg)); 4784 __ Branch(&done, ne, result_reg, Operand(zero_reg));
4776 __ Mfhc1(scratch1, double_input); 4785 __ Mfhc1(scratch1, double_input);
4777 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); 4786 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4778 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1, 4787 DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
4779 Operand(zero_reg)); 4788 Operand(zero_reg));
4780 __ bind(&done); 4789 __ bind(&done);
4781 } 4790 }
4782 } 4791 }
4783 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1); 4792 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
4784 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg)); 4793 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch1,
4794 Operand(zero_reg));
4785 } 4795 }
4786 4796
4787 4797
4788 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 4798 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4789 LOperand* input = instr->value(); 4799 LOperand* input = instr->value();
4790 __ SmiTst(ToRegister(input), at); 4800 __ SmiTst(ToRegister(input), at);
4791 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg)); 4801 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
4792 } 4802 }
4793 4803
4794 4804
4795 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 4805 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4796 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 4806 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4797 LOperand* input = instr->value(); 4807 LOperand* input = instr->value();
4798 __ SmiTst(ToRegister(input), at); 4808 __ SmiTst(ToRegister(input), at);
4799 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg)); 4809 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
4800 } 4810 }
4801 } 4811 }
4802 4812
4803 4813
4804 void LCodeGen::DoCheckArrayBufferNotNeutered( 4814 void LCodeGen::DoCheckArrayBufferNotNeutered(
4805 LCheckArrayBufferNotNeutered* instr) { 4815 LCheckArrayBufferNotNeutered* instr) {
4806 Register view = ToRegister(instr->view()); 4816 Register view = ToRegister(instr->view());
4807 Register scratch = scratch0(); 4817 Register scratch = scratch0();
4808 4818
4809 __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset)); 4819 __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
4810 __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); 4820 __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
4811 __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift); 4821 __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
4812 DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg)); 4822 DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
4823 Operand(zero_reg));
4813 } 4824 }
4814 4825
4815 4826
4816 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 4827 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4817 Register input = ToRegister(instr->value()); 4828 Register input = ToRegister(instr->value());
4818 Register scratch = scratch0(); 4829 Register scratch = scratch0();
4819 4830
4820 __ GetObjectType(input, scratch, scratch); 4831 __ GetObjectType(input, scratch, scratch);
4821 4832
4822 if (instr->hydrogen()->is_interval_check()) { 4833 if (instr->hydrogen()->is_interval_check()) {
4823 InstanceType first; 4834 InstanceType first;
4824 InstanceType last; 4835 InstanceType last;
4825 instr->hydrogen()->GetCheckInterval(&first, &last); 4836 instr->hydrogen()->GetCheckInterval(&first, &last);
4826 4837
4827 // If there is only one type in the interval check for equality. 4838 // If there is only one type in the interval check for equality.
4828 if (first == last) { 4839 if (first == last) {
4829 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, 4840 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
4830 Operand(first)); 4841 Operand(first));
4831 } else { 4842 } else {
4832 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch, 4843 DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
4833 Operand(first)); 4844 Operand(first));
4834 // Omit check for the last type. 4845 // Omit check for the last type.
4835 if (last != LAST_TYPE) { 4846 if (last != LAST_TYPE) {
4836 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch, 4847 DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
4837 Operand(last)); 4848 Operand(last));
4838 } 4849 }
4839 } 4850 }
4840 } else { 4851 } else {
4841 uint8_t mask; 4852 uint8_t mask;
4842 uint8_t tag; 4853 uint8_t tag;
4843 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 4854 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4844 4855
4845 if (base::bits::IsPowerOfTwo32(mask)) { 4856 if (base::bits::IsPowerOfTwo32(mask)) {
4846 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); 4857 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
4847 __ And(at, scratch, mask); 4858 __ And(at, scratch, mask);
4848 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType, 4859 DeoptimizeIf(tag == 0 ? ne : eq, instr,
4849 at, Operand(zero_reg)); 4860 DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
4850 } else { 4861 } else {
4851 __ And(scratch, scratch, Operand(mask)); 4862 __ And(scratch, scratch, Operand(mask));
4852 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch, 4863 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
4853 Operand(tag)); 4864 Operand(tag));
4854 } 4865 }
4855 } 4866 }
4856 } 4867 }
4857 4868
4858 4869
4859 void LCodeGen::DoCheckValue(LCheckValue* instr) { 4870 void LCodeGen::DoCheckValue(LCheckValue* instr) {
4860 Register reg = ToRegister(instr->value()); 4871 Register reg = ToRegister(instr->value());
4861 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 4872 Handle<HeapObject> object = instr->hydrogen()->object().handle();
4862 AllowDeferredHandleDereference smi_check; 4873 AllowDeferredHandleDereference smi_check;
4863 if (isolate()->heap()->InNewSpace(*object)) { 4874 if (isolate()->heap()->InNewSpace(*object)) {
4864 Register reg = ToRegister(instr->value()); 4875 Register reg = ToRegister(instr->value());
4865 Handle<Cell> cell = isolate()->factory()->NewCell(object); 4876 Handle<Cell> cell = isolate()->factory()->NewCell(object);
4866 __ li(at, Operand(cell)); 4877 __ li(at, Operand(cell));
4867 __ lw(at, FieldMemOperand(at, Cell::kValueOffset)); 4878 __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
4868 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at)); 4879 DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
4869 } else { 4880 } else {
4870 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object)); 4881 DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
4882 Operand(object));
4871 } 4883 }
4872 } 4884 }
4873 4885
4874 4886
4875 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 4887 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4876 { 4888 {
4877 PushSafepointRegistersScope scope(this); 4889 PushSafepointRegistersScope scope(this);
4878 __ push(object); 4890 __ push(object);
4879 __ mov(cp, zero_reg); 4891 __ mov(cp, zero_reg);
4880 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 4892 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4881 RecordSafepointWithRegisters( 4893 RecordSafepointWithRegisters(
4882 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 4894 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4883 __ StoreToSafepointRegisterSlot(v0, scratch0()); 4895 __ StoreToSafepointRegisterSlot(v0, scratch0());
4884 } 4896 }
4885 __ SmiTst(scratch0(), at); 4897 __ SmiTst(scratch0(), at);
4886 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at, 4898 DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
4887 Operand(zero_reg)); 4899 Operand(zero_reg));
4888 } 4900 }
4889 4901
4890 4902
4891 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 4903 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4892 class DeferredCheckMaps final : public LDeferredCode { 4904 class DeferredCheckMaps final : public LDeferredCode {
4893 public: 4905 public:
4894 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 4906 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4895 : LDeferredCode(codegen), instr_(instr), object_(object) { 4907 : LDeferredCode(codegen), instr_(instr), object_(object) {
4896 SetExit(check_maps()); 4908 SetExit(check_maps());
(...skipping 34 matching lines...)
4931 Label success; 4943 Label success;
4932 for (int i = 0; i < maps->size() - 1; i++) { 4944 for (int i = 0; i < maps->size() - 1; i++) {
4933 Handle<Map> map = maps->at(i).handle(); 4945 Handle<Map> map = maps->at(i).handle();
4934 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); 4946 __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
4935 } 4947 }
4936 Handle<Map> map = maps->at(maps->size() - 1).handle(); 4948 Handle<Map> map = maps->at(maps->size() - 1).handle();
4937 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). 4949 // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
4938 if (instr->hydrogen()->HasMigrationTarget()) { 4950 if (instr->hydrogen()->HasMigrationTarget()) {
4939 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); 4951 __ Branch(deferred->entry(), ne, map_reg, Operand(map));
4940 } else { 4952 } else {
4941 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map)); 4953 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
4942 } 4954 }
4943 4955
4944 __ bind(&success); 4956 __ bind(&success);
4945 } 4957 }
4946 4958
4947 4959
4948 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 4960 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4949 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); 4961 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4950 Register result_reg = ToRegister(instr->result()); 4962 Register result_reg = ToRegister(instr->result());
4951 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); 4963 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
(...skipping 17 matching lines...)
4969 4981
4970 // Both smi and heap number cases are handled. 4982 // Both smi and heap number cases are handled.
4971 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); 4983 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
4972 4984
4973 // Check for heap number 4985 // Check for heap number
4974 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4986 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4975 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); 4987 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
4976 4988
4977 // Check for undefined. Undefined is converted to zero for clamping 4989 // Check for undefined. Undefined is converted to zero for clamping
4978 // conversions. 4990 // conversions.
4979 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg, 4991 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
4980 Operand(factory()->undefined_value())); 4992 Operand(factory()->undefined_value()));
4981 __ mov(result_reg, zero_reg); 4993 __ mov(result_reg, zero_reg);
4982 __ jmp(&done); 4994 __ jmp(&done);
4983 4995
4984 // Heap number 4996 // Heap number
4985 __ bind(&heap_number); 4997 __ bind(&heap_number);
4986 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, 4998 __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
4987 HeapNumber::kValueOffset)); 4999 HeapNumber::kValueOffset));
4988 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); 5000 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4989 __ jmp(&done); 5001 __ jmp(&done);
(...skipping 445 matching lines...)
5435 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); 5447 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5436 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); 5448 __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5437 __ jmp(&done); 5449 __ jmp(&done);
5438 5450
5439 __ bind(&load_cache); 5451 __ bind(&load_cache);
5440 __ LoadInstanceDescriptors(map, result); 5452 __ LoadInstanceDescriptors(map, result);
5441 __ lw(result, 5453 __ lw(result,
5442 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 5454 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5443 __ lw(result, 5455 __ lw(result,
5444 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 5456 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5445 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg)); 5457 DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
5458 Operand(zero_reg));
5446 5459
5447 __ bind(&done); 5460 __ bind(&done);
5448 } 5461 }
5449 5462
5450 5463
5451 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 5464 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5452 Register object = ToRegister(instr->value()); 5465 Register object = ToRegister(instr->value());
5453 Register map = ToRegister(instr->map()); 5466 Register map = ToRegister(instr->map());
5454 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); 5467 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5455 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0())); 5468 DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
5469 Operand(scratch0()));
5456 } 5470 }
5457 5471
5458 5472
5459 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, 5473 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5460 Register result, 5474 Register result,
5461 Register object, 5475 Register object,
5462 Register index) { 5476 Register index) {
5463 PushSafepointRegistersScope scope(this); 5477 PushSafepointRegistersScope scope(this);
5464 __ Push(object, index); 5478 __ Push(object, index);
5465 __ mov(cp, zero_reg); 5479 __ mov(cp, zero_reg);
(...skipping 61 matching lines...)
5527 __ lw(result, FieldMemOperand(scratch, 5541 __ lw(result, FieldMemOperand(scratch,
5528 FixedArray::kHeaderSize - kPointerSize)); 5542 FixedArray::kHeaderSize - kPointerSize));
5529 __ bind(deferred->exit()); 5543 __ bind(deferred->exit());
5530 __ bind(&done); 5544 __ bind(&done);
5531 } 5545 }
5532 5546
5533 #undef __ 5547 #undef __
5534 5548
5535 } // namespace internal 5549 } // namespace internal
5536 } // namespace v8 5550 } // namespace v8
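Every rewritten call site in this file follows the same mechanical pattern: the reason argument passed to DeoptimizeIf moves from the old Deoptimizer::k... constants to the standalone DeoptimizeReason::k... enum, with the register/operand arguments unchanged. The following is a minimal, self-contained C++ sketch of that shape only; the DeoptimizeReasonToString helper and the simplified DeoptimizeIf signature here are illustrative stand-ins, not the real V8 declarations, which also carry the instruction, bailout type and MIPS comparison operands.

// Illustrative sketch only -- stand-in DeoptimizeReason enum and DeoptimizeIf
// wrapper mirroring the shape of the rewritten call sites in this patch.
#include <cstdio>

enum class DeoptimizeReason {
  kOverflow,
  kNotAHeapNumber,
  kOutOfBounds,
};

// Maps a reason to a printable name; the point of the enum is that the reason
// travels with the deopt instead of being a bare bailout.
static const char* DeoptimizeReasonToString(DeoptimizeReason reason) {
  switch (reason) {
    case DeoptimizeReason::kOverflow:       return "overflow";
    case DeoptimizeReason::kNotAHeapNumber: return "not a heap number";
    case DeoptimizeReason::kOutOfBounds:    return "out of bounds";
  }
  return "unknown";
}

// Stand-in for the codegen helper: take the bailout when `condition` holds and
// record why.
static void DeoptimizeIf(bool condition, DeoptimizeReason reason) {
  if (condition) std::printf("deopt: %s\n", DeoptimizeReasonToString(reason));
}

int main() {
  // Mirrors the call-site shape above, e.g.
  // DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
  DeoptimizeIf(true, DeoptimizeReason::kOverflow);
  return 0;
}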