| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_X64 | 7 #if V8_TARGET_ARCH_X64 |
| 8 | 8 |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
| (...skipping 706 matching lines...) |
| 717 int pc_offset = masm()->pc_offset(); | 717 int pc_offset = masm()->pc_offset(); |
| 718 environment->Register(deoptimization_index, | 718 environment->Register(deoptimization_index, |
| 719 translation.index(), | 719 translation.index(), |
| 720 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 720 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 721 deoptimizations_.Add(environment, environment->zone()); | 721 deoptimizations_.Add(environment, environment->zone()); |
| 722 } | 722 } |
| 723 } | 723 } |
| 724 | 724 |
| 725 | 725 |
| 726 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, | 726 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, |
| 727 const char* detail, | 727 Deoptimizer::DeoptReason deopt_reason, |
| 728 Deoptimizer::BailoutType bailout_type) { | 728 Deoptimizer::BailoutType bailout_type) { |
| 729 LEnvironment* environment = instr->environment(); | 729 LEnvironment* environment = instr->environment(); |
| 730 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 730 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 731 DCHECK(environment->HasBeenRegistered()); | 731 DCHECK(environment->HasBeenRegistered()); |
| 732 int id = environment->deoptimization_index(); | 732 int id = environment->deoptimization_index(); |
| 733 DCHECK(info()->IsOptimizing() || info()->IsStub()); | 733 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
| 734 Address entry = | 734 Address entry = |
| 735 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 735 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 736 if (entry == NULL) { | 736 if (entry == NULL) { |
| 737 Abort(kBailoutWasNotPrepared); | 737 Abort(kBailoutWasNotPrepared); |
| (...skipping 25 matching lines...) |
| 763 if (info()->ShouldTrapOnDeopt()) { | 763 if (info()->ShouldTrapOnDeopt()) { |
| 764 Label done; | 764 Label done; |
| 765 if (cc != no_condition) { | 765 if (cc != no_condition) { |
| 766 __ j(NegateCondition(cc), &done, Label::kNear); | 766 __ j(NegateCondition(cc), &done, Label::kNear); |
| 767 } | 767 } |
| 768 __ int3(); | 768 __ int3(); |
| 769 __ bind(&done); | 769 __ bind(&done); |
| 770 } | 770 } |
| 771 | 771 |
| 772 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), | 772 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), |
| 773 instr->Mnemonic(), detail); | 773 instr->Mnemonic(), deopt_reason); |
| 774 DCHECK(info()->IsStub() || frame_is_built_); | 774 DCHECK(info()->IsStub() || frame_is_built_); |
| 775 // Go through jump table if we need to handle condition, build frame, or | 775 // Go through jump table if we need to handle condition, build frame, or |
| 776 // restore caller doubles. | 776 // restore caller doubles. |
| 777 if (cc == no_condition && frame_is_built_ && | 777 if (cc == no_condition && frame_is_built_ && |
| 778 !info()->saves_caller_doubles()) { | 778 !info()->saves_caller_doubles()) { |
| 779 DeoptComment(reason); | 779 DeoptComment(reason); |
| 780 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 780 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| 781 } else { | 781 } else { |
| 782 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, | 782 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, |
| 783 !frame_is_built_); | 783 !frame_is_built_); |
| 784 // We often have several deopts to the same entry, reuse the last | 784 // We often have several deopts to the same entry, reuse the last |
| 785 // jump entry if this is the case. | 785 // jump entry if this is the case. |
| 786 if (jump_table_.is_empty() || | 786 if (jump_table_.is_empty() || |
| 787 !table_entry.IsEquivalentTo(jump_table_.last())) { | 787 !table_entry.IsEquivalentTo(jump_table_.last())) { |
| 788 jump_table_.Add(table_entry, zone()); | 788 jump_table_.Add(table_entry, zone()); |
| 789 } | 789 } |
| 790 if (cc == no_condition) { | 790 if (cc == no_condition) { |
| 791 __ jmp(&jump_table_.last().label); | 791 __ jmp(&jump_table_.last().label); |
| 792 } else { | 792 } else { |
| 793 __ j(cc, &jump_table_.last().label); | 793 __ j(cc, &jump_table_.last().label); |
| 794 } | 794 } |
| 795 } | 795 } |
| 796 } | 796 } |
| 797 | 797 |
| 798 | 798 |
| 799 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, | 799 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr, |
| 800 const char* detail) { | 800 Deoptimizer::DeoptReason deopt_reason) { |
| 801 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 801 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| 802 ? Deoptimizer::LAZY | 802 ? Deoptimizer::LAZY |
| 803 : Deoptimizer::EAGER; | 803 : Deoptimizer::EAGER; |
| 804 DeoptimizeIf(cc, instr, detail, bailout_type); | 804 DeoptimizeIf(cc, instr, deopt_reason, bailout_type); |
| 805 } | 805 } |
| 806 | 806 |
| 807 | 807 |
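Note on the interface change: both `DeoptimizeIf` overloads now take a `Deoptimizer::DeoptReason` enumerator instead of a free-form `const char*`. A minimal before/after of a call site, using the enumerator names that appear in the hunks below:

```cpp
// Old: free-form string describing the bailout.
DeoptimizeIf(zero, instr, "minus zero");

// New: a Deoptimizer::DeoptReason enumerator.  The three-argument overload
// still picks LAZY for stubs and EAGER otherwise before forwarding.
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
```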
| 808 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 808 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 809 int length = deoptimizations_.length(); | 809 int length = deoptimizations_.length(); |
| 810 if (length == 0) return; | 810 if (length == 0) return; |
| 811 Handle<DeoptimizationInputData> data = | 811 Handle<DeoptimizationInputData> data = |
| 812 DeoptimizationInputData::New(isolate(), length, TENURED); | 812 DeoptimizationInputData::New(isolate(), length, TENURED); |
| 813 | 813 |
| 814 Handle<ByteArray> translations = | 814 Handle<ByteArray> translations = |
| (...skipping 211 matching lines...) |
| 1026 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1026 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1027 Label dividend_is_not_negative, done; | 1027 Label dividend_is_not_negative, done; |
| 1028 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1028 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 1029 __ testl(dividend, dividend); | 1029 __ testl(dividend, dividend); |
| 1030 __ j(not_sign, &dividend_is_not_negative, Label::kNear); | 1030 __ j(not_sign, &dividend_is_not_negative, Label::kNear); |
| 1031 // Note that this is correct even for kMinInt operands. | 1031 // Note that this is correct even for kMinInt operands. |
| 1032 __ negl(dividend); | 1032 __ negl(dividend); |
| 1033 __ andl(dividend, Immediate(mask)); | 1033 __ andl(dividend, Immediate(mask)); |
| 1034 __ negl(dividend); | 1034 __ negl(dividend); |
| 1035 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1035 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1036 DeoptimizeIf(zero, instr, "minus zero"); | 1036 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1037 } | 1037 } |
| 1038 __ jmp(&done, Label::kNear); | 1038 __ jmp(&done, Label::kNear); |
| 1039 } | 1039 } |
| 1040 | 1040 |
| 1041 __ bind(&dividend_is_not_negative); | 1041 __ bind(&dividend_is_not_negative); |
| 1042 __ andl(dividend, Immediate(mask)); | 1042 __ andl(dividend, Immediate(mask)); |
| 1043 __ bind(&done); | 1043 __ bind(&done); |
| 1044 } | 1044 } |
| 1045 | 1045 |
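For readers of the masked/negated path in `DoModByPowerOf2I`: a scalar sketch of what the emitted sequence computes (the helper name is illustrative; unsigned arithmetic stands in for the two's-complement wrap of `negl`):

```cpp
#include <cstdint>

// divisor's magnitude is a power of two; mask matches line 1026 above.
int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  if (dividend < 0) {
    // negl / andl / negl: written with unsigned arithmetic so the sketch
    // stays well-defined, even for kMinInt.
    uint32_t r = (0u - static_cast<uint32_t>(dividend)) & static_cast<uint32_t>(mask);
    return -static_cast<int32_t>(r);  // r == 0 is the Deoptimizer::kMinusZero case
  }
  return dividend & mask;
}
```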
| 1046 | 1046 |
| 1047 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1047 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 1048 Register dividend = ToRegister(instr->dividend()); | 1048 Register dividend = ToRegister(instr->dividend()); |
| 1049 int32_t divisor = instr->divisor(); | 1049 int32_t divisor = instr->divisor(); |
| 1050 DCHECK(ToRegister(instr->result()).is(rax)); | 1050 DCHECK(ToRegister(instr->result()).is(rax)); |
| 1051 | 1051 |
| 1052 if (divisor == 0) { | 1052 if (divisor == 0) { |
| 1053 DeoptimizeIf(no_condition, instr, "division by zero"); | 1053 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); |
| 1054 return; | 1054 return; |
| 1055 } | 1055 } |
| 1056 | 1056 |
| 1057 __ TruncatingDiv(dividend, Abs(divisor)); | 1057 __ TruncatingDiv(dividend, Abs(divisor)); |
| 1058 __ imull(rdx, rdx, Immediate(Abs(divisor))); | 1058 __ imull(rdx, rdx, Immediate(Abs(divisor))); |
| 1059 __ movl(rax, dividend); | 1059 __ movl(rax, dividend); |
| 1060 __ subl(rax, rdx); | 1060 __ subl(rax, rdx); |
| 1061 | 1061 |
| 1062 // Check for negative zero. | 1062 // Check for negative zero. |
| 1063 HMod* hmod = instr->hydrogen(); | 1063 HMod* hmod = instr->hydrogen(); |
| 1064 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1064 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1065 Label remainder_not_zero; | 1065 Label remainder_not_zero; |
| 1066 __ j(not_zero, &remainder_not_zero, Label::kNear); | 1066 __ j(not_zero, &remainder_not_zero, Label::kNear); |
| 1067 __ cmpl(dividend, Immediate(0)); | 1067 __ cmpl(dividend, Immediate(0)); |
| 1068 DeoptimizeIf(less, instr, "minus zero"); | 1068 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero); |
| 1069 __ bind(&remainder_not_zero); | 1069 __ bind(&remainder_not_zero); |
| 1070 } | 1070 } |
| 1071 } | 1071 } |
| 1072 | 1072 |
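The constant-modulus path above reduces to a truncating division by the divisor's magnitude followed by a back-multiplication. A scalar sketch (helper name illustrative; the magic-number details of `TruncatingDiv` are elided, and `divisor != kMinInt` is assumed for brevity):

```cpp
#include <cstdint>

int32_t ModByConst(int32_t dividend, int32_t divisor) {  // divisor != 0
  int32_t abs_divisor = divisor < 0 ? -divisor : divisor;
  int32_t quotient = dividend / abs_divisor;       // TruncatingDiv(dividend, Abs(divisor))
  // Remainder by back-multiplication; a zero remainder with a negative
  // dividend is the Deoptimizer::kMinusZero case checked above.
  return dividend - quotient * abs_divisor;
}
```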
| 1073 | 1073 |
| 1074 void LCodeGen::DoModI(LModI* instr) { | 1074 void LCodeGen::DoModI(LModI* instr) { |
| 1075 HMod* hmod = instr->hydrogen(); | 1075 HMod* hmod = instr->hydrogen(); |
| 1076 | 1076 |
| 1077 Register left_reg = ToRegister(instr->left()); | 1077 Register left_reg = ToRegister(instr->left()); |
| 1078 DCHECK(left_reg.is(rax)); | 1078 DCHECK(left_reg.is(rax)); |
| 1079 Register right_reg = ToRegister(instr->right()); | 1079 Register right_reg = ToRegister(instr->right()); |
| 1080 DCHECK(!right_reg.is(rax)); | 1080 DCHECK(!right_reg.is(rax)); |
| 1081 DCHECK(!right_reg.is(rdx)); | 1081 DCHECK(!right_reg.is(rdx)); |
| 1082 Register result_reg = ToRegister(instr->result()); | 1082 Register result_reg = ToRegister(instr->result()); |
| 1083 DCHECK(result_reg.is(rdx)); | 1083 DCHECK(result_reg.is(rdx)); |
| 1084 | 1084 |
| 1085 Label done; | 1085 Label done; |
| 1086 // Check for x % 0, idiv would signal a divide error. We have to | 1086 // Check for x % 0, idiv would signal a divide error. We have to |
| 1087 // deopt in this case because we can't return a NaN. | 1087 // deopt in this case because we can't return a NaN. |
| 1088 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1088 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1089 __ testl(right_reg, right_reg); | 1089 __ testl(right_reg, right_reg); |
| 1090 DeoptimizeIf(zero, instr, "division by zero"); | 1090 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero); |
| 1091 } | 1091 } |
| 1092 | 1092 |
| 1093 // Check for kMinInt % -1, idiv would signal a divide error. We | 1093 // Check for kMinInt % -1, idiv would signal a divide error. We |
| 1094 // have to deopt if we care about -0, because we can't return that. | 1094 // have to deopt if we care about -0, because we can't return that. |
| 1095 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1095 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 1096 Label no_overflow_possible; | 1096 Label no_overflow_possible; |
| 1097 __ cmpl(left_reg, Immediate(kMinInt)); | 1097 __ cmpl(left_reg, Immediate(kMinInt)); |
| 1098 __ j(not_zero, &no_overflow_possible, Label::kNear); | 1098 __ j(not_zero, &no_overflow_possible, Label::kNear); |
| 1099 __ cmpl(right_reg, Immediate(-1)); | 1099 __ cmpl(right_reg, Immediate(-1)); |
| 1100 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1100 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1101 DeoptimizeIf(equal, instr, "minus zero"); | 1101 DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero); |
| 1102 } else { | 1102 } else { |
| 1103 __ j(not_equal, &no_overflow_possible, Label::kNear); | 1103 __ j(not_equal, &no_overflow_possible, Label::kNear); |
| 1104 __ Set(result_reg, 0); | 1104 __ Set(result_reg, 0); |
| 1105 __ jmp(&done, Label::kNear); | 1105 __ jmp(&done, Label::kNear); |
| 1106 } | 1106 } |
| 1107 __ bind(&no_overflow_possible); | 1107 __ bind(&no_overflow_possible); |
| 1108 } | 1108 } |
| 1109 | 1109 |
| 1110 // Sign extend dividend in eax into edx:eax, since we are using only the low | 1110 // Sign extend dividend in eax into edx:eax, since we are using only the low |
| 1111 // 32 bits of the values. | 1111 // 32 bits of the values. |
| 1112 __ cdq(); | 1112 __ cdq(); |
| 1113 | 1113 |
| 1114 // If we care about -0, test if the dividend is <0 and the result is 0. | 1114 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1115 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1115 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1116 Label positive_left; | 1116 Label positive_left; |
| 1117 __ testl(left_reg, left_reg); | 1117 __ testl(left_reg, left_reg); |
| 1118 __ j(not_sign, &positive_left, Label::kNear); | 1118 __ j(not_sign, &positive_left, Label::kNear); |
| 1119 __ idivl(right_reg); | 1119 __ idivl(right_reg); |
| 1120 __ testl(result_reg, result_reg); | 1120 __ testl(result_reg, result_reg); |
| 1121 DeoptimizeIf(zero, instr, "minus zero"); | 1121 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1122 __ jmp(&done, Label::kNear); | 1122 __ jmp(&done, Label::kNear); |
| 1123 __ bind(&positive_left); | 1123 __ bind(&positive_left); |
| 1124 } | 1124 } |
| 1125 __ idivl(right_reg); | 1125 __ idivl(right_reg); |
| 1126 __ bind(&done); | 1126 __ bind(&done); |
| 1127 } | 1127 } |
| 1128 | 1128 |
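For reference, the two operand combinations `DoModI` filters out before `idivl` are exactly the ones where the instruction would raise a divide error. A scalar sketch of the resulting behaviour (helper name illustrative):

```cpp
#include <cstdint>

int32_t ModI(int32_t left, int32_t right) {
  if (right == 0) {
    // idivl would fault; Deoptimizer::kDivisionByZero (we cannot return NaN).
    return 0;
  }
  if (left == INT32_MIN && right == -1) {
    // idivl would fault on the overflowing quotient; the remainder is 0, so
    // either return 0 or deopt (Deoptimizer::kMinusZero) when -0 matters.
    return 0;
  }
  // The sign of the result follows the dividend; a zero result with a
  // negative dividend is the remaining Deoptimizer::kMinusZero case.
  return left % right;
}
```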
| 1129 | 1129 |
| 1130 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | 1130 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
| 1131 Register dividend = ToRegister(instr->dividend()); | 1131 Register dividend = ToRegister(instr->dividend()); |
| 1132 int32_t divisor = instr->divisor(); | 1132 int32_t divisor = instr->divisor(); |
| 1133 DCHECK(dividend.is(ToRegister(instr->result()))); | 1133 DCHECK(dividend.is(ToRegister(instr->result()))); |
| 1134 | 1134 |
| 1135 // If the divisor is positive, things are easy: There can be no deopts and we | 1135 // If the divisor is positive, things are easy: There can be no deopts and we |
| 1136 // can simply do an arithmetic right shift. | 1136 // can simply do an arithmetic right shift. |
| 1137 if (divisor == 1) return; | 1137 if (divisor == 1) return; |
| 1138 int32_t shift = WhichPowerOf2Abs(divisor); | 1138 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1139 if (divisor > 1) { | 1139 if (divisor > 1) { |
| 1140 __ sarl(dividend, Immediate(shift)); | 1140 __ sarl(dividend, Immediate(shift)); |
| 1141 return; | 1141 return; |
| 1142 } | 1142 } |
| 1143 | 1143 |
| 1144 // If the divisor is negative, we have to negate and handle edge cases. | 1144 // If the divisor is negative, we have to negate and handle edge cases. |
| 1145 __ negl(dividend); | 1145 __ negl(dividend); |
| 1146 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1146 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1147 DeoptimizeIf(zero, instr, "minus zero"); | 1147 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1148 } | 1148 } |
| 1149 | 1149 |
| 1150 // Dividing by -1 is basically negation, unless we overflow. | 1150 // Dividing by -1 is basically negation, unless we overflow. |
| 1151 if (divisor == -1) { | 1151 if (divisor == -1) { |
| 1152 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1152 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1153 DeoptimizeIf(overflow, instr, "overflow"); | 1153 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1154 } | 1154 } |
| 1155 return; | 1155 return; |
| 1156 } | 1156 } |
| 1157 | 1157 |
| 1158 // If the negation could not overflow, simply shifting is OK. | 1158 // If the negation could not overflow, simply shifting is OK. |
| 1159 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1159 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1160 __ sarl(dividend, Immediate(shift)); | 1160 __ sarl(dividend, Immediate(shift)); |
| 1161 return; | 1161 return; |
| 1162 } | 1162 } |
| 1163 | 1163 |
| 1164 Label not_kmin_int, done; | 1164 Label not_kmin_int, done; |
| 1165 __ j(no_overflow, &not_kmin_int, Label::kNear); | 1165 __ j(no_overflow, &not_kmin_int, Label::kNear); |
| 1166 __ movl(dividend, Immediate(kMinInt / divisor)); | 1166 __ movl(dividend, Immediate(kMinInt / divisor)); |
| 1167 __ jmp(&done, Label::kNear); | 1167 __ jmp(&done, Label::kNear); |
| 1168 __ bind(&not_kmin_int); | 1168 __ bind(&not_kmin_int); |
| 1169 __ sarl(dividend, Immediate(shift)); | 1169 __ sarl(dividend, Immediate(shift)); |
| 1170 __ bind(&done); | 1170 __ bind(&done); |
| 1171 } | 1171 } |
| 1172 | 1172 |
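The negative-divisor path above relies on `floor(x / -2^k)` equaling an arithmetic right shift of `-x` by `k`, with `kMinInt` as the only dividend whose negation overflows. A scalar sketch (helper name illustrative; assumes an arithmetic right shift, matching `sarl`):

```cpp
#include <cstdint>

// divisor == -(1 << shift) with shift >= 1.  (divisor == -1 deopts on
// overflow instead, as in the code above.)
int32_t FlooringDivByNegPowerOf2(int32_t dividend, int32_t shift, int32_t divisor) {
  // dividend == 0 is the Deoptimizer::kMinusZero case (0 / negative is -0 in JS).
  if (dividend == INT32_MIN) {
    return INT32_MIN / divisor;  // negation overflows; use the precomputed constant
  }
  return -dividend >> shift;     // e.g. 5 / -4: -5 >> 2 == -2 == floor(5 / -4)
}
```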
| 1173 | 1173 |
| 1174 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1174 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1175 Register dividend = ToRegister(instr->dividend()); | 1175 Register dividend = ToRegister(instr->dividend()); |
| 1176 int32_t divisor = instr->divisor(); | 1176 int32_t divisor = instr->divisor(); |
| 1177 DCHECK(ToRegister(instr->result()).is(rdx)); | 1177 DCHECK(ToRegister(instr->result()).is(rdx)); |
| 1178 | 1178 |
| 1179 if (divisor == 0) { | 1179 if (divisor == 0) { |
| 1180 DeoptimizeIf(no_condition, instr, "division by zero"); | 1180 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); |
| 1181 return; | 1181 return; |
| 1182 } | 1182 } |
| 1183 | 1183 |
| 1184 // Check for (0 / -x) that will produce negative zero. | 1184 // Check for (0 / -x) that will produce negative zero. |
| 1185 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1185 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1186 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1186 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1187 __ testl(dividend, dividend); | 1187 __ testl(dividend, dividend); |
| 1188 DeoptimizeIf(zero, instr, "minus zero"); | 1188 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1189 } | 1189 } |
| 1190 | 1190 |
| 1191 // Easy case: We need no dynamic check for the dividend and the flooring | 1191 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1192 // division is the same as the truncating division. | 1192 // division is the same as the truncating division. |
| 1193 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1193 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1194 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1194 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1195 __ TruncatingDiv(dividend, Abs(divisor)); | 1195 __ TruncatingDiv(dividend, Abs(divisor)); |
| 1196 if (divisor < 0) __ negl(rdx); | 1196 if (divisor < 0) __ negl(rdx); |
| 1197 return; | 1197 return; |
| 1198 } | 1198 } |
| (...skipping 26 matching lines...) |
| 1225 Register result = ToRegister(instr->result()); | 1225 Register result = ToRegister(instr->result()); |
| 1226 DCHECK(dividend.is(rax)); | 1226 DCHECK(dividend.is(rax)); |
| 1227 DCHECK(remainder.is(rdx)); | 1227 DCHECK(remainder.is(rdx)); |
| 1228 DCHECK(result.is(rax)); | 1228 DCHECK(result.is(rax)); |
| 1229 DCHECK(!divisor.is(rax)); | 1229 DCHECK(!divisor.is(rax)); |
| 1230 DCHECK(!divisor.is(rdx)); | 1230 DCHECK(!divisor.is(rdx)); |
| 1231 | 1231 |
| 1232 // Check for x / 0. | 1232 // Check for x / 0. |
| 1233 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1233 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1234 __ testl(divisor, divisor); | 1234 __ testl(divisor, divisor); |
| 1235 DeoptimizeIf(zero, instr, "division by zero"); | 1235 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero); |
| 1236 } | 1236 } |
| 1237 | 1237 |
| 1238 // Check for (0 / -x) that will produce negative zero. | 1238 // Check for (0 / -x) that will produce negative zero. |
| 1239 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1239 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1240 Label dividend_not_zero; | 1240 Label dividend_not_zero; |
| 1241 __ testl(dividend, dividend); | 1241 __ testl(dividend, dividend); |
| 1242 __ j(not_zero, &dividend_not_zero, Label::kNear); | 1242 __ j(not_zero, &dividend_not_zero, Label::kNear); |
| 1243 __ testl(divisor, divisor); | 1243 __ testl(divisor, divisor); |
| 1244 DeoptimizeIf(sign, instr, "minus zero"); | 1244 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
| 1245 __ bind(&dividend_not_zero); | 1245 __ bind(&dividend_not_zero); |
| 1246 } | 1246 } |
| 1247 | 1247 |
| 1248 // Check for (kMinInt / -1). | 1248 // Check for (kMinInt / -1). |
| 1249 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1249 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
| 1250 Label dividend_not_min_int; | 1250 Label dividend_not_min_int; |
| 1251 __ cmpl(dividend, Immediate(kMinInt)); | 1251 __ cmpl(dividend, Immediate(kMinInt)); |
| 1252 __ j(not_zero, &dividend_not_min_int, Label::kNear); | 1252 __ j(not_zero, &dividend_not_min_int, Label::kNear); |
| 1253 __ cmpl(divisor, Immediate(-1)); | 1253 __ cmpl(divisor, Immediate(-1)); |
| 1254 DeoptimizeIf(zero, instr, "overflow"); | 1254 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow); |
| 1255 __ bind(&dividend_not_min_int); | 1255 __ bind(&dividend_not_min_int); |
| 1256 } | 1256 } |
| 1257 | 1257 |
| 1258 // Sign extend to rdx (= remainder). | 1258 // Sign extend to rdx (= remainder). |
| 1259 __ cdq(); | 1259 __ cdq(); |
| 1260 __ idivl(divisor); | 1260 __ idivl(divisor); |
| 1261 | 1261 |
| 1262 Label done; | 1262 Label done; |
| 1263 __ testl(remainder, remainder); | 1263 __ testl(remainder, remainder); |
| 1264 __ j(zero, &done, Label::kNear); | 1264 __ j(zero, &done, Label::kNear); |
| 1265 __ xorl(remainder, divisor); | 1265 __ xorl(remainder, divisor); |
| 1266 __ sarl(remainder, Immediate(31)); | 1266 __ sarl(remainder, Immediate(31)); |
| 1267 __ addl(result, remainder); | 1267 __ addl(result, remainder); |
| 1268 __ bind(&done); | 1268 __ bind(&done); |
| 1269 } | 1269 } |
| 1270 | 1270 |
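The branch-free tail above converts `idivl`'s truncating quotient into a flooring one: when the remainder is non-zero and has the opposite sign of the divisor, the quotient is decremented. In scalar form (helper name illustrative; divide-by-zero and kMinInt / -1 are assumed already handled, as above):

```cpp
#include <cstdint>

int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;   // idivl truncates toward zero
  int32_t remainder = dividend % divisor;
  if (remainder != 0) {
    // remainder ^ divisor is negative exactly when the signs differ, so an
    // arithmetic shift by 31 yields -1 (decrement) or 0 (no change).
    quotient += (remainder ^ divisor) >> 31;
  }
  return quotient;
}
```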
| 1271 | 1271 |
| 1272 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1272 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1273 Register dividend = ToRegister(instr->dividend()); | 1273 Register dividend = ToRegister(instr->dividend()); |
| 1274 int32_t divisor = instr->divisor(); | 1274 int32_t divisor = instr->divisor(); |
| 1275 Register result = ToRegister(instr->result()); | 1275 Register result = ToRegister(instr->result()); |
| 1276 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1276 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1277 DCHECK(!result.is(dividend)); | 1277 DCHECK(!result.is(dividend)); |
| 1278 | 1278 |
| 1279 // Check for (0 / -x) that will produce negative zero. | 1279 // Check for (0 / -x) that will produce negative zero. |
| 1280 HDiv* hdiv = instr->hydrogen(); | 1280 HDiv* hdiv = instr->hydrogen(); |
| 1281 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1281 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1282 __ testl(dividend, dividend); | 1282 __ testl(dividend, dividend); |
| 1283 DeoptimizeIf(zero, instr, "minus zero"); | 1283 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1284 } | 1284 } |
| 1285 // Check for (kMinInt / -1). | 1285 // Check for (kMinInt / -1). |
| 1286 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1286 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1287 __ cmpl(dividend, Immediate(kMinInt)); | 1287 __ cmpl(dividend, Immediate(kMinInt)); |
| 1288 DeoptimizeIf(zero, instr, "overflow"); | 1288 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow); |
| 1289 } | 1289 } |
| 1290 // Deoptimize if remainder will not be 0. | 1290 // Deoptimize if remainder will not be 0. |
| 1291 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1291 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1292 divisor != 1 && divisor != -1) { | 1292 divisor != 1 && divisor != -1) { |
| 1293 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1293 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1294 __ testl(dividend, Immediate(mask)); | 1294 __ testl(dividend, Immediate(mask)); |
| 1295 DeoptimizeIf(not_zero, instr, "lost precision"); | 1295 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision); |
| 1296 } | 1296 } |
| 1297 __ Move(result, dividend); | 1297 __ Move(result, dividend); |
| 1298 int32_t shift = WhichPowerOf2Abs(divisor); | 1298 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1299 if (shift > 0) { | 1299 if (shift > 0) { |
| 1300 // The arithmetic shift is always OK, the 'if' is an optimization only. | 1300 // The arithmetic shift is always OK, the 'if' is an optimization only. |
| 1301 if (shift > 1) __ sarl(result, Immediate(31)); | 1301 if (shift > 1) __ sarl(result, Immediate(31)); |
| 1302 __ shrl(result, Immediate(32 - shift)); | 1302 __ shrl(result, Immediate(32 - shift)); |
| 1303 __ addl(result, dividend); | 1303 __ addl(result, dividend); |
| 1304 __ sarl(result, Immediate(shift)); | 1304 __ sarl(result, Immediate(shift)); |
| 1305 } | 1305 } |
| 1306 if (divisor < 0) __ negl(result); | 1306 if (divisor < 0) __ negl(result); |
| 1307 } | 1307 } |
| 1308 | 1308 |
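The shift sequence in `DoDivByPowerOf2I` is the usual bias trick for truncating signed division by a power of two: add `2^shift - 1` to negative dividends so the arithmetic shift rounds toward zero, then fix the sign for negative divisors. A scalar sketch (helper name illustrative; assumes an arithmetic right shift, matching `sarl`):

```cpp
#include <cstdint>

// divisor == +/-(1 << shift); kMinInt / -1 is the kOverflow deopt case above
// and is excluded here.
int32_t DivByPowerOf2(int32_t dividend, int32_t divisor, int32_t shift) {
  // Bias negative dividends by 2^shift - 1 so the shift truncates toward zero
  // (a plain arithmetic shift would round toward negative infinity).
  int32_t bias = dividend < 0 ? static_cast<int32_t>((1u << shift) - 1) : 0;
  int32_t result = (dividend + bias) >> shift;
  return divisor < 0 ? -result : result;
}
```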
| 1309 | 1309 |
| 1310 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1310 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1311 Register dividend = ToRegister(instr->dividend()); | 1311 Register dividend = ToRegister(instr->dividend()); |
| 1312 int32_t divisor = instr->divisor(); | 1312 int32_t divisor = instr->divisor(); |
| 1313 DCHECK(ToRegister(instr->result()).is(rdx)); | 1313 DCHECK(ToRegister(instr->result()).is(rdx)); |
| 1314 | 1314 |
| 1315 if (divisor == 0) { | 1315 if (divisor == 0) { |
| 1316 DeoptimizeIf(no_condition, instr, "division by zero"); | 1316 DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); |
| 1317 return; | 1317 return; |
| 1318 } | 1318 } |
| 1319 | 1319 |
| 1320 // Check for (0 / -x) that will produce negative zero. | 1320 // Check for (0 / -x) that will produce negative zero. |
| 1321 HDiv* hdiv = instr->hydrogen(); | 1321 HDiv* hdiv = instr->hydrogen(); |
| 1322 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1322 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1323 __ testl(dividend, dividend); | 1323 __ testl(dividend, dividend); |
| 1324 DeoptimizeIf(zero, instr, "minus zero"); | 1324 DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero); |
| 1325 } | 1325 } |
| 1326 | 1326 |
| 1327 __ TruncatingDiv(dividend, Abs(divisor)); | 1327 __ TruncatingDiv(dividend, Abs(divisor)); |
| 1328 if (divisor < 0) __ negl(rdx); | 1328 if (divisor < 0) __ negl(rdx); |
| 1329 | 1329 |
| 1330 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1330 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1331 __ movl(rax, rdx); | 1331 __ movl(rax, rdx); |
| 1332 __ imull(rax, rax, Immediate(divisor)); | 1332 __ imull(rax, rax, Immediate(divisor)); |
| 1333 __ subl(rax, dividend); | 1333 __ subl(rax, dividend); |
| 1334 DeoptimizeIf(not_equal, instr, "lost precision"); | 1334 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); |
| 1335 } | 1335 } |
| 1336 } | 1336 } |
| 1337 | 1337 |
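When not every use truncates, `DoDivByConstI` verifies the division was exact by multiplying the quotient back and comparing against the dividend. In scalar form (helper name illustrative; kMinInt / -1 excluded):

```cpp
#include <cstdint>

int32_t DivByConstChecked(int32_t dividend, int32_t divisor) {  // divisor != 0
  int32_t quotient = dividend / divisor;   // TruncatingDiv plus the negl sign fix
  if (quotient * divisor != dividend) {
    // Non-zero remainder: the emitted code deopts with Deoptimizer::kLostPrecision.
  }
  return quotient;
}
```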
| 1338 | 1338 |
| 1339 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1339 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1340 void LCodeGen::DoDivI(LDivI* instr) { | 1340 void LCodeGen::DoDivI(LDivI* instr) { |
| 1341 HBinaryOperation* hdiv = instr->hydrogen(); | 1341 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1342 Register dividend = ToRegister(instr->dividend()); | 1342 Register dividend = ToRegister(instr->dividend()); |
| 1343 Register divisor = ToRegister(instr->divisor()); | 1343 Register divisor = ToRegister(instr->divisor()); |
| 1344 Register remainder = ToRegister(instr->temp()); | 1344 Register remainder = ToRegister(instr->temp()); |
| 1345 DCHECK(dividend.is(rax)); | 1345 DCHECK(dividend.is(rax)); |
| 1346 DCHECK(remainder.is(rdx)); | 1346 DCHECK(remainder.is(rdx)); |
| 1347 DCHECK(ToRegister(instr->result()).is(rax)); | 1347 DCHECK(ToRegister(instr->result()).is(rax)); |
| 1348 DCHECK(!divisor.is(rax)); | 1348 DCHECK(!divisor.is(rax)); |
| 1349 DCHECK(!divisor.is(rdx)); | 1349 DCHECK(!divisor.is(rdx)); |
| 1350 | 1350 |
| 1351 // Check for x / 0. | 1351 // Check for x / 0. |
| 1352 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1352 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1353 __ testl(divisor, divisor); | 1353 __ testl(divisor, divisor); |
| 1354 DeoptimizeIf(zero, instr, "division by zero"); | 1354 DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero); |
| 1355 } | 1355 } |
| 1356 | 1356 |
| 1357 // Check for (0 / -x) that will produce negative zero. | 1357 // Check for (0 / -x) that will produce negative zero. |
| 1358 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1358 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1359 Label dividend_not_zero; | 1359 Label dividend_not_zero; |
| 1360 __ testl(dividend, dividend); | 1360 __ testl(dividend, dividend); |
| 1361 __ j(not_zero, &dividend_not_zero, Label::kNear); | 1361 __ j(not_zero, &dividend_not_zero, Label::kNear); |
| 1362 __ testl(divisor, divisor); | 1362 __ testl(divisor, divisor); |
| 1363 DeoptimizeIf(sign, instr, "minus zero"); | 1363 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
| 1364 __ bind(&dividend_not_zero); | 1364 __ bind(&dividend_not_zero); |
| 1365 } | 1365 } |
| 1366 | 1366 |
| 1367 // Check for (kMinInt / -1). | 1367 // Check for (kMinInt / -1). |
| 1368 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | 1368 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
| 1369 Label dividend_not_min_int; | 1369 Label dividend_not_min_int; |
| 1370 __ cmpl(dividend, Immediate(kMinInt)); | 1370 __ cmpl(dividend, Immediate(kMinInt)); |
| 1371 __ j(not_zero, &dividend_not_min_int, Label::kNear); | 1371 __ j(not_zero, &dividend_not_min_int, Label::kNear); |
| 1372 __ cmpl(divisor, Immediate(-1)); | 1372 __ cmpl(divisor, Immediate(-1)); |
| 1373 DeoptimizeIf(zero, instr, "overflow"); | 1373 DeoptimizeIf(zero, instr, Deoptimizer::kOverflow); |
| 1374 __ bind(&dividend_not_min_int); | 1374 __ bind(&dividend_not_min_int); |
| 1375 } | 1375 } |
| 1376 | 1376 |
| 1377 // Sign extend to rdx (= remainder). | 1377 // Sign extend to rdx (= remainder). |
| 1378 __ cdq(); | 1378 __ cdq(); |
| 1379 __ idivl(divisor); | 1379 __ idivl(divisor); |
| 1380 | 1380 |
| 1381 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1381 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1382 // Deoptimize if remainder is not 0. | 1382 // Deoptimize if remainder is not 0. |
| 1383 __ testl(remainder, remainder); | 1383 __ testl(remainder, remainder); |
| 1384 DeoptimizeIf(not_zero, instr, "lost precision"); | 1384 DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision); |
| 1385 } | 1385 } |
| 1386 } | 1386 } |
| 1387 | 1387 |
| 1388 | 1388 |
| 1389 void LCodeGen::DoMulI(LMulI* instr) { | 1389 void LCodeGen::DoMulI(LMulI* instr) { |
| 1390 Register left = ToRegister(instr->left()); | 1390 Register left = ToRegister(instr->left()); |
| 1391 LOperand* right = instr->right(); | 1391 LOperand* right = instr->right(); |
| 1392 | 1392 |
| 1393 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1393 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1394 if (instr->hydrogen_value()->representation().IsSmi()) { | 1394 if (instr->hydrogen_value()->representation().IsSmi()) { |
| (...skipping 56 matching lines...) |
| 1451 } else { | 1451 } else { |
| 1452 if (instr->hydrogen_value()->representation().IsSmi()) { | 1452 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1453 __ SmiToInteger64(left, left); | 1453 __ SmiToInteger64(left, left); |
| 1454 __ imulp(left, ToRegister(right)); | 1454 __ imulp(left, ToRegister(right)); |
| 1455 } else { | 1455 } else { |
| 1456 __ imull(left, ToRegister(right)); | 1456 __ imull(left, ToRegister(right)); |
| 1457 } | 1457 } |
| 1458 } | 1458 } |
| 1459 | 1459 |
| 1460 if (can_overflow) { | 1460 if (can_overflow) { |
| 1461 DeoptimizeIf(overflow, instr, "overflow"); | 1461 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1462 } | 1462 } |
| 1463 | 1463 |
| 1464 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1464 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1465 // Bail out if the result is supposed to be negative zero. | 1465 // Bail out if the result is supposed to be negative zero. |
| 1466 Label done; | 1466 Label done; |
| 1467 if (instr->hydrogen_value()->representation().IsSmi()) { | 1467 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1468 __ testp(left, left); | 1468 __ testp(left, left); |
| 1469 } else { | 1469 } else { |
| 1470 __ testl(left, left); | 1470 __ testl(left, left); |
| 1471 } | 1471 } |
| 1472 __ j(not_zero, &done, Label::kNear); | 1472 __ j(not_zero, &done, Label::kNear); |
| 1473 if (right->IsConstantOperand()) { | 1473 if (right->IsConstantOperand()) { |
| 1474 // Constant can't be represented as 32-bit Smi due to immediate size | 1474 // Constant can't be represented as 32-bit Smi due to immediate size |
| 1475 // limit. | 1475 // limit. |
| 1476 DCHECK(SmiValuesAre32Bits() | 1476 DCHECK(SmiValuesAre32Bits() |
| 1477 ? !instr->hydrogen_value()->representation().IsSmi() | 1477 ? !instr->hydrogen_value()->representation().IsSmi() |
| 1478 : SmiValuesAre31Bits()); | 1478 : SmiValuesAre31Bits()); |
| 1479 if (ToInteger32(LConstantOperand::cast(right)) < 0) { | 1479 if (ToInteger32(LConstantOperand::cast(right)) < 0) { |
| 1480 DeoptimizeIf(no_condition, instr, "minus zero"); | 1480 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); |
| 1481 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { | 1481 } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { |
| 1482 __ cmpl(kScratchRegister, Immediate(0)); | 1482 __ cmpl(kScratchRegister, Immediate(0)); |
| 1483 DeoptimizeIf(less, instr, "minus zero"); | 1483 DeoptimizeIf(less, instr, Deoptimizer::kMinusZero); |
| 1484 } | 1484 } |
| 1485 } else if (right->IsStackSlot()) { | 1485 } else if (right->IsStackSlot()) { |
| 1486 if (instr->hydrogen_value()->representation().IsSmi()) { | 1486 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1487 __ orp(kScratchRegister, ToOperand(right)); | 1487 __ orp(kScratchRegister, ToOperand(right)); |
| 1488 } else { | 1488 } else { |
| 1489 __ orl(kScratchRegister, ToOperand(right)); | 1489 __ orl(kScratchRegister, ToOperand(right)); |
| 1490 } | 1490 } |
| 1491 DeoptimizeIf(sign, instr, "minus zero"); | 1491 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
| 1492 } else { | 1492 } else { |
| 1493 // Test the non-zero operand for negative sign. | 1493 // Test the non-zero operand for negative sign. |
| 1494 if (instr->hydrogen_value()->representation().IsSmi()) { | 1494 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1495 __ orp(kScratchRegister, ToRegister(right)); | 1495 __ orp(kScratchRegister, ToRegister(right)); |
| 1496 } else { | 1496 } else { |
| 1497 __ orl(kScratchRegister, ToRegister(right)); | 1497 __ orl(kScratchRegister, ToRegister(right)); |
| 1498 } | 1498 } |
| 1499 DeoptimizeIf(sign, instr, "minus zero"); | 1499 DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero); |
| 1500 } | 1500 } |
| 1501 __ bind(&done); | 1501 __ bind(&done); |
| 1502 } | 1502 } |
| 1503 } | 1503 } |
| 1504 | 1504 |
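The zero-product handling in `DoMulI` exists because in JS `0 * x` is `-0` whenever the other operand is negative, and `-0` is not representable as an int32. A sketch of the bailout condition, assuming `saved_left` is the pre-multiply copy of the left operand that the elided code above keeps in `kScratchRegister` (helper name illustrative):

```cpp
#include <cstdint>

bool NeedsMinusZeroDeopt(int32_t product, int32_t saved_left, int32_t right) {
  // A zero product is -0 exactly when one of the factors was negative;
  // OR-ing the operands and testing the sign bit checks that in one step.
  return product == 0 && (saved_left | right) < 0;
}
```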
| 1505 | 1505 |
| 1506 void LCodeGen::DoBitI(LBitI* instr) { | 1506 void LCodeGen::DoBitI(LBitI* instr) { |
| 1507 LOperand* left = instr->left(); | 1507 LOperand* left = instr->left(); |
| 1508 LOperand* right = instr->right(); | 1508 LOperand* right = instr->right(); |
| 1509 DCHECK(left->Equals(instr->result())); | 1509 DCHECK(left->Equals(instr->result())); |
| (...skipping 92 matching lines...) |
| 1602 case Token::ROR: | 1602 case Token::ROR: |
| 1603 __ rorl_cl(ToRegister(left)); | 1603 __ rorl_cl(ToRegister(left)); |
| 1604 break; | 1604 break; |
| 1605 case Token::SAR: | 1605 case Token::SAR: |
| 1606 __ sarl_cl(ToRegister(left)); | 1606 __ sarl_cl(ToRegister(left)); |
| 1607 break; | 1607 break; |
| 1608 case Token::SHR: | 1608 case Token::SHR: |
| 1609 __ shrl_cl(ToRegister(left)); | 1609 __ shrl_cl(ToRegister(left)); |
| 1610 if (instr->can_deopt()) { | 1610 if (instr->can_deopt()) { |
| 1611 __ testl(ToRegister(left), ToRegister(left)); | 1611 __ testl(ToRegister(left), ToRegister(left)); |
| 1612 DeoptimizeIf(negative, instr, "negative value"); | 1612 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue); |
| 1613 } | 1613 } |
| 1614 break; | 1614 break; |
| 1615 case Token::SHL: | 1615 case Token::SHL: |
| 1616 __ shll_cl(ToRegister(left)); | 1616 __ shll_cl(ToRegister(left)); |
| 1617 break; | 1617 break; |
| 1618 default: | 1618 default: |
| 1619 UNREACHABLE(); | 1619 UNREACHABLE(); |
| 1620 break; | 1620 break; |
| 1621 } | 1621 } |
| 1622 } else { | 1622 } else { |
| 1623 int32_t value = ToInteger32(LConstantOperand::cast(right)); | 1623 int32_t value = ToInteger32(LConstantOperand::cast(right)); |
| 1624 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); | 1624 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); |
| 1625 switch (instr->op()) { | 1625 switch (instr->op()) { |
| 1626 case Token::ROR: | 1626 case Token::ROR: |
| 1627 if (shift_count != 0) { | 1627 if (shift_count != 0) { |
| 1628 __ rorl(ToRegister(left), Immediate(shift_count)); | 1628 __ rorl(ToRegister(left), Immediate(shift_count)); |
| 1629 } | 1629 } |
| 1630 break; | 1630 break; |
| 1631 case Token::SAR: | 1631 case Token::SAR: |
| 1632 if (shift_count != 0) { | 1632 if (shift_count != 0) { |
| 1633 __ sarl(ToRegister(left), Immediate(shift_count)); | 1633 __ sarl(ToRegister(left), Immediate(shift_count)); |
| 1634 } | 1634 } |
| 1635 break; | 1635 break; |
| 1636 case Token::SHR: | 1636 case Token::SHR: |
| 1637 if (shift_count != 0) { | 1637 if (shift_count != 0) { |
| 1638 __ shrl(ToRegister(left), Immediate(shift_count)); | 1638 __ shrl(ToRegister(left), Immediate(shift_count)); |
| 1639 } else if (instr->can_deopt()) { | 1639 } else if (instr->can_deopt()) { |
| 1640 __ testl(ToRegister(left), ToRegister(left)); | 1640 __ testl(ToRegister(left), ToRegister(left)); |
| 1641 DeoptimizeIf(negative, instr, "negative value"); | 1641 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue); |
| 1642 } | 1642 } |
| 1643 break; | 1643 break; |
| 1644 case Token::SHL: | 1644 case Token::SHL: |
| 1645 if (shift_count != 0) { | 1645 if (shift_count != 0) { |
| 1646 if (instr->hydrogen_value()->representation().IsSmi()) { | 1646 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1647 if (SmiValuesAre32Bits()) { | 1647 if (SmiValuesAre32Bits()) { |
| 1648 __ shlp(ToRegister(left), Immediate(shift_count)); | 1648 __ shlp(ToRegister(left), Immediate(shift_count)); |
| 1649 } else { | 1649 } else { |
| 1650 DCHECK(SmiValuesAre31Bits()); | 1650 DCHECK(SmiValuesAre31Bits()); |
| 1651 if (instr->can_deopt()) { | 1651 if (instr->can_deopt()) { |
| 1652 if (shift_count != 1) { | 1652 if (shift_count != 1) { |
| 1653 __ shll(ToRegister(left), Immediate(shift_count - 1)); | 1653 __ shll(ToRegister(left), Immediate(shift_count - 1)); |
| 1654 } | 1654 } |
| 1655 __ Integer32ToSmi(ToRegister(left), ToRegister(left)); | 1655 __ Integer32ToSmi(ToRegister(left), ToRegister(left)); |
| 1656 DeoptimizeIf(overflow, instr, "overflow"); | 1656 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1657 } else { | 1657 } else { |
| 1658 __ shll(ToRegister(left), Immediate(shift_count)); | 1658 __ shll(ToRegister(left), Immediate(shift_count)); |
| 1659 } | 1659 } |
| 1660 } | 1660 } |
| 1661 } else { | 1661 } else { |
| 1662 __ shll(ToRegister(left), Immediate(shift_count)); | 1662 __ shll(ToRegister(left), Immediate(shift_count)); |
| 1663 } | 1663 } |
| 1664 } | 1664 } |
| 1665 break; | 1665 break; |
| 1666 default: | 1666 default: |
| (...skipping 22 matching lines...) |
| 1689 } | 1689 } |
| 1690 } else { | 1690 } else { |
| 1691 if (instr->hydrogen_value()->representation().IsSmi()) { | 1691 if (instr->hydrogen_value()->representation().IsSmi()) { |
| 1692 __ subp(ToRegister(left), ToOperand(right)); | 1692 __ subp(ToRegister(left), ToOperand(right)); |
| 1693 } else { | 1693 } else { |
| 1694 __ subl(ToRegister(left), ToOperand(right)); | 1694 __ subl(ToRegister(left), ToOperand(right)); |
| 1695 } | 1695 } |
| 1696 } | 1696 } |
| 1697 | 1697 |
| 1698 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1698 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 1699 DeoptimizeIf(overflow, instr, "overflow"); | 1699 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1700 } | 1700 } |
| 1701 } | 1701 } |
| 1702 | 1702 |
| 1703 | 1703 |
| 1704 void LCodeGen::DoConstantI(LConstantI* instr) { | 1704 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 1705 Register dst = ToRegister(instr->result()); | 1705 Register dst = ToRegister(instr->result()); |
| 1706 if (instr->value() == 0) { | 1706 if (instr->value() == 0) { |
| 1707 __ xorl(dst, dst); | 1707 __ xorl(dst, dst); |
| 1708 } else { | 1708 } else { |
| 1709 __ movl(dst, Immediate(instr->value())); | 1709 __ movl(dst, Immediate(instr->value())); |
| (...skipping 32 matching lines...) |
| 1742 | 1742 |
| 1743 void LCodeGen::DoDateField(LDateField* instr) { | 1743 void LCodeGen::DoDateField(LDateField* instr) { |
| 1744 Register object = ToRegister(instr->date()); | 1744 Register object = ToRegister(instr->date()); |
| 1745 Register result = ToRegister(instr->result()); | 1745 Register result = ToRegister(instr->result()); |
| 1746 Smi* index = instr->index(); | 1746 Smi* index = instr->index(); |
| 1747 Label runtime, done, not_date_object; | 1747 Label runtime, done, not_date_object; |
| 1748 DCHECK(object.is(result)); | 1748 DCHECK(object.is(result)); |
| 1749 DCHECK(object.is(rax)); | 1749 DCHECK(object.is(rax)); |
| 1750 | 1750 |
| 1751 Condition cc = masm()->CheckSmi(object); | 1751 Condition cc = masm()->CheckSmi(object); |
| 1752 DeoptimizeIf(cc, instr, "Smi"); | 1752 DeoptimizeIf(cc, instr, Deoptimizer::kSmi); |
| 1753 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister); | 1753 __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister); |
| 1754 DeoptimizeIf(not_equal, instr, "not a date object"); | 1754 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotADateObject); |
| 1755 | 1755 |
| 1756 if (index->value() == 0) { | 1756 if (index->value() == 0) { |
| 1757 __ movp(result, FieldOperand(object, JSDate::kValueOffset)); | 1757 __ movp(result, FieldOperand(object, JSDate::kValueOffset)); |
| 1758 } else { | 1758 } else { |
| 1759 if (index->value() < JSDate::kFirstUncachedField) { | 1759 if (index->value() < JSDate::kFirstUncachedField) { |
| 1760 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1760 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| 1761 Operand stamp_operand = __ ExternalOperand(stamp); | 1761 Operand stamp_operand = __ ExternalOperand(stamp); |
| 1762 __ movp(kScratchRegister, stamp_operand); | 1762 __ movp(kScratchRegister, stamp_operand); |
| 1763 __ cmpp(kScratchRegister, FieldOperand(object, | 1763 __ cmpp(kScratchRegister, FieldOperand(object, |
| 1764 JSDate::kCacheStampOffset)); | 1764 JSDate::kCacheStampOffset)); |
| (...skipping 143 matching lines...) |
| 1908 __ addl(ToRegister(left), ToRegister(right)); | 1908 __ addl(ToRegister(left), ToRegister(right)); |
| 1909 } | 1909 } |
| 1910 } else { | 1910 } else { |
| 1911 if (is_p) { | 1911 if (is_p) { |
| 1912 __ addp(ToRegister(left), ToOperand(right)); | 1912 __ addp(ToRegister(left), ToOperand(right)); |
| 1913 } else { | 1913 } else { |
| 1914 __ addl(ToRegister(left), ToOperand(right)); | 1914 __ addl(ToRegister(left), ToOperand(right)); |
| 1915 } | 1915 } |
| 1916 } | 1916 } |
| 1917 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1917 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 1918 DeoptimizeIf(overflow, instr, "overflow"); | 1918 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 1919 } | 1919 } |
| 1920 } | 1920 } |
| 1921 } | 1921 } |
| 1922 | 1922 |
| 1923 | 1923 |
| 1924 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1924 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 1925 LOperand* left = instr->left(); | 1925 LOperand* left = instr->left(); |
| 1926 LOperand* right = instr->right(); | 1926 LOperand* right = instr->right(); |
| 1927 DCHECK(left->Equals(instr->result())); | 1927 DCHECK(left->Equals(instr->result())); |
| 1928 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1928 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| (...skipping 246 matching lines...) |
| 2175 } | 2175 } |
| 2176 | 2176 |
| 2177 if (expected.Contains(ToBooleanStub::SMI)) { | 2177 if (expected.Contains(ToBooleanStub::SMI)) { |
| 2178 // Smis: 0 -> false, all other -> true. | 2178 // Smis: 0 -> false, all other -> true. |
| 2179 __ Cmp(reg, Smi::FromInt(0)); | 2179 __ Cmp(reg, Smi::FromInt(0)); |
| 2180 __ j(equal, instr->FalseLabel(chunk_)); | 2180 __ j(equal, instr->FalseLabel(chunk_)); |
| 2181 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2181 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2182 } else if (expected.NeedsMap()) { | 2182 } else if (expected.NeedsMap()) { |
| 2183 // If we need a map later and have a Smi -> deopt. | 2183 // If we need a map later and have a Smi -> deopt. |
| 2184 __ testb(reg, Immediate(kSmiTagMask)); | 2184 __ testb(reg, Immediate(kSmiTagMask)); |
| 2185 DeoptimizeIf(zero, instr, "Smi"); | 2185 DeoptimizeIf(zero, instr, Deoptimizer::kSmi); |
| 2186 } | 2186 } |
| 2187 | 2187 |
| 2188 const Register map = kScratchRegister; | 2188 const Register map = kScratchRegister; |
| 2189 if (expected.NeedsMap()) { | 2189 if (expected.NeedsMap()) { |
| 2190 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset)); | 2190 __ movp(map, FieldOperand(reg, HeapObject::kMapOffset)); |
| 2191 | 2191 |
| 2192 if (expected.CanBeUndetectable()) { | 2192 if (expected.CanBeUndetectable()) { |
| 2193 // Undetectable -> false. | 2193 // Undetectable -> false. |
| 2194 __ testb(FieldOperand(map, Map::kBitFieldOffset), | 2194 __ testb(FieldOperand(map, Map::kBitFieldOffset), |
| 2195 Immediate(1 << Map::kIsUndetectable)); | 2195 Immediate(1 << Map::kIsUndetectable)); |
| (...skipping 33 matching lines...) |
| 2229 __ xorps(xmm_scratch, xmm_scratch); | 2229 __ xorps(xmm_scratch, xmm_scratch); |
| 2230 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | 2230 __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); |
| 2231 __ j(zero, instr->FalseLabel(chunk_)); | 2231 __ j(zero, instr->FalseLabel(chunk_)); |
| 2232 __ jmp(instr->TrueLabel(chunk_)); | 2232 __ jmp(instr->TrueLabel(chunk_)); |
| 2233 __ bind(&not_heap_number); | 2233 __ bind(&not_heap_number); |
| 2234 } | 2234 } |
| 2235 | 2235 |
| 2236 if (!expected.IsGeneric()) { | 2236 if (!expected.IsGeneric()) { |
| 2237 // We've seen something for the first time -> deopt. | 2237 // We've seen something for the first time -> deopt. |
| 2238 // This can only happen if we are not generic already. | 2238 // This can only happen if we are not generic already. |
| 2239 DeoptimizeIf(no_condition, instr, "unexpected object"); | 2239 DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject); |
| 2240 } | 2240 } |
| 2241 } | 2241 } |
| 2242 } | 2242 } |
| 2243 } | 2243 } |
| 2244 | 2244 |
| 2245 | 2245 |
| 2246 void LCodeGen::EmitGoto(int block) { | 2246 void LCodeGen::EmitGoto(int block) { |
| 2247 if (!IsNextEmittedBlock(block)) { | 2247 if (!IsNextEmittedBlock(block)) { |
| 2248 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); | 2248 __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); |
| 2249 } | 2249 } |
| (...skipping 596 matching lines...) |
| 2846 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); | 2846 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); |
| 2847 } | 2847 } |
| 2848 } | 2848 } |
| 2849 | 2849 |
| 2850 | 2850 |
| 2851 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2851 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 2852 Register result = ToRegister(instr->result()); | 2852 Register result = ToRegister(instr->result()); |
| 2853 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle()); | 2853 __ LoadGlobalCell(result, instr->hydrogen()->cell().handle()); |
| 2854 if (instr->hydrogen()->RequiresHoleCheck()) { | 2854 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2855 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); | 2855 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
| 2856 DeoptimizeIf(equal, instr, "hole"); | 2856 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 2857 } | 2857 } |
| 2858 } | 2858 } |
| 2859 | 2859 |
| 2860 | 2860 |
| 2861 template <class T> | 2861 template <class T> |
| 2862 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 2862 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
| 2863 DCHECK(FLAG_vector_ics); | 2863 DCHECK(FLAG_vector_ics); |
| 2864 Register vector_register = ToRegister(instr->temp_vector()); | 2864 Register vector_register = ToRegister(instr->temp_vector()); |
| 2865 Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 2865 Register slot_register = VectorLoadICDescriptor::SlotRegister(); |
| 2866 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 2866 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); |
| (...skipping 32 matching lines...) |
| 2899 // If the cell we are storing to contains the hole it could have | 2899 // If the cell we are storing to contains the hole it could have |
| 2900 // been deleted from the property dictionary. In that case, we need | 2900 // been deleted from the property dictionary. In that case, we need |
| 2901 // to update the property details in the property dictionary to mark | 2901 // to update the property details in the property dictionary to mark |
| 2902 // it as no longer deleted. We deoptimize in that case. | 2902 // it as no longer deleted. We deoptimize in that case. |
| 2903 if (instr->hydrogen()->RequiresHoleCheck()) { | 2903 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2904 // We have a temp because CompareRoot might clobber kScratchRegister. | 2904 // We have a temp because CompareRoot might clobber kScratchRegister. |
| 2905 Register cell = ToRegister(instr->temp()); | 2905 Register cell = ToRegister(instr->temp()); |
| 2906 DCHECK(!value.is(cell)); | 2906 DCHECK(!value.is(cell)); |
| 2907 __ Move(cell, cell_handle, RelocInfo::CELL); | 2907 __ Move(cell, cell_handle, RelocInfo::CELL); |
| 2908 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex); | 2908 __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex); |
| 2909 DeoptimizeIf(equal, instr, "hole"); | 2909 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 2910 // Store the value. | 2910 // Store the value. |
| 2911 __ movp(Operand(cell, 0), value); | 2911 __ movp(Operand(cell, 0), value); |
| 2912 } else { | 2912 } else { |
| 2913 // Store the value. | 2913 // Store the value. |
| 2914 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL); | 2914 __ Move(kScratchRegister, cell_handle, RelocInfo::CELL); |
| 2915 __ movp(Operand(kScratchRegister, 0), value); | 2915 __ movp(Operand(kScratchRegister, 0), value); |
| 2916 } | 2916 } |
| 2917 // Cells are always rescanned, so no write barrier here. | 2917 // Cells are always rescanned, so no write barrier here. |
| 2918 } | 2918 } |
| 2919 | 2919 |
| 2920 | 2920 |
| 2921 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 2921 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2922 Register context = ToRegister(instr->context()); | 2922 Register context = ToRegister(instr->context()); |
| 2923 Register result = ToRegister(instr->result()); | 2923 Register result = ToRegister(instr->result()); |
| 2924 __ movp(result, ContextOperand(context, instr->slot_index())); | 2924 __ movp(result, ContextOperand(context, instr->slot_index())); |
| 2925 if (instr->hydrogen()->RequiresHoleCheck()) { | 2925 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2926 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); | 2926 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
| 2927 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2927 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2928 DeoptimizeIf(equal, instr, "hole"); | 2928 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 2929 } else { | 2929 } else { |
| 2930 Label is_not_hole; | 2930 Label is_not_hole; |
| 2931 __ j(not_equal, &is_not_hole, Label::kNear); | 2931 __ j(not_equal, &is_not_hole, Label::kNear); |
| 2932 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | 2932 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 2933 __ bind(&is_not_hole); | 2933 __ bind(&is_not_hole); |
| 2934 } | 2934 } |
| 2935 } | 2935 } |
| 2936 } | 2936 } |
| 2937 | 2937 |
| 2938 | 2938 |
| 2939 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 2939 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 2940 Register context = ToRegister(instr->context()); | 2940 Register context = ToRegister(instr->context()); |
| 2941 Register value = ToRegister(instr->value()); | 2941 Register value = ToRegister(instr->value()); |
| 2942 | 2942 |
| 2943 Operand target = ContextOperand(context, instr->slot_index()); | 2943 Operand target = ContextOperand(context, instr->slot_index()); |
| 2944 | 2944 |
| 2945 Label skip_assignment; | 2945 Label skip_assignment; |
| 2946 if (instr->hydrogen()->RequiresHoleCheck()) { | 2946 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2947 __ CompareRoot(target, Heap::kTheHoleValueRootIndex); | 2947 __ CompareRoot(target, Heap::kTheHoleValueRootIndex); |
| 2948 if (instr->hydrogen()->DeoptimizesOnHole()) { | 2948 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 2949 DeoptimizeIf(equal, instr, "hole"); | 2949 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 2950 } else { | 2950 } else { |
| 2951 __ j(not_equal, &skip_assignment); | 2951 __ j(not_equal, &skip_assignment); |
| 2952 } | 2952 } |
| 2953 } | 2953 } |
| 2954 __ movp(target, value); | 2954 __ movp(target, value); |
| 2955 | 2955 |
| 2956 if (instr->hydrogen()->NeedsWriteBarrier()) { | 2956 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 2957 SmiCheck check_needed = | 2957 SmiCheck check_needed = |
| 2958 instr->hydrogen()->value()->type().IsHeapObject() | 2958 instr->hydrogen()->value()->type().IsHeapObject() |
| 2959 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 2959 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 79 matching lines...) |
| 3039 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { | 3039 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
| 3040 Register function = ToRegister(instr->function()); | 3040 Register function = ToRegister(instr->function()); |
| 3041 Register result = ToRegister(instr->result()); | 3041 Register result = ToRegister(instr->result()); |
| 3042 | 3042 |
| 3043 // Get the prototype or initial map from the function. | 3043 // Get the prototype or initial map from the function. |
| 3044 __ movp(result, | 3044 __ movp(result, |
| 3045 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3045 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 3046 | 3046 |
| 3047 // Check that the function has a prototype or an initial map. | 3047 // Check that the function has a prototype or an initial map. |
| 3048 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); | 3048 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
| 3049 DeoptimizeIf(equal, instr, "hole"); | 3049 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 3050 | 3050 |
| 3051 // If the function does not have an initial map, we're done. | 3051 // If the function does not have an initial map, we're done. |
| 3052 Label done; | 3052 Label done; |
| 3053 __ CmpObjectType(result, MAP_TYPE, kScratchRegister); | 3053 __ CmpObjectType(result, MAP_TYPE, kScratchRegister); |
| 3054 __ j(not_equal, &done, Label::kNear); | 3054 __ j(not_equal, &done, Label::kNear); |
| 3055 | 3055 |
| 3056 // Get the prototype from the initial map. | 3056 // Get the prototype from the initial map. |
| 3057 __ movp(result, FieldOperand(result, Map::kPrototypeOffset)); | 3057 __ movp(result, FieldOperand(result, Map::kPrototypeOffset)); |
| 3058 | 3058 |
| 3059 // All done. | 3059 // All done. |
| (...skipping 91 matching lines...) |
| 3151 break; | 3151 break; |
| 3152 case EXTERNAL_INT32_ELEMENTS: | 3152 case EXTERNAL_INT32_ELEMENTS: |
| 3153 case INT32_ELEMENTS: | 3153 case INT32_ELEMENTS: |
| 3154 __ movl(result, operand); | 3154 __ movl(result, operand); |
| 3155 break; | 3155 break; |
| 3156 case EXTERNAL_UINT32_ELEMENTS: | 3156 case EXTERNAL_UINT32_ELEMENTS: |
| 3157 case UINT32_ELEMENTS: | 3157 case UINT32_ELEMENTS: |
| 3158 __ movl(result, operand); | 3158 __ movl(result, operand); |
| 3159 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3159 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 3160 __ testl(result, result); | 3160 __ testl(result, result); |
| 3161 DeoptimizeIf(negative, instr, "negative value"); | 3161 DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue); |
| 3162 } | 3162 } |
| 3163 break; | 3163 break; |
| 3164 case EXTERNAL_FLOAT32_ELEMENTS: | 3164 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3165 case EXTERNAL_FLOAT64_ELEMENTS: | 3165 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3166 case FLOAT32_ELEMENTS: | 3166 case FLOAT32_ELEMENTS: |
| 3167 case FLOAT64_ELEMENTS: | 3167 case FLOAT64_ELEMENTS: |
| 3168 case FAST_ELEMENTS: | 3168 case FAST_ELEMENTS: |
| 3169 case FAST_SMI_ELEMENTS: | 3169 case FAST_SMI_ELEMENTS: |
| 3170 case FAST_DOUBLE_ELEMENTS: | 3170 case FAST_DOUBLE_ELEMENTS: |
| 3171 case FAST_HOLEY_ELEMENTS: | 3171 case FAST_HOLEY_ELEMENTS: |
| (...skipping 18 matching lines...) |
| 3190 __ movsxlq(ToRegister(key), ToRegister(key)); | 3190 __ movsxlq(ToRegister(key), ToRegister(key)); |
| 3191 } | 3191 } |
| 3192 if (instr->hydrogen()->RequiresHoleCheck()) { | 3192 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3193 Operand hole_check_operand = BuildFastArrayOperand( | 3193 Operand hole_check_operand = BuildFastArrayOperand( |
| 3194 instr->elements(), | 3194 instr->elements(), |
| 3195 key, | 3195 key, |
| 3196 instr->hydrogen()->key()->representation(), | 3196 instr->hydrogen()->key()->representation(), |
| 3197 FAST_DOUBLE_ELEMENTS, | 3197 FAST_DOUBLE_ELEMENTS, |
| 3198 instr->base_offset() + sizeof(kHoleNanLower32)); | 3198 instr->base_offset() + sizeof(kHoleNanLower32)); |
| 3199 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); | 3199 __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); |
| 3200 DeoptimizeIf(equal, instr, "hole"); | 3200 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 3201 } | 3201 } |
| 3202 | 3202 |
| 3203 Operand double_load_operand = BuildFastArrayOperand( | 3203 Operand double_load_operand = BuildFastArrayOperand( |
| 3204 instr->elements(), | 3204 instr->elements(), |
| 3205 key, | 3205 key, |
| 3206 instr->hydrogen()->key()->representation(), | 3206 instr->hydrogen()->key()->representation(), |
| 3207 FAST_DOUBLE_ELEMENTS, | 3207 FAST_DOUBLE_ELEMENTS, |
| 3208 instr->base_offset()); | 3208 instr->base_offset()); |
| 3209 __ movsd(result, double_load_operand); | 3209 __ movsd(result, double_load_operand); |
| 3210 } | 3210 } |
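The hole check in the double-array load above compares only the upper 32 bits of the element against kHoleNanUpper32, since the hole is a single, fixed NaN bit pattern that ordinary arithmetic never produces. A minimal stand-alone sketch of the same test, with the constant treated as an assumption rather than taken from the V8 headers:

    // Sketch of the hole-NaN test, assuming little-endian x64; the constant
    // is a placeholder, not the value from the V8 sources.
    #include <cstdint>
    #include <cstring>

    const uint32_t kAssumedHoleNanUpper32 = 0x7FF7FFFF;  // assumption

    bool IsHoleNan(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // type-pun without UB
      return static_cast<uint32_t>(bits >> 32) == kAssumedHoleNanUpper32;
    }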
| (...skipping 36 matching lines...) |
| 3247 __ Load(result, | 3247 __ Load(result, |
| 3248 BuildFastArrayOperand(instr->elements(), key, | 3248 BuildFastArrayOperand(instr->elements(), key, |
| 3249 instr->hydrogen()->key()->representation(), | 3249 instr->hydrogen()->key()->representation(), |
| 3250 FAST_ELEMENTS, offset), | 3250 FAST_ELEMENTS, offset), |
| 3251 representation); | 3251 representation); |
| 3252 | 3252 |
| 3253 // Check for the hole value. | 3253 // Check for the hole value. |
| 3254 if (requires_hole_check) { | 3254 if (requires_hole_check) { |
| 3255 if (IsFastSmiElementsKind(hinstr->elements_kind())) { | 3255 if (IsFastSmiElementsKind(hinstr->elements_kind())) { |
| 3256 Condition smi = __ CheckSmi(result); | 3256 Condition smi = __ CheckSmi(result); |
| 3257 DeoptimizeIf(NegateCondition(smi), instr, "not a Smi"); | 3257 DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi); |
| 3258 } else { | 3258 } else { |
| 3259 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); | 3259 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
| 3260 DeoptimizeIf(equal, instr, "hole"); | 3260 DeoptimizeIf(equal, instr, Deoptimizer::kHole); |
| 3261 } | 3261 } |
| 3262 } | 3262 } |
| 3263 } | 3263 } |
| 3264 | 3264 |
| 3265 | 3265 |
| 3266 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3266 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 3267 if (instr->is_typed_elements()) { | 3267 if (instr->is_typed_elements()) { |
| 3268 DoLoadKeyedExternalArray(instr); | 3268 DoLoadKeyedExternalArray(instr); |
| 3269 } else if (instr->hydrogen()->representation().IsDouble()) { | 3269 } else if (instr->hydrogen()->representation().IsDouble()) { |
| 3270 DoLoadKeyedFixedDoubleArray(instr); | 3270 DoLoadKeyedFixedDoubleArray(instr); |
| (...skipping 126 matching lines...) |
| 3397 } | 3397 } |
| 3398 | 3398 |
| 3399 // Normal function. Replace undefined or null with global receiver. | 3399 // Normal function. Replace undefined or null with global receiver. |
| 3400 __ CompareRoot(receiver, Heap::kNullValueRootIndex); | 3400 __ CompareRoot(receiver, Heap::kNullValueRootIndex); |
| 3401 __ j(equal, &global_object, Label::kNear); | 3401 __ j(equal, &global_object, Label::kNear); |
| 3402 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); | 3402 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); |
| 3403 __ j(equal, &global_object, Label::kNear); | 3403 __ j(equal, &global_object, Label::kNear); |
| 3404 | 3404 |
| 3405 // The receiver should be a JS object. | 3405 // The receiver should be a JS object. |
| 3406 Condition is_smi = __ CheckSmi(receiver); | 3406 Condition is_smi = __ CheckSmi(receiver); |
| 3407 DeoptimizeIf(is_smi, instr, "Smi"); | 3407 DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi); |
| 3408 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister); | 3408 __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister); |
| 3409 DeoptimizeIf(below, instr, "not a JavaScript object"); | 3409 DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject); |
| 3410 | 3410 |
| 3411 __ jmp(&receiver_ok, Label::kNear); | 3411 __ jmp(&receiver_ok, Label::kNear); |
| 3412 __ bind(&global_object); | 3412 __ bind(&global_object); |
| 3413 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset)); | 3413 __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset)); |
| 3414 __ movp(receiver, | 3414 __ movp(receiver, |
| 3415 Operand(receiver, | 3415 Operand(receiver, |
| 3416 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 3416 Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| 3417 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset)); | 3417 __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset)); |
| 3418 | 3418 |
| 3419 __ bind(&receiver_ok); | 3419 __ bind(&receiver_ok); |
| 3420 } | 3420 } |
| 3421 | 3421 |
| 3422 | 3422 |
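For reference, the receiver fix-up generated above reduces to a three-way decision: null and undefined are replaced by the global proxy, Smis and other non-JSObject values deoptimize, and real JS objects pass through unchanged. A simplified model of that decision (the enum names are illustrative, not V8 types):

    // Rough model of the receiver wrapping branch structure above.
    enum class ReceiverKind { kUndefined, kNull, kSmi, kNonObject, kJSObject };

    enum class ReceiverAction { kUseGlobalProxy, kDeoptimize, kUseAsIs };

    ReceiverAction WrapReceiver(ReceiverKind kind) {
      switch (kind) {
        case ReceiverKind::kUndefined:
        case ReceiverKind::kNull:
          return ReceiverAction::kUseGlobalProxy;
        case ReceiverKind::kSmi:
        case ReceiverKind::kNonObject:
          return ReceiverAction::kDeoptimize;
        case ReceiverKind::kJSObject:
          return ReceiverAction::kUseAsIs;
      }
      return ReceiverAction::kDeoptimize;  // unreachable
    }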
| 3423 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { | 3423 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { |
| 3424 Register receiver = ToRegister(instr->receiver()); | 3424 Register receiver = ToRegister(instr->receiver()); |
| 3425 Register function = ToRegister(instr->function()); | 3425 Register function = ToRegister(instr->function()); |
| 3426 Register length = ToRegister(instr->length()); | 3426 Register length = ToRegister(instr->length()); |
| 3427 Register elements = ToRegister(instr->elements()); | 3427 Register elements = ToRegister(instr->elements()); |
| 3428 DCHECK(receiver.is(rax)); // Used for parameter count. | 3428 DCHECK(receiver.is(rax)); // Used for parameter count. |
| 3429 DCHECK(function.is(rdi)); // Required by InvokeFunction. | 3429 DCHECK(function.is(rdi)); // Required by InvokeFunction. |
| 3430 DCHECK(ToRegister(instr->result()).is(rax)); | 3430 DCHECK(ToRegister(instr->result()).is(rax)); |
| 3431 | 3431 |
| 3432 // Copy the arguments to this function possibly from the | 3432 // Copy the arguments to this function possibly from the |
| 3433 // adaptor frame below it. | 3433 // adaptor frame below it. |
| 3434 const uint32_t kArgumentsLimit = 1 * KB; | 3434 const uint32_t kArgumentsLimit = 1 * KB; |
| 3435 __ cmpp(length, Immediate(kArgumentsLimit)); | 3435 __ cmpp(length, Immediate(kArgumentsLimit)); |
| 3436 DeoptimizeIf(above, instr, "too many arguments"); | 3436 DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments); |
| 3437 | 3437 |
| 3438 __ Push(receiver); | 3438 __ Push(receiver); |
| 3439 __ movp(receiver, length); | 3439 __ movp(receiver, length); |
| 3440 | 3440 |
| 3441 // Loop through the arguments pushing them onto the execution | 3441 // Loop through the arguments pushing them onto the execution |
| 3442 // stack. | 3442 // stack. |
| 3443 Label invoke, loop; | 3443 Label invoke, loop; |
| 3444 // length is a small non-negative integer, due to the test above. | 3444 // length is a small non-negative integer, due to the test above. |
| 3445 __ testl(length, length); | 3445 __ testl(length, length); |
| 3446 __ j(zero, &invoke, Label::kNear); | 3446 __ j(zero, &invoke, Label::kNear); |
| (...skipping 193 matching lines...) |
| 3640 __ Call(target); | 3640 __ Call(target); |
| 3641 } | 3641 } |
| 3642 generator.AfterCall(); | 3642 generator.AfterCall(); |
| 3643 } | 3643 } |
| 3644 | 3644 |
| 3645 | 3645 |
| 3646 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { | 3646 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
| 3647 Register input_reg = ToRegister(instr->value()); | 3647 Register input_reg = ToRegister(instr->value()); |
| 3648 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 3648 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 3649 Heap::kHeapNumberMapRootIndex); | 3649 Heap::kHeapNumberMapRootIndex); |
| 3650 DeoptimizeIf(not_equal, instr, "not a heap number"); | 3650 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 3651 | 3651 |
| 3652 Label slow, allocated, done; | 3652 Label slow, allocated, done; |
| 3653 Register tmp = input_reg.is(rax) ? rcx : rax; | 3653 Register tmp = input_reg.is(rax) ? rcx : rax; |
| 3654 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx; | 3654 Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx; |
| 3655 | 3655 |
| 3656 // Preserve the value of all registers. | 3656 // Preserve the value of all registers. |
| 3657 PushSafepointRegistersScope scope(this); | 3657 PushSafepointRegistersScope scope(this); |
| 3658 | 3658 |
| 3659 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); | 3659 __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| 3660 // Check the sign of the argument. If the argument is positive, just | 3660 // Check the sign of the argument. If the argument is positive, just |
| (...skipping 25 matching lines...) |
| 3686 __ bind(&done); | 3686 __ bind(&done); |
| 3687 } | 3687 } |
| 3688 | 3688 |
| 3689 | 3689 |
| 3690 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3690 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3691 Register input_reg = ToRegister(instr->value()); | 3691 Register input_reg = ToRegister(instr->value()); |
| 3692 __ testl(input_reg, input_reg); | 3692 __ testl(input_reg, input_reg); |
| 3693 Label is_positive; | 3693 Label is_positive; |
| 3694 __ j(not_sign, &is_positive, Label::kNear); | 3694 __ j(not_sign, &is_positive, Label::kNear); |
| 3695 __ negl(input_reg); // Sets flags. | 3695 __ negl(input_reg); // Sets flags. |
| 3696 DeoptimizeIf(negative, instr, "overflow"); | 3696 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow); |
| 3697 __ bind(&is_positive); | 3697 __ bind(&is_positive); |
| 3698 } | 3698 } |
| 3699 | 3699 |
| 3700 | 3700 |
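The integer abs paths above rely on negl/negp leaving the sign flag set when the negated value is still negative, which only happens for the most negative representable value. A portable sketch of the same check, written to avoid the signed overflow the assembly trick exploits:

    // Portable version of abs-with-deopt; the generated code instead negates
    // unconditionally and deoptimizes if the result is still negative.
    #include <cstdint>
    #include <limits>

    bool IntegerAbs(int32_t input, int32_t* result) {
      if (input == std::numeric_limits<int32_t>::min()) {
        return false;  // would deoptimize: |INT32_MIN| does not fit in int32
      }
      *result = input < 0 ? -input : input;
      return true;
    }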
| 3701 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { | 3701 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { |
| 3702 Register input_reg = ToRegister(instr->value()); | 3702 Register input_reg = ToRegister(instr->value()); |
| 3703 __ testp(input_reg, input_reg); | 3703 __ testp(input_reg, input_reg); |
| 3704 Label is_positive; | 3704 Label is_positive; |
| 3705 __ j(not_sign, &is_positive, Label::kNear); | 3705 __ j(not_sign, &is_positive, Label::kNear); |
| 3706 __ negp(input_reg); // Sets flags. | 3706 __ negp(input_reg); // Sets flags. |
| 3707 DeoptimizeIf(negative, instr, "overflow"); | 3707 DeoptimizeIf(negative, instr, Deoptimizer::kOverflow); |
| 3708 __ bind(&is_positive); | 3708 __ bind(&is_positive); |
| 3709 } | 3709 } |
| 3710 | 3710 |
| 3711 | 3711 |
| 3712 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3712 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3713 // Class for deferred case. | 3713 // Class for deferred case. |
| 3714 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3714 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
| 3715 public: | 3715 public: |
| 3716 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3716 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
| 3717 : LDeferredCode(codegen), instr_(instr) { } | 3717 : LDeferredCode(codegen), instr_(instr) { } |
| (...skipping 35 matching lines...) |
| 3753 XMMRegister xmm_scratch = double_scratch0(); | 3753 XMMRegister xmm_scratch = double_scratch0(); |
| 3754 Register output_reg = ToRegister(instr->result()); | 3754 Register output_reg = ToRegister(instr->result()); |
| 3755 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3755 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3756 | 3756 |
| 3757 if (CpuFeatures::IsSupported(SSE4_1)) { | 3757 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 3758 CpuFeatureScope scope(masm(), SSE4_1); | 3758 CpuFeatureScope scope(masm(), SSE4_1); |
| 3759 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3759 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3760 // Deoptimize if minus zero. | 3760 // Deoptimize if minus zero. |
| 3761 __ movq(output_reg, input_reg); | 3761 __ movq(output_reg, input_reg); |
| 3762 __ subq(output_reg, Immediate(1)); | 3762 __ subq(output_reg, Immediate(1)); |
| 3763 DeoptimizeIf(overflow, instr, "minus zero"); | 3763 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero); |
| 3764 } | 3764 } |
| 3765 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); | 3765 __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); |
| 3766 __ cvttsd2si(output_reg, xmm_scratch); | 3766 __ cvttsd2si(output_reg, xmm_scratch); |
| 3767 __ cmpl(output_reg, Immediate(0x1)); | 3767 __ cmpl(output_reg, Immediate(0x1)); |
| 3768 DeoptimizeIf(overflow, instr, "overflow"); | 3768 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3769 } else { | 3769 } else { |
| 3770 Label negative_sign, done; | 3770 Label negative_sign, done; |
| 3771 // Deoptimize on unordered. | 3771 // Deoptimize on unordered. |
| 3772 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | 3772 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
| 3773 __ ucomisd(input_reg, xmm_scratch); | 3773 __ ucomisd(input_reg, xmm_scratch); |
| 3774 DeoptimizeIf(parity_even, instr, "NaN"); | 3774 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); |
| 3775 __ j(below, &negative_sign, Label::kNear); | 3775 __ j(below, &negative_sign, Label::kNear); |
| 3776 | 3776 |
| 3777 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3777 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3778 // Check for negative zero. | 3778 // Check for negative zero. |
| 3779 Label positive_sign; | 3779 Label positive_sign; |
| 3780 __ j(above, &positive_sign, Label::kNear); | 3780 __ j(above, &positive_sign, Label::kNear); |
| 3781 __ movmskpd(output_reg, input_reg); | 3781 __ movmskpd(output_reg, input_reg); |
| 3782 __ testq(output_reg, Immediate(1)); | 3782 __ testq(output_reg, Immediate(1)); |
| 3783 DeoptimizeIf(not_zero, instr, "minus zero"); | 3783 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
| 3784 __ Set(output_reg, 0); | 3784 __ Set(output_reg, 0); |
| 3785 __ jmp(&done); | 3785 __ jmp(&done); |
| 3786 __ bind(&positive_sign); | 3786 __ bind(&positive_sign); |
| 3787 } | 3787 } |
| 3788 | 3788 |
| 3789 // Use truncating instruction (OK because input is positive). | 3789 // Use truncating instruction (OK because input is positive). |
| 3790 __ cvttsd2si(output_reg, input_reg); | 3790 __ cvttsd2si(output_reg, input_reg); |
| 3791 // Overflow is signalled with minint. | 3791 // Overflow is signalled with minint. |
| 3792 __ cmpl(output_reg, Immediate(0x1)); | 3792 __ cmpl(output_reg, Immediate(0x1)); |
| 3793 DeoptimizeIf(overflow, instr, "overflow"); | 3793 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3794 __ jmp(&done, Label::kNear); | 3794 __ jmp(&done, Label::kNear); |
| 3795 | 3795 |
| 3796 // Non-zero negative reaches here. | 3796 // Non-zero negative reaches here. |
| 3797 __ bind(&negative_sign); | 3797 __ bind(&negative_sign); |
| 3798 // Truncate, then compare and compensate. | 3798 // Truncate, then compare and compensate. |
| 3799 __ cvttsd2si(output_reg, input_reg); | 3799 __ cvttsd2si(output_reg, input_reg); |
| 3800 __ Cvtlsi2sd(xmm_scratch, output_reg); | 3800 __ Cvtlsi2sd(xmm_scratch, output_reg); |
| 3801 __ ucomisd(input_reg, xmm_scratch); | 3801 __ ucomisd(input_reg, xmm_scratch); |
| 3802 __ j(equal, &done, Label::kNear); | 3802 __ j(equal, &done, Label::kNear); |
| 3803 __ subl(output_reg, Immediate(1)); | 3803 __ subl(output_reg, Immediate(1)); |
| 3804 DeoptimizeIf(overflow, instr, "overflow"); | 3804 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3805 | 3805 |
| 3806 __ bind(&done); | 3806 __ bind(&done); |
| 3807 } | 3807 } |
| 3808 } | 3808 } |
| 3809 | 3809 |
| 3810 | 3810 |
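In the non-SSE4.1 floor path above, cvttsd2si truncates toward zero and signals failure by producing INT32_MIN; the cmpl(output, 1) plus overflow-flag test catches exactly that sentinel, and negative non-integers are fixed up by subtracting one after a round-trip compare. A scalar sketch of the equivalent logic (the optional -0.0 bailout is omitted here):

    // int32_t cast plays the role of cvttsd2si: round toward zero, with
    // INT32_MIN standing in as the failure sentinel.
    #include <cstdint>

    bool FloorToInt32(double input, int32_t* result) {
      // NaN and out-of-range inputs fail this test (comparisons with NaN are
      // false) and deoptimize.
      if (!(input > -2147483649.0 && input < 2147483648.0)) return false;
      int32_t truncated = static_cast<int32_t>(input);  // rounds toward zero
      if (truncated == INT32_MIN) return false;  // sentinel: also deoptimizes
      if (input < 0.0 && static_cast<double>(truncated) != input) {
        --truncated;  // truncation rounded a negative non-integer up; fix it
      }
      *result = truncated;
      return true;
    }

Using INT32_MIN as the single failure signal is what lets the generated code get by with one compare and an overflow-flag check.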
| 3811 void LCodeGen::DoMathRound(LMathRound* instr) { | 3811 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3812 const XMMRegister xmm_scratch = double_scratch0(); | 3812 const XMMRegister xmm_scratch = double_scratch0(); |
| 3813 Register output_reg = ToRegister(instr->result()); | 3813 Register output_reg = ToRegister(instr->result()); |
| 3814 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3814 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3815 XMMRegister input_temp = ToDoubleRegister(instr->temp()); | 3815 XMMRegister input_temp = ToDoubleRegister(instr->temp()); |
| 3816 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 | 3816 static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 |
| 3817 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 | 3817 static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 |
| 3818 | 3818 |
| 3819 Label done, round_to_zero, below_one_half; | 3819 Label done, round_to_zero, below_one_half; |
| 3820 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 3820 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
| 3821 __ movq(kScratchRegister, one_half); | 3821 __ movq(kScratchRegister, one_half); |
| 3822 __ movq(xmm_scratch, kScratchRegister); | 3822 __ movq(xmm_scratch, kScratchRegister); |
| 3823 __ ucomisd(xmm_scratch, input_reg); | 3823 __ ucomisd(xmm_scratch, input_reg); |
| 3824 __ j(above, &below_one_half, Label::kNear); | 3824 __ j(above, &below_one_half, Label::kNear); |
| 3825 | 3825 |
| 3826 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). | 3826 // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). |
| 3827 __ addsd(xmm_scratch, input_reg); | 3827 __ addsd(xmm_scratch, input_reg); |
| 3828 __ cvttsd2si(output_reg, xmm_scratch); | 3828 __ cvttsd2si(output_reg, xmm_scratch); |
| 3829 // Overflow is signalled with minint. | 3829 // Overflow is signalled with minint. |
| 3830 __ cmpl(output_reg, Immediate(0x1)); | 3830 __ cmpl(output_reg, Immediate(0x1)); |
| 3831 DeoptimizeIf(overflow, instr, "overflow"); | 3831 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3832 __ jmp(&done, dist); | 3832 __ jmp(&done, dist); |
| 3833 | 3833 |
| 3834 __ bind(&below_one_half); | 3834 __ bind(&below_one_half); |
| 3835 __ movq(kScratchRegister, minus_one_half); | 3835 __ movq(kScratchRegister, minus_one_half); |
| 3836 __ movq(xmm_scratch, kScratchRegister); | 3836 __ movq(xmm_scratch, kScratchRegister); |
| 3837 __ ucomisd(xmm_scratch, input_reg); | 3837 __ ucomisd(xmm_scratch, input_reg); |
| 3838 __ j(below_equal, &round_to_zero, Label::kNear); | 3838 __ j(below_equal, &round_to_zero, Label::kNear); |
| 3839 | 3839 |
| 3840 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then | 3840 // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then |
| 3841 // compare and compensate. | 3841 // compare and compensate. |
| 3842 __ movq(input_temp, input_reg); // Do not alter input_reg. | 3842 __ movq(input_temp, input_reg); // Do not alter input_reg. |
| 3843 __ subsd(input_temp, xmm_scratch); | 3843 __ subsd(input_temp, xmm_scratch); |
| 3844 __ cvttsd2si(output_reg, input_temp); | 3844 __ cvttsd2si(output_reg, input_temp); |
| 3845 // Catch minint due to overflow, and to prevent overflow when compensating. | 3845 // Catch minint due to overflow, and to prevent overflow when compensating. |
| 3846 __ cmpl(output_reg, Immediate(0x1)); | 3846 __ cmpl(output_reg, Immediate(0x1)); |
| 3847 DeoptimizeIf(overflow, instr, "overflow"); | 3847 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 3848 | 3848 |
| 3849 __ Cvtlsi2sd(xmm_scratch, output_reg); | 3849 __ Cvtlsi2sd(xmm_scratch, output_reg); |
| 3850 __ ucomisd(xmm_scratch, input_temp); | 3850 __ ucomisd(xmm_scratch, input_temp); |
| 3851 __ j(equal, &done, dist); | 3851 __ j(equal, &done, dist); |
| 3852 __ subl(output_reg, Immediate(1)); | 3852 __ subl(output_reg, Immediate(1)); |
| 3853 // No overflow because we already ruled out minint. | 3853 // No overflow because we already ruled out minint. |
| 3854 __ jmp(&done, dist); | 3854 __ jmp(&done, dist); |
| 3855 | 3855 |
| 3856 __ bind(&round_to_zero); | 3856 __ bind(&round_to_zero); |
| 3857 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if | 3857 // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if |
| 3858 // we can ignore the difference between a result of -0 and +0. | 3858 // we can ignore the difference between a result of -0 and +0. |
| 3859 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3859 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3860 __ movq(output_reg, input_reg); | 3860 __ movq(output_reg, input_reg); |
| 3861 __ testq(output_reg, output_reg); | 3861 __ testq(output_reg, output_reg); |
| 3862 DeoptimizeIf(negative, instr, "minus zero"); | 3862 DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero); |
| 3863 } | 3863 } |
| 3864 __ Set(output_reg, 0); | 3864 __ Set(output_reg, 0); |
| 3865 __ bind(&done); | 3865 __ bind(&done); |
| 3866 } | 3866 } |
| 3867 | 3867 |
| 3868 | 3868 |
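DoMathRound above implements round-half-toward-plus-infinity: values at or above 0.5 go through floor(x + 0.5) via truncation, the band [-0.5, 0.5) collapses to zero (with an optional minus-zero bailout), and values below -0.5 are truncated after adding 0.5 and then compensated. A stand-alone sketch under those assumptions, with every deopt expressed as a false return:

    #include <cmath>
    #include <cstdint>

    bool RoundToInt32(double input, bool bailout_on_minus_zero,
                      int32_t* result) {
      // The generated code funnels NaN through the overflow sentinel instead.
      if (std::isnan(input)) return false;
      if (input >= 0.5) {
        double sum = input + 0.5;
        if (sum >= 2147483648.0) return false;  // overflow
        *result = static_cast<int32_t>(sum);    // truncation == floor here
        return true;
      }
      if (input >= -0.5) {                      // [-0.5, 0.5): result is +/-0
        if (bailout_on_minus_zero && std::signbit(input)) return false;
        *result = 0;
        return true;
      }
      double shifted = input + 0.5;             // input < -0.5
      if (shifted <= -2147483648.0) return false;  // overflow / sentinel range
      int32_t truncated = static_cast<int32_t>(shifted);
      if (static_cast<double>(truncated) != shifted) --truncated;  // compensate
      *result = truncated;
      return true;
    }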
| 3869 void LCodeGen::DoMathFround(LMathFround* instr) { | 3869 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3870 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3870 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3871 XMMRegister output_reg = ToDoubleRegister(instr->result()); | 3871 XMMRegister output_reg = ToDoubleRegister(instr->result()); |
| 3872 __ cvtsd2ss(output_reg, input_reg); | 3872 __ cvtsd2ss(output_reg, input_reg); |
| (...skipping 58 matching lines...) |
| 3931 DCHECK(ToDoubleRegister(instr->left()).is(xmm2)); | 3931 DCHECK(ToDoubleRegister(instr->left()).is(xmm2)); |
| 3932 DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); | 3932 DCHECK(ToDoubleRegister(instr->result()).is(xmm3)); |
| 3933 | 3933 |
| 3934 if (exponent_type.IsSmi()) { | 3934 if (exponent_type.IsSmi()) { |
| 3935 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3935 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3936 __ CallStub(&stub); | 3936 __ CallStub(&stub); |
| 3937 } else if (exponent_type.IsTagged()) { | 3937 } else if (exponent_type.IsTagged()) { |
| 3938 Label no_deopt; | 3938 Label no_deopt; |
| 3939 __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear); | 3939 __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear); |
| 3940 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx); | 3940 __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx); |
| 3941 DeoptimizeIf(not_equal, instr, "not a heap number"); | 3941 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 3942 __ bind(&no_deopt); | 3942 __ bind(&no_deopt); |
| 3943 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3943 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3944 __ CallStub(&stub); | 3944 __ CallStub(&stub); |
| 3945 } else if (exponent_type.IsInteger32()) { | 3945 } else if (exponent_type.IsInteger32()) { |
| 3946 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3946 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3947 __ CallStub(&stub); | 3947 __ CallStub(&stub); |
| 3948 } else { | 3948 } else { |
| 3949 DCHECK(exponent_type.IsDouble()); | 3949 DCHECK(exponent_type.IsDouble()); |
| 3950 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3950 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3951 __ CallStub(&stub); | 3951 __ CallStub(&stub); |
| (...skipping 373 matching lines...) |
| 4325 __ cmpl(length, index); | 4325 __ cmpl(length, index); |
| 4326 } | 4326 } |
| 4327 } | 4327 } |
| 4328 } | 4328 } |
| 4329 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4329 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 4330 Label done; | 4330 Label done; |
| 4331 __ j(NegateCondition(cc), &done, Label::kNear); | 4331 __ j(NegateCondition(cc), &done, Label::kNear); |
| 4332 __ int3(); | 4332 __ int3(); |
| 4333 __ bind(&done); | 4333 __ bind(&done); |
| 4334 } else { | 4334 } else { |
| 4335 DeoptimizeIf(cc, instr, "out of bounds"); | 4335 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); |
| 4336 } | 4336 } |
| 4337 } | 4337 } |
| 4338 | 4338 |
| 4339 | 4339 |
| 4340 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4340 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4341 ElementsKind elements_kind = instr->elements_kind(); | 4341 ElementsKind elements_kind = instr->elements_kind(); |
| 4342 LOperand* key = instr->key(); | 4342 LOperand* key = instr->key(); |
| 4343 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) { | 4343 if (kPointerSize == kInt32Size && !key->IsConstantOperand()) { |
| 4344 Register key_reg = ToRegister(key); | 4344 Register key_reg = ToRegister(key); |
| 4345 Representation key_representation = | 4345 Representation key_representation = |
| (...skipping 220 matching lines...) |
| 4566 } | 4566 } |
| 4567 __ bind(¬_applicable); | 4567 __ bind(¬_applicable); |
| 4568 } | 4568 } |
| 4569 | 4569 |
| 4570 | 4570 |
| 4571 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4571 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4572 Register object = ToRegister(instr->object()); | 4572 Register object = ToRegister(instr->object()); |
| 4573 Register temp = ToRegister(instr->temp()); | 4573 Register temp = ToRegister(instr->temp()); |
| 4574 Label no_memento_found; | 4574 Label no_memento_found; |
| 4575 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4575 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
| 4576 DeoptimizeIf(equal, instr, "memento found"); | 4576 DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound); |
| 4577 __ bind(&no_memento_found); | 4577 __ bind(&no_memento_found); |
| 4578 } | 4578 } |
| 4579 | 4579 |
| 4580 | 4580 |
| 4581 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4581 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4582 DCHECK(ToRegister(instr->context()).is(rsi)); | 4582 DCHECK(ToRegister(instr->context()).is(rsi)); |
| 4583 DCHECK(ToRegister(instr->left()).is(rdx)); | 4583 DCHECK(ToRegister(instr->left()).is(rdx)); |
| 4584 DCHECK(ToRegister(instr->right()).is(rax)); | 4584 DCHECK(ToRegister(instr->right()).is(rax)); |
| 4585 StringAddStub stub(isolate(), | 4585 StringAddStub stub(isolate(), |
| 4586 instr->hydrogen()->flags(), | 4586 instr->hydrogen()->flags(), |
| (...skipping 299 matching lines...) |
| 4886 } | 4886 } |
| 4887 | 4887 |
| 4888 | 4888 |
| 4889 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4889 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4890 HChange* hchange = instr->hydrogen(); | 4890 HChange* hchange = instr->hydrogen(); |
| 4891 Register input = ToRegister(instr->value()); | 4891 Register input = ToRegister(instr->value()); |
| 4892 Register output = ToRegister(instr->result()); | 4892 Register output = ToRegister(instr->result()); |
| 4893 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4893 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4894 hchange->value()->CheckFlag(HValue::kUint32)) { | 4894 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4895 Condition is_smi = __ CheckUInteger32ValidSmiValue(input); | 4895 Condition is_smi = __ CheckUInteger32ValidSmiValue(input); |
| 4896 DeoptimizeIf(NegateCondition(is_smi), instr, "overflow"); | 4896 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow); |
| 4897 } | 4897 } |
| 4898 __ Integer32ToSmi(output, input); | 4898 __ Integer32ToSmi(output, input); |
| 4899 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4899 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4900 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4900 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4901 DeoptimizeIf(overflow, instr, "overflow"); | 4901 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 4902 } | 4902 } |
| 4903 } | 4903 } |
| 4904 | 4904 |
| 4905 | 4905 |
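The Smi-tag overflow checks above exist because a uint32 above INT32_MAX has no Smi representation even though every int32 does. The sketch below assumes the x64 layout where the 32-bit payload is stored in the upper half of the tagged word; kAssumedSmiShift is an assumption, not read from the V8 headers:

    #include <cstdint>

    const int kAssumedSmiShift = 32;  // assumption about the x64 Smi layout

    bool TagUint32AsSmi(uint32_t value, int64_t* smi_out) {
      if (value > static_cast<uint32_t>(INT32_MAX)) {
        return false;  // deoptimize: no Smi can hold a value above INT32_MAX
      }
      *smi_out = static_cast<int64_t>(static_cast<int32_t>(value))
                 << kAssumedSmiShift;
      return true;
    }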
| 4906 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4906 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4907 DCHECK(instr->value()->Equals(instr->result())); | 4907 DCHECK(instr->value()->Equals(instr->result())); |
| 4908 Register input = ToRegister(instr->value()); | 4908 Register input = ToRegister(instr->value()); |
| 4909 if (instr->needs_check()) { | 4909 if (instr->needs_check()) { |
| 4910 Condition is_smi = __ CheckSmi(input); | 4910 Condition is_smi = __ CheckSmi(input); |
| 4911 DeoptimizeIf(NegateCondition(is_smi), instr, "not a Smi"); | 4911 DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi); |
| 4912 } else { | 4912 } else { |
| 4913 __ AssertSmi(input); | 4913 __ AssertSmi(input); |
| 4914 } | 4914 } |
| 4915 __ SmiToInteger32(input, input); | 4915 __ SmiToInteger32(input, input); |
| 4916 } | 4916 } |
| 4917 | 4917 |
| 4918 | 4918 |
| 4919 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4919 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4920 XMMRegister result_reg, NumberUntagDMode mode) { | 4920 XMMRegister result_reg, NumberUntagDMode mode) { |
| 4921 bool can_convert_undefined_to_nan = | 4921 bool can_convert_undefined_to_nan = |
| (...skipping 10 matching lines...) |
| 4932 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 4932 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 4933 Heap::kHeapNumberMapRootIndex); | 4933 Heap::kHeapNumberMapRootIndex); |
| 4934 | 4934 |
| 4935 // On x64 it is safe to load at heap number offset before evaluating the map | 4935 // On x64 it is safe to load at heap number offset before evaluating the map |
| 4936 // check, since all heap objects are at least two words long. | 4936 // check, since all heap objects are at least two words long. |
| 4937 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4937 __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4938 | 4938 |
| 4939 if (can_convert_undefined_to_nan) { | 4939 if (can_convert_undefined_to_nan) { |
| 4940 __ j(not_equal, &convert, Label::kNear); | 4940 __ j(not_equal, &convert, Label::kNear); |
| 4941 } else { | 4941 } else { |
| 4942 DeoptimizeIf(not_equal, instr, "not a heap number"); | 4942 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 4943 } | 4943 } |
| 4944 | 4944 |
| 4945 if (deoptimize_on_minus_zero) { | 4945 if (deoptimize_on_minus_zero) { |
| 4946 XMMRegister xmm_scratch = double_scratch0(); | 4946 XMMRegister xmm_scratch = double_scratch0(); |
| 4947 __ xorps(xmm_scratch, xmm_scratch); | 4947 __ xorps(xmm_scratch, xmm_scratch); |
| 4948 __ ucomisd(xmm_scratch, result_reg); | 4948 __ ucomisd(xmm_scratch, result_reg); |
| 4949 __ j(not_equal, &done, Label::kNear); | 4949 __ j(not_equal, &done, Label::kNear); |
| 4950 __ movmskpd(kScratchRegister, result_reg); | 4950 __ movmskpd(kScratchRegister, result_reg); |
| 4951 __ testq(kScratchRegister, Immediate(1)); | 4951 __ testq(kScratchRegister, Immediate(1)); |
| 4952 DeoptimizeIf(not_zero, instr, "minus zero"); | 4952 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
| 4953 } | 4953 } |
| 4954 __ jmp(&done, Label::kNear); | 4954 __ jmp(&done, Label::kNear); |
| 4955 | 4955 |
| 4956 if (can_convert_undefined_to_nan) { | 4956 if (can_convert_undefined_to_nan) { |
| 4957 __ bind(&convert); | 4957 __ bind(&convert); |
| 4958 | 4958 |
| 4959 // Convert undefined (and hole) to NaN. Compute NaN as 0/0. | 4959 // Convert undefined (and hole) to NaN. Compute NaN as 0/0. |
| 4960 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); | 4960 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); |
| 4961 DeoptimizeIf(not_equal, instr, "not a heap number/undefined"); | 4961 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); |
| 4962 | 4962 |
| 4963 __ pcmpeqd(result_reg, result_reg); | 4963 __ pcmpeqd(result_reg, result_reg); |
| 4964 __ jmp(&done, Label::kNear); | 4964 __ jmp(&done, Label::kNear); |
| 4965 } | 4965 } |
| 4966 } else { | 4966 } else { |
| 4967 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4967 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4968 } | 4968 } |
| 4969 | 4969 |
| 4970 // Smi to XMM conversion | 4970 // Smi to XMM conversion |
| 4971 __ bind(&load_smi); | 4971 __ bind(&load_smi); |
| (...skipping 25 matching lines...) |
| 4997 __ jmp(done); | 4997 __ jmp(done); |
| 4998 | 4998 |
| 4999 __ bind(&check_bools); | 4999 __ bind(&check_bools); |
| 5000 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex); | 5000 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex); |
| 5001 __ j(not_equal, &check_false, Label::kNear); | 5001 __ j(not_equal, &check_false, Label::kNear); |
| 5002 __ Set(input_reg, 1); | 5002 __ Set(input_reg, 1); |
| 5003 __ jmp(done); | 5003 __ jmp(done); |
| 5004 | 5004 |
| 5005 __ bind(&check_false); | 5005 __ bind(&check_false); |
| 5006 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); | 5006 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); |
| 5007 DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false"); | 5007 DeoptimizeIf(not_equal, instr, |
| 5008 Deoptimizer::kNotAHeapNumberUndefinedBoolean); |
| 5008 __ Set(input_reg, 0); | 5009 __ Set(input_reg, 0); |
| 5009 } else { | 5010 } else { |
| 5010 XMMRegister scratch = ToDoubleRegister(instr->temp()); | 5011 XMMRegister scratch = ToDoubleRegister(instr->temp()); |
| 5011 DCHECK(!scratch.is(xmm0)); | 5012 DCHECK(!scratch.is(xmm0)); |
| 5012 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 5013 __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 5013 Heap::kHeapNumberMapRootIndex); | 5014 Heap::kHeapNumberMapRootIndex); |
| 5014 DeoptimizeIf(not_equal, instr, "not a heap number"); | 5015 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber); |
| 5015 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5016 __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 5016 __ cvttsd2si(input_reg, xmm0); | 5017 __ cvttsd2si(input_reg, xmm0); |
| 5017 __ Cvtlsi2sd(scratch, input_reg); | 5018 __ Cvtlsi2sd(scratch, input_reg); |
| 5018 __ ucomisd(xmm0, scratch); | 5019 __ ucomisd(xmm0, scratch); |
| 5019 DeoptimizeIf(not_equal, instr, "lost precision"); | 5020 DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision); |
| 5020 DeoptimizeIf(parity_even, instr, "NaN"); | 5021 DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN); |
| 5021 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { | 5022 if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { |
| 5022 __ testl(input_reg, input_reg); | 5023 __ testl(input_reg, input_reg); |
| 5023 __ j(not_zero, done); | 5024 __ j(not_zero, done); |
| 5024 __ movmskpd(input_reg, xmm0); | 5025 __ movmskpd(input_reg, xmm0); |
| 5025 __ andl(input_reg, Immediate(1)); | 5026 __ andl(input_reg, Immediate(1)); |
| 5026 DeoptimizeIf(not_zero, instr, "minus zero"); | 5027 DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero); |
| 5027 } | 5028 } |
| 5028 } | 5029 } |
| 5029 } | 5030 } |
| 5030 | 5031 |
| 5031 | 5032 |
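The deferred tagged-to-int32 path above validates the non-truncating conversion with a round trip: cvttsd2si, convert back, and deoptimize on any mismatch ("lost precision"), on NaN, or on a -0.0 result when the instruction cares about the sign of zero. The same checks expressed as plain C++:

    #include <cmath>
    #include <cstdint>

    bool DoubleToInt32Exact(double input, bool fail_on_minus_zero,
                            int32_t* result) {
      if (!(input >= -2147483648.0 && input < 2147483648.0)) {
        return false;  // deopt: NaN or outside the int32 range
      }
      int32_t converted = static_cast<int32_t>(input);   // cvttsd2si
      if (static_cast<double>(converted) != input) {
        return false;  // deopt: lost precision (a fractional part was dropped)
      }
      if (fail_on_minus_zero && converted == 0 && std::signbit(input)) {
        return false;  // deopt: minus zero
      }
      *result = converted;
      return true;
    }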
| 5032 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5033 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 5033 class DeferredTaggedToI FINAL : public LDeferredCode { | 5034 class DeferredTaggedToI FINAL : public LDeferredCode { |
| 5034 public: | 5035 public: |
| 5035 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5036 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| 5036 : LDeferredCode(codegen), instr_(instr) { } | 5037 : LDeferredCode(codegen), instr_(instr) { } |
| (...skipping 50 matching lines...) |
| 5087 __ TruncateDoubleToI(result_reg, input_reg); | 5088 __ TruncateDoubleToI(result_reg, input_reg); |
| 5088 } else { | 5089 } else { |
| 5089 Label lost_precision, is_nan, minus_zero, done; | 5090 Label lost_precision, is_nan, minus_zero, done; |
| 5090 XMMRegister xmm_scratch = double_scratch0(); | 5091 XMMRegister xmm_scratch = double_scratch0(); |
| 5091 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 5092 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
| 5092 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 5093 __ DoubleToI(result_reg, input_reg, xmm_scratch, |
| 5093 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, | 5094 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, |
| 5094 &is_nan, &minus_zero, dist); | 5095 &is_nan, &minus_zero, dist); |
| 5095 __ jmp(&done, dist); | 5096 __ jmp(&done, dist); |
| 5096 __ bind(&lost_precision); | 5097 __ bind(&lost_precision); |
| 5097 DeoptimizeIf(no_condition, instr, "lost precision"); | 5098 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision); |
| 5098 __ bind(&is_nan); | 5099 __ bind(&is_nan); |
| 5099 DeoptimizeIf(no_condition, instr, "NaN"); | 5100 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN); |
| 5100 __ bind(&minus_zero); | 5101 __ bind(&minus_zero); |
| 5101 DeoptimizeIf(no_condition, instr, "minus zero"); | 5102 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); |
| 5102 __ bind(&done); | 5103 __ bind(&done); |
| 5103 } | 5104 } |
| 5104 } | 5105 } |
| 5105 | 5106 |
| 5106 | 5107 |
| 5107 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5108 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 5108 LOperand* input = instr->value(); | 5109 LOperand* input = instr->value(); |
| 5109 DCHECK(input->IsDoubleRegister()); | 5110 DCHECK(input->IsDoubleRegister()); |
| 5110 LOperand* result = instr->result(); | 5111 LOperand* result = instr->result(); |
| 5111 DCHECK(result->IsRegister()); | 5112 DCHECK(result->IsRegister()); |
| 5112 | 5113 |
| 5113 XMMRegister input_reg = ToDoubleRegister(input); | 5114 XMMRegister input_reg = ToDoubleRegister(input); |
| 5114 Register result_reg = ToRegister(result); | 5115 Register result_reg = ToRegister(result); |
| 5115 | 5116 |
| 5116 Label lost_precision, is_nan, minus_zero, done; | 5117 Label lost_precision, is_nan, minus_zero, done; |
| 5117 XMMRegister xmm_scratch = double_scratch0(); | 5118 XMMRegister xmm_scratch = double_scratch0(); |
| 5118 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; | 5119 Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
| 5119 __ DoubleToI(result_reg, input_reg, xmm_scratch, | 5120 __ DoubleToI(result_reg, input_reg, xmm_scratch, |
| 5120 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan, | 5121 instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan, |
| 5121 &minus_zero, dist); | 5122 &minus_zero, dist); |
| 5122 __ jmp(&done, dist); | 5123 __ jmp(&done, dist); |
| 5123 __ bind(&lost_precision); | 5124 __ bind(&lost_precision); |
| 5124 DeoptimizeIf(no_condition, instr, "lost precision"); | 5125 DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision); |
| 5125 __ bind(&is_nan); | 5126 __ bind(&is_nan); |
| 5126 DeoptimizeIf(no_condition, instr, "NaN"); | 5127 DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN); |
| 5127 __ bind(&minus_zero); | 5128 __ bind(&minus_zero); |
| 5128 DeoptimizeIf(no_condition, instr, "minus zero"); | 5129 DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero); |
| 5129 __ bind(&done); | 5130 __ bind(&done); |
| 5130 __ Integer32ToSmi(result_reg, result_reg); | 5131 __ Integer32ToSmi(result_reg, result_reg); |
| 5131 DeoptimizeIf(overflow, instr, "overflow"); | 5132 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow); |
| 5132 } | 5133 } |
| 5133 | 5134 |
| 5134 | 5135 |
| 5135 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5136 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 5136 LOperand* input = instr->value(); | 5137 LOperand* input = instr->value(); |
| 5137 Condition cc = masm()->CheckSmi(ToRegister(input)); | 5138 Condition cc = masm()->CheckSmi(ToRegister(input)); |
| 5138 DeoptimizeIf(NegateCondition(cc), instr, "not a Smi"); | 5139 DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi); |
| 5139 } | 5140 } |
| 5140 | 5141 |
| 5141 | 5142 |
| 5142 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5143 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 5143 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5144 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5144 LOperand* input = instr->value(); | 5145 LOperand* input = instr->value(); |
| 5145 Condition cc = masm()->CheckSmi(ToRegister(input)); | 5146 Condition cc = masm()->CheckSmi(ToRegister(input)); |
| 5146 DeoptimizeIf(cc, instr, "Smi"); | 5147 DeoptimizeIf(cc, instr, Deoptimizer::kSmi); |
| 5147 } | 5148 } |
| 5148 } | 5149 } |
| 5149 | 5150 |
| 5150 | 5151 |
| 5151 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5152 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 5152 Register input = ToRegister(instr->value()); | 5153 Register input = ToRegister(instr->value()); |
| 5153 | 5154 |
| 5154 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset)); | 5155 __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset)); |
| 5155 | 5156 |
| 5156 if (instr->hydrogen()->is_interval_check()) { | 5157 if (instr->hydrogen()->is_interval_check()) { |
| 5157 InstanceType first; | 5158 InstanceType first; |
| 5158 InstanceType last; | 5159 InstanceType last; |
| 5159 instr->hydrogen()->GetCheckInterval(&first, &last); | 5160 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 5160 | 5161 |
| 5161 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), | 5162 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), |
| 5162 Immediate(static_cast<int8_t>(first))); | 5163 Immediate(static_cast<int8_t>(first))); |
| 5163 | 5164 |
| 5164 // If there is only one type in the interval check for equality. | 5165 // If there is only one type in the interval check for equality. |
| 5165 if (first == last) { | 5166 if (first == last) { |
| 5166 DeoptimizeIf(not_equal, instr, "wrong instance type"); | 5167 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType); |
| 5167 } else { | 5168 } else { |
| 5168 DeoptimizeIf(below, instr, "wrong instance type"); | 5169 DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType); |
| 5169 // Omit check for the last type. | 5170 // Omit check for the last type. |
| 5170 if (last != LAST_TYPE) { | 5171 if (last != LAST_TYPE) { |
| 5171 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), | 5172 __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), |
| 5172 Immediate(static_cast<int8_t>(last))); | 5173 Immediate(static_cast<int8_t>(last))); |
| 5173 DeoptimizeIf(above, instr, "wrong instance type"); | 5174 DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType); |
| 5174 } | 5175 } |
| 5175 } | 5176 } |
| 5176 } else { | 5177 } else { |
| 5177 uint8_t mask; | 5178 uint8_t mask; |
| 5178 uint8_t tag; | 5179 uint8_t tag; |
| 5179 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5180 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 5180 | 5181 |
| 5181 if (base::bits::IsPowerOfTwo32(mask)) { | 5182 if (base::bits::IsPowerOfTwo32(mask)) { |
| 5182 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5183 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
| 5183 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), | 5184 __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), |
| 5184 Immediate(mask)); | 5185 Immediate(mask)); |
| 5185 DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type"); | 5186 DeoptimizeIf(tag == 0 ? not_zero : zero, instr, |
| 5187 Deoptimizer::kWrongInstanceType); |
| 5186 } else { | 5188 } else { |
| 5187 __ movzxbl(kScratchRegister, | 5189 __ movzxbl(kScratchRegister, |
| 5188 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); | 5190 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); |
| 5189 __ andb(kScratchRegister, Immediate(mask)); | 5191 __ andb(kScratchRegister, Immediate(mask)); |
| 5190 __ cmpb(kScratchRegister, Immediate(tag)); | 5192 __ cmpb(kScratchRegister, Immediate(tag)); |
| 5191 DeoptimizeIf(not_equal, instr, "wrong instance type"); | 5193 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType); |
| 5192 } | 5194 } |
| 5193 } | 5195 } |
| 5194 } | 5196 } |
| 5195 | 5197 |
| 5196 | 5198 |
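DoCheckInstanceType above distinguishes two shapes of the mask/tag test: when the mask is a single bit and the tag is zero or that same bit, one testb plus a zero/not-zero branch suffices; otherwise the type byte is masked and then compared. Both variants, modelled directly:

    #include <cstdint>

    // General case: mask the instance type, then compare against the tag.
    bool MatchesMaskAndTag(uint8_t instance_type, uint8_t mask, uint8_t tag) {
      return static_cast<uint8_t>(instance_type & mask) == tag;
    }

    // Single-bit fast path: (type & mask) must be zero when tag == 0, or
    // non-zero when tag == mask, which is what the testb variant checks.
    bool MatchesSingleBit(uint8_t instance_type, uint8_t mask, uint8_t tag) {
      bool bit_set = (instance_type & mask) != 0;
      return tag == 0 ? !bit_set : bit_set;
    }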
| 5197 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5199 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 5198 Register reg = ToRegister(instr->value()); | 5200 Register reg = ToRegister(instr->value()); |
| 5199 __ Cmp(reg, instr->hydrogen()->object().handle()); | 5201 __ Cmp(reg, instr->hydrogen()->object().handle()); |
| 5200 DeoptimizeIf(not_equal, instr, "value mismatch"); | 5202 DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch); |
| 5201 } | 5203 } |
| 5202 | 5204 |
| 5203 | 5205 |
| 5204 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5206 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 5205 { | 5207 { |
| 5206 PushSafepointRegistersScope scope(this); | 5208 PushSafepointRegistersScope scope(this); |
| 5207 __ Push(object); | 5209 __ Push(object); |
| 5208 __ Set(rsi, 0); | 5210 __ Set(rsi, 0); |
| 5209 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5211 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 5210 RecordSafepointWithRegisters( | 5212 RecordSafepointWithRegisters( |
| 5211 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5213 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 5212 | 5214 |
| 5213 __ testp(rax, Immediate(kSmiTagMask)); | 5215 __ testp(rax, Immediate(kSmiTagMask)); |
| 5214 } | 5216 } |
| 5215 DeoptimizeIf(zero, instr, "instance migration failed"); | 5217 DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed); |
| 5216 } | 5218 } |
| 5217 | 5219 |
| 5218 | 5220 |
| 5219 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5221 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5220 class DeferredCheckMaps FINAL : public LDeferredCode { | 5222 class DeferredCheckMaps FINAL : public LDeferredCode { |
| 5221 public: | 5223 public: |
| 5222 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5224 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 5223 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5225 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 5224 SetExit(check_maps()); | 5226 SetExit(check_maps()); |
| 5225 } | 5227 } |
| (...skipping 33 matching lines...) |
| 5259 Handle<Map> map = maps->at(i).handle(); | 5261 Handle<Map> map = maps->at(i).handle(); |
| 5260 __ CompareMap(reg, map); | 5262 __ CompareMap(reg, map); |
| 5261 __ j(equal, &success, Label::kNear); | 5263 __ j(equal, &success, Label::kNear); |
| 5262 } | 5264 } |
| 5263 | 5265 |
| 5264 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5266 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 5265 __ CompareMap(reg, map); | 5267 __ CompareMap(reg, map); |
| 5266 if (instr->hydrogen()->HasMigrationTarget()) { | 5268 if (instr->hydrogen()->HasMigrationTarget()) { |
| 5267 __ j(not_equal, deferred->entry()); | 5269 __ j(not_equal, deferred->entry()); |
| 5268 } else { | 5270 } else { |
| 5269 DeoptimizeIf(not_equal, instr, "wrong map"); | 5271 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); |
| 5270 } | 5272 } |
| 5271 | 5273 |
| 5272 __ bind(&success); | 5274 __ bind(&success); |
| 5273 } | 5275 } |
| 5274 | 5276 |
| 5275 | 5277 |
| 5276 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5278 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5277 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5279 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5278 XMMRegister xmm_scratch = double_scratch0(); | 5280 XMMRegister xmm_scratch = double_scratch0(); |
| 5279 Register result_reg = ToRegister(instr->result()); | 5281 Register result_reg = ToRegister(instr->result()); |
| (...skipping 18 matching lines...) |
| 5298 __ JumpIfSmi(input_reg, &is_smi, dist); | 5300 __ JumpIfSmi(input_reg, &is_smi, dist); |
| 5299 | 5301 |
| 5300 // Check for heap number | 5302 // Check for heap number |
| 5301 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 5303 __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 5302 factory()->heap_number_map()); | 5304 factory()->heap_number_map()); |
| 5303 __ j(equal, &heap_number, Label::kNear); | 5305 __ j(equal, &heap_number, Label::kNear); |
| 5304 | 5306 |
| 5305 // Check for undefined. Undefined is converted to zero for clamping | 5307 // Check for undefined. Undefined is converted to zero for clamping |
| 5306 // conversions. | 5308 // conversions. |
| 5307 __ Cmp(input_reg, factory()->undefined_value()); | 5309 __ Cmp(input_reg, factory()->undefined_value()); |
| 5308 DeoptimizeIf(not_equal, instr, "not a heap number/undefined"); | 5310 DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined); |
| 5309 __ xorl(input_reg, input_reg); | 5311 __ xorl(input_reg, input_reg); |
| 5310 __ jmp(&done, Label::kNear); | 5312 __ jmp(&done, Label::kNear); |
| 5311 | 5313 |
| 5312 // Heap number | 5314 // Heap number |
| 5313 __ bind(&heap_number); | 5315 __ bind(&heap_number); |
| 5314 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 5316 __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 5315 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); | 5317 __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); |
| 5316 __ jmp(&done, Label::kNear); | 5318 __ jmp(&done, Label::kNear); |
| 5317 | 5319 |
| 5318 // smi | 5320 // smi |
| (...skipping 458 matching lines...) |
| 5777 DCHECK(!environment->HasBeenRegistered()); | 5779 DCHECK(!environment->HasBeenRegistered()); |
| 5778 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5780 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 5779 | 5781 |
| 5780 GenerateOsrPrologue(); | 5782 GenerateOsrPrologue(); |
| 5781 } | 5783 } |
| 5782 | 5784 |
| 5783 | 5785 |
| 5784 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5786 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| 5785 DCHECK(ToRegister(instr->context()).is(rsi)); | 5787 DCHECK(ToRegister(instr->context()).is(rsi)); |
| 5786 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); | 5788 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
| 5787 DeoptimizeIf(equal, instr, "undefined"); | 5789 DeoptimizeIf(equal, instr, Deoptimizer::kUndefined); |
| 5788 | 5790 |
| 5789 Register null_value = rdi; | 5791 Register null_value = rdi; |
| 5790 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5792 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| 5791 __ cmpp(rax, null_value); | 5793 __ cmpp(rax, null_value); |
| 5792 DeoptimizeIf(equal, instr, "null"); | 5794 DeoptimizeIf(equal, instr, Deoptimizer::kNull); |
| 5793 | 5795 |
| 5794 Condition cc = masm()->CheckSmi(rax); | 5796 Condition cc = masm()->CheckSmi(rax); |
| 5795 DeoptimizeIf(cc, instr, "Smi"); | 5797 DeoptimizeIf(cc, instr, Deoptimizer::kSmi); |
| 5796 | 5798 |
| 5797 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5799 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| 5798 __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx); | 5800 __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx); |
| 5799 DeoptimizeIf(below_equal, instr, "wrong instance type"); | 5801 DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType); |
| 5800 | 5802 |
| 5801 Label use_cache, call_runtime; | 5803 Label use_cache, call_runtime; |
| 5802 __ CheckEnumCache(null_value, &call_runtime); | 5804 __ CheckEnumCache(null_value, &call_runtime); |
| 5803 | 5805 |
| 5804 __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset)); | 5806 __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset)); |
| 5805 __ jmp(&use_cache, Label::kNear); | 5807 __ jmp(&use_cache, Label::kNear); |
| 5806 | 5808 |
| 5807 // Get the set of properties to enumerate. | 5809 // Get the set of properties to enumerate. |
| 5808 __ bind(&call_runtime); | 5810 __ bind(&call_runtime); |
| 5809 __ Push(rax); | 5811 __ Push(rax); |
| 5810 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5812 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| 5811 | 5813 |
| 5812 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), | 5814 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), |
| 5813 Heap::kMetaMapRootIndex); | 5815 Heap::kMetaMapRootIndex); |
| 5814 DeoptimizeIf(not_equal, instr, "wrong map"); | 5816 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); |
| 5815 __ bind(&use_cache); | 5817 __ bind(&use_cache); |
| 5816 } | 5818 } |
| 5817 | 5819 |
| 5818 | 5820 |
| 5819 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5821 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| 5820 Register map = ToRegister(instr->map()); | 5822 Register map = ToRegister(instr->map()); |
| 5821 Register result = ToRegister(instr->result()); | 5823 Register result = ToRegister(instr->result()); |
| 5822 Label load_cache, done; | 5824 Label load_cache, done; |
| 5823 __ EnumLength(result, map); | 5825 __ EnumLength(result, map); |
| 5824 __ Cmp(result, Smi::FromInt(0)); | 5826 __ Cmp(result, Smi::FromInt(0)); |
| 5825 __ j(not_equal, &load_cache, Label::kNear); | 5827 __ j(not_equal, &load_cache, Label::kNear); |
| 5826 __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex); | 5828 __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex); |
| 5827 __ jmp(&done, Label::kNear); | 5829 __ jmp(&done, Label::kNear); |
| 5828 __ bind(&load_cache); | 5830 __ bind(&load_cache); |
| 5829 __ LoadInstanceDescriptors(map, result); | 5831 __ LoadInstanceDescriptors(map, result); |
| 5830 __ movp(result, | 5832 __ movp(result, |
| 5831 FieldOperand(result, DescriptorArray::kEnumCacheOffset)); | 5833 FieldOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5832 __ movp(result, | 5834 __ movp(result, |
| 5833 FieldOperand(result, FixedArray::SizeFor(instr->idx()))); | 5835 FieldOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5834 __ bind(&done); | 5836 __ bind(&done); |
| 5835 Condition cc = masm()->CheckSmi(result); | 5837 Condition cc = masm()->CheckSmi(result); |
| 5836 DeoptimizeIf(cc, instr, "no cache"); | 5838 DeoptimizeIf(cc, instr, Deoptimizer::kNoCache); |
| 5837 } | 5839 } |
| 5838 | 5840 |
| 5839 | 5841 |
| 5840 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5842 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5841 Register object = ToRegister(instr->value()); | 5843 Register object = ToRegister(instr->value()); |
| 5842 __ cmpp(ToRegister(instr->map()), | 5844 __ cmpp(ToRegister(instr->map()), |
| 5843 FieldOperand(object, HeapObject::kMapOffset)); | 5845 FieldOperand(object, HeapObject::kMapOffset)); |
| 5844 DeoptimizeIf(not_equal, instr, "wrong map"); | 5846 DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap); |
| 5845 } | 5847 } |
| 5846 | 5848 |
| 5847 | 5849 |
| 5848 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5850 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5849 Register object, | 5851 Register object, |
| 5850 Register index) { | 5852 Register index) { |
| 5851 PushSafepointRegistersScope scope(this); | 5853 PushSafepointRegistersScope scope(this); |
| 5852 __ Push(object); | 5854 __ Push(object); |
| 5853 __ Push(index); | 5855 __ Push(index); |
| 5854 __ xorp(rsi, rsi); | 5856 __ xorp(rsi, rsi); |
| (...skipping 75 matching lines...) |
| 5930 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5932 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 5931 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5933 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 5932 } | 5934 } |
| 5933 | 5935 |
| 5934 | 5936 |
| 5935 #undef __ | 5937 #undef __ |
| 5936 | 5938 |
| 5937 } } // namespace v8::internal | 5939 } } // namespace v8::internal |
| 5938 | 5940 |
| 5939 #endif // V8_TARGET_ARCH_X64 | 5941 #endif // V8_TARGET_ARCH_X64 |