Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1087 matching lines...) | |
| 1098 overwrite_name, | 1098 overwrite_name, |
| 1099 TRBinaryOpIC::GetName(operands_type_)); | 1099 TRBinaryOpIC::GetName(operands_type_)); |
| 1100 return name_; | 1100 return name_; |
| 1101 } | 1101 } |
| 1102 | 1102 |
| 1103 | 1103 |
| 1104 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 1104 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
| 1105 Label* slow, | 1105 Label* slow, |
| 1106 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 1106 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
| 1107 | 1107 |
| | 1108 // Arguments to TypeRecordingBinaryOpStub are in rdx and rax. |
| | 1109 Register left = rdx; |
| | 1110 Register right = rax; |
| | 1111 |
| 1108 // We only generate heapnumber answers for overflowing calculations | 1112 // We only generate heapnumber answers for overflowing calculations |
| 1109 // for the four basic arithmetic operations. | 1113 // for the four basic arithmetic operations and logical right shift by 0. |
| 1110 bool generate_inline_heapnumber_results = | 1114 bool generate_inline_heapnumber_results = |
| 1111 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && | 1115 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && |
| 1112 (op_ == Token::ADD || op_ == Token::SUB || | 1116 (op_ == Token::ADD || op_ == Token::SUB || |
| 1113 op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR); | 1117 op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR); |
| 1114 | 1118 |
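The inclusion of Token::SHR in the condition above follows from JavaScript semantics: `>>>` produces an unsigned 32-bit result, so even two smi inputs can yield a value above the smi maximum. A minimal standalone sketch of those semantics (illustrative only, not V8 code):

```cpp
// Illustrative sketch, not V8 source: why Token::SHR can need a
// heap-number result even when both inputs are smis.
#include <cstdint>
#include <cstdio>

int main() {
  int32_t left = -1;  // fits in a smi
  int32_t shift = 0;  // fits in a smi
  // JavaScript's >>> reinterprets the left operand as unsigned and
  // masks the shift count to 5 bits.
  uint32_t result = static_cast<uint32_t>(left) >> (shift & 31);
  // 4294967295 exceeds the smi range, so the stub must fall through
  // to the inline heap-number allocation path.
  std::printf("%u\n", result);
  return 0;
}
```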
| 1115 // Arguments to TypeRecordingBinaryOpStub are in rdx and rax. | |
| 1116 Register left = rdx; | |
| 1117 Register right = rax; | |
| 1118 | |
| 1119 | |
| 1120 // Smi check of both operands. If op is BIT_OR, the check is delayed | 1119 // Smi check of both operands. If op is BIT_OR, the check is delayed |
| 1121 // until after the OR operation. | 1120 // until after the OR operation. |
| 1122 Label not_smis; | 1121 Label not_smis; |
| 1123 Label use_fp_on_smis; | 1122 Label use_fp_on_smis; |
| 1124 Label restore_MOD_registers; // Only used if op_ == Token::MOD. | 1123 Label fail; |
| 1125 | 1124 |
| 1126 if (op_ != Token::BIT_OR) { | 1125 Comment smi_check_comment(masm, "-- Smi check arguments"); |
| 1127 Comment smi_check_comment(masm, "-- Smi check arguments"); | 1126 __ JumpIfNotBothSmi(left, right, ¬_smis); |
| 1128 __ JumpIfNotBothSmi(left, right, ¬_smis); | |
| 1129 } | |
| 1130 | 1127 |
| | 1128 Label smi_values; |
| | 1129 __ bind(&smi_values); |
| 1131 // Perform the operation. | 1130 // Perform the operation. |
| 1132 Comment perform_smi(masm, "-- Perform smi operation"); | 1131 Comment perform_smi(masm, "-- Perform smi operation"); |
| 1133 switch (op_) { | 1132 switch (op_) { |
| 1134 case Token::ADD: | 1133 case Token::ADD: |
| 1135 ASSERT(right.is(rax)); | 1134 ASSERT(right.is(rax)); |
| 1136 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. | 1135 __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. |
| 1137 break; | 1136 break; |
| 1138 | 1137 |
| 1139 case Token::SUB: | 1138 case Token::SUB: |
| 1140 __ SmiSub(left, left, right, &use_fp_on_smis); | 1139 __ SmiSub(left, left, right, &use_fp_on_smis); |
| (...skipping 18 matching lines...) | |
| 1159 // SmiMod will not accept left in rdx or right in rax. | 1158 // SmiMod will not accept left in rdx or right in rax. |
| 1160 left = rcx; | 1159 left = rcx; |
| 1161 right = rbx; | 1160 right = rbx; |
| 1162 __ movq(rbx, rax); | 1161 __ movq(rbx, rax); |
| 1163 __ movq(rcx, rdx); | 1162 __ movq(rcx, rdx); |
| 1164 __ SmiMod(rax, left, right, &use_fp_on_smis); | 1163 __ SmiMod(rax, left, right, &use_fp_on_smis); |
| 1165 break; | 1164 break; |
| 1166 | 1165 |
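The register shuffle before SmiMod reflects a hardware constraint: x86-64 integer division implicitly uses rdx:rax as the dividend and writes the quotient to rax and the remainder to rdx, so neither input may live in those registers. A sketch of that behavior using GCC/Clang extended inline assembly (illustrative and compiler-specific; the `Mod` helper is hypothetical):

```cpp
// Illustrative sketch, not V8 source: idivq clobbers both rax and rdx,
// which is why SmiMod cannot accept left in rdx or right in rax.
#include <cstdint>
#include <cstdio>

int64_t Mod(int64_t dividend, int64_t divisor) {
  int64_t quotient;
  int64_t remainder;
  asm("cqto\n\t"        // sign-extend rax into rdx:rax (clobbers rdx)
      "idivq %[d]"      // quotient -> rax, remainder -> rdx
      : "=a"(quotient), "=&d"(remainder)
      : "a"(dividend), [d] "r"(divisor)
      : "cc");
  (void)quotient;  // only the remainder is needed here
  return remainder;
}

int main() {
  std::printf("%lld\n", static_cast<long long>(Mod(7, 3)));  // prints 1
  return 0;
}
```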
| 1167 case Token::BIT_OR: { | 1166 case Token::BIT_OR: { |
| 1168 ASSERT(right.is(rax)); | 1167 ASSERT(right.is(rax)); |
| 1169 __ movq(rcx, right); // Save the right operand. | |
| 1170 __ SmiOr(right, right, left); // BIT_OR is commutative. | 1168 __ SmiOr(right, right, left); // BIT_OR is commutative. |
William Hesse (2011/04/08 09:27:58):
Change to SmiOrAndJumpIfNotBothSmi(right, right, left, &not_smis).
| 1171 __ JumpIfNotSmi(right, ¬_smis); // Test delayed until after BIT_OR. | |
| 1172 break; | 1169 break; |
| 1173 } | 1170 } |
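The delayed smi check that Hesse's suggested SmiOrAndJumpIfNotBothSmi macro would fold into the OR works because of V8's pointer tagging: a smi carries a clear low tag bit, so the OR of two tagged words is smi-tagged exactly when both inputs are. A minimal sketch of the invariant (illustrative constants, not V8's actual tagging code):

```cpp
// Illustrative sketch, not V8 source: ORing two tagged words preserves
// the heap-object tag bit, so one smi check after BIT_OR covers both
// operands.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

bool IsSmi(uintptr_t word) { return (word & kSmiTagMask) == 0; }

int main() {
  uintptr_t smi_a = static_cast<uintptr_t>(42) << 1;  // tagged smi 42
  uintptr_t smi_b = static_cast<uintptr_t>(7) << 1;   // tagged smi 7
  uintptr_t heap_obj = 0x1000 | 1;                    // hypothetical pointer

  assert(IsSmi(smi_a | smi_b));       // both smis: the OR is still a smi
  assert(!IsSmi(smi_a | heap_obj));   // any heap object taints the OR
  assert(!IsSmi(heap_obj | heap_obj));
  return 0;
}
```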
| 1174 case Token::BIT_XOR: | 1171 case Token::BIT_XOR: |
| 1175 ASSERT(right.is(rax)); | 1172 ASSERT(right.is(rax)); |
| 1176 __ SmiXor(right, right, left); // BIT_XOR is commutative. | 1173 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
| 1177 break; | 1174 break; |
| 1178 | 1175 |
| 1179 case Token::BIT_AND: | 1176 case Token::BIT_AND: |
| 1180 ASSERT(right.is(rax)); | 1177 ASSERT(right.is(rax)); |
| 1181 __ SmiAnd(right, right, left); // BIT_AND is commutative. | 1178 __ SmiAnd(right, right, left); // BIT_AND is commutative. |
| (...skipping 44 matching lines...) | |
| 1226 case Token::ADD: __ addsd(xmm0, xmm1); break; | 1223 case Token::ADD: __ addsd(xmm0, xmm1); break; |
| 1227 case Token::SUB: __ subsd(xmm0, xmm1); break; | 1224 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 1228 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 1225 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 1229 case Token::DIV: __ divsd(xmm0, xmm1); break; | 1226 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 1230 default: UNREACHABLE(); | 1227 default: UNREACHABLE(); |
| 1231 } | 1228 } |
| 1232 } | 1229 } |
| 1233 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); | 1230 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
| 1234 __ movq(rax, rcx); | 1231 __ movq(rax, rcx); |
| 1235 __ ret(0); | 1232 __ ret(0); |
| | 1233 } else { |
| | 1234 __ jmp(&fail); |
| 1236 } | 1235 } |
| 1237 } | 1236 } |
| 1238 | 1237 |
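When smi arithmetic overflows, the use_fp_on_smis path redoes the operation in double precision via the addsd/subsd/mulsd/divsd dispatch above and stores the result into a freshly allocated HeapNumber. A sketch of the equivalent logic in plain C++ (the `SmiAddOrDouble` helper is hypothetical, for illustration only):

```cpp
// Illustrative sketch, not V8 source: smi addition with the
// use_fp_on_smis fallback to a double (heap-number) result.
#include <climits>
#include <cstdint>
#include <cstdio>

// Hypothetical helper: returns true with a smi result when the sum fits
// in 32 bits, otherwise computes the exact double the stub would box.
bool SmiAddOrDouble(int32_t left, int32_t right,
                    int32_t* smi_result, double* double_result) {
  int64_t wide = static_cast<int64_t>(left) + right;
  if (wide >= INT32_MIN && wide <= INT32_MAX) {
    *smi_result = static_cast<int32_t>(wide);
    return true;  // stayed in smi range
  }
  *double_result = static_cast<double>(left) + static_cast<double>(right);
  return false;   // caller allocates a HeapNumber, as the stub does
}

int main() {
  int32_t smi = 0;
  double boxed = 0;
  if (!SmiAddOrDouble(INT32_MAX, 1, &smi, &boxed)) {
    std::printf("overflowed to double: %.1f\n", boxed);  // 2147483648.0
  }
  return 0;
}
```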
| 1239 // 7. Non-smi operands reach the end of the code generated by | 1238 // 7. Non-smi operands reach the end of the code generated by |
| 1240 // GenerateSmiCode, and fall through to subsequent code, | 1239 // GenerateSmiCode, and fall through to subsequent code, |
| 1241 // with the operands in rdx and rax. | 1240 // with the operands in rdx and rax. |
| | 1241 // But first we check if non-smi values are HeapNumbers holding |
| | 1242 // values that could be smi. |
| 1242 Comment done_comment(masm, "-- Enter non-smi code"); | 1243 Comment done_comment(masm, "-- Enter non-smi code"); |
| 1243 __ bind(¬_smis); | 1244 __ bind(¬_smis); |
| 1244 if (op_ == Token::BIT_OR) { | 1245 { // See if there were smi values stored in HeapNumbers. |
William Hesse (2011/04/08 09:27:58):
I would use LoadAsIntegers (really LoadUnknownsAsIntegers).

Lasse Reichstein (2011/04/08 11:18:10):
LoadNumberAsIntegers will do truncating conversions.
| 1245 __ movq(right, rcx); | 1246 __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex); |
| | 1247 NearLabel left_smi, check_right; |
| | 1248 __ JumpIfSmi(left, &left_smi); |
| | 1249 __ cmpq(FieldOperand(left, HeapObject::kMapOffset), rcx); |
| | 1250 __ j(not_equal, &fail); |
| | 1251 // Convert HeapNumber to smi if possible. |
| | 1252 __ movsd(xmm0, FieldOperand(left, HeapNumber::kValueOffset)); |
| | 1253 __ movq(rbx, xmm0); |
| | 1254 __ cvttsd2siq(rdi, xmm0); |
| | 1255 // Check if conversion was successful by converting back and |
| | 1256 // comparing to the original double's bits. |
| | 1257 __ cvtlsi2sd(xmm1, rdi); |
| | 1258 __ movq(kScratchRegister, xmm1); |
| | 1259 __ cmpq(rbx, kScratchRegister); |
| | 1260 __ j(not_equal, &fail); |
| | 1261 __ Integer32ToSmi(left, rdi); |
| | 1262 |
| | 1263 __ bind(&check_right); |
| | 1264 __ JumpIfSmi(right, &smi_values); |
| | 1265 __ bind(&left_smi); |
| | 1266 if (FLAG_debug_code) { |
| | 1267 // One of left or right should be non-smi if we get here. |
| | 1268 __ AbortIfSmi(right); |
| | 1269 } |
| | 1270 __ cmpq(FieldOperand(right, HeapObject::kMapOffset), rcx); |
| | 1271 __ j(not_equal, &fail); |
| | 1272 // Convert right to smi, if possible. |
| | 1273 __ movsd(xmm0, FieldOperand(right, HeapNumber::kValueOffset)); |
| | 1274 __ movq(rbx, xmm0); |
| | 1275 __ cvttsd2siq(rdi, xmm0); |
| | 1276 __ cvtlsi2sd(xmm1, rdi); |
| | 1277 __ movq(kScratchRegister, xmm1); |
| | 1278 __ cmpq(rbx, kScratchRegister); |
| | 1279 __ j(not_equal, &fail); |
| | 1280 __ Integer32ToSmi(right, rdi); |
| | 1281 __ jmp(&smi_values); |
| 1246 } | 1282 } |
| | 1283 __ bind(&fail); |
| 1247 } | 1284 } |
| 1248 | 1285 |
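The new not_smis block demotes a HeapNumber back to a smi only when the demotion is lossless, which the cvttsd2siq/cvtlsi2sd pair verifies by truncating to an integer, converting back, and comparing raw bit patterns. A standalone sketch of that round-trip test (illustrative; the `DoubleToSmi` helper is hypothetical and simplifies the stub's 64-bit truncation to a direct 32-bit check):

```cpp
// Illustrative sketch, not V8 source: a double can become a smi only if
// truncation and back-conversion reproduce its exact bit pattern.
#include <cstdint>
#include <cstdio>
#include <cstring>

bool DoubleToSmi(double value, int32_t* out) {
  // Range guard keeps the cast well-defined; NaN also fails both tests.
  if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;
  int32_t truncated = static_cast<int32_t>(value);     // like cvttsd2si
  double round_trip = static_cast<double>(truncated);  // like cvtlsi2sd
  uint64_t original_bits, round_trip_bits;
  std::memcpy(&original_bits, &value, sizeof original_bits);
  std::memcpy(&round_trip_bits, &round_trip, sizeof round_trip_bits);
  // Comparing bits (not values) rejects -0.0, matching the stub's cmpq.
  if (original_bits != round_trip_bits) return false;
  *out = truncated;
  return true;
}

int main() {
  int32_t smi = 0;
  std::printf("%d\n", DoubleToSmi(5.0, &smi));   // 1: exact small integer
  std::printf("%d\n", DoubleToSmi(5.5, &smi));   // 0: fractional part lost
  std::printf("%d\n", DoubleToSmi(-0.0, &smi));  // 0: sign bit differs
  return 0;
}
```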
| 1249 | 1286 |
| 1250 void TypeRecordingBinaryOpStub::GenerateFloatingPointCode( | 1287 void TypeRecordingBinaryOpStub::GenerateFloatingPointCode( |
| 1251 MacroAssembler* masm, | 1288 MacroAssembler* masm, |
| 1252 Label* allocation_failure, | 1289 Label* allocation_failure, |
| 1253 Label* non_numeric_failure) { | 1290 Label* non_numeric_failure) { |
| 1254 switch (op_) { | 1291 switch (op_) { |
| 1255 case Token::ADD: | 1292 case Token::ADD: |
| 1256 case Token::SUB: | 1293 case Token::SUB: |
| (...skipping 3885 matching lines...) | |
| 5142 // Do a tail call to the rewritten stub. | 5179 // Do a tail call to the rewritten stub. |
| 5143 __ jmp(rdi); | 5180 __ jmp(rdi); |
| 5144 } | 5181 } |
| 5145 | 5182 |
| 5146 | 5183 |
| 5147 #undef __ | 5184 #undef __ |
| 5148 | 5185 |
| 5149 } } // namespace v8::internal | 5186 } } // namespace v8::internal |
| 5150 | 5187 |
| 5151 #endif // V8_TARGET_ARCH_X64 | 5188 #endif // V8_TARGET_ARCH_X64 |