OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1092 matching lines...)
1103 | 1103 |
1104 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 1104 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
1105 Label* slow, | 1105 Label* slow, |
1106 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 1106 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
1107 | 1107 |
1108 // We only generate heapnumber answers for overflowing calculations | 1108 // We only generate heapnumber answers for overflowing calculations |
1109 // for the four basic arithmetic operations. | 1109 // for the four basic arithmetic operations. |
1110 bool generate_inline_heapnumber_results = | 1110 bool generate_inline_heapnumber_results = |
1111 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && | 1111 (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && |
1112 (op_ == Token::ADD || op_ == Token::SUB || | 1112 (op_ == Token::ADD || op_ == Token::SUB || |
1113 op_ == Token::MUL || op_ == Token::DIV); | 1113 op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR); |
1114 | 1114 |
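
Note on the added Token::SHR: JavaScript's >>> always yields an unsigned 32-bit
integer, and any value above INT32_MAX cannot be represented as a signed 32-bit
smi even though both operands were smis, so the overflow path has to box the
result as a HeapNumber. A minimal C++ sketch of the range condition (names are
illustrative, not V8 API):

    #include <cstdint>

    // Sketch: why "x >>> y" can escape the smi range even when both
    // operands are smis. A smi holds a signed 32-bit integer.
    bool FitsInSmi(uint32_t shr_result) {
      return shr_result <= static_cast<uint32_t>(INT32_MAX);
    }

    // Example: (-1 >>> 0) in JS is 4294967295; FitsInSmi(0xffffffffu)
    // is false, so this change routes it to the heap-number path.
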
1115 // Arguments to TypeRecordingBinaryOpStub are in rdx and rax. | 1115 // Arguments to TypeRecordingBinaryOpStub are in rdx and rax. |
1116 Register left = rdx; | 1116 Register left = rdx; |
1117 Register right = rax; | 1117 Register right = rax; |
1118 | 1118 |
1119 | 1119 |
1120 // Smi check of both operands. If op is BIT_OR, the check is delayed | 1120 // Smi check of both operands. If op is BIT_OR, the check is delayed |
1121 // until after the OR operation. | 1121 // until after the OR operation. |
1122 Label not_smis; | 1122 Label not_smis; |
1123 Label use_fp_on_smis; | 1123 Label use_fp_on_smis; |
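
The delayed check for BIT_OR works because smi tag bits are clear in a smi:
OR-ing the operands sets a tag bit in the result iff it was set in either
input, so a single tag test on the OR result covers both operands, and when
both are smis the OR result is already the answer. A hedged sketch using a
one-bit tag for illustration (the x64 tagging scheme differs in detail, but
the trick is the same):

    #include <cstdint>

    // Illustrative one-bit smi tag: low bit clear => smi.
    const intptr_t kSmiTagMask = 1;

    bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }

    // OR first, check once: a tag bit survives the OR iff it was set
    // in either operand; for two smis the OR is already the result.
    intptr_t SmiBitOr(intptr_t left, intptr_t right, bool* not_smis) {
      intptr_t result = left | right;
      *not_smis = !IsSmi(result);
      return result;
    }
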
(...skipping 61 matching lines...)
1185 __ SmiShiftLeft(left, left, right); | 1185 __ SmiShiftLeft(left, left, right); |
1186 __ movq(rax, left); | 1186 __ movq(rax, left); |
1187 break; | 1187 break; |
1188 | 1188 |
1189 case Token::SAR: | 1189 case Token::SAR: |
1190 __ SmiShiftArithmeticRight(left, left, right); | 1190 __ SmiShiftArithmeticRight(left, left, right); |
1191 __ movq(rax, left); | 1191 __ movq(rax, left); |
1192 break; | 1192 break; |
1193 | 1193 |
1194 case Token::SHR: | 1194 case Token::SHR: |
1195 __ SmiShiftLogicalRight(left, left, right, ¬_smis); | 1195 __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); |
1196 __ movq(rax, left); | 1196 __ movq(rax, left); |
1197 break; | 1197 break; |
1198 | 1198 |
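
The retargeted bailout label is the heart of the change: SmiShiftLogicalRight
previously jumped to not_smis when the unsigned result did not fit in a smi,
which forced a full type transition even though both inputs were smis; jumping
to use_fp_on_smis instead lets the stub box the result inline. A rough
behavioral model (the real routine is hand-written assembly; these names are
illustrative):

    #include <cstdint>

    enum Outcome { kSmiResult, kHeapNumberResult /* use_fp_on_smis */ };

    // Behavioral model only: produce a smi, or take the bailout that
    // now leads to inline heap-number allocation rather than a type
    // transition.
    Outcome ShrOnSmis(uint32_t lhs, uint32_t shift, int32_t* smi_out) {
      uint32_t result = lhs >> (shift & 0x1f);  // JS masks shift counts
      if (result > static_cast<uint32_t>(INT32_MAX)) {
        return kHeapNumberResult;
      }
      *smi_out = static_cast<int32_t>(result);
      return kSmiResult;
    }
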
1199 default: | 1199 default: |
1200 UNREACHABLE(); | 1200 UNREACHABLE(); |
1201 } | 1201 } |
1202 | 1202 |
1203 // 5. Emit return of result in rax. Some operations have registers pushed. | 1203 // 5. Emit return of result in rax. Some operations have registers pushed. |
1204 __ ret(0); | 1204 __ ret(0); |
1205 | 1205 |
1206 // 6. For some operations emit inline code to perform floating point | 1206 if (use_fp_on_smis.is_linked()) { |
1207 // operations on known smis (e.g., if the result of the operation | 1207 // 6. For some operations emit inline code to perform floating point |
1208 // overflowed the smi range). | 1208 // operations on known smis (e.g., if the result of the operation |
1209 __ bind(&use_fp_on_smis); | 1209 // overflowed the smi range). |
1210 if (op_ == Token::DIV || op_ == Token::MOD) { | 1210 __ bind(&use_fp_on_smis); |
1211 // Restore left and right to rdx and rax. | 1211 if (op_ == Token::DIV || op_ == Token::MOD) { |
1212 __ movq(rdx, rcx); | 1212 // Restore left and right to rdx and rax. |
1213 __ movq(rax, rbx); | 1213 __ movq(rdx, rcx); |
1214 } | 1214 __ movq(rax, rbx); |
| 1215 } |
1215 | 1216 |
1216 | 1217 if (generate_inline_heapnumber_results) { |
1217 if (generate_inline_heapnumber_results) { | 1218 __ AllocateHeapNumber(rcx, rbx, slow); |
1218 __ AllocateHeapNumber(rcx, rbx, slow); | 1219 Comment perform_float(masm, "-- Perform float operation on smis"); |
1219 Comment perform_float(masm, "-- Perform float operation on smis"); | 1220 if (op_ == Token::SHR) { |
1220 FloatingPointHelper::LoadSSE2SmiOperands(masm); | 1221 __ SmiToInteger32(left, left); |
1221 switch (op_) { | 1222 __ cvtqsi2sd(xmm0, left); |
1222 case Token::ADD: __ addsd(xmm0, xmm1); break; | 1223 } else { |
1223 case Token::SUB: __ subsd(xmm0, xmm1); break; | 1224 FloatingPointHelper::LoadSSE2SmiOperands(masm); |
1224 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 1225 switch (op_) { |
1225 case Token::DIV: __ divsd(xmm0, xmm1); break; | 1226 case Token::ADD: __ addsd(xmm0, xmm1); break; |
1226 default: UNREACHABLE(); | 1227 case Token::SUB: __ subsd(xmm0, xmm1); break; |
| 1228 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| 1229 case Token::DIV: __ divsd(xmm0, xmm1); break; |
| 1230 default: UNREACHABLE(); |
| 1231 } |
| 1232 } |
| 1233 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
| 1234 __ movq(rax, rcx); |
| 1235 __ ret(0); |
1227 } | 1236 } |
1228 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); | |
1229 __ movq(rax, rcx); | |
1230 __ ret(0); | |
1231 } | 1237 } |
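
In the SHR branch the value to box is not a pair of smi operands to load into
xmm0/xmm1; it is the single (too-large) unsigned 32-bit result, so the new code
untags it with SmiToInteger32 and converts with cvtqsi2sd, the 64-bit-source
convert. Because the 32-bit value sits zero-extended in a 64-bit register, its
signed 64-bit interpretation is non-negative and equal to the unsigned value,
so the conversion is exact. A sketch of that exactness argument:

    #include <cstdint>
    #include <cstdio>

    // cvtqsi2sd converts a *signed 64-bit* integer to double. A uint32
    // zero-extended to 64 bits is non-negative as int64_t, so the
    // conversion is exact for every possible >>> result.
    double BoxShrResult(uint32_t shr_result) {
      int64_t widened = static_cast<int64_t>(shr_result);  // zero-extend
      return static_cast<double>(widened);                 // ~ cvtqsi2sd
    }

    int main() {
      printf("%.0f\n", BoxShrResult(0xffffffffu));  // prints 4294967295
      return 0;
    }
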
1232 | 1238 |
1233 // 7. Non-smi operands reach the end of the code generated by | 1239 // 7. Non-smi operands reach the end of the code generated by |
1234 // GenerateSmiCode, and fall through to subsequent code, | 1240 // GenerateSmiCode, and fall through to subsequent code, |
1235 // with the operands in rdx and rax. | 1241 // with the operands in rdx and rax. |
1236 Comment done_comment(masm, "-- Enter non-smi code"); | 1242 Comment done_comment(masm, "-- Enter non-smi code"); |
1237 __ bind(¬_smis); | 1243 __ bind(¬_smis); |
1238 if (op_ == Token::BIT_OR) { | 1244 if (op_ == Token::BIT_OR) { |
1239 __ movq(right, rcx); | 1245 __ movq(right, rcx); |
1240 } | 1246 } |
(...skipping 189 matching lines...)
1430 } else { | 1436 } else { |
1431 // Allow heap number result and don't make a transition if a heap number | 1437 // Allow heap number result and don't make a transition if a heap number |
1432 // cannot be allocated. | 1438 // cannot be allocated. |
1433 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 1439 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
1434 } | 1440 } |
1435 | 1441 |
1436 // Code falls through if the result is not returned as either a smi or heap | 1442 // Code falls through if the result is not returned as either a smi or heap |
1437 // number. | 1443 // number. |
1438 GenerateTypeTransition(masm); | 1444 GenerateTypeTransition(masm); |
1439 | 1445 |
1440 __ bind(&call_runtime); | 1446 if (call_runtime.is_linked()) { |
1441 GenerateCallRuntimeCode(masm); | 1447 __ bind(&call_runtime); |
| 1448 GenerateCallRuntimeCode(masm); |
| 1449 } |
1442 } | 1450 } |
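
Wrapping the call_runtime tail (and, above, the use_fp_on_smis block) in
is_linked() checks means those sections are only emitted when some earlier
branch actually references the label; in configurations where the labels go
unused, emitting the unreachable stub code would just waste space. A
simplified model of the guard (V8's real assembler Label tracks more state
than this):

    // Simplified model: a Label is "linked" once any branch targets it.
    class Label {
     public:
      void RecordUse() { linked_ = true; }      // a jump references us
      bool is_linked() const { return linked_; }
     private:
      bool linked_ = false;
    };

    // Pattern from the change: only emit a code section if something
    // can actually reach it, e.g.
    //   if (call_runtime.is_linked()) {
    //     __ bind(&call_runtime);
    //     GenerateCallRuntimeCode(masm);
    //   }
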
1443 | 1451 |
1444 | 1452 |
1445 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { | 1453 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { |
1446 ASSERT(operands_type_ == TRBinaryOpIC::STRING); | 1454 ASSERT(operands_type_ == TRBinaryOpIC::STRING); |
1447 ASSERT(op_ == Token::ADD); | 1455 ASSERT(op_ == Token::ADD); |
1448 GenerateStringAddCode(masm); | 1456 GenerateStringAddCode(masm); |
1449 // Try to add arguments as strings, otherwise, transition to the generic | 1457 // Try to add arguments as strings, otherwise, transition to the generic |
1450 // TRBinaryOpIC type. | 1458 // TRBinaryOpIC type. |
1451 GenerateTypeTransition(masm); | 1459 GenerateTypeTransition(masm); |
(...skipping 3682 matching lines...)
5134 // Do a tail call to the rewritten stub. | 5142 // Do a tail call to the rewritten stub. |
5135 __ jmp(rdi); | 5143 __ jmp(rdi); |
5136 } | 5144 } |
5137 | 5145 |
5138 | 5146 |
5139 #undef __ | 5147 #undef __ |
5140 | 5148 |
5141 } } // namespace v8::internal | 5149 } } // namespace v8::internal |
5142 | 5150 |
5143 #endif // V8_TARGET_ARCH_X64 | 5151 #endif // V8_TARGET_ARCH_X64 |