Index: src/x64/code-stubs-x64.cc
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 0f633874e6750a33411dd3449e9ed03d01f28e26..222c1cc64eff5daa34ce8b11decc54025b230aa8 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1110,7 +1110,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
   bool generate_inline_heapnumber_results =
       (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
       (op_ == Token::ADD || op_ == Token::SUB ||
-       op_ == Token::MUL || op_ == Token::DIV);
+       op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
 
   // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
   Register left = rdx;
@@ -1192,7 +1192,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
       break;
 
     case Token::SHR:
-      __ SmiShiftLogicalRight(left, left, right, &not_smis);
+      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
       __ movq(rax, left);
       break;
@@ -1203,31 +1203,37 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
   // 5. Emit return of result in rax. Some operations have registers pushed.
   __ ret(0);
 
-  // 6. For some operations emit inline code to perform floating point
-  // operations on known smis (e.g., if the result of the operation
-  // overflowed the smi range).
-  __ bind(&use_fp_on_smis);
-  if (op_ == Token::DIV || op_ == Token::MOD) {
-    // Restore left and right to rdx and rax.
-    __ movq(rdx, rcx);
-    __ movq(rax, rbx);
-  }
-
+  if (use_fp_on_smis.is_linked()) {
+    // 6. For some operations emit inline code to perform floating point
+    // operations on known smis (e.g., if the result of the operation
+    // overflowed the smi range).
+    __ bind(&use_fp_on_smis);
+    if (op_ == Token::DIV || op_ == Token::MOD) {
+      // Restore left and right to rdx and rax.
+      __ movq(rdx, rcx);
+      __ movq(rax, rbx);
+    }
-  if (generate_inline_heapnumber_results) {
-    __ AllocateHeapNumber(rcx, rbx, slow);
-    Comment perform_float(masm, "-- Perform float operation on smis");
-    FloatingPointHelper::LoadSSE2SmiOperands(masm);
-    switch (op_) {
-      case Token::ADD: __ addsd(xmm0, xmm1); break;
-      case Token::SUB: __ subsd(xmm0, xmm1); break;
-      case Token::MUL: __ mulsd(xmm0, xmm1); break;
-      case Token::DIV: __ divsd(xmm0, xmm1); break;
-      default: UNREACHABLE();
+    if (generate_inline_heapnumber_results) {
+      __ AllocateHeapNumber(rcx, rbx, slow);
+      Comment perform_float(masm, "-- Perform float operation on smis");
+      if (op_ == Token::SHR) {
+        __ SmiToInteger32(left, left);
+        __ cvtqsi2sd(xmm0, left);
+      } else {
+        FloatingPointHelper::LoadSSE2SmiOperands(masm);
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+      }
+      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+      __ movq(rax, rcx);
+      __ ret(0);
     }
-    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-    __ movq(rax, rcx);
-    __ ret(0);
   }
 
   // 7. Non-smi operands reach the end of the code generated by
@@ -1437,8 +1443,10 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
   // number.
   GenerateTypeTransition(masm);
 
-  __ bind(&call_runtime);
-  GenerateCallRuntimeCode(masm);
+  if (call_runtime.is_linked()) {
+    __ bind(&call_runtime);
+    GenerateCallRuntimeCode(masm);
+  }
 }
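
Note on the SHR hunks above: a JavaScript logical shift right ( >>> ) yields an unsigned 32-bit value, which can exceed the smi range even when both operands are smis. With this patch, SmiShiftLogicalRight bails out to use_fp_on_smis instead of jumping straight to the generic non-smi path, and the stub boxes the result in a freshly allocated HeapNumber. The standalone C++ sketch below only illustrates that value-range reasoning; it is not V8 code, and the file and variable names are made up for the example.

    // shr_overflow_sketch.cc - why Token::SHR can need a HeapNumber result.
    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t smi_operand = -1;  // both operands of the JS expression -1 >>> 0 fit in smis
      uint32_t shifted = static_cast<uint32_t>(smi_operand) >> 0;
      // 4294967295 is outside the signed smi range, so the stub cannot return
      // it as a smi; the patch produces a heap number inline instead of
      // falling back to the runtime.
      std::printf("unsigned result: %u\n", static_cast<unsigned>(shifted));
      // Conceptually what SmiToInteger32 + cvtqsi2sd compute: the zero-extended
      // 32-bit result converted to a double before being stored in the number.
      double boxed = static_cast<double>(shifted);
      std::printf("as double:       %.1f\n", boxed);
      return 0;
    }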