Index: src/x64/code-stubs-x64.cc
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 2098ed12f5de2684889d8187c61624903989a86d..09c4609f1752b91ed2eb9e747f0c55b30853c53d 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1105,29 +1105,28 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
+ Register left = rdx;
+ Register right = rax;
+
// We only generate heapnumber answers for overflowing calculations
- // for the four basic arithmetic operations.
+ // for the four basic arithmetic operations and logical right shift by 0.
bool generate_inline_heapnumber_results =
(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
(op_ == Token::ADD || op_ == Token::SUB ||
op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
- // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
- Register left = rdx;
- Register right = rax;
-
-
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
Label not_smis;
Label use_fp_on_smis;
- Label restore_MOD_registers; // Only used if op_ == Token::MOD.
+ Label fail;
- if (op_ != Token::BIT_OR) {
- Comment smi_check_comment(masm, "-- Smi check arguments");
- __ JumpIfNotBothSmi(left, right, &not_smis);
- }
+ Comment smi_check_comment(masm, "-- Smi check arguments");
+ __ JumpIfNotBothSmi(left, right, &not_smis);
+ Label smi_values;
+ __ bind(&smi_values);
// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
switch (op_) {
@@ -1166,9 +1165,7 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
case Token::BIT_OR: {
ASSERT(right.is(rax));
- __ movq(rcx, right); // Save the right operand.
__ SmiOr(right, right, left); // BIT_OR is commutative.
[Review comment] William Hesse, 2011/04/08 09:27:58:
Change to SmiOrAndJumpIfNotBothSmi(right, right, l
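For context on why the smi check can be delayed until after the OR (and on the combined SmiOr-and-check helper suggested above): under V8's pointer tagging a heap object keeps its low tag bit set, so OR-ing the two operands preserves that bit and a single tag test on the result detects a non-smi on either side. A minimal illustrative sketch in plain C++, assuming the usual low-bit tag layout rather than quoting any V8 API:

  #include <cstdint>

  // Sketch only: smis have tag bit 0 == 0, heap objects == 1 (assumed layout).
  bool BothOperandsAreSmis(uint64_t left_tagged, uint64_t right_tagged) {
    const uint64_t kTagMask = 1;
    // OR keeps a set tag bit from either operand, so one test covers both.
    return ((left_tagged | right_tagged) & kTagMask) == 0;
  }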
- __ JumpIfNotSmi(right, &not_smis); // Test delayed until after BIT_OR.
break;
}
case Token::BIT_XOR:
@@ -1233,17 +1230,57 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rcx);
__ ret(0);
+ } else {
+ __ jmp(&fail);
}
}
// 7. Non-smi operands reach the end of the code generated by
// GenerateSmiCode, and fall through to subsequent code,
// with the operands in rdx and rax.
+ // But first we check if non-smi values are HeapNumbers holding
+ // values that could be smi.
Comment done_comment(masm, "-- Enter non-smi code");
__ bind(&not_smis);
- if (op_ == Token::BIT_OR) {
- __ movq(right, rcx);
+ { // See if there were smi values stored in HeapNumbers.
[Review comment] William Hesse, 2011/04/08 09:27:58:
I would use LoadAsIntegers (really LoadUnknownsAsI

[Review comment] Lasse Reichstein, 2011/04/08 11:18:10:
LoadNumberAsIntegers will do truncating conversion
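Lasse's distinction matters for the block that follows: a truncating conversion (e.g. 2.5 becoming 2) would silently change the value, while this code only wants to fold a HeapNumber back into a smi when its double is exactly a small integer. A hedged plain-C++ sketch of that round-trip check; the function name and the explicit range guard are illustrative additions, since the generated code instead relies on cvttsd2siq's behavior for out-of-range inputs:

  #include <cstdint>
  #include <cstring>

  // Sketch only: true iff `value` is exactly a 32-bit integer (and not -0.0),
  // mirroring the truncate / convert-back / compare-bits sequence in the patch.
  bool DoubleIsExactInt32(double value, int32_t* out) {
    // Plain C++ needs a range guard to avoid undefined behavior on the cast.
    if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
    int32_t truncated = static_cast<int32_t>(value);      // cvttsd2siq
    double round_trip = static_cast<double>(truncated);   // cvtlsi2sd
    uint64_t original_bits, round_trip_bits;
    std::memcpy(&original_bits, &value, sizeof(value));
    std::memcpy(&round_trip_bits, &round_trip, sizeof(round_trip));
    if (original_bits != round_trip_bits) return false;   // rejects 2.5, -0.0
    *out = truncated;
    return true;
  }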
+ __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+ NearLabel left_smi, check_right;
+ __ JumpIfSmi(left, &left_smi);
+ __ cmpq(FieldOperand(left, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, &fail);
+ // Convert HeapNumber to smi if possible.
+ __ movsd(xmm0, FieldOperand(left, HeapNumber::kValueOffset));
+ __ movq(rbx, xmm0);
+ __ cvttsd2siq(rdi, xmm0);
+ // Check if conversion was successful by converting back and
+ // comparing to the original double's bits.
+ __ cvtlsi2sd(xmm1, rdi);
+ __ movq(kScratchRegister, xmm1);
+ __ cmpq(rbx, kScratchRegister);
+ __ j(not_equal, &fail);
+ __ Integer32ToSmi(left, rdi);
+
+ __ bind(&check_right);
+ __ JumpIfSmi(right, &smi_values);
+ __ bind(&left_smi);
+ if (FLAG_debug_code) {
+ // One of left or right should be non-smi if we get here.
+ __ AbortIfSmi(right);
+ }
+ __ cmpq(FieldOperand(right, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, &fail);
+ // Convert right to smi, if possible.
+ __ movsd(xmm0, FieldOperand(right, HeapNumber::kValueOffset));
+ __ movq(rbx, xmm0);
+ __ cvttsd2siq(rdi, xmm0);
+ __ cvtlsi2sd(xmm1, rdi);
+ __ movq(kScratchRegister, xmm1);
+ __ cmpq(rbx, kScratchRegister);
+ __ j(not_equal, &fail);
+ __ Integer32ToSmi(right, rdi);
+ __ jmp(&smi_values);
}
+ __ bind(&fail);
}
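The conversion paths above finish with Integer32ToSmi, which re-tags the 32-bit integer as a smi before jumping back to smi_values. As a reminder of the x64 smi layout this leans on (an assumption about the V8 version under review, not something spelled out in the patch): the payload sits in the upper 32 bits and the lower 32 bits, including the tag bit, are zero. A minimal sketch of that encoding in plain C++:

  #include <cstdint>

  // Sketch of the assumed x64 smi encoding: payload << 32, tag bits zero.
  uint64_t Integer32ToSmiBits(int32_t value) {
    return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
  }

  int32_t SmiBitsToInteger32(uint64_t smi_bits) {
    return static_cast<int32_t>(smi_bits >> 32);
  }

  bool IsSmiBits(uint64_t word) {
    return (word & 1) == 0;  // heap objects have the low tag bit set
  }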