Index: src/x64/code-stubs-x64.cc |
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc |
index 76479d3e2ebec5dc63fa69a001f818fae339b7fc..c90a2ee1626e48a02d41bcab129761b9ed4bd30d 100644 |
--- a/src/x64/code-stubs-x64.cc |
+++ b/src/x64/code-stubs-x64.cc |
@@ -721,13 +721,12 @@ static void BinaryOpStub_GenerateSmiCode( |
// Arguments to BinaryOpStub are in rdx and rax. |
const Register left = rdx; |
const Register right = rax; |
+ const Register shift_op_result = r9; |
- // We only generate heapnumber answers for overflowing calculations |
- // for the four basic arithmetic operations and logical right shift by 0. |
+ // We only generate heapnumber answers for overflowing calculations. |
bool generate_inline_heapnumber_results = |
(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && |
- (op == Token::ADD || op == Token::SUB || |
- op == Token::MUL || op == Token::DIV || op == Token::SHR); |
+ MacroAssembler::IsUnsafeSmiOperator(op); |
// Smi check of both operands. If op is BIT_OR, the check is delayed |
// until after the OR operation. |
@@ -790,18 +789,18 @@ static void BinaryOpStub_GenerateSmiCode( |
break; |
case Token::SHL: |
- __ SmiShiftLeft(left, left, right); |
- __ movq(rax, left); |
+ __ SmiShiftLeft(shift_op_result, left, right, &use_fp_on_smis); |
+ __ movq(rax, shift_op_result); |
break; |
case Token::SAR: |
- __ SmiShiftArithmeticRight(left, left, right); |
- __ movq(rax, left); |
+ __ SmiShiftArithmeticRight(shift_op_result, left, right); |
+ __ movq(rax, shift_op_result); |
break; |
case Token::SHR: |
- __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); |
- __ movq(rax, left); |
+ __ SmiShiftLogicalRight(shift_op_result, left, right, &use_fp_on_smis); |
+ __ movq(rax, shift_op_result); |
break; |
default: |
@@ -826,16 +825,18 @@ static void BinaryOpStub_GenerateSmiCode( |
__ AllocateHeapNumber(rcx, rbx, slow); |
Comment perform_float(masm, "-- Perform float operation on smis"); |
if (op == Token::SHR) { |
- __ SmiToInteger32(left, left); |
- __ cvtqsi2sd(xmm0, left); |
+ __ cvtqsi2sd(xmm0, shift_op_result); |
+ } else if (op == Token::SHL) { |
+ ASSERT(kSmiValueSize == 31); |
+ __ cvtlsi2sd(xmm0, shift_op_result); |
} else { |
FloatingPointHelper::LoadSSE2SmiOperands(masm); |
switch (op) { |
- case Token::ADD: __ addsd(xmm0, xmm1); break; |
- case Token::SUB: __ subsd(xmm0, xmm1); break; |
- case Token::MUL: __ mulsd(xmm0, xmm1); break; |
- case Token::DIV: __ divsd(xmm0, xmm1); break; |
- default: UNREACHABLE(); |
+ case Token::ADD: __ addsd(xmm0, xmm1); break; |
+ case Token::SUB: __ subsd(xmm0, xmm1); break; |
+ case Token::MUL: __ mulsd(xmm0, xmm1); break; |
+ case Token::DIV: __ divsd(xmm0, xmm1); break; |
+ default: UNREACHABLE(); |
} |
} |
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
@@ -876,11 +877,14 @@ static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
OverwriteMode mode); |
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
- Label* allocation_failure, |
- Label* non_numeric_failure, |
- Token::Value op, |
- OverwriteMode mode) { |
+static void BinaryOpStub_GenerateFloatingPointCode( |
+ MacroAssembler* masm, |
+ Label* allocation_failure, |
+ Label* non_numeric_failure, |
+ Token::Value op, |
+ BinaryOpIC::TypeInfo result_type, |
+ Label* non_int32_failure, |
+ OverwriteMode mode) { |
switch (op) { |
case Token::ADD: |
case Token::SUB: |
@@ -895,6 +899,18 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
case Token::DIV: __ divsd(xmm0, xmm1); break; |
default: UNREACHABLE(); |
} |
+ |
+ if (kSmiValueSize == 31 && non_int32_failure != NULL) { |
+ if (result_type <= BinaryOpIC::INT32) { |
danno
2013/08/07 18:41:26
Why is UNINITIALIZED also included? It's much clearer |
haitao.feng
2013/08/12 09:54:24
Actually this code was taken from https://chromium
|
+ __ cvttsd2si(kScratchRegister, xmm0); |
danno
2013/08/07 18:41:26
Don't you need to check that bits 30 and 31 of scratch |
haitao.feng
2013/08/12 09:54:24
The test here is for Int32, instead of SMI.
|
+ __ cvtlsi2sd(xmm2, kScratchRegister); |
+ __ pcmpeqd(xmm2, xmm0); |
+ __ movmskpd(rcx, xmm2); |
+ __ testl(rcx, Immediate(1)); |
+ __ j(zero, non_int32_failure); |
danno
2013/08/07 18:41:26
Again, why is this non_int32_failure and not non_i
haitao.feng
2013/08/12 09:54:24
The main logic is at https://code.google.com/p/v8/
|
+ } |
+ } |
+ |
BinaryOpStub_GenerateHeapResultAllocation( |
masm, allocation_failure, mode); |
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
@@ -912,8 +928,12 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
case Token::SAR: |
case Token::SHL: |
case Token::SHR: { |
- Label non_smi_shr_result; |
+ Label non_smi_result; |
Register heap_number_map = r9; |
+ if (kSmiValueSize == 31 || (kSmiValueSize == 32 && op == Token::SHR)) { |
+ // Save rax in r11, rdx is un-modified below. |
+ __ movq(r11, rax); |
danno
2013/08/07 18:41:26
Can you give r11 an alias, like saved_right?
haitao.feng
2013/08/12 09:54:24
Done.
|
+ } |
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
heap_number_map); |
@@ -923,48 +943,96 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
case Token::BIT_XOR: __ xorl(rax, rcx); break; |
case Token::SAR: __ sarl_cl(rax); break; |
case Token::SHL: __ shll_cl(rax); break; |
- case Token::SHR: { |
- __ shrl_cl(rax); |
- // Check if result is negative. This can only happen for a shift |
- // by zero. |
- __ testl(rax, rax); |
- __ j(negative, &non_smi_shr_result); |
- break; |
- } |
+ case Token::SHR: __ shrl_cl(rax); break; |
default: UNREACHABLE(); |
} |
- STATIC_ASSERT(kSmiValueSize == 32); |
+ |
+ if (op == Token::SHR) { |
+ __ JumpIfUIntNotValidSmiValue(rax, &non_smi_result, Label::kNear); |
+ } else { |
+ if (kSmiValueSize == 31) { |
+ __ JumpIfNotValidSmiValue(rax, &non_smi_result, Label::kNear); |
+ } |
+ } |
+ |
// Tag smi result and return. |
__ Integer32ToSmi(rax, rax); |
__ Ret(); |
- // Logical shift right can produce an unsigned int32 that is not |
- // an int32, and so is not in the smi range. Allocate a heap number |
- // in that case. |
- if (op == Token::SHR) { |
- __ bind(&non_smi_shr_result); |
+ if (kSmiValueSize == 31 || (kSmiValueSize == 32 && op == Token::SHR)) { |
+ __ bind(&non_smi_result); |
+ __ movl(rbx, rax); // rbx holds result value. |
Label allocation_failed; |
- __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
- // Allocate heap number in new space. |
- // Not using AllocateHeapNumber macro in order to reuse |
- // already loaded heap_number_map. |
- __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, |
- TAG_OBJECT); |
- // Set the map. |
- __ AssertRootValue(heap_number_map, |
- Heap::kHeapNumberMapRootIndex, |
- kHeapNumberMapRegisterClobbered); |
- __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
- heap_number_map); |
- __ cvtqsi2sd(xmm0, rbx); |
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
- __ Ret(); |
+ if (kSmiValueSize == 32) { |
+ ASSERT(op == Token::SHR); |
danno
2013/08/07 18:41:26
It looks like all four below are identical ( |
haitao.feng
2013/08/12 09:54:24
Done.
|
+ // Allocate heap number in new space. |
+ // Not using AllocateHeapNumber macro in order to reuse |
+ // already loaded heap_number_map. |
+ __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg, |
+ &allocation_failed, TAG_OBJECT); |
+ // Set the map. |
+ __ AssertRootValue(heap_number_map, |
+ Heap::kHeapNumberMapRootIndex, |
+ kHeapNumberMapRegisterClobbered); |
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); |
+ // Logical shift right can produce an unsigned int32 that is not |
+ // an int32, and so is not in the smi range. |
+ __ cvtqsi2sd(xmm0, rbx); |
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
+ __ Ret(); |
+ } else { |
+ ASSERT(kSmiValueSize == 31); |
+ Label skip_allocation; |
+ switch (mode) { |
+ case OVERWRITE_LEFT: { |
+ __ movq(rax, rdx); |
+ __ JumpIfNotSmi(rax, &skip_allocation); |
+ __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg, |
+ &allocation_failed, TAG_OBJECT); |
+ // Set the map. |
+ __ AssertRootValue(heap_number_map, |
+ Heap::kHeapNumberMapRootIndex, |
+ kHeapNumberMapRegisterClobbered); |
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
+ heap_number_map); |
+ __ bind(&skip_allocation); |
+ break; |
+ } |
+ case OVERWRITE_RIGHT: |
+ __ movq(rax, r11); |
+ __ JumpIfNotSmi(rax, &skip_allocation); |
+ // Fall through! |
+ case NO_OVERWRITE: |
+ __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg, |
+ &allocation_failed, TAG_OBJECT); |
+ // Set the map. |
+ __ AssertRootValue(heap_number_map, |
+ Heap::kHeapNumberMapRootIndex, |
+ kHeapNumberMapRegisterClobbered); |
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
+ heap_number_map); |
+ __ bind(&skip_allocation); |
+ break; |
+ default: UNREACHABLE(); |
+ } |
+ |
+ if (op == Token::SHR) { |
+ // Logical shift right can produce an unsigned int32 that is not |
+ // an int32, and so is not in the smi range. |
+ __ cvtqsi2sd(xmm0, rbx); |
+ } else { |
+ // All other operations returns a signed int32, so we |
+ // use lsi2sd here to retain the sign bit. |
+ __ cvtlsi2sd(xmm0, rbx); |
+ } |
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
+ __ Ret(); |
+ } |
__ bind(&allocation_failed); |
// We need tagged values in rdx and rax for the following code, |
- // not int32 in rax and rcx. |
- __ Integer32ToSmi(rax, rcx); |
- __ Integer32ToSmi(rdx, rbx); |
+ // rdx is un-changed and rax is saved in r11. |
+ __ movq(rax, r11); |
__ jmp(allocation_failure); |
} |
break; |
@@ -1061,9 +1129,30 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
- // The int32 case is identical to the Smi case. We avoid creating this |
- // ic state on x64. |
- UNREACHABLE(); |
+ if (kSmiValueSize == 32) { |
danno
2013/08/07 18:41:26
Remove the if and the "then" part of this code, ju
haitao.feng
2013/08/12 09:54:24
Done.
|
+ // The int32 case is identical to the Smi case. We avoid creating this |
+ // ic state on x64. |
+ UNREACHABLE(); |
+ } else { |
+ ASSERT(kSmiValueSize == 31); |
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
+ |
+ Label gc_required, not_number, not_int32; |
+ BinaryOpStub_GenerateFloatingPointCode(masm, &gc_required, ¬_number, op_, |
+ result_type_, ¬_int32, mode_); |
+ |
+ __ bind(¬_number); |
+ __ bind(¬_int32); |
+ GenerateTypeTransition(masm); |
+ |
+ __ bind(&gc_required); |
+ { |
+ FrameScope scope(masm, StackFrame::INTERNAL); |
+ GenerateRegisterArgsPush(masm); |
+ GenerateCallRuntime(masm); |
+ } |
+ __ Ret(); |
+ } |
} |
@@ -1169,7 +1258,7 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
} |
BinaryOpStub_GenerateFloatingPointCode( |
- masm, &gc_required, ¬_number, op_, mode_); |
+ masm, &gc_required, ¬_number, op_, result_type_, NULL, mode_); |
__ bind(¬_number); |
GenerateTypeTransition(masm); |
@@ -1191,7 +1280,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
BinaryOpStub_GenerateFloatingPointCode( |
- masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); |
+ masm, &call_runtime, &call_string_add_or_runtime, op_, |
+ result_type_, NULL, mode_); |
__ bind(&call_string_add_or_runtime); |
if (op_ == Token::ADD) { |
@@ -1742,6 +1832,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, |
__ movq(kScratchRegister, xmm1); |
__ cmpq(scratch2, kScratchRegister); |
__ j(not_equal, on_not_smis); |
+ __ JumpIfNotValidSmiValue(smi_result, on_not_smis); |
__ Integer32ToSmi(first, smi_result); |
__ bind(&first_done); |
@@ -1761,6 +1852,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, |
__ movq(kScratchRegister, xmm1); |
__ cmpq(scratch2, kScratchRegister); |
__ j(not_equal, on_not_smis); |
+ __ JumpIfNotValidSmiValue(smi_result, on_not_smis); |
__ Integer32ToSmi(second, smi_result); |
if (on_success != NULL) { |
__ jmp(on_success); |
@@ -4569,8 +4661,14 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
__ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); |
// Look at the length of the result of adding the two strings. |
- STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); |
- __ SmiAdd(rbx, rbx, rcx); |
+ if (kSmiValueSize == 32) { |
+ ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); |
+ __ SmiAdd(rbx, rbx, rcx); |
+ } else { |
+ ASSERT(kSmiValueSize == 31); |
+ __ SmiAdd(rbx, rbx, rcx, &call_runtime); |
danno
2013/08/07 18:41:26
I still think it's OK to use the overflow-checking
haitao.feng
2013/08/12 09:54:24
Done.
|
+ } |
+ |
// Use the string table when adding two one character strings, as it |
// helps later optimizations to return an internalized string here. |
__ SmiCompare(rbx, Smi::FromInt(2)); |