Chromium Code Reviews
| Index: src/x64/code-stubs-x64.cc |
| diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc |
| index 551a71690e7a45400ec4d078ef7fcf4e89e39f0b..988d19ec0a331eb845782134e9e862567203b446 100644 |
| --- a/src/x64/code-stubs-x64.cc |
| +++ b/src/x64/code-stubs-x64.cc |
| @@ -724,10 +724,13 @@ static void BinaryOpStub_GenerateSmiCode( |
| const Register right = rax; |
| // We only generate heapnumber answers for overflowing calculations |
| - // for the four basic arithmetic operations and logical right shift by 0. |
| + // for the four basic arithmetic operations and logical right shift by 0 |
| + // for 32-bit SMI values; for 31-bit SMI values, we additionally allow SHL |
| + // and logical right shift by 1. |
| bool generate_inline_heapnumber_results = |
| (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && |
| (op == Token::ADD || op == Token::SUB || |
| + (kSmiValueSize == 31 && op == Token::SHL) || |
|
danno
2013/08/01 16:45:41
I think you should turn this into a predicate:
S
haitao.feng
2013/08/02 09:35:51
Done. Add IsUnsafeSmiOperator function in the macr
|
| op == Token::MUL || op == Token::DIV || op == Token::SHR); |
| // Smi check of both operands. If op is BIT_OR, the check is delayed |
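A minimal sketch of the predicate suggested in the thread above; the helper name and its exact placement are assumptions (the reply only says an IsUnsafeSmiOperator-style function was added to the macro assembler):

    // Returns true if the operation may produce a non-smi (heap number) result
    // even when both inputs are smis. With 31-bit smis, SHL and a logical right
    // shift can also leave the smi range, so they are included as well.
    static bool OperationMayOverflowSmi(Token::Value op) {
      return op == Token::ADD || op == Token::SUB ||
             op == Token::MUL || op == Token::DIV ||
             op == Token::SHR ||
             (kSmiValueSize == 31 && op == Token::SHL);
    }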
| @@ -791,8 +794,15 @@ static void BinaryOpStub_GenerateSmiCode( |
| break; |
| case Token::SHL: |
| - __ SmiShiftLeft(left, left, right); |
| + if (kSmiValueSize == 31) { |
|
danno
2013/08/01 16:45:41
Why can't you put this extra stack manipulation in
haitao.feng
2013/08/02 09:35:51
Done.
|
| + __ push(left); |
| + __ push(right); |
| + } |
| + __ SmiShiftLeft(left, left, right, &use_fp_on_smis); |
| __ movq(rax, left); |
| + if (kSmiValueSize == 31) { |
| + __ addq(rsp, Immediate(2 * kRegisterSize)); |
| + } |
| break; |
| case Token::SAR: |
| @@ -801,8 +811,15 @@ static void BinaryOpStub_GenerateSmiCode( |
| break; |
| case Token::SHR: |
| + if (kSmiValueSize == 31) { |
|
danno
2013/08/01 16:45:41
Same here.
haitao.feng
2013/08/02 09:35:51
Done.
|
| + __ push(left); |
| + __ push(right); |
| + } |
| __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); |
| __ movq(rax, left); |
| + if (kSmiValueSize == 31) { |
| + __ addq(rsp, Immediate(2 * kRegisterSize)); |
| + } |
| break; |
| default: |
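For reference, a C-level sketch of the smi ranges behind the shift cases above (these are the standard ranges implied by kSmiValueSize, not values taken from this patch): with 31-bit smis a left shift such as 1 << 30, or a logical right shift by 0 or 1 of a large value, can leave the valid range and must fall through to use_fp_on_smis.

    #include <cstdint>

    // Valid signed range for a 31-bit smi payload: [-2^30, 2^30 - 1].
    static const int32_t kSmi31Min = -(1 << 30);
    static const int32_t kSmi31Max = (1 << 30) - 1;

    static bool Int32FitsInSmi31(int32_t value) {
      return value >= kSmi31Min && value <= kSmi31Max;
    }

    static bool Uint32FitsInSmi31(uint32_t value) {
      // The result of a logical right shift is unsigned; it only fits if it is
      // also a valid non-negative 31-bit smi.
      return value <= static_cast<uint32_t>(kSmi31Max);
    }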
| @@ -824,25 +841,70 @@ static void BinaryOpStub_GenerateSmiCode( |
| } |
| if (generate_inline_heapnumber_results) { |
| - __ AllocateHeapNumber(rcx, rbx, slow); |
| - Comment perform_float(masm, "-- Perform float operation on smis"); |
| - if (op == Token::SHR) { |
| - __ SmiToInteger32(left, left); |
| - __ cvtqsi2sd(xmm0, left); |
| + if (kSmiValueSize == 32) { |
| + __ AllocateHeapNumber(rcx, rbx, slow); |
| + Comment perform_float(masm, "-- Perform float operation on smis"); |
| + if (op == Token::SHR) { |
| + __ SmiToInteger32(left, left); |
| + __ cvtqsi2sd(xmm0, left); |
| + } else { |
| + FloatingPointHelper::LoadSSE2SmiOperands(masm); |
| + switch (op) { |
| + case Token::ADD: __ addsd(xmm0, xmm1); break; |
| + case Token::SUB: __ subsd(xmm0, xmm1); break; |
| + case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| + case Token::DIV: __ divsd(xmm0, xmm1); break; |
| + default: UNREACHABLE(); |
| + } |
| + } |
| + __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
| + __ movq(rax, rcx); |
| + __ ret(0); |
| } else { |
| - FloatingPointHelper::LoadSSE2SmiOperands(masm); |
| - switch (op) { |
| - case Token::ADD: __ addsd(xmm0, xmm1); break; |
| - case Token::SUB: __ subsd(xmm0, xmm1); break; |
| - case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| - case Token::DIV: __ divsd(xmm0, xmm1); break; |
| - default: UNREACHABLE(); |
| + ASSERT(kSmiValueSize == 31); |
| + Label goto_slow; |
| + __ AllocateHeapNumber(rcx, rbx, &goto_slow); |
| + Comment perform_float(masm, "-- Perform float operation on smis"); |
| + if (op == Token::SHL) { |
| + __ cvtlsi2sd(xmm0, left); |
| + // Drop the saved left and right from the stack. |
| + __ addq(rsp, Immediate(2 * kRegisterSize)); |
| + } else if (op == Token::SHR) { |
| + // The value in left comes from MacroAssembler::SmiShiftLogicalRight. |
| + // We allow the following logical shift amounts: |
| + // 0 : may turn a signed integer into an unsigned integer |
| + // 1 : the result may be above 2^30 - 1 |
| + __ cvtqsi2sd(xmm0, left); |
| + // Drop the saved left and right from the stack. |
| + __ addq(rsp, Immediate(2 * kRegisterSize)); |
| + } else { |
| + FloatingPointHelper::LoadSSE2SmiOperands(masm); |
| + switch (op) { |
| + case Token::ADD: __ addsd(xmm0, xmm1); break; |
| + case Token::SUB: __ subsd(xmm0, xmm1); break; |
| + case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| + case Token::DIV: __ divsd(xmm0, xmm1); break; |
| + default: UNREACHABLE(); |
|
danno
2013/08/01 16:45:41
Also, I think you can share much of this code with
haitao.feng
2013/08/02 09:35:51
Done.
|
| + } |
| + } |
| + __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
| + __ movq(rax, rcx); |
| + __ ret(0); |
| + |
| + __ bind(&goto_slow); |
| + if (op == Token::SHL || op == Token::SHR) { |
| + // Restore left and right from stack. |
| + __ pop(right); |
| + __ pop(left); |
| } |
| + __ jmp(slow); |
| } |
| - __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
| - __ movq(rax, rcx); |
| - __ ret(0); |
| } else { |
| + if (kSmiValueSize == 31 && (op == Token::SHL || op == Token::SHR)) { |
| + // Restore left and right from stack. |
| + __ pop(right); |
| + __ pop(left); |
| + } |
| __ jmp(&fail); |
| } |
| } |
| @@ -881,6 +943,9 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
| Label* allocation_failure, |
| Label* non_numeric_failure, |
| Token::Value op, |
| + BinaryOpIC::TypeInfo |
| + result_type, |
| + Label* non_int32_failure, |
| OverwriteMode mode) { |
| switch (op) { |
| case Token::ADD: |
| @@ -896,6 +961,18 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
| case Token::DIV: __ divsd(xmm0, xmm1); break; |
| default: UNREACHABLE(); |
| } |
| + |
| + if (kSmiValueSize == 31 && non_int32_failure != NULL) { |
| + if (result_type <= BinaryOpIC::INT32) { |
| + __ cvttsd2si(kScratchRegister, xmm0); |
| + __ cvtlsi2sd(xmm2, kScratchRegister); |
| + __ pcmpeqd(xmm2, xmm0); |
| + __ movmskpd(rcx, xmm2); |
| + __ testl(rcx, Immediate(1)); |
| + __ j(zero, non_int32_failure); |
| + } |
| + } |
| + |
| BinaryOpStub_GenerateHeapResultAllocation( |
| masm, allocation_failure, mode); |
| __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
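A C-level model of the int32 check emitted above for the kSmiValueSize == 31 case (a sketch only; the casts stand in for the cvttsd2si/cvtlsi2sd instructions used in the stub): the double result is truncated to int32, converted back, and required to round-trip bit-exactly, so -0.0 or anything with a fractional part bails out to non_int32_failure.

    #include <cstdint>
    #include <cstring>

    static bool DoubleIsInt32(double value) {
      int32_t as_int = static_cast<int32_t>(value);        // cvttsd2si
      double round_tripped = static_cast<double>(as_int);  // cvtlsi2sd
      // Models the pcmpeqd/movmskpd raw-bit comparison of the two doubles.
      return std::memcmp(&round_tripped, &value, sizeof(double)) == 0;
    }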
| @@ -913,11 +990,23 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
| case Token::SAR: |
| case Token::SHL: |
| case Token::SHR: { |
| - Label non_smi_shr_result; |
| + Label non_smi_result; |
| + Label goto_non_numeric_failure; |
| Register heap_number_map = r9; |
| + if (kSmiValueSize == 31) { |
| + // Push arguments on the stack. |
| + __ push(rdx); |
| + __ push(rax); |
| + } |
| __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| - FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
| - heap_number_map); |
| + if (kSmiValueSize == 32) { |
| + FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
| + heap_number_map); |
| + } else { |
| + ASSERT(kSmiValueSize == 31); |
| + FloatingPointHelper::LoadAsIntegers(masm, &goto_non_numeric_failure, |
| + heap_number_map); |
| + } |
| switch (op) { |
| case Token::BIT_OR: __ orl(rax, rcx); break; |
| case Token::BIT_AND: __ andl(rax, rcx); break; |
| @@ -926,46 +1015,122 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
| case Token::SHL: __ shll_cl(rax); break; |
| case Token::SHR: { |
| __ shrl_cl(rax); |
| - // Check if result is negative. This can only happen for a shift |
| - // by zero. |
| - __ testl(rax, rax); |
| - __ j(negative, &non_smi_shr_result); |
| + if (kSmiValueSize == 32) { |
| + // Check if result is negative. This can only happen for a shift |
| + // by zero. |
| + __ testl(rax, rax); |
| + __ j(negative, &non_smi_result); |
| + } |
| break; |
| } |
| default: UNREACHABLE(); |
| } |
| - STATIC_ASSERT(kSmiValueSize == 32); |
| + |
| + if (kSmiValueSize == 31) { |
| + if (op == Token::SHR) { |
| + __ JumpIfUIntNotValidSmiValue(rax, &non_smi_result, Label::kNear); |
| + } else { |
| + __ JumpIfNotValidSmiValue(rax, &non_smi_result, Label::kNear); |
| + } |
| + // Drop saved arguments. |
| + __ addq(rsp, Immediate(2 * kRegisterSize)); |
| + } |
| + |
| // Tag smi result and return. |
| __ Integer32ToSmi(rax, rax); |
| __ Ret(); |
| - // Logical shift right can produce an unsigned int32 that is not |
| - // an int32, and so is not in the smi range. Allocate a heap number |
| - // in that case. |
| - if (op == Token::SHR) { |
| - __ bind(&non_smi_shr_result); |
| + if (kSmiValueSize == 31) { |
| + __ bind(&goto_non_numeric_failure); |
| + // Restore arguments. |
| + __ pop(rax); |
| + __ pop(rdx); |
| + __ jmp(non_numeric_failure); |
| + } |
| + |
| + if (kSmiValueSize == 32) { |
| + if (op == Token::SHR) { |
| + // Logical shift right can produce an unsigned int32 that is not |
| + // an int32, and so is not in the smi range. Allocate a heap number |
| + // in that case. |
| + __ bind(&non_smi_result); |
| + Label allocation_failed; |
| + __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
| + // Allocate heap number in new space. |
| + // Not using AllocateHeapNumber macro in order to reuse |
| + // already loaded heap_number_map. |
| + __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, |
| + TAG_OBJECT); |
| + // Set the map. |
| + __ AssertRootValue(heap_number_map, |
| + Heap::kHeapNumberMapRootIndex, |
| + "HeapNumberMap register clobbered."); |
| + __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| + heap_number_map); |
| + __ cvtqsi2sd(xmm0, rbx); |
| + __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| + __ Ret(); |
| + |
| + __ bind(&allocation_failed); |
| + // We need tagged values in rdx and rax for the following code, |
| + // not int32 in rax and rcx. |
| + __ Integer32ToSmi(rax, rcx); |
| + __ Integer32ToSmi(rdx, rbx); |
| + __ jmp(allocation_failure); |
| + } |
| + } else { |
| + ASSERT(kSmiValueSize == 31); |
| + __ bind(&non_smi_result); |
| Label allocation_failed; |
| __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
| // Allocate heap number in new space. |
| // Not using AllocateHeapNumber macro in order to reuse |
| // already loaded heap_number_map. |
| - __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, |
| - TAG_OBJECT); |
| + Label skip_allocation; |
| + switch (mode) { |
| + case OVERWRITE_LEFT: { |
| + __ movq(rax, Operand(rsp, 1 * kRegisterSize)); |
| + __ JumpIfNotSmi(rax, &skip_allocation); |
| + __ Allocate(HeapNumber::kSize, rax, r8, no_reg, &allocation_failed, |
| + TAG_OBJECT); |
| + __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| + heap_number_map); |
| + __ bind(&skip_allocation); |
| + break; |
| + } |
| + case OVERWRITE_RIGHT: |
| + __ movq(rax, Operand(rsp, 0 * kRegisterSize)); |
| + __ JumpIfNotSmi(rax, &skip_allocation); |
| + // Fall through! |
| + case NO_OVERWRITE: |
| + __ Allocate(HeapNumber::kSize, rax, r8, no_reg, &allocation_failed, |
| + TAG_OBJECT); |
| + __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| + heap_number_map); |
| + __ bind(&skip_allocation); |
| + break; |
| + default: UNREACHABLE(); |
| + } |
| // Set the map. |
| __ AssertRootValue(heap_number_map, |
| Heap::kHeapNumberMapRootIndex, |
| "HeapNumberMap register clobbered."); |
| - __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
| - heap_number_map); |
| - __ cvtqsi2sd(xmm0, rbx); |
| + if (op == Token::SHR) { |
| + __ cvtqsi2sd(xmm0, rbx); |
| + } else { |
| + // All other operations return a signed int32, so we |
| + // use cvtlsi2sd here to retain the sign bit. |
| + __ cvtlsi2sd(xmm0, rbx); |
| + } |
| __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| + // Drop saved arguments. |
| + __ addq(rsp, Immediate(2 * kRegisterSize)); |
| __ Ret(); |
| __ bind(&allocation_failed); |
| - // We need tagged values in rdx and rax for the following code, |
| - // not int32 in rax and rcx. |
| - __ Integer32ToSmi(rax, rcx); |
| - __ Integer32ToSmi(rdx, rbx); |
| + // Restore arguments from stack. |
| + __ pop(rax); |
| + __ pop(rdx); |
| __ jmp(allocation_failure); |
| } |
| break; |
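A sketch of the conversion choice above (the helper name is illustrative): SHR produces an unsigned 32-bit value and must be widened as unsigned before the int-to-double conversion (cvtqsi2sd on the zero-extended value), while the other bit operations produce a signed int32 (cvtlsi2sd).

    #include <cstdint>

    static double ShiftResultToDouble(bool is_logical_shift_right,
                                      uint32_t raw_result) {
      if (is_logical_shift_right) {
        // Treat the bits as unsigned, e.g. 0x80000000 becomes 2147483648.0.
        return static_cast<double>(raw_result);
      }
      // Treat the bits as signed, e.g. 0x80000000 becomes -2147483648.0.
      return static_cast<double>(static_cast<int32_t>(raw_result));
    }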
| @@ -1063,9 +1228,30 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| - // The int32 case is identical to the Smi case. We avoid creating this |
| - // ic state on x64. |
| - UNREACHABLE(); |
| + if (kSmiValueSize == 32) { |
| + // The int32 case is identical to the Smi case. We avoid creating this |
| + // ic state on x64. |
| + UNREACHABLE(); |
| + } else { |
| + ASSERT(kSmiValueSize == 31); |
| + ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
| + |
| + Label gc_required, not_number, not_int32; |
| + BinaryOpStub_GenerateFloatingPointCode(masm, &gc_required, ¬_number, |
| + op_, result_type_, ¬_int32, mode_); |
|
danno
2013/08/01 16:45:41
strange indentation
haitao.feng
2013/08/02 09:35:51
Done.
|
| + |
| + __ bind(¬_number); |
| + __ bind(¬_int32); |
| + GenerateTypeTransition(masm); |
| + |
| + __ bind(&gc_required); |
| + { |
| + FrameScope scope(masm, StackFrame::INTERNAL); |
| + GenerateRegisterArgsPush(masm); |
| + GenerateCallRuntime(masm); |
| + } |
| + __ Ret(); |
| + } |
| } |
| @@ -1171,7 +1357,7 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
| } |
| BinaryOpStub_GenerateFloatingPointCode( |
| - masm, &gc_required, ¬_number, op_, mode_); |
| + masm, &gc_required, ¬_number, op_, result_type_, NULL, mode_); |
| __ bind(¬_number); |
| GenerateTypeTransition(masm); |
| @@ -1193,7 +1379,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
| BinaryOpStub_GenerateFloatingPointCode( |
| - masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); |
| + masm, &call_runtime, &call_string_add_or_runtime, op_, |
| + result_type_, NULL, mode_); |
| __ bind(&call_string_add_or_runtime); |
| if (op_ == Token::ADD) { |
| @@ -1744,6 +1931,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, |
| __ movq(kScratchRegister, xmm1); |
| __ cmpq(scratch2, kScratchRegister); |
| __ j(not_equal, on_not_smis); |
| + __ JumpIfNotValidSmiValue(smi_result, on_not_smis); |
| __ Integer32ToSmi(first, smi_result); |
| __ bind(&first_done); |
| @@ -1763,6 +1951,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, |
| __ movq(kScratchRegister, xmm1); |
| __ cmpq(scratch2, kScratchRegister); |
| __ j(not_equal, on_not_smis); |
| + __ JumpIfNotValidSmiValue(smi_result, on_not_smis); |
| __ Integer32ToSmi(second, smi_result); |
| if (on_success != NULL) { |
| __ jmp(on_success); |
| @@ -4572,8 +4761,14 @@ void StringAddStub::Generate(MacroAssembler* masm) { |
| __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); |
| // Look at the length of the result of adding the two strings. |
| - STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); |
| - __ SmiAdd(rbx, rbx, rcx); |
| + if (kSmiValueSize == 32) { |
| + ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); |
| + __ SmiAdd(rbx, rbx, rcx); |
|
danno
2013/08/01 16:45:41
As noted above, I think you should change SmiAdd t
haitao.feng
2013/08/02 09:35:51
There are two SmiAdd interfaces. SmiAdd (with no f
|
| + } else { |
| + ASSERT(kSmiValueSize == 31); |
| + __ SmiAdd(rbx, rbx, rcx, &call_runtime); |
| + } |
| + |
| // Use the string table when adding two one character strings, as it |
| // helps later optimizations to return an internalized string here. |
| __ SmiCompare(rbx, Smi::FromInt(2)); |
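A C-level model of the difference discussed above (the helper name is illustrative): with 32-bit smis the assert guarantees that the sum of two string lengths cannot overflow the smi range, while with 31-bit smis the SmiAdd variant that takes a bailout label is used so an overflowing length falls back to the runtime.

    #include <cstdint>

    // Returns false when the sum would leave the 31-bit smi range, mirroring
    // the SmiAdd(..., &call_runtime) bailout.
    static bool CheckedSmi31Add(int32_t left, int32_t right, int32_t* result) {
      int64_t sum = static_cast<int64_t>(left) + right;
      if (sum < -(INT64_C(1) << 30) || sum > (INT64_C(1) << 30) - 1) return false;
      *result = static_cast<int32_t>(sum);
      return true;
    }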
| @@ -5528,7 +5723,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { |
| // Inline comparison of ASCII strings. |
| __ IncrementCounter(counters->string_compare_native(), 1); |
| - // Drop arguments from the stack |
| + // Drop saved arguments. |
| __ pop(rcx); |
| __ addq(rsp, Immediate(2 * kPointerSize)); |
| __ push(rcx); |