Index: src/x64/code-stubs-x64.cc
===================================================================
--- src/x64/code-stubs-x64.cc (revision 6938)
+++ src/x64/code-stubs-x64.cc (working copy)
@@ -1506,39 +1506,58 @@
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
+ // TAGGED case:
+ // Input:
+ // rsp[8]: argument (should be number).
+ // rsp[0]: return address.
+ // Output:
+ // rax: tagged double result.
+ // UNTAGGED case:
+ // Input:
+ // rsp[0]: return address.
+ // xmm1: untagged double input argument
+ // Output:
+ // xmm1: untagged double result.
+
Label runtime_call;
Label runtime_call_clear_stack;
- Label input_not_smi;
- NearLabel loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kPointerSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&loaded);
+ Label skip_cache;
+ const bool tagged = (argument_type_ == TAGGED);
+ if (tagged) {
+ NearLabel input_not_smi;
+ NearLabel loaded;
+ // Test that rax is a number.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ JumpIfNotSmi(rax, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the bits of the double into rbx.
+ __ SmiToInteger32(rax, rax);
+ __ subq(rsp, Immediate(kPointerSize));
Lasse Reichstein
2011/02/25 06:49:26
Use kDoubleSize instead (if it exists, otherwise i…)
William Hesse
2011/02/25 12:12:38
Done.
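kDoubleSize does exist in V8 (globals.h defines it as sizeof(double)); on x64 it equals kPointerSize, so the change is purely for readability. A minimal sketch of the scratch-slot sequence with the suggested constant:

  __ subq(rsp, Immediate(kDoubleSize));  // Reserve stack space for one double.
  __ movsd(Operand(rsp, 0), xmm1);       // Spill the double for the x87 load below.
  __ fld_d(Operand(rsp, 0));
  __ addq(rsp, Immediate(kDoubleSize));  // Release the scratch slot.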
+ __ cvtlsi2sd(xmm1, rax);
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kPointerSize));
+ __ jmp(&loaded);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ Move(rbx, Factory::heap_number_map());
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
- __ bind(&loaded);
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ Move(rbx, Factory::heap_number_map());
Lasse Reichstein
2011/02/25 06:49:26
Use LoadRoot.
William Hesse
2011/02/25 12:12:38
Done.
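LoadRoot fetches the map through the root list rather than through a Factory handle; a sketch of the revised check, assuming Heap::kHeapNumberMapRootIndex is the relevant root index:

  __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);  // Heap-number map from the root list.
  __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
  __ j(not_equal, &runtime_call);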
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // bits into rbx.
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rdx, rbx);
+
+ __ bind(&loaded);
+ } else { // UNTAGGED.
+ __ movq(rbx, xmm1);
+ __ movq(rdx, rbx);
Lasse Reichstein
2011/02/25 06:49:26
Use movq(rdx,xmm1) to avoid dependencies.
William Hesse
2011/02/25 12:12:38
Done.
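The point of the suggestion: both moves can read xmm1 directly, so the second movq no longer depends on the result of the first. Sketch of the fixed pair:

  __ movq(rbx, xmm1);
  __ movq(rdx, xmm1);  // Sources xmm1, not rbx, so the two moves are independent.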
+ }
+
// ST[0] == double value
Lasse Reichstein
2011/02/25 06:49:26
The untagged case hasn't pushed the value on the FPU stack.
William Hesse
2011/02/25 12:12:38
Added comment to this effect. All code paths corr…
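The comment added in response is not visible in this hunk; presumably it qualifies the invariant along these lines:

  // ST[0] == double value, if TAGGED.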
// rbx = bits of double value.
// rdx = also bits of double value.
@@ -1597,30 +1616,72 @@
__ j(not_equal, &cache_miss);
// Cache hit!
__ movq(rax, Operand(rcx, 2 * kIntSize));
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
+ if (tagged) {
+ __ fstp(0); // Clear FPU stack.
+ __ ret(kPointerSize);
+ } else { // UNTAGGED.
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Ret();
+ }
__ bind(&cache_miss);
// Update cache with new value.
- Label nan_result;
- GenerateOperation(masm, &nan_result);
+ if (tagged) {
__ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+ } else { // UNTAGGED.
+ __ AllocateHeapNumber(rax, rdi, &skip_cache);
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ }
+ GenerateOperation(masm);
__ movq(Operand(rcx, 0), rbx);
__ movq(Operand(rcx, 2 * kIntSize), rax);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
+ if (tagged) {
+ __ ret(kPointerSize);
+ } else { // UNTAGGED.
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Ret();
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ // Skip cache and return answer directly, only in untagged case.
+ __ bind(&skip_cache);
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ fld_d(Operand(rsp, 0));
+ GenerateOperation(masm);
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(xmm1, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ // We return the value in xmm1 without adding it to the cache, but
+ // we cause a scavenging GC so that future allocations will succeed.
Lasse Reichstein
2011/02/25 06:49:26
Why don't we just allocate the size of a HeapNumber?
William Hesse
2011/02/25 12:12:38
Because this was added to the code after it was done…
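The reviewer's alternative would presumably push the exact object size instead, hypothetically:

  __ Push(Smi::FromInt(HeapNumber::kSize));  // Hypothetical: exactly one HeapNumber.

Either size would appear to serve the stated purpose: the allocation exists only to trigger a scavenge so that later AllocateHeapNumber calls succeed, and the object itself is never used.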
+ __ EnterInternalFrame();
+ // Allocate an unused object bigger than a HeapNumber.
+ __ Push(Smi::FromInt(2 * kDoubleSize));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
+ __ Ret();
+ }
- __ bind(&nan_result);
- __ fstp(0); // Remove argument from FPU stack.
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ ret(kPointerSize);
+ // Call runtime, doing whatever allocation and cleanup is necessary.
+ if (tagged) {
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ } else { // UNTAGGED.
+ __ bind(&runtime_call_clear_stack);
+ __ bind(&runtime_call);
+ __ AllocateHeapNumber(rax, rdi, &skip_cache);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Ret();
+ }
}
@@ -1637,8 +1698,7 @@
}
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
- Label* on_nan_result) {
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Registers:
// rbx: Bits of input double. Must be preserved.
// rcx: Pointer to cache entry. Must be preserved.
@@ -1661,8 +1721,18 @@
__ j(below, &in_range);
// Check for infinity and NaN. Both return NaN for sin.
__ cmpl(rdi, Immediate(0x7ff));
- __ j(equal, on_nan_result);
+ NearLabel non_nan_result;
+ __ j(not_equal, &non_nan_result);
+ // Input is +/-Infinity or NaN. Result is NaN.
+ __ fstp(0);
+ __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
+ __ push(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+ __ fld_d(Operand(rsp, 0));
Lasse Reichstein
2011/02/25 06:49:26
Can't you just fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset))?
William Hesse
2011/02/25 12:12:38
Done.
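The simplification loads the canonical NaN straight off the heap number, dropping the push and the stack round trip; a sketch:

  __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
  __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));  // Push NaN onto the FPU stack.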
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ jmp(&done);
+ __ bind(&non_nan_result);
+
// Use fpmod to restrict argument to the range +/-2*PI.
__ fldpi();
__ fadd(0);