Index: src/x64/code-stubs-x64.cc
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index d04e976e0dbe3e21c7434d3c2ad51c9591f2cd9e..5b5212c7e07c9cc2b2e8522897b11c600030a739 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -597,220 +597,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
 }
 
 
-void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
-  // TAGGED case:
-  //   Input:
-  //     rsp[8] : argument (should be number).
-  //     rsp[0] : return address.
-  //   Output:
-  //     rax: tagged double result.
-  // UNTAGGED case:
-  //   Input::
-  //     rsp[0] : return address.
-  //     xmm1 : untagged double input argument
-  //   Output:
-  //     xmm1 : untagged double result.
-
-  Label runtime_call;
-  Label runtime_call_clear_stack;
-  Label skip_cache;
-  const bool tagged = (argument_type_ == TAGGED);
-  if (tagged) {
-    Label input_not_smi, loaded;
-
-    // Test that rax is a number.
-    StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
-    __ movq(rax, args.GetArgumentOperand(0));
-    __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
-    // Input is a smi. Untag and load it onto the FPU stack.
-    // Then load the bits of the double into rbx.
-    __ SmiToInteger32(rax, rax);
-    __ subq(rsp, Immediate(kDoubleSize));
-    __ Cvtlsi2sd(xmm1, rax);
-    __ movsd(Operand(rsp, 0), xmm1);
-    __ movq(rbx, xmm1);
-    __ movq(rdx, xmm1);
-    __ fld_d(Operand(rsp, 0));
-    __ addq(rsp, Immediate(kDoubleSize));
-    __ jmp(&loaded, Label::kNear);
-
-    __ bind(&input_not_smi);
-    // Check if input is a HeapNumber.
-    __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
-    __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-    __ j(not_equal, &runtime_call);
-    // Input is a HeapNumber. Push it on the FPU stack and load its
-    // bits into rbx.
-    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
-    __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
-    __ movq(rdx, rbx);
-
-    __ bind(&loaded);
-  } else {  // UNTAGGED.
-    __ movq(rbx, xmm1);
-    __ movq(rdx, xmm1);
-  }
-
-  // ST[0] == double value, if TAGGED.
-  // rbx = bits of double value.
-  // rdx = also bits of double value.
-  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
-  //   h = h0 = bits ^ (bits >> 32);
-  //   h ^= h >> 16;
-  //   h ^= h >> 8;
-  //   h = h & (cacheSize - 1);
-  // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
-  __ sar(rdx, Immediate(32));
-  __ xorl(rdx, rbx);
-  __ movl(rcx, rdx);
-  __ movl(rax, rdx);
-  __ movl(rdi, rdx);
-  __ sarl(rdx, Immediate(8));
-  __ sarl(rcx, Immediate(16));
-  __ sarl(rax, Immediate(24));
-  __ xorl(rcx, rdx);
-  __ xorl(rax, rdi);
-  __ xorl(rcx, rax);
-  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
-  __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
-
-  // ST[0] == double value.
-  // rbx = bits of double value.
-  // rcx = TranscendentalCache::hash(double value).
-  ExternalReference cache_array =
-      ExternalReference::transcendental_cache_array_address(masm->isolate());
-  __ Move(rax, cache_array);
-  int cache_array_index =
-      type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
-  __ movq(rax, Operand(rax, cache_array_index));
-  // rax points to the cache for the type type_.
-  // If NULL, the cache hasn't been initialized yet, so go through runtime.
-  __ testq(rax, rax);
-  __ j(zero, &runtime_call_clear_stack);  // Only clears stack if TAGGED.
-#ifdef DEBUG
-  // Check that the layout of cache elements match expectations.
-  {  // NOLINT - doesn't like a single brace on a line.
-    TranscendentalCache::SubCache::Element test_elem[2];
-    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
-    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
-    char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
-    char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
-    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
-    // Two uint_32's and a pointer per element.
-    CHECK_EQ(2 * kIntSize + 1 * kPointerSize,
-             static_cast<int>(elem2_start - elem_start));
-    CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
-    CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
-    CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
-  }
-#endif
-  // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
-  __ addl(rcx, rcx);
-  __ lea(rcx, Operand(rax, rcx, times_8, 0));
-  // Check if cache matches: Double value is stored in uint32_t[2] array.
-  Label cache_miss;
-  __ cmpq(rbx, Operand(rcx, 0));
-  __ j(not_equal, &cache_miss, Label::kNear);
-  // Cache hit!
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
-  __ movq(rax, Operand(rcx, 2 * kIntSize));
-  if (tagged) {
-    __ fstp(0);  // Clear FPU stack.
-    __ ret(kPointerSize);
-  } else {  // UNTAGGED.
-    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
-    __ Ret();
-  }
-
-  __ bind(&cache_miss);
-  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
-  // Update cache with new value.
-  if (tagged) {
-    __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
-  } else {  // UNTAGGED.
-    __ AllocateHeapNumber(rax, rdi, &skip_cache);
-    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
-    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
-  }
-  GenerateOperation(masm, type_);
-  __ movq(Operand(rcx, 0), rbx);
-  __ movq(Operand(rcx, 2 * kIntSize), rax);
-  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
-  if (tagged) {
-    __ ret(kPointerSize);
-  } else {  // UNTAGGED.
-    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
-    __ Ret();
-
-    // Skip cache and return answer directly, only in untagged case.
-    __ bind(&skip_cache);
-    __ subq(rsp, Immediate(kDoubleSize));
-    __ movsd(Operand(rsp, 0), xmm1);
-    __ fld_d(Operand(rsp, 0));
-    GenerateOperation(masm, type_);
-    __ fstp_d(Operand(rsp, 0));
-    __ movsd(xmm1, Operand(rsp, 0));
-    __ addq(rsp, Immediate(kDoubleSize));
-    // We return the value in xmm1 without adding it to the cache, but
-    // we cause a scavenging GC so that future allocations will succeed.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      // Allocate an unused object bigger than a HeapNumber.
-      __ Push(Smi::FromInt(2 * kDoubleSize));
-      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    }
-    __ Ret();
-  }
-
-  // Call runtime, doing whatever allocation and cleanup is necessary.
-  if (tagged) {
-    __ bind(&runtime_call_clear_stack);
-    __ fstp(0);
-    __ bind(&runtime_call);
-    __ TailCallExternalReference(
-        ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
-  } else {  // UNTAGGED.
-    __ bind(&runtime_call_clear_stack);
-    __ bind(&runtime_call);
-    __ AllocateHeapNumber(rax, rdi, &skip_cache);
-    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(rax);
-      __ CallRuntime(RuntimeFunction(), 1);
-    }
-    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
-    __ Ret();
-  }
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
-  switch (type_) {
-    // Add more cases when necessary.
-    case TranscendentalCache::LOG: return Runtime::kMath_log;
-    default:
-      UNIMPLEMENTED();
-      return Runtime::kAbort;
-  }
-}
-
-
-void TranscendentalCacheStub::GenerateOperation(
-    MacroAssembler* masm, TranscendentalCache::Type type) {
-  // Registers:
-  //   rax: Newly allocated HeapNumber, which must be preserved.
-  //   rbx: Bits of input double. Must be preserved.
-  //   rcx: Pointer to cache entry. Must be preserved.
-  //   st(0): Input double
-  ASSERT(type == TranscendentalCache::LOG);
-  __ fldln2();
-  __ fxch();
-  __ fyl2x();
-}
-
-
 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                    Label* not_numbers) {
   Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
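
For reference, the cache lookup deleted above indexes a per-type sub-cache by hashing the bit pattern of the input double, exactly as spelled out in the comments of TranscendentalCacheStub::Generate. The standalone C++ sketch below restates that hash; it is illustrative only and not part of the patch. The function name is invented, and kCacheSize (512 here, an assumed value) merely stands in for TranscendentalCache::SubCache::kCacheSize, which the ASSERT requires to be a power of two.

#include <cstdint>
#include <cstring>

// Illustrative stand-in for TranscendentalCache::SubCache::kCacheSize.
constexpr uint32_t kCacheSize = 512;  // assumed value; only the power-of-two property matters
static_assert((kCacheSize & (kCacheSize - 1)) == 0,
              "cache size must be a power of two");

// Hash documented in the removed stub:
//   h0 = bits ^ (bits >> 32); h ^= h >> 16; h ^= h >> 8; h &= kCacheSize - 1.
uint32_t TranscendentalCacheHash(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // reinterpret the double's bit pattern
  uint32_t h = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  h ^= h >> 16;
  h ^= h >> 8;
  return h & (kCacheSize - 1);               // index into the per-type sub-cache
}

The stub then doubles this index and scales it by 8 (addl rcx, rcx followed by lea with times_8) because each cache element is 16 bytes on x64: two uint32_t words holding the input bits plus a pointer to the cached HeapNumber result, which is what the DEBUG layout checks in the removed code verify.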