Chromium Code Reviews

Diff: src/x64/code-stubs-x64.cc

Issue 113343003: Remove the last remnants of the TranscendentalCache. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebased (created 7 years ago)
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 579 matching lines...)
  if (!final_result_reg.is(result_reg)) {
    ASSERT(final_result_reg.is(rcx));
    __ movl(final_result_reg, result_reg);
  }
  __ pop(save_reg);
  __ pop(scratch1);
  __ ret(0);
}


void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // TAGGED case:
  //   Input:
  //     rsp[8]: argument (should be a number).
  //     rsp[0]: return address.
  //   Output:
  //     rax: tagged double result.
  // UNTAGGED case:
  //   Input:
  //     rsp[0]: return address.
  //     xmm1: untagged double input argument.
  //   Output:
  //     xmm1: untagged double result.

  Label runtime_call;
  Label runtime_call_clear_stack;
  Label skip_cache;
  const bool tagged = (argument_type_ == TAGGED);
  if (tagged) {
    Label input_not_smi, loaded;

    // Test that rax is a number.
    StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movq(rax, args.GetArgumentOperand(0));
    __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
    // Input is a smi. Untag and load it onto the FPU stack.
    // Then load the bits of the double into rbx.
    __ SmiToInteger32(rax, rax);
    __ subq(rsp, Immediate(kDoubleSize));
    __ Cvtlsi2sd(xmm1, rax);
    __ movsd(Operand(rsp, 0), xmm1);
    __ movq(rbx, xmm1);
    __ movq(rdx, xmm1);
    __ fld_d(Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&loaded, Label::kNear);

    __ bind(&input_not_smi);
    // Check if input is a HeapNumber.
    __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
    __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
    __ j(not_equal, &runtime_call);
    // Input is a HeapNumber. Push it on the FPU stack and load its
    // bits into rbx.
    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
    __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
    __ movq(rdx, rbx);

    __ bind(&loaded);
  } else {  // UNTAGGED.
    __ movq(rbx, xmm1);
    __ movq(rdx, xmm1);
  }

  // ST[0] == double value, if TAGGED.
  // rbx = bits of double value.
  // rdx = also bits of double value.
  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
  //   h = h0 = bits ^ (bits >> 32);
  //   h ^= h >> 16;
  //   h ^= h >> 8;
  //   h = h & (cacheSize - 1);
  // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
  __ sar(rdx, Immediate(32));
  __ xorl(rdx, rbx);
  __ movl(rcx, rdx);
  __ movl(rax, rdx);
  __ movl(rdi, rdx);
  __ sarl(rdx, Immediate(8));
  __ sarl(rcx, Immediate(16));
  __ sarl(rax, Immediate(24));
  __ xorl(rcx, rdx);
  __ xorl(rax, rdi);
  __ xorl(rcx, rax);
  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
  __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
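
  // For reference, the shift/xor sequence above matches this C++ sketch
  // (hypothetical helper, not in the original source; it relies on
  // kCacheSize being a power of two, as the ASSERT checks):
  //
  //   static inline uint32_t Hash(uint64_t bits) {
  //     uint32_t h = static_cast<uint32_t>(bits) ^
  //                  static_cast<uint32_t>(bits >> 32);
  //     h ^= h >> 16;
  //     h ^= h >> 8;
  //     return h & (TranscendentalCache::SubCache::kCacheSize - 1);
  //   }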

  // ST[0] == double value.
  // rbx = bits of double value.
  // rcx = TranscendentalCache::hash(double value).
  ExternalReference cache_array =
      ExternalReference::transcendental_cache_array_address(masm->isolate());
  __ Move(rax, cache_array);
  int cache_array_index =
      type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
  __ movq(rax, Operand(rax, cache_array_index));
  // rax points to the cache for the type type_.
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
  __ testq(rax, rax);
  __ j(zero, &runtime_call_clear_stack);  // Only clears stack if TAGGED.
#ifdef DEBUG
  // Check that the layout of cache elements matches expectations.
  {  // NOLINT - doesn't like a single brace on a line.
    TranscendentalCache::SubCache::Element test_elem[2];
    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
    char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
    char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
    // Two uint32_t's and a pointer per element.
    CHECK_EQ(2 * kIntSize + 1 * kPointerSize,
             static_cast<int>(elem2_start - elem_start));
    CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
    CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
    CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
  }
#endif
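
  // The CHECKs above pin down a cache-element layout of roughly this shape,
  // 16 bytes per element on x64 (a sketch for orientation; the real
  // definition lives with TranscendentalCache::SubCache):
  //
  //   struct Element {
  //     uint32_t in[2];  // Input double, split into low/high 32-bit words.
  //     Object* output;  // Tagged pointer to the cached HeapNumber result.
  //   };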
  // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
  __ addl(rcx, rcx);
  __ lea(rcx, Operand(rax, rcx, times_8, 0));
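  // That is, entry = rax + hash * 16: the addl doubles the index and the
  // times_8 scale in the lea supplies the remaining factor of 8, since
  // sizeof(Element) == 2 * kIntSize + kPointerSize == 16 on x64.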
  // Check if the cache matches: the double value is stored as a uint32_t[2]
  // array.
  Label cache_miss;
  __ cmpq(rbx, Operand(rcx, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Cache hit!
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
  __ movq(rax, Operand(rcx, 2 * kIntSize));
  if (tagged) {
    __ fstp(0);  // Clear FPU stack.
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();
  }

  __ bind(&cache_miss);
  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
  // Update cache with new value.
  if (tagged) {
    __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
  } else {  // UNTAGGED.
    __ AllocateHeapNumber(rax, rdi, &skip_cache);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
  }
  GenerateOperation(masm, type_);
  __ movq(Operand(rcx, 0), rbx);
  __ movq(Operand(rcx, 2 * kIntSize), rax);
  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
  if (tagged) {
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();

    // Skip cache and return answer directly, only in untagged case.
    __ bind(&skip_cache);
    __ subq(rsp, Immediate(kDoubleSize));
    __ movsd(Operand(rsp, 0), xmm1);
    __ fld_d(Operand(rsp, 0));
    GenerateOperation(masm, type_);
    __ fstp_d(Operand(rsp, 0));
    __ movsd(xmm1, Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    // We return the value in xmm1 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      // Allocate an unused object bigger than a HeapNumber.
      __ Push(Smi::FromInt(2 * kDoubleSize));
      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    }
    __ Ret();
  }

  // Call runtime, doing whatever allocation and cleanup is necessary.
  if (tagged) {
    __ bind(&runtime_call_clear_stack);
    __ fstp(0);
    __ bind(&runtime_call);
    __ TailCallExternalReference(
        ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
  } else {  // UNTAGGED.
    __ bind(&runtime_call_clear_stack);
    __ bind(&runtime_call);
    __ AllocateHeapNumber(rax, rdi, &skip_cache);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(rax);
      __ CallRuntime(RuntimeFunction(), 1);
    }
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();
  }
}


Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
  switch (type_) {
    // Add more cases when necessary.
    case TranscendentalCache::LOG: return Runtime::kMath_log;
    default:
      UNIMPLEMENTED();
      return Runtime::kAbort;
  }
}


void TranscendentalCacheStub::GenerateOperation(
    MacroAssembler* masm, TranscendentalCache::Type type) {
  // Registers:
  //   rax: Newly allocated HeapNumber, which must be preserved.
  //   rbx: Bits of input double. Must be preserved.
  //   rcx: Pointer to cache entry. Must be preserved.
  //   st(0): Input double.
  ASSERT(type == TranscendentalCache::LOG);
  __ fldln2();  // Push ln(2): st(0) = ln(2), st(1) = x.
  __ fxch();    // Swap: st(0) = x, st(1) = ln(2).
  __ fyl2x();   // st(1) * log2(st(0)), pop: st(0) = ln(2) * log2(x) = ln(x).
}
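
// For reference, GenerateOperation computes ln(x) via the identity
// ln(x) = ln(2) * log2(x). A minimal standalone C++ sketch (hypothetical
// name, not in the original source; assumes <cmath>):
//
//   double TranscendentalLogSketch(double x) {
//     return std::log(2.0) * std::log2(x);  // == std::log(x) for x > 0.
//   }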


void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                  Label* not_numbers) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
  // Load operand in rdx into xmm0, or branch to not_numbers.
  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1, or branch to not_numbers.
(...skipping 4870 matching lines...)
  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64