OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 677 matching lines...) |
688 if (!final_result_reg.is(result_reg)) { | 688 if (!final_result_reg.is(result_reg)) { |
689 ASSERT(final_result_reg.is(ecx)); | 689 ASSERT(final_result_reg.is(ecx)); |
690 __ mov(final_result_reg, result_reg); | 690 __ mov(final_result_reg, result_reg); |
691 } | 691 } |
692 __ pop(save_reg); | 692 __ pop(save_reg); |
693 __ pop(scratch1); | 693 __ pop(scratch1); |
694 __ ret(0); | 694 __ ret(0); |
695 } | 695 } |
696 | 696 |
697 | 697 |
698 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | |
699 // TAGGED case: | |
700 // Input: | |
701 // esp[4]: tagged number input argument (should be number). | |
702 // esp[0]: return address. | |
703 // Output: | |
704 // eax: tagged double result. | |
705 // UNTAGGED case: | |
706 // Input: |
707 // esp[0]: return address. | |
708 // xmm1: untagged double input argument | |
709 // Output: | |
710 // xmm1: untagged double result. | |
711 | |
712 Label runtime_call; | |
713 Label runtime_call_clear_stack; | |
714 Label skip_cache; | |
715 const bool tagged = (argument_type_ == TAGGED); | |
716 if (tagged) { | |
717 // Test that eax is a number. | |
718 Label input_not_smi; | |
719 Label loaded; | |
720 __ mov(eax, Operand(esp, kPointerSize)); | |
721 __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear); | |
722 // Input is a smi. Untag and load it onto the FPU stack. | |
723 // Then load the low and high words of the double into ebx, edx. | |
724 STATIC_ASSERT(kSmiTagSize == 1); | |
725 __ sar(eax, 1); | |
726 __ sub(esp, Immediate(2 * kPointerSize)); | |
727 __ mov(Operand(esp, 0), eax); | |
728 __ fild_s(Operand(esp, 0)); | |
729 __ fst_d(Operand(esp, 0)); | |
730 __ pop(edx); | |
731 __ pop(ebx); | |
732 __ jmp(&loaded, Label::kNear); | |
733 __ bind(&input_not_smi); | |
734 // Check if input is a HeapNumber. | |
735 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); | |
736 Factory* factory = masm->isolate()->factory(); | |
737 __ cmp(ebx, Immediate(factory->heap_number_map())); | |
738 __ j(not_equal, &runtime_call); | |
739 // Input is a HeapNumber. Push it on the FPU stack and load its | |
740 // low and high words into ebx, edx. | |
741 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); | |
742 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); | |
743 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); | |
744 | |
745 __ bind(&loaded); | |
746 } else { // UNTAGGED. | |
747 CpuFeatureScope scope(masm, SSE2); | |
748 if (CpuFeatures::IsSupported(SSE4_1)) { | |
749 CpuFeatureScope sse4_scope(masm, SSE4_1); | |
750 __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx. | |
751 } else { | |
752 __ pshufd(xmm0, xmm1, 0x1); | |
753 __ movd(edx, xmm0); | |
754 } | |
755 __ movd(ebx, xmm1); | |
756 } | |
757 | |
758 // ST[0] or xmm1 == double value | |
759 // ebx = low 32 bits of double value | |
760 // edx = high 32 bits of double value | |
761 // Compute hash (the shifts are arithmetic): | |
762 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | |
763 __ mov(ecx, ebx); | |
764 __ xor_(ecx, edx); | |
765 __ mov(eax, ecx); | |
766 __ sar(eax, 16); | |
767 __ xor_(ecx, eax); | |
768 __ mov(eax, ecx); | |
769 __ sar(eax, 8); | |
770 __ xor_(ecx, eax); | |
771 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); | |
772 __ and_(ecx, | |
773 Immediate(TranscendentalCache::SubCache::kCacheSize - 1)); | |
774 | |
775 // ST[0] or xmm1 == double value. | |
776 // ebx = low 32 bits of double value. | |
777 // edx = high 32 bits of double value. | |
778 // ecx = TranscendentalCache::hash(double value). | |
779 ExternalReference cache_array = | |
780 ExternalReference::transcendental_cache_array_address(masm->isolate()); | |
781 __ mov(eax, Immediate(cache_array)); | |
782 int cache_array_index = | |
783 type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]); | |
784 __ mov(eax, Operand(eax, cache_array_index)); | |
785 // Eax points to the cache for the type type_. | |
786 // If NULL, the cache hasn't been initialized yet, so go through runtime. | |
787 __ test(eax, eax); | |
788 __ j(zero, &runtime_call_clear_stack); | |
789 #ifdef DEBUG | |
790 // Check that the layout of cache elements match expectations. | |
791 { TranscendentalCache::SubCache::Element test_elem[2]; | |
792 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); | |
793 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); | |
794 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); | |
795 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | |
796 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | |
797 CHECK_EQ(12, elem2_start - elem_start); // Two uint32's and a pointer. |
798 CHECK_EQ(0, elem_in0 - elem_start); | |
799 CHECK_EQ(kIntSize, elem_in1 - elem_start); | |
800 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | |
801 } | |
802 #endif | |
803 // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12]. | |
804 __ lea(ecx, Operand(ecx, ecx, times_2, 0)); | |
805 __ lea(ecx, Operand(eax, ecx, times_4, 0)); | |
806 // Check if cache matches: Double value is stored in uint32_t[2] array. | |
807 Label cache_miss; | |
808 __ cmp(ebx, Operand(ecx, 0)); | |
809 __ j(not_equal, &cache_miss, Label::kNear); | |
810 __ cmp(edx, Operand(ecx, kIntSize)); | |
811 __ j(not_equal, &cache_miss, Label::kNear); | |
812 // Cache hit! | |
813 Counters* counters = masm->isolate()->counters(); | |
814 __ IncrementCounter(counters->transcendental_cache_hit(), 1); | |
815 __ mov(eax, Operand(ecx, 2 * kIntSize)); | |
816 if (tagged) { | |
817 __ fstp(0); | |
818 __ ret(kPointerSize); | |
819 } else { // UNTAGGED. | |
820 CpuFeatureScope scope(masm, SSE2); | |
821 __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | |
822 __ Ret(); | |
823 } | |
824 | |
825 __ bind(&cache_miss); | |
826 __ IncrementCounter(counters->transcendental_cache_miss(), 1); | |
827 // Update cache with new value. | |
828 // We are short on registers, so use no_reg as scratch. | |
829 // This gives slightly larger code. | |
830 if (tagged) { | |
831 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); | |
832 } else { // UNTAGGED. | |
833 CpuFeatureScope scope(masm, SSE2); | |
834 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); | |
835 __ sub(esp, Immediate(kDoubleSize)); | |
836 __ movsd(Operand(esp, 0), xmm1); | |
837 __ fld_d(Operand(esp, 0)); | |
838 __ add(esp, Immediate(kDoubleSize)); | |
839 } | |
840 GenerateOperation(masm, type_); | |
841 __ mov(Operand(ecx, 0), ebx); | |
842 __ mov(Operand(ecx, kIntSize), edx); | |
843 __ mov(Operand(ecx, 2 * kIntSize), eax); | |
844 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); | |
845 if (tagged) { | |
846 __ ret(kPointerSize); | |
847 } else { // UNTAGGED. | |
848 CpuFeatureScope scope(masm, SSE2); | |
849 __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | |
850 __ Ret(); | |
851 | |
852 // Skip cache and return answer directly, only in untagged case. | |
853 __ bind(&skip_cache); | |
854 __ sub(esp, Immediate(kDoubleSize)); | |
855 __ movsd(Operand(esp, 0), xmm1); | |
856 __ fld_d(Operand(esp, 0)); | |
857 GenerateOperation(masm, type_); | |
858 __ fstp_d(Operand(esp, 0)); | |
859 __ movsd(xmm1, Operand(esp, 0)); | |
860 __ add(esp, Immediate(kDoubleSize)); | |
861 // We return the value in xmm1 without adding it to the cache, but | |
862 // we cause a scavenging GC so that future allocations will succeed. | |
863 { | |
864 FrameScope scope(masm, StackFrame::INTERNAL); | |
865 // Allocate an unused object bigger than a HeapNumber. | |
866 __ push(Immediate(Smi::FromInt(2 * kDoubleSize))); | |
867 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); | |
868 } | |
869 __ Ret(); | |
870 } | |
871 | |
872 // Call runtime, doing whatever allocation and cleanup is necessary. | |
873 if (tagged) { | |
874 __ bind(&runtime_call_clear_stack); | |
875 __ fstp(0); | |
876 __ bind(&runtime_call); | |
877 ExternalReference runtime = | |
878 ExternalReference(RuntimeFunction(), masm->isolate()); | |
879 __ TailCallExternalReference(runtime, 1, 1); | |
880 } else { // UNTAGGED. | |
881 CpuFeatureScope scope(masm, SSE2); | |
882 __ bind(&runtime_call_clear_stack); | |
883 __ bind(&runtime_call); | |
884 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); | |
885 __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); | |
886 { | |
887 FrameScope scope(masm, StackFrame::INTERNAL); | |
888 __ push(eax); | |
889 __ CallRuntime(RuntimeFunction(), 1); | |
890 } | |
891 __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); | |
892 __ Ret(); | |
893 } | |
894 } | |
895 | |
896 | |
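Note on the deleted Generate() above: the stub probes the transcendental cache entirely in generated code, using the hash described in its comments and the element layout verified by the DEBUG block (two uint32 input words plus a result pointer, 12 bytes on ia32). The following is a minimal C++ sketch of that lookup, not V8 code: Element, Hash, Lookup and the kCacheSize value of 512 are illustrative assumptions.

#include <cstdint>
#include <cstring>

// Hypothetical mirror of TranscendentalCache::SubCache::Element; the DEBUG
// checks in the stub require exactly this 12-byte layout on ia32.
struct Element {
  uint32_t in[2];   // low and high 32 bits of the input double.
  void* output;     // cached HeapNumber holding the result.
};

const int kCacheSize = 512;  // assumed power-of-two size; the real value lives in SubCache.

// Same hash the stub builds in ecx: xor the two halves, fold with
// arithmetic shifts (sar), then mask to the cache size.
int Hash(uint32_t low, uint32_t high) {
  int32_t h = static_cast<int32_t>(low ^ high);
  h ^= h >> 16;  // arithmetic shift, matching sar (assumes two's-complement).
  h ^= h >> 8;
  return h & (kCacheSize - 1);
}

// A hit requires both 32-bit halves of the key to match, exactly as the two
// cmp/j(not_equal) pairs check before loading the cached result.
void* Lookup(const Element* cache, double input) {
  uint32_t bits[2];
  std::memcpy(bits, &input, sizeof(bits));  // bits[0] = low word, bits[1] = high word (little-endian).
  const Element& e = cache[Hash(bits[0], bits[1])];
  return (e.in[0] == bits[0] && e.in[1] == bits[1]) ? e.output : nullptr;
}

The power-of-two mask is why the stub ASSERTs IsPowerOf2(kCacheSize), and the 12-byte stride is what the pair of lea instructions (times_2 then times_4, i.e. index * 3 * 4) reproduces when addressing the ecx'th entry.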
897 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { | |
898 switch (type_) { | |
899 case TranscendentalCache::LOG: return Runtime::kMath_log; | |
900 default: | |
901 UNIMPLEMENTED(); | |
902 return Runtime::kAbort; | |
903 } | |
904 } | |
905 | |
906 | |
907 void TranscendentalCacheStub::GenerateOperation( | |
908 MacroAssembler* masm, TranscendentalCache::Type type) { | |
909 // Only free register is edi. | |
910 // Input value is on FP stack, and also in ebx/edx. | |
911 // Input value is possibly in xmm1. | |
912 // Address of result (a newly allocated HeapNumber) may be in eax. | |
913 ASSERT(type == TranscendentalCache::LOG); | |
914 __ fldln2(); | |
915 __ fxch(); | |
916 __ fyl2x(); | |
917 } | |
918 | |
919 | |
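For reference, the three x87 instructions in the deleted GenerateOperation compute the natural log via the identity ln(x) = ln(2) * log2(x): fldln2 pushes ln(2), fxch swaps it beneath the input, and fyl2x leaves ST(1) * log2(ST(0)) on the FPU stack. A small standalone check of that identity (plain C++, not V8 code):

#include <cmath>
#include <cstdio>

int main() {
  double x = 10.0;
  double via_identity = std::log(2.0) * std::log2(x);  // what fldln2/fxch/fyl2x produce.
  double direct = std::log(x);
  std::printf("%.17g\n%.17g\n", via_identity, direct);  // both ~2.302585092994046
  return 0;
}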
920 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, | 698 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, |
921 Register number) { | 699 Register number) { |
922 Label load_smi, done; | 700 Label load_smi, done; |
923 | 701 |
924 __ JumpIfSmi(number, &load_smi, Label::kNear); | 702 __ JumpIfSmi(number, &load_smi, Label::kNear); |
925 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); | 703 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); |
926 __ jmp(&done, Label::kNear); | 704 __ jmp(&done, Label::kNear); |
927 | 705 |
928 __ bind(&load_smi); | 706 __ bind(&load_smi); |
929 __ SmiUntag(number); | 707 __ SmiUntag(number); |
(...skipping 4963 matching lines...) |
5893 __ bind(&fast_elements_case); | 5671 __ bind(&fast_elements_case); |
5894 GenerateCase(masm, FAST_ELEMENTS); | 5672 GenerateCase(masm, FAST_ELEMENTS); |
5895 } | 5673 } |
5896 | 5674 |
5897 | 5675 |
5898 #undef __ | 5676 #undef __ |
5899 | 5677 |
5900 } } // namespace v8::internal | 5678 } } // namespace v8::internal |
5901 | 5679 |
5902 #endif // V8_TARGET_ARCH_IA32 | 5680 #endif // V8_TARGET_ARCH_IA32 |