OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_ARM64 | 5 #if V8_TARGET_ARCH_ARM64 |
6 | 6 |
7 #include "src/code-stubs.h" | 7 #include "src/code-stubs.h" |
8 #include "src/api-arguments.h" | 8 #include "src/api-arguments.h" |
9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
(...skipping 665 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
676 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { | 676 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { |
677 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); | 677 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); |
678 UseScratchRegisterScope temps(masm); | 678 UseScratchRegisterScope temps(masm); |
679 Register return_address = temps.AcquireX(); | 679 Register return_address = temps.AcquireX(); |
680 // Preserve the return address (lr will be clobbered by the pop). | 680 // Preserve the return address (lr will be clobbered by the pop). |
681 __ Mov(return_address, lr); | 681 __ Mov(return_address, lr); |
682 __ PopSafepointRegisters(); | 682 __ PopSafepointRegisters(); |
683 __ Ret(return_address); | 683 __ Ret(return_address); |
684 } | 684 } |
685 | 685 |
686 | |
687 void MathPowStub::Generate(MacroAssembler* masm) { | 686 void MathPowStub::Generate(MacroAssembler* masm) { |
688 // Stack on entry: | 687 // Stack on entry: |
689 // jssp[0]: Exponent (as a tagged value). | 688 // jssp[0]: Exponent (as a tagged value). |
690 // jssp[1]: Base (as a tagged value). | 689 // jssp[1]: Base (as a tagged value). |
691 // | 690 // |
692 // The (tagged) result will be returned in x0, as a heap number. | 691 // The (tagged) result will be returned in x0, as a heap number. |
693 | 692 |
694 Register result_tagged = x0; | 693 Register result_tagged = x0; |
695 Register base_tagged = x10; | |
696 Register exponent_tagged = MathPowTaggedDescriptor::exponent(); | 694 Register exponent_tagged = MathPowTaggedDescriptor::exponent(); |
697 DCHECK(exponent_tagged.is(x11)); | 695 DCHECK(exponent_tagged.is(x11)); |
698 Register exponent_integer = MathPowIntegerDescriptor::exponent(); | 696 Register exponent_integer = MathPowIntegerDescriptor::exponent(); |
699 DCHECK(exponent_integer.is(x12)); | 697 DCHECK(exponent_integer.is(x12)); |
700 Register scratch1 = x14; | |
701 Register scratch0 = x15; | |
702 Register saved_lr = x19; | 698 Register saved_lr = x19; |
703 FPRegister result_double = d0; | 699 FPRegister result_double = d0; |
704 FPRegister base_double = d0; | 700 FPRegister base_double = d0; |
705 FPRegister exponent_double = d1; | 701 FPRegister exponent_double = d1; |
706 FPRegister base_double_copy = d2; | 702 FPRegister base_double_copy = d2; |
707 FPRegister scratch1_double = d6; | 703 FPRegister scratch1_double = d6; |
708 FPRegister scratch0_double = d7; | 704 FPRegister scratch0_double = d7; |
709 | 705 |
710 // A fast-path for integer exponents. | 706 // A fast-path for integer exponents. |
711 Label exponent_is_smi, exponent_is_integer; | 707 Label exponent_is_smi, exponent_is_integer; |
712 // Bail out to runtime. | |
713 Label call_runtime; | |
714 // Allocate a heap number for the result, and return it. | 708 // Allocate a heap number for the result, and return it. |
715 Label done; | 709 Label done; |
716 | 710 |
717 // Unpack the inputs. | 711 // Unpack the inputs. |
718 if (exponent_type() == ON_STACK) { | 712 if (exponent_type() == TAGGED) { |
719 Label base_is_smi; | |
720 Label unpack_exponent; | |
721 | |
722 __ Pop(exponent_tagged, base_tagged); | |
723 | |
724 __ JumpIfSmi(base_tagged, &base_is_smi); | |
725 __ JumpIfNotHeapNumber(base_tagged, &call_runtime); | |
726 // base_tagged is a heap number, so load its double value. | |
727 __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset)); | |
728 __ B(&unpack_exponent); | |
729 __ Bind(&base_is_smi); | |
730 // base_tagged is a SMI, so untag it and convert it to a double. | |
731 __ SmiUntagToDouble(base_double, base_tagged); | |
732 | |
733 __ Bind(&unpack_exponent); | |
734 // x10 base_tagged The tagged base (input). | |
735 // x11 exponent_tagged The tagged exponent (input). | |
736 // d1 base_double The base as a double. | |
737 __ JumpIfSmi(exponent_tagged, &exponent_is_smi); | |
738 __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime); | |
739 // exponent_tagged is a heap number, so load its double value. | |
740 __ Ldr(exponent_double, | |
741 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset)); | |
742 } else if (exponent_type() == TAGGED) { | |
743 __ JumpIfSmi(exponent_tagged, &exponent_is_smi); | 713 __ JumpIfSmi(exponent_tagged, &exponent_is_smi); |
744 __ Ldr(exponent_double, | 714 __ Ldr(exponent_double, |
745 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset)); | 715 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset)); |
746 } | 716 } |
747 | 717 |
748 // Handle double (heap number) exponents. | 718 // Handle double (heap number) exponents. |
749 if (exponent_type() != INTEGER) { | 719 if (exponent_type() != INTEGER) { |
750 // Detect integer exponents stored as doubles and handle those in the | 720 // Detect integer exponents stored as doubles and handle those in the |
751 // integer fast-path. | 721 // integer fast-path. |
752 __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double, | 722 __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double, |
753 scratch0_double, &exponent_is_integer); | 723 scratch0_double, &exponent_is_integer); |
754 | 724 |
755 if (exponent_type() == ON_STACK) { | |
756 FPRegister half_double = d3; | |
757 FPRegister minus_half_double = d4; | |
758 // Detect square root case. Crankshaft detects constant +/-0.5 at compile | |
759 // time and uses DoMathPowHalf instead. We then skip this check for | |
760 // non-constant cases of +/-0.5 as these hardly occur. | |
761 | |
762 __ Fmov(minus_half_double, -0.5); | |
763 __ Fmov(half_double, 0.5); | |
764 __ Fcmp(minus_half_double, exponent_double); | |
765 __ Fccmp(half_double, exponent_double, NZFlag, ne); | |
766 // Condition flags at this point: | |
767 //  0.5: nZCv // Identified by eq && pl | |
768 // -0.5: NZcv // Identified by eq && mi | |
769 // other: ?z?? // Identified by ne | |
770 __ B(ne, &call_runtime); | |
771 | |
772 // The exponent is 0.5 or -0.5. | |
773 | |
774 // Given that exponent is known to be either 0.5 or -0.5, the following | |
775 // special cases could apply (according to ECMA-262 15.8.2.13): | |
776 // | |
777 // base.isNaN(): The result is NaN. | |
778 // (base == +INFINITY) || (base == -INFINITY) | |
779 // exponent == 0.5: The result is +INFINITY. | |
780 // exponent == -0.5: The result is +0. | |
781 // (base == +0) || (base == -0) | |
782 // exponent == 0.5: The result is +0. | |
783 // exponent == -0.5: The result is +INFINITY. | |
784 // (base < 0) && base.isFinite(): The result is NaN. | |
785 // | |
786 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except | |
787 // where base is -INFINITY or -0. | |
788 | |
789 // Add +0 to base. This has no effect other than turning -0 into +0. | |
790 __ Fadd(base_double, base_double, fp_zero); | |
791 // The operation -0+0 results in +0 in all cases except where the | |
792 // FPCR rounding mode is 'round towards minus infinity' (RM). The | |
793 // ARM64 simulator does not currently simulate FPCR (where the rounding | |
794 // mode is set), so test the operation with some debug code. | |
795 if (masm->emit_debug_code()) { | |
796 UseScratchRegisterScope temps(masm); | |
797 Register temp = temps.AcquireX(); | |
798 __ Fneg(scratch0_double, fp_zero); | |
799 // Verify that we correctly generated +0.0 and -0.0. | |
800 // bits(+0.0) = 0x0000000000000000 | |
801 // bits(-0.0) = 0x8000000000000000 | |
802 __ Fmov(temp, fp_zero); | |
803 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero); | |
804 __ Fmov(temp, scratch0_double); | |
805 __ Eor(temp, temp, kDSignMask); | |
806 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero); | |
807 // Check that -0.0 + 0.0 == +0.0. | |
808 __ Fadd(scratch0_double, scratch0_double, fp_zero); | |
809 __ Fmov(temp, scratch0_double); | |
810 __ CheckRegisterIsClear(temp, kExpectedPositiveZero); | |
811 } | |
812 | |
813 // If base is -INFINITY, make it +INFINITY. | |
814 // * Calculate base - base: All infinities will become NaNs since both | |
815 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64. | |
816 // * If the result is NaN, calculate abs(base). | |
817 __ Fsub(scratch0_double, base_double, base_double); | |
818 __ Fcmp(scratch0_double, 0.0); | |
819 __ Fabs(scratch1_double, base_double); | |
820 __ Fcsel(base_double, scratch1_double, base_double, vs); | |
821 | |
822 // Calculate the square root of base. | |
823 __ Fsqrt(result_double, base_double); | |
824 __ Fcmp(exponent_double, 0.0); | |
825 __ B(ge, &done); // Finish now for exponents of 0.5. | |
826 // Find the inverse for exponents of -0.5. | |
827 __ Fmov(scratch0_double, 1.0); | |
828 __ Fdiv(result_double, scratch0_double, result_double); | |
829 __ B(&done); | |
830 } | |
831 | |
832 { | 725 { |
833 AllowExternalCallThatCantCauseGC scope(masm); | 726 AllowExternalCallThatCantCauseGC scope(masm); |
834 __ Mov(saved_lr, lr); | 727 __ Mov(saved_lr, lr); |
835 __ CallCFunction( | 728 __ CallCFunction( |
836 ExternalReference::power_double_double_function(isolate()), | 729 ExternalReference::power_double_double_function(isolate()), 0, 2); |
837 0, 2); | |
838 __ Mov(lr, saved_lr); | 730 __ Mov(lr, saved_lr); |
839 __ B(&done); | 731 __ B(&done); |
840 } | 732 } |
841 | 733 |
842 // Handle SMI exponents. | 734 // Handle SMI exponents. |
843 __ Bind(&exponent_is_smi); | 735 __ Bind(&exponent_is_smi); |
844 // x10 base_tagged The tagged base (input). | 736 // x10 base_tagged The tagged base (input). |
845 // x11 exponent_tagged The tagged exponent (input). | 737 // x11 exponent_tagged The tagged exponent (input). |
846 // d1 base_double The base as a double. | 738 // d1 base_double The base as a double. |
847 __ SmiUntag(exponent_integer, exponent_tagged); | 739 __ SmiUntag(exponent_integer, exponent_tagged); |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
896 __ Fdiv(result_double, scratch0_double, result_double); | 788 __ Fdiv(result_double, scratch0_double, result_double); |
897 // ECMA-262 only requires Math.pow to return an 'implementation-dependent | 789 // ECMA-262 only requires Math.pow to return an 'implementation-dependent |
898 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow | 790 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow |
899 // to calculate the subnormal value 2^-1074. This method of calculating | 791 // to calculate the subnormal value 2^-1074. This method of calculating |
900 // negative powers doesn't work because 2^1074 overflows to infinity. To | 792 // negative powers doesn't work because 2^1074 overflows to infinity. To |
901 // catch this corner-case, we bail out if the result was 0. (This can only | 793 // catch this corner-case, we bail out if the result was 0. (This can only |
902 // occur if the divisor is infinity or the base is zero.) | 794 // occur if the divisor is infinity or the base is zero.) |
903 __ Fcmp(result_double, 0.0); | 795 __ Fcmp(result_double, 0.0); |
904 __ B(&done, ne); | 796 __ B(&done, ne); |
905 | 797 |
906 if (exponent_type() == ON_STACK) { | 798 AllowExternalCallThatCantCauseGC scope(masm); |
907 // Bail out to runtime code. | 799 __ Mov(saved_lr, lr); |
908 __ Bind(&call_runtime); | 800 __ Fmov(base_double, base_double_copy); |
909 // Put the arguments back on the stack. | 801 __ Scvtf(exponent_double, exponent_integer); |
910 __ Push(base_tagged, exponent_tagged); | 802 __ CallCFunction(ExternalReference::power_double_double_function(isolate()), |
911 __ TailCallRuntime(Runtime::kMathPowRT); | 803 0, 2); |
912 | 804 __ Mov(lr, saved_lr); |
913 // Return. | 805 __ Bind(&done); |
914 __ Bind(&done); | 806 __ Ret(); |
915 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1, | |
916 result_double); | |
917 DCHECK(result_tagged.is(x0)); | |
918 __ Ret(); | |
919 } else { | |
920 AllowExternalCallThatCantCauseGC scope(masm); | |
921 __ Mov(saved_lr, lr); | |
922 __ Fmov(base_double, base_double_copy); | |
923 __ Scvtf(exponent_double, exponent_integer); | |
924 __ CallCFunction( | |
925 ExternalReference::power_double_double_function(isolate()), | |
926 0, 2); | |
927 __ Mov(lr, saved_lr); | |
928 __ Bind(&done); | |
929 __ Ret(); | |
930 } | |
931 } | 807 } |
932 | 808 |
933 | |
934 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 809 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
935 // It is important that the following stubs are generated in this order | 810 // It is important that the following stubs are generated in this order |
936 // because pregenerated stubs can only call other pregenerated stubs. | 811 // because pregenerated stubs can only call other pregenerated stubs. |
937 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses | 812 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses |
938 // CEntryStub. | 813 // CEntryStub. |
939 CEntryStub::GenerateAheadOfTime(isolate); | 814 CEntryStub::GenerateAheadOfTime(isolate); |
940 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 815 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
941 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 816 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
942 CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate); | 817 CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate); |
943 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); | 818 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); |
(...skipping 4776 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5720 kStackUnwindSpace, NULL, spill_offset, | 5595 kStackUnwindSpace, NULL, spill_offset, |
5721 return_value_operand, NULL); | 5596 return_value_operand, NULL); |
5722 } | 5597 } |
5723 | 5598 |
5724 #undef __ | 5599 #undef __ |
5725 | 5600 |
5726 } // namespace internal | 5601 } // namespace internal |
5727 } // namespace v8 | 5602 } // namespace v8 |
5728 | 5603 |
5729 #endif // V8_TARGET_ARCH_ARM64 | 5604 #endif // V8_TARGET_ARCH_ARM64 |
OLD | NEW |