| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if V8_TARGET_ARCH_PPC | 5 #if V8_TARGET_ARCH_PPC |
| 6 | 6 |
| 7 #include "src/code-stubs.h" | 7 #include "src/code-stubs.h" |
| 8 #include "src/api-arguments.h" | 8 #include "src/api-arguments.h" |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
| (...skipping 716 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 727 } | 727 } |
| 728 | 728 |
| 729 | 729 |
| 730 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { | 730 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { |
| 731 __ PopSafepointRegisters(); | 731 __ PopSafepointRegisters(); |
| 732 __ blr(); | 732 __ blr(); |
| 733 } | 733 } |
| 734 | 734 |
| 735 | 735 |
| 736 void MathPowStub::Generate(MacroAssembler* masm) { | 736 void MathPowStub::Generate(MacroAssembler* masm) { |
| 737 const Register base = r4; | |
| 738 const Register exponent = MathPowTaggedDescriptor::exponent(); | 737 const Register exponent = MathPowTaggedDescriptor::exponent(); |
| 739 DCHECK(exponent.is(r5)); | 738 DCHECK(exponent.is(r5)); |
| 740 const Register heapnumbermap = r8; | |
| 741 const Register heapnumber = r3; | |
| 742 const DoubleRegister double_base = d1; | 739 const DoubleRegister double_base = d1; |
| 743 const DoubleRegister double_exponent = d2; | 740 const DoubleRegister double_exponent = d2; |
| 744 const DoubleRegister double_result = d3; | 741 const DoubleRegister double_result = d3; |
| 745 const DoubleRegister double_scratch = d0; | 742 const DoubleRegister double_scratch = d0; |
| 746 const Register scratch = r11; | 743 const Register scratch = r11; |
| 747 const Register scratch2 = r10; | 744 const Register scratch2 = r10; |
| 748 | 745 |
| 749 Label call_runtime, done, int_exponent; | 746 Label call_runtime, done, int_exponent; |
| 750 if (exponent_type() == ON_STACK) { | 747 if (exponent_type() == TAGGED) { |
| 751 Label base_is_smi, unpack_exponent; | |
| 752 // The exponent and base are supplied as arguments on the stack. | |
| 753 // This can only happen if the stub is called from non-optimized code. | |
| 754 // Load input parameters from stack to double registers. | |
| 755 __ LoadP(base, MemOperand(sp, 1 * kPointerSize)); | |
| 756 __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize)); | |
| 757 | |
| 758 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); | |
| 759 | |
| 760 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); | |
| 761 __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset)); | |
| 762 __ cmp(scratch, heapnumbermap); | |
| 763 __ bne(&call_runtime); | |
| 764 | |
| 765 __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); | |
| 766 __ b(&unpack_exponent); | |
| 767 | |
| 768 __ bind(&base_is_smi); | |
| 769 __ ConvertIntToDouble(scratch, double_base); | |
| 770 __ bind(&unpack_exponent); | |
| 771 | |
| 772 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | |
| 773 __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); | |
| 774 __ cmp(scratch, heapnumbermap); | |
| 775 __ bne(&call_runtime); | |
| 776 | |
| 777 __ lfd(double_exponent, | |
| 778 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | |
| 779 } else if (exponent_type() == TAGGED) { | |
| 780 // Base is already in double_base. | 748 // Base is already in double_base. |
| 781 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | 749 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); |
| 782 | 750 |
| 783 __ lfd(double_exponent, | 751 __ lfd(double_exponent, |
| 784 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 752 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
| 785 } | 753 } |
| 786 | 754 |
| 787 if (exponent_type() != INTEGER) { | 755 if (exponent_type() != INTEGER) { |
| 788 // Detect integer exponents stored as double. | 756 // Detect integer exponents stored as double. |
| 789 __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2, | 757 __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2, |
| 790 double_scratch); | 758 double_scratch); |
| 791 __ beq(&int_exponent); | 759 __ beq(&int_exponent); |
| 792 | 760 |
| 793 if (exponent_type() == ON_STACK) { | |
| 794 // Detect square root case. Crankshaft detects constant +/-0.5 at | |
| 795 // compile time and uses DoMathPowHalf instead. We then skip this check | |
| 796 // for non-constant cases of +/-0.5 as these hardly occur. | |
| 797 Label not_plus_half, not_minus_inf1, not_minus_inf2; | |
| 798 | |
| 799 // Test for 0.5. | |
| 800 __ LoadDoubleLiteral(double_scratch, 0.5, scratch); | |
| 801 __ fcmpu(double_exponent, double_scratch); | |
| 802 __ bne(¬_plus_half); | |
| 803 | |
| 804 // Calculates square root of base. Check for the special case of | |
| 805 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). | |
| 806 __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch); | |
| 807 __ fcmpu(double_base, double_scratch); | |
| 808 __ bne(¬_minus_inf1); | |
| 809 __ fneg(double_result, double_scratch); | |
| 810 __ b(&done); | |
| 811 __ bind(¬_minus_inf1); | |
| 812 | |
| 813 // Add +0 to convert -0 to +0. | |
| 814 __ fadd(double_scratch, double_base, kDoubleRegZero); | |
| 815 __ fsqrt(double_result, double_scratch); | |
| 816 __ b(&done); | |
| 817 | |
| 818 __ bind(¬_plus_half); | |
| 819 __ LoadDoubleLiteral(double_scratch, -0.5, scratch); | |
| 820 __ fcmpu(double_exponent, double_scratch); | |
| 821 __ bne(&call_runtime); | |
| 822 | |
| 823 // Calculates square root of base. Check for the special case of | |
| 824 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). | |
| 825 __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch); | |
| 826 __ fcmpu(double_base, double_scratch); | |
| 827 __ bne(¬_minus_inf2); | |
| 828 __ fmr(double_result, kDoubleRegZero); | |
| 829 __ b(&done); | |
| 830 __ bind(¬_minus_inf2); | |
| 831 | |
| 832 // Add +0 to convert -0 to +0. | |
| 833 __ fadd(double_scratch, double_base, kDoubleRegZero); | |
| 834 __ LoadDoubleLiteral(double_result, 1.0, scratch); | |
| 835 __ fsqrt(double_scratch, double_scratch); | |
| 836 __ fdiv(double_result, double_result, double_scratch); | |
| 837 __ b(&done); | |
| 838 } | |
| 839 | |
| 840 __ mflr(r0); | 761 __ mflr(r0); |
| 841 __ push(r0); | 762 __ push(r0); |
| 842 { | 763 { |
| 843 AllowExternalCallThatCantCauseGC scope(masm); | 764 AllowExternalCallThatCantCauseGC scope(masm); |
| 844 __ PrepareCallCFunction(0, 2, scratch); | 765 __ PrepareCallCFunction(0, 2, scratch); |
| 845 __ MovToFloatParameters(double_base, double_exponent); | 766 __ MovToFloatParameters(double_base, double_exponent); |
| 846 __ CallCFunction( | 767 __ CallCFunction( |
| 847 ExternalReference::power_double_double_function(isolate()), 0, 2); | 768 ExternalReference::power_double_double_function(isolate()), 0, 2); |
| 848 } | 769 } |
| 849 __ pop(r0); | 770 __ pop(r0); |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 898 __ fdiv(double_result, double_scratch, double_result); | 819 __ fdiv(double_result, double_scratch, double_result); |
| 899 // Test whether result is zero. Bail out to check for subnormal result. | 820 // Test whether result is zero. Bail out to check for subnormal result. |
| 900 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. | 821 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. |
| 901 __ fcmpu(double_result, kDoubleRegZero); | 822 __ fcmpu(double_result, kDoubleRegZero); |
| 902 __ bne(&done); | 823 __ bne(&done); |
| 903 // double_exponent may not contain the exponent value if the input was a | 824 // double_exponent may not contain the exponent value if the input was a |
| 904 // smi. We set it with exponent value before bailing out. | 825 // smi. We set it with exponent value before bailing out. |
| 905 __ ConvertIntToDouble(exponent, double_exponent); | 826 __ ConvertIntToDouble(exponent, double_exponent); |
| 906 | 827 |
| 907 // Returning or bailing out. | 828 // Returning or bailing out. |
| 908 if (exponent_type() == ON_STACK) { | 829 __ mflr(r0); |
| 909 // The arguments are still on the stack. | 830 __ push(r0); |
| 910 __ bind(&call_runtime); | 831 { |
| 911 __ TailCallRuntime(Runtime::kMathPowRT); | 832 AllowExternalCallThatCantCauseGC scope(masm); |
| 833 __ PrepareCallCFunction(0, 2, scratch); |
| 834 __ MovToFloatParameters(double_base, double_exponent); |
| 835 __ CallCFunction( |
| 836 ExternalReference::power_double_double_function(isolate()), 0, 2); |
| 837 } |
| 838 __ pop(r0); |
| 839 __ mtlr(r0); |
| 840 __ MovFromFloatResult(double_result); |
| 912 | 841 |
| 913 // The stub is called from non-optimized code, which expects the result | 842 __ bind(&done); |
| 914 // as heap number in exponent. | 843 __ Ret(); |
| 915 __ bind(&done); | |
| 916 __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap, | |
| 917 &call_runtime); | |
| 918 __ stfd(double_result, | |
| 919 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | |
| 920 DCHECK(heapnumber.is(r3)); | |
| 921 __ Ret(2); | |
| 922 } else { | |
| 923 __ mflr(r0); | |
| 924 __ push(r0); | |
| 925 { | |
| 926 AllowExternalCallThatCantCauseGC scope(masm); | |
| 927 __ PrepareCallCFunction(0, 2, scratch); | |
| 928 __ MovToFloatParameters(double_base, double_exponent); | |
| 929 __ CallCFunction( | |
| 930 ExternalReference::power_double_double_function(isolate()), 0, 2); | |
| 931 } | |
| 932 __ pop(r0); | |
| 933 __ mtlr(r0); | |
| 934 __ MovFromFloatResult(double_result); | |
| 935 | |
| 936 __ bind(&done); | |
| 937 __ Ret(); | |
| 938 } | |
| 939 } | 844 } |
| 940 | 845 |
| 941 | 846 |
| 942 bool CEntryStub::NeedsImmovableCode() { return true; } | 847 bool CEntryStub::NeedsImmovableCode() { return true; } |
| 943 | 848 |
| 944 | 849 |
| 945 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 850 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
| 946 CEntryStub::GenerateAheadOfTime(isolate); | 851 CEntryStub::GenerateAheadOfTime(isolate); |
| 947 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 852 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| 948 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 853 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
| (...skipping 4637 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5586 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); | 5491 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); |
| 5587 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, | 5492 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, |
| 5588 kStackUnwindSpace, NULL, return_value_operand, NULL); | 5493 kStackUnwindSpace, NULL, return_value_operand, NULL); |
| 5589 } | 5494 } |
| 5590 | 5495 |
| 5591 #undef __ | 5496 #undef __ |
| 5592 } // namespace internal | 5497 } // namespace internal |
| 5593 } // namespace v8 | 5498 } // namespace v8 |
| 5594 | 5499 |
| 5595 #endif // V8_TARGET_ARCH_PPC | 5500 #endif // V8_TARGET_ARCH_PPC |
| OLD | NEW |