| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 3828 matching lines...) |
| 3839 // the value can be represented in a Smi. If not, we need to convert | 3839 // the value can be represented in a Smi. If not, we need to convert |
| 3840 // it to a HeapNumber. | 3840 // it to a HeapNumber. |
| 3841 Label box_int; | 3841 Label box_int; |
| 3842 __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result. | 3842 __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result. |
| 3843 __ Branch(&box_int, lt, t3, Operand(zero_reg)); | 3843 __ Branch(&box_int, lt, t3, Operand(zero_reg)); |
| 3844 // Tag integer as smi and return it. | 3844 // Tag integer as smi and return it. |
| 3845 __ sll(v0, value, kSmiTagSize); | 3845 __ sll(v0, value, kSmiTagSize); |
| 3846 __ Ret(); | 3846 __ Ret(); |
| 3847 | 3847 |
| 3848 __ bind(&box_int); | 3848 __ bind(&box_int); |
| 3849 // Allocate a HeapNumber for the result and perform int-to-double | |
| 3850 // conversion. | |
| 3851 // The arm version uses a temporary here to save r0, but we don't need to | |
| 3852 // (a0 is not modified). | |
| 3853 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); | |
| 3854 __ AllocateHeapNumber(v0, a3, t0, t1, &slow); | |
| 3855 | 3849 |
| 3856 if (CpuFeatures::IsSupported(FPU)) { | 3850 if (CpuFeatures::IsSupported(FPU)) { |
| 3857 CpuFeatures::Scope scope(FPU); | 3851 CpuFeatures::Scope scope(FPU); |
| 3852 // Allocate a HeapNumber for the result and perform int-to-double |
| 3853 // conversion. |
| 3854 // The arm version uses a temporary here to save r0, but we don't need to |
| 3855 // (a0 is not modified). |
| 3856 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); |
| 3857 __ AllocateHeapNumber(v0, a3, t0, t1, &slow, DONT_TAG_RESULT); |
| 3858 __ mtc1(value, f0); | 3858 __ mtc1(value, f0); |
| 3859 __ cvt_d_w(f0, f0); | 3859 __ cvt_d_w(f0, f0); |
| 3860 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 3860 __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset)); |
| 3861 __ Addu(v0, v0, kHeapObjectTag); |
| 3861 __ Ret(); | 3862 __ Ret(); |
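The FieldMemOperand-to-MemOperand switch goes hand in hand with the new DONT_TAG_RESULT mode: AllocateHeapNumber now leaves the raw, untagged allocation address in v0, the double is stored through a plain MemOperand, and the heap-object tag is added afterwards with the Addu. Both forms reach the same byte; a sketch of the address arithmetic, assuming kHeapObjectTag == 1 (helper names illustrative):

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;  // low bit set on tagged heap pointers

    // FieldMemOperand(tagged, off) addresses tagged + off - kHeapObjectTag, while
    // MemOperand(raw, off) addresses raw + off. With tagged == raw + kHeapObjectTag
    // the two coincide, so storing before tagging writes the same slot as before.
    intptr_t FieldAddress(intptr_t tagged, int off) { return tagged + off - kHeapObjectTag; }
    intptr_t RawAddress(intptr_t raw, int off) { return raw + off; }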
| 3862 } else { | 3863 } else { |
| 3864 // Allocate a HeapNumber for the result and perform int-to-double |
| 3865 // conversion. |
| 3866 // The arm version uses a temporary here to save r0, but we don't need to |
| 3867 // (a0 is not modified). |
| 3868 __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); |
| 3869 __ AllocateHeapNumber(v0, a3, t0, t1, &slow, TAG_RESULT); |
| 3863 Register dst1 = t2; | 3870 Register dst1 = t2; |
| 3864 Register dst2 = t3; | 3871 Register dst2 = t3; |
| 3865 FloatingPointHelper::Destination dest = | 3872 FloatingPointHelper::Destination dest = |
| 3866 FloatingPointHelper::kCoreRegisters; | 3873 FloatingPointHelper::kCoreRegisters; |
| 3867 FloatingPointHelper::ConvertIntToDouble(masm, | 3874 FloatingPointHelper::ConvertIntToDouble(masm, |
| 3868 value, | 3875 value, |
| 3869 dest, | 3876 dest, |
| 3870 f0, | 3877 f0, |
| 3871 dst1, | 3878 dst1, |
| 3872 dst2, | 3879 dst2, |
| (...skipping 16 matching lines...) |
| 3889 // It can fit in an Smi. | 3896 // It can fit in an Smi. |
| 3890 // Tag integer as smi and return it. | 3897 // Tag integer as smi and return it. |
| 3891 __ sll(v0, value, kSmiTagSize); | 3898 __ sll(v0, value, kSmiTagSize); |
| 3892 __ Ret(); | 3899 __ Ret(); |
| 3893 | 3900 |
| 3894 __ bind(&pl_box_int); | 3901 __ bind(&pl_box_int); |
| 3895 // Allocate a HeapNumber for the result and perform int-to-double | 3902 // Allocate a HeapNumber for the result and perform int-to-double |
| 3896 // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all | 3903 // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all |
| 3897 // registers - also when jumping due to exhausted young space. | 3904 // registers - also when jumping due to exhausted young space. |
| 3898 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); | 3905 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); |
| 3899 __ AllocateHeapNumber(v0, t2, t3, t6, &slow); | 3906 __ AllocateHeapNumber(v0, t2, t3, t6, &slow, DONT_TAG_RESULT); |
| 3900 | 3907 |
| 3901 // This is replaced by a macro: | 3908 // This is replaced by a macro: |
| 3902 // __ mtc1(value, f0); // LS 32-bits. | 3909 // __ mtc1(value, f0); // LS 32-bits. |
| 3903 // __ mtc1(zero_reg, f1); // MS 32-bits are all zero. | 3910 // __ mtc1(zero_reg, f1); // MS 32-bits are all zero. |
| 3904 // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit. | 3911 // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit. |
| 3905 | 3912 |
| 3906 __ Cvt_d_uw(f0, value, f22); | 3913 __ Cvt_d_uw(f0, value, f22); |
| 3907 | 3914 |
| 3908 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 3915 __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset)); |
| 3909 | 3916 |
| 3917 __ Addu(v0, v0, kHeapObjectTag); |
| 3910 __ Ret(); | 3918 __ Ret(); |
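The replaced mtc1/cvt_d_l sequence (now hidden behind the Cvt_d_uw macro) is needed because cvt_d_w treats its 32-bit source as signed, so values at or above 2^31 would convert to negative doubles; widening to 64 bits first preserves the unsigned interpretation. The host-side equivalent, as a sketch (helper name illustrative):

    #include <cstdint>

    // Zero-extend to 64 bits before converting so values >= 2^31 stay positive,
    // which is what the 64-bit conversion path achieves on the FPU.
    double UnsignedToDouble(uint32_t value) {
      return static_cast<double>(static_cast<uint64_t>(value));
    }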
| 3911 } else { | 3919 } else { |
| 3912 // Check whether unsigned integer fits into smi. | 3920 // Check whether unsigned integer fits into smi. |
| 3913 Label box_int_0, box_int_1, done; | 3921 Label box_int_0, box_int_1, done; |
| 3914 __ And(t2, value, Operand(0x80000000)); | 3922 __ And(t2, value, Operand(0x80000000)); |
| 3915 __ Branch(&box_int_0, ne, t2, Operand(zero_reg)); | 3923 __ Branch(&box_int_0, ne, t2, Operand(zero_reg)); |
| 3916 __ And(t2, value, Operand(0x40000000)); | 3924 __ And(t2, value, Operand(0x40000000)); |
| 3917 __ Branch(&box_int_1, ne, t2, Operand(zero_reg)); | 3925 __ Branch(&box_int_1, ne, t2, Operand(zero_reg)); |
| 3918 | 3926 |
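The two And/Branch tests above are the unsigned smi range check: a uint32 fits in a non-negative smi only when it is below 2^30, i.e. bits 31 and 30 are both clear, and each set bit dispatches to its own boxing path. As a one-line sketch under the same 31-bit smi payload assumption (helper name illustrative):

    // True when an unsigned 32-bit value fits in a non-negative smi.
    static bool UnsignedFitsInSmi(uint32_t value) {
      return (value & 0xC0000000u) == 0;  // neither bit 31 nor bit 30 set
    }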
| 3919 // Tag integer as smi and return it. | 3927 // Tag integer as smi and return it. |
| (...skipping 12 matching lines...) |
| 3932 // Integer has one leading zero. | 3940 // Integer has one leading zero. |
| 3933 GenerateUInt2Double(masm, hiword, loword, t0, 1); | 3941 GenerateUInt2Double(masm, hiword, loword, t0, 1); |
| 3934 | 3942 |
| 3935 | 3943 |
| 3936 __ bind(&done); | 3944 __ bind(&done); |
| 3937 // Integer was converted to double in registers hiword:loword. | 3945 // Integer was converted to double in registers hiword:loword. |
| 3938 // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber | 3946 // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber |
| 3939 // clobbers all registers - also when jumping due to exhausted young | 3947 // clobbers all registers - also when jumping due to exhausted young |
| 3940 // space. | 3948 // space. |
| 3941 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); | 3949 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); |
| 3942 __ AllocateHeapNumber(t2, t3, t5, t6, &slow); | 3950 __ AllocateHeapNumber(t2, t3, t5, t6, &slow, TAG_RESULT); |
| 3943 | 3951 |
| 3944 __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset)); | 3952 __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset)); |
| 3945 __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset)); | 3953 __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset)); |
| 3946 | 3954 |
| 3947 __ mov(v0, t2); | 3955 __ mov(v0, t2); |
| 3948 __ Ret(); | 3956 __ Ret(); |
| 3949 } | 3957 } |
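Without an FPU, GenerateUInt2Double assembles the IEEE-754 bit pattern directly in the hiword:loword register pair, with separate shift amounts for the no-leading-zero and one-leading-zero cases handled above. A host-side sketch of the same construction for an arbitrary leading-bit position, assuming the standard binary64 layout (__builtin_clz and the helper name are illustrative, not part of the stub):

    #include <cstdint>

    // Packs an unsigned 32-bit integer into binary64 bits split across two
    // 32-bit words, the shape the stub stores into the exponent and mantissa
    // fields of the freshly allocated HeapNumber.
    void UInt32ToDoubleWords(uint32_t value, uint32_t* hiword, uint32_t* loword) {
      if (value == 0) { *hiword = 0; *loword = 0; return; }
      int p = 31 - __builtin_clz(value);                          // leading 1 bit position
      uint64_t mantissa = (uint64_t)(value & ~(1u << p)) << (52 - p);
      uint64_t bits = ((uint64_t)(p + 1023) << 52) | mantissa;    // biased exponent | mantissa
      *hiword = (uint32_t)(bits >> 32);
      *loword = (uint32_t)bits;
    }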
| 3950 } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 3958 } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 3951 // For the floating-point array type, we need to always allocate a | 3959 // For the floating-point array type, we need to always allocate a |
| 3952 // HeapNumber. | 3960 // HeapNumber. |
| 3953 if (CpuFeatures::IsSupported(FPU)) { | 3961 if (CpuFeatures::IsSupported(FPU)) { |
| 3954 CpuFeatures::Scope scope(FPU); | 3962 CpuFeatures::Scope scope(FPU); |
| 3955 // Allocate a HeapNumber for the result. Don't use a0 and a1 as | 3963 // Allocate a HeapNumber for the result. Don't use a0 and a1 as |
| 3956 // AllocateHeapNumber clobbers all registers - also when jumping due to | 3964 // AllocateHeapNumber clobbers all registers - also when jumping due to |
| 3957 // exhausted young space. | 3965 // exhausted young space. |
| 3958 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); | 3966 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); |
| 3959 __ AllocateHeapNumber(v0, t3, t5, t6, &slow); | 3967 __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT); |
| 3960 // The float (single) value is already in fpu reg f0 (if we use float). | 3968 // The float (single) value is already in fpu reg f0 (if we use float). |
| 3961 __ cvt_d_s(f0, f0); | 3969 __ cvt_d_s(f0, f0); |
| 3962 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 3970 __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset)); |
| 3971 |
| 3972 __ Addu(v0, v0, kHeapObjectTag); |
| 3963 __ Ret(); | 3973 __ Ret(); |
| 3964 } else { | 3974 } else { |
| 3965 // Allocate a HeapNumber for the result. Don't use a0 and a1 as | 3975 // Allocate a HeapNumber for the result. Don't use a0 and a1 as |
| 3966 // AllocateHeapNumber clobbers all registers - also when jumping due to | 3976 // AllocateHeapNumber clobbers all registers - also when jumping due to |
| 3967 // exhausted young space. | 3977 // exhausted young space. |
| 3968 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); | 3978 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); |
| 3969 __ AllocateHeapNumber(v0, t3, t5, t6, &slow); | 3979 __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT); |
| 3970 // FPU is not available, do manual single to double conversion. | 3980 // FPU is not available, do manual single to double conversion. |
| 3971 | 3981 |
| 3972 // a2: floating point value (binary32). | 3982 // a2: floating point value (binary32). |
| 3973 // v0: heap number for result | 3983 // v0: heap number for result |
| 3974 | 3984 |
| 3975 // Extract mantissa to t4. | 3985 // Extract mantissa to t4. |
| 3976 __ And(t4, value, Operand(kBinary32MantissaMask)); | 3986 __ And(t4, value, Operand(kBinary32MantissaMask)); |
| 3977 | 3987 |
| 3978 // Extract exponent to t5. | 3988 // Extract exponent to t5. |
| 3979 __ srl(t5, value, kBinary32MantissaBits); | 3989 __ srl(t5, value, kBinary32MantissaBits); |
| (...skipping 34 matching lines...) |
| 4014 __ Ret(); | 4024 __ Ret(); |
| 4015 } | 4025 } |
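The non-FPU float path widens binary32 to binary64 by hand: the mantissa and exponent are peeled off with kBinary32MantissaMask and kBinary32MantissaBits, the exponent is rebiased, and the mantissa is shifted into the wider field; the skipped lines cover the NaN/Infinity and zero branches. A compact sketch of the normal-number case, assuming standard IEEE-754 layouts (helper name illustrative):

    #include <cstdint>

    // Widens a binary32 bit pattern to binary64 bits, normal numbers only; the
    // stub's extra branches handle the special exponent values.
    uint64_t Binary32ToBinary64Bits(uint32_t f) {
      uint64_t sign = (uint64_t)(f >> 31) << 63;
      uint32_t exp32 = (f >> 23) & 0xFF;                          // binary32 exponent field
      uint64_t mantissa = (uint64_t)(f & 0x7FFFFF) << (52 - 23);  // widen the mantissa
      uint64_t exp64 = (uint64_t)(exp32 - 127 + 1023) << 52;      // rebias 127 -> 1023
      return sign | exp64 | mantissa;
    }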
| 4016 | 4026 |
| 4017 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 4027 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 4018 if (CpuFeatures::IsSupported(FPU)) { | 4028 if (CpuFeatures::IsSupported(FPU)) { |
| 4019 CpuFeatures::Scope scope(FPU); | 4029 CpuFeatures::Scope scope(FPU); |
| 4020 // Allocate a HeapNumber for the result. Don't use a0 and a1 as | 4030 // Allocate a HeapNumber for the result. Don't use a0 and a1 as |
| 4021 // AllocateHeapNumber clobbers all registers - also when jumping due to | 4031 // AllocateHeapNumber clobbers all registers - also when jumping due to |
| 4022 // exhausted young space. | 4032 // exhausted young space. |
| 4023 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); | 4033 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); |
| 4024 __ AllocateHeapNumber(v0, t3, t5, t6, &slow); | 4034 __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT); |
| 4025 // The double value is already in f0 | 4035 // The double value is already in f0 |
| 4026 __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 4036 __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset)); |
| 4037 |
| 4038 __ Addu(v0, v0, kHeapObjectTag); |
| 4027 __ Ret(); | 4039 __ Ret(); |
| 4028 } else { | 4040 } else { |
| 4029 // Allocate a HeapNumber for the result. Don't use a0 and a1 as | 4041 // Allocate a HeapNumber for the result. Don't use a0 and a1 as |
| 4030 // AllocateHeapNumber clobbers all registers - also when jumping due to | 4042 // AllocateHeapNumber clobbers all registers - also when jumping due to |
| 4031 // exhausted young space. | 4043 // exhausted young space. |
| 4032 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); | 4044 __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); |
| 4033 __ AllocateHeapNumber(v0, t3, t5, t6, &slow); | 4045 __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT); |
| 4034 | 4046 |
| 4035 __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); | 4047 __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); |
| 4036 __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset)); | 4048 __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset)); |
| 4037 __ Ret(); | 4049 __ Ret(); |
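In the non-FPU double path the element's two 32-bit halves are already in a2/a3, so the stub simply stores them into the HeapNumber's mantissa and exponent words; on little-endian MIPS the low half of the double lives at kMantissaOffset and the high (sign/exponent) half at kExponentOffset. A sketch of that split, assuming little-endian layout (helper name illustrative):

    #include <cstdint>
    #include <cstring>

    // Splits a double into the two words the stub writes: low 32 bits for
    // HeapNumber::kMantissaOffset, high 32 bits for HeapNumber::kExponentOffset.
    void SplitDouble(double d, uint32_t* mantissa_word, uint32_t* exponent_word) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      *mantissa_word = static_cast<uint32_t>(bits);
      *exponent_word = static_cast<uint32_t>(bits >> 32);
    }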
| 4038 } | 4050 } |
| 4039 | 4051 |
| 4040 } else { | 4052 } else { |
| 4041 // Tag integer as smi and return it. | 4053 // Tag integer as smi and return it. |
| 4042 __ sll(v0, value, kSmiTagSize); | 4054 __ sll(v0, value, kSmiTagSize); |
| 4043 __ Ret(); | 4055 __ Ret(); |
| (...skipping 497 matching lines...) |
| 4541 // Load the upper word of the double in the fixed array and test for NaN. | 4553 // Load the upper word of the double in the fixed array and test for NaN. |
| 4542 __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); | 4554 __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); |
| 4543 __ Addu(indexed_double_offset, elements_reg, Operand(scratch2)); | 4555 __ Addu(indexed_double_offset, elements_reg, Operand(scratch2)); |
| 4544 uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); | 4556 uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); |
| 4545 __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); | 4557 __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); |
| 4546 __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32)); | 4558 __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32)); |
| 4547 | 4559 |
| 4548 // Non-NaN. Allocate a new heap number and copy the double value into it. | 4560 // Non-NaN. Allocate a new heap number and copy the double value into it. |
| 4549 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 4561 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 4550 __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, | 4562 __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, |
| 4551 heap_number_map, &slow_allocate_heapnumber); | 4563 heap_number_map, &slow_allocate_heapnumber, TAG_RESULT); |
| 4552 | 4564 |
| 4553 // Don't need to reload the upper 32 bits of the double, it's already in | 4565 // Don't need to reload the upper 32 bits of the double, it's already in |
| 4554 // scratch. | 4566 // scratch. |
| 4555 __ sw(scratch, FieldMemOperand(heap_number_reg, | 4567 __ sw(scratch, FieldMemOperand(heap_number_reg, |
| 4556 HeapNumber::kExponentOffset)); | 4568 HeapNumber::kExponentOffset)); |
| 4557 __ lw(scratch, FieldMemOperand(indexed_double_offset, | 4569 __ lw(scratch, FieldMemOperand(indexed_double_offset, |
| 4558 FixedArray::kHeaderSize)); | 4570 FixedArray::kHeaderSize)); |
| 4559 __ sw(scratch, FieldMemOperand(heap_number_reg, | 4571 __ sw(scratch, FieldMemOperand(heap_number_reg, |
| 4560 HeapNumber::kMantissaOffset)); | 4572 HeapNumber::kMantissaOffset)); |
| 4561 | 4573 |
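Two details of the fast-double-array load above: the key is a smi, so shifting it left by kDoubleSizeLog2 - kSmiTagSize (3 - 1 = 2) turns the tagged index directly into a byte offset of index * 8; and the hole test needs only the upper 32 bits of the element, since holes are marked with a reserved NaN whose upper word is kHoleNanUpper32, stored sizeof(kHoleNanLower32) == 4 bytes past the low word (hence the FixedArray::kHeaderSize + 4 offset on little-endian). A sketch of the offset arithmetic, assuming kSmiTagSize == 1 and kDoubleSizeLog2 == 3 (helper name illustrative):

    #include <cstdint>

    // Byte offset of element `index` inside a FixedDoubleArray backing store,
    // computed straight from the smi-tagged key (smi == index << 1).
    uint32_t DoubleElementByteOffset(uint32_t smi_key) {
      return smi_key << 2;  // (index << 1) << 2 == index * sizeof(double)
    }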
| (...skipping 319 matching lines...) |
| 4881 __ Jump(ic_slow, RelocInfo::CODE_TARGET); | 4893 __ Jump(ic_slow, RelocInfo::CODE_TARGET); |
| 4882 } | 4894 } |
| 4883 } | 4895 } |
| 4884 | 4896 |
| 4885 | 4897 |
| 4886 #undef __ | 4898 #undef __ |
| 4887 | 4899 |
| 4888 } } // namespace v8::internal | 4900 } } // namespace v8::internal |
| 4889 | 4901 |
| 4890 #endif // V8_TARGET_ARCH_MIPS | 4902 #endif // V8_TARGET_ARCH_MIPS |