| OLD | NEW |
| 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2037 matching lines...) |
| 2048 | 2048 |
| 2049 // Backup FPSCR. | 2049 // Backup FPSCR. |
| 2050 __ vmrs(r3); | 2050 __ vmrs(r3); |
| 2051 // Set custom FPSCR: | 2051 // Set custom FPSCR: |
| 2052 // - Set rounding mode to "Round towards Minus Infinity" | 2052 // - Set rounding mode to "Round towards Minus Infinity" |
| 2053 // (i.e. bits [23:22] = 0b10). | 2053 // (i.e. bits [23:22] = 0b10). |
| 2054 // - Clear VFP cumulative exception flags (bits [3:0]). | 2054 // - Clear VFP cumulative exception flags (bits [3:0]). |
| 2055 // - Make sure the Flush-to-zero mode control bit is unset (bit 24). | 2055 // - Make sure the Flush-to-zero mode control bit is unset (bit 24). |
| 2056 __ bic(r9, r3, | 2056 __ bic(r9, r3, |
| 2057 Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask)); | 2057 Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask)); |
| 2058 __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits)); | 2058 __ orr(r9, r9, Operand(kRoundToMinusInf)); |
| 2059 __ vmsr(r9); | 2059 __ vmsr(r9); |
| 2060 | 2060 |
| 2061 // Convert the argument to an integer. | 2061 // Convert the argument to an integer. |
| 2062 __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al); | 2062 __ vcvt_s32_f64(s0, d1, kFPSCRRounding); |
| 2063 | 2063 |
| 2064 // Use vcvt latency to start checking for special cases. | 2064 // Use vcvt latency to start checking for special cases. |
| 2065 // Get the argument exponent and clear the sign bit. | 2065 // Get the argument exponent and clear the sign bit. |
| 2066 __ bic(r6, r5, Operand(HeapNumber::kSignMask)); | 2066 __ bic(r6, r5, Operand(HeapNumber::kSignMask)); |
| 2067 __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord)); | 2067 __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord)); |
| 2068 | 2068 |
| 2069 // Retrieve FPSCR and check for vfp exceptions. | 2069 // Retrieve FPSCR and check for vfp exceptions. |
| 2070 __ vmrs(r9); | 2070 __ vmrs(r9); |
| 2071 __ tst(r9, Operand(kVFPExceptionMask)); | 2071 __ tst(r9, Operand(kVFPExceptionMask)); |
| 2072 __ b(&no_vfp_exception, eq); | 2072 __ b(&no_vfp_exception, eq); |
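A minimal host-side sketch of the FPSCR value the bic/orr pair above computes, assuming the standard ARM VFP FPSCR layout (cumulative exception flags in bits [3:0] as the comment states, RMode in bits [23:22], FZ in bit 24). The constants below are illustrative stand-ins, not V8's kVFP* definitions:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Illustrative masks; assumed from the ARM FPSCR layout, not taken from V8.
      const uint32_t kExceptionMask    = 0xFu;       // cumulative exception flags, bits [3:0]
      const uint32_t kRoundingModeMask = 3u << 22;   // RMode, bits [23:22]
      const uint32_t kFlushToZeroMask  = 1u << 24;   // FZ, bit 24
      const uint32_t kRoundToMinusInf  = 2u << 22;   // RMode = 0b10, round towards minus infinity

      uint32_t fpscr = 0x03000011u;  // example value read with vmrs
      // Mirrors the bic/orr sequence: clear exception flags, rounding mode and FZ,
      // then select round-towards-minus-infinity.
      uint32_t custom = (fpscr & ~(kExceptionMask | kRoundingModeMask | kFlushToZeroMask))
                        | kRoundToMinusInf;
      std::printf("old FPSCR = 0x%08x  custom FPSCR = 0x%08x\n", fpscr, custom);
      return 0;
    }

The stub keeps the original FPSCR in r3 (the "Backup FPSCR" step above), presumably so it can be written back with vmsr once the conversion and exception check are done; that restore falls outside the hunk shown here.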
| (...skipping 1716 matching lines...) |
| 3789 __ sub(r5, value, Operand(kHeapObjectTag)); | 3789 __ sub(r5, value, Operand(kHeapObjectTag)); |
| 3790 __ vldr(d0, r5, HeapNumber::kValueOffset); | 3790 __ vldr(d0, r5, HeapNumber::kValueOffset); |
| 3791 | 3791 |
| 3792 __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 3792 __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
| 3793 // NaNs and Infinities have all-ones exponents, so they sign-extend to -1. | 3793 // NaNs and Infinities have all-ones exponents, so they sign-extend to -1. |
| 3794 __ cmp(r6, Operand(-1)); | 3794 __ cmp(r6, Operand(-1)); |
| 3795 __ mov(r5, Operand(0), LeaveCC, eq); | 3795 __ mov(r5, Operand(0), LeaveCC, eq); |
| 3796 | 3796 |
| 3797 // Not infinity or NaN: simply convert to int. | 3797 // Not infinity or NaN: simply convert to int. |
| 3798 if (IsElementTypeSigned(array_type)) { | 3798 if (IsElementTypeSigned(array_type)) { |
| 3799 __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne); | 3799 __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne); |
| 3800 } else { | 3800 } else { |
| 3801 __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne); | 3801 __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne); |
| 3802 } | 3802 } |
| 3803 __ vmov(r5, s0, ne); | 3803 __ vmov(r5, s0, ne); |
| 3804 | 3804 |
| 3805 switch (array_type) { | 3805 switch (array_type) { |
| 3806 case kExternalByteArray: | 3806 case kExternalByteArray: |
| 3807 case kExternalUnsignedByteArray: | 3807 case kExternalUnsignedByteArray: |
| 3808 __ strb(r5, MemOperand(r3, r4, LSL, 0)); | 3808 __ strb(r5, MemOperand(r3, r4, LSL, 0)); |
| 3809 break; | 3809 break; |
| 3810 case kExternalShortArray: | 3810 case kExternalShortArray: |
| 3811 case kExternalUnsignedShortArray: | 3811 case kExternalUnsignedShortArray: |
| (...skipping 154 matching lines...) |
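As a rough illustration of the special-case test in the hunk above: the Sbfx extracts the 11-bit exponent as a signed field, an all-ones exponent (NaN or infinity) sign-extends to -1 and the stored value is forced to 0, and any other value is converted with round-to-zero and stored at the element width. The standalone sketch below mimics the same idea; the function name is hypothetical, it is not V8 code, and it ignores the saturation that vcvt would perform on out-of-range values:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    // Hypothetical helper mirroring the stub's double-to-int step.
    int32_t TruncateForExternalArray(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      // Counterpart of the Sbfx: bits [30:20] of the high word, sign-extended
      // (assumes the usual arithmetic right shift of signed values).
      int32_t exponent = static_cast<int32_t>(high << 1) >> 21;
      if (exponent == -1) return 0;        // NaN or +/-Infinity: store 0
      return static_cast<int32_t>(value);  // truncate towards zero, like vcvt with RoundToZero
    }

    int main() {
      std::printf("%d %d %d\n",
                  TruncateForExternalArray(3.7),    // 3
                  TruncateForExternalArray(-2.9),   // -2
                  TruncateForExternalArray(std::numeric_limits<double>::quiet_NaN()));  // 0
      return 0;
    }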
| 3966 | 3966 |
| 3967 return GetCode(flags); | 3967 return GetCode(flags); |
| 3968 } | 3968 } |
| 3969 | 3969 |
| 3970 | 3970 |
| 3971 #undef __ | 3971 #undef __ |
| 3972 | 3972 |
| 3973 } } // namespace v8::internal | 3973 } } // namespace v8::internal |
| 3974 | 3974 |
| 3975 #endif // V8_TARGET_ARCH_ARM | 3975 #endif // V8_TARGET_ARCH_ARM |