OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 2014 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2025 | 2025 |
2026 // Retrieve FPSCR. | 2026 // Retrieve FPSCR. |
2027 vmrs(scratch); | 2027 vmrs(scratch); |
2028 // Restore FPSCR. | 2028 // Restore FPSCR. |
2029 vmsr(prev_fpscr); | 2029 vmsr(prev_fpscr); |
2030 // Check for vfp exceptions. | 2030 // Check for vfp exceptions. |
2031 tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion)); | 2031 tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion)); |
2032 } | 2032 } |
2033 | 2033 |
2034 | 2034 |
| 2035 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, |
| 2036 Register input_high, |
| 2037 Register input_low, |
| 2038 Register scratch) { |
| 2039 Label done, normal_exponent, restore_sign; |
| 2040 |
| 2041 // Extract the biased exponent in result. |
| 2042 Ubfx(result, |
| 2043 input_high, |
| 2044 HeapNumber::kExponentShift, |
| 2045 HeapNumber::kExponentBits); |
| 2046 |
| 2047 // Check for Infinity and NaNs, which should return 0. |
| 2048 cmp(result, Operand(HeapNumber::kExponentMask)); |
| 2049 mov(result, Operand(0), LeaveCC, eq); |
| 2050 b(eq, &done); |
| 2051 |
| 2052 // Express exponent as delta to (number of mantissa bits + 31). |
| 2053 sub(result, |
| 2054 result, |
| 2055 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31), |
| 2056 SetCC); |
| 2057 |
| 2058 // If the delta is strictly positive, all bits would be shifted away, |
| 2059 // which means that we can return 0. |
| 2060 b(le, &normal_exponent); |
| 2061 mov(result, Operand(0)); |
| 2062 b(&done); |
| 2063 |
| 2064 bind(&normal_exponent); |
| 2065 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; |
| 2066 // Calculate shift. |
| 2067 add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC); |
| 2068 |
| 2069 // Save the sign. |
| 2070 Register sign = result; |
| 2071 result = no_reg; |
| 2072 and_(sign, input_high, Operand(HeapNumber::kSignMask)); |
| 2073 |
| 2074 // Set the implicit 1 before the mantissa part in input_high. |
| 2075 orr(input_high, |
| 2076 input_high, |
| 2077 Operand(1 << HeapNumber::kMantissaBitsInTopWord)); |
| 2078 // Shift the mantissa bits to the correct position. |
| 2079 // We don't need to clear non-mantissa bits as they will be shifted away. |
| 2080 // If they weren't, it would mean that the answer is in the 32bit range. |
| 2081 mov(input_high, Operand(input_high, LSL, scratch)); |
| 2082 |
| 2083 // Replace the shifted bits with bits from the lower mantissa word. |
| 2084 Label pos_shift, shift_done; |
| 2085 rsb(scratch, scratch, Operand(32), SetCC); |
| 2086 b(&pos_shift, ge); |
| 2087 |
| 2088 // Negate scratch. |
| 2089 rsb(scratch, scratch, Operand(0)); |
| 2090 mov(input_low, Operand(input_low, LSL, scratch)); |
| 2091 b(&shift_done); |
| 2092 |
| 2093 bind(&pos_shift); |
| 2094 mov(input_low, Operand(input_low, LSR, scratch)); |
| 2095 |
| 2096 bind(&shift_done); |
| 2097 orr(input_high, input_high, Operand(input_low)); |
| 2098 // Restore sign if necessary. |
| 2099 cmp(sign, Operand(0)); |
| 2100 result = sign; |
| 2101 sign = no_reg; |
| 2102 rsb(result, input_high, Operand(0), LeaveCC, ne); |
| 2103 mov(result, input_high, LeaveCC, eq); |
| 2104 bind(&done); |
| 2105 } |
| 2106 |
| 2107 |
// Truncates the double in double_input to a 32-bit signed integer in result,
// following ECMA-262 ToInt32 semantics. Fast path: a VFP vcvt (round toward
// zero) with the FPSCR cumulative exception flags checked afterwards; if the
// conversion raised overflow/underflow/invalid-op (out-of-int32-range value
// or NaN), falls back to the manual bit-twiddling truncation in
// EmitOutOfInt32RangeTruncate. Requires VFP3. Clobbers scratch, input_high
// and input_low (the latter two only on the slow path).
void MacroAssembler::EmitECMATruncate(Register result,
                                      DwVfpRegister double_input,
                                      SwVfpRegister single_scratch,
                                      Register scratch,
                                      Register input_high,
                                      Register input_low) {
  CpuFeatures::Scope scope(VFP3);
  // All general-purpose registers must be distinct, and the single-precision
  // scratch must not alias either half of the input double.
  ASSERT(!input_high.is(result));
  ASSERT(!input_low.is(result));
  ASSERT(!input_low.is(input_high));
  ASSERT(!scratch.is(result) &&
         !scratch.is(input_high) &&
         !scratch.is(input_low));
  ASSERT(!single_scratch.is(double_input.low()) &&
         !single_scratch.is(double_input.high()));

  Label done;

  // Clear cumulative exception flags.
  ClearFPSCRBits(kVFPExceptionMask, scratch);
  // Try a conversion to a signed integer.
  vcvt_s32_f64(single_scratch, double_input);
  vmov(result, single_scratch);
  // Retrieve the FPSCR.
  vmrs(scratch);
  // Check for overflow and NaNs.
  tst(scratch, Operand(kVFPOverflowExceptionBit |
                       kVFPUnderflowExceptionBit |
                       kVFPInvalidOpExceptionBit));
  // If we had no exceptions we are done.
  b(eq, &done);

  // Load the double value and perform a manual truncation.
  vmov(input_low, input_high, double_input);
  EmitOutOfInt32RangeTruncate(result,
                              input_high,
                              input_low,
                              scratch);
  bind(&done);
}
| 2148 |
| 2149 |
2035 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 2150 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
2036 Register src, | 2151 Register src, |
2037 int num_least_bits) { | 2152 int num_least_bits) { |
2038 if (CpuFeatures::IsSupported(ARMv7)) { | 2153 if (CpuFeatures::IsSupported(ARMv7)) { |
2039 ubfx(dst, src, kSmiTagSize, num_least_bits); | 2154 ubfx(dst, src, kSmiTagSize, num_least_bits); |
2040 } else { | 2155 } else { |
2041 mov(dst, Operand(src, ASR, kSmiTagSize)); | 2156 mov(dst, Operand(src, ASR, kSmiTagSize)); |
2042 and_(dst, dst, Operand((1 << num_least_bits) - 1)); | 2157 and_(dst, dst, Operand((1 << num_least_bits) - 1)); |
2043 } | 2158 } |
2044 } | 2159 } |
(...skipping 726 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2771 void CodePatcher::EmitCondition(Condition cond) { | 2886 void CodePatcher::EmitCondition(Condition cond) { |
2772 Instr instr = Assembler::instr_at(masm_.pc_); | 2887 Instr instr = Assembler::instr_at(masm_.pc_); |
2773 instr = (instr & ~kCondMask) | cond; | 2888 instr = (instr & ~kCondMask) | cond; |
2774 masm_.emit(instr); | 2889 masm_.emit(instr); |
2775 } | 2890 } |
2776 | 2891 |
2777 | 2892 |
2778 } } // namespace v8::internal | 2893 } } // namespace v8::internal |
2779 | 2894 |
2780 #endif // V8_TARGET_ARCH_ARM | 2895 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |