OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_ARM | 7 #if V8_TARGET_ARCH_ARM |
8 | 8 |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/division-by-constant.h" | 10 #include "src/base/division-by-constant.h" |
(...skipping 932 matching lines...)
943 str(src2, dst2, cond); | 943 str(src2, dst2, cond); |
944 } else { // PostIndex or NegPostIndex. | 944 } else { // PostIndex or NegPostIndex. |
945 DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); | 945 DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex)); |
946 dst2.set_offset(dst2.offset() - 4); | 946 dst2.set_offset(dst2.offset() - 4); |
947 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); | 947 str(src1, MemOperand(dst.rn(), 4, PostIndex), cond); |
948 str(src2, dst2, cond); | 948 str(src2, dst2, cond); |
949 } | 949 } |
950 } | 950 } |
951 } | 951 } |
952 | 952 |
953 | |
954 void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { | |
955 // If needed, restore wanted bits of FPSCR. | |
956 Label fpscr_done; | |
957 vmrs(scratch); | |
958 if (emit_debug_code()) { | |
959 Label rounding_mode_correct; | |
960 tst(scratch, Operand(kVFPRoundingModeMask)); | |
961 b(eq, &rounding_mode_correct); | |
962 // Don't call Assert here, since Runtime_Abort could re-enter here. | |
963 stop("Default rounding mode not set"); | |
964 bind(&rounding_mode_correct); | |
965 } | |
966 tst(scratch, Operand(kVFPDefaultNaNModeControlBit)); | |
967 b(ne, &fpscr_done); | |
968 orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit)); | |
969 vmsr(scratch); | |
970 bind(&fpscr_done); | |
971 } | |
972 | |
973 | |
974 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, | 953 void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, |
975 const DwVfpRegister src, | 954 const DwVfpRegister src, |
976 const Condition cond) { | 955 const Condition cond) { |
| 956 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which |
| 957 // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0 |
| 958 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0. |
977 vsub(dst, src, kDoubleRegZero, cond); | 959 vsub(dst, src, kDoubleRegZero, cond); |
978 } | 960 } |
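The reasoning in the new comment can be checked with ordinary IEEE-754 arithmetic on the host. A minimal standalone sketch (not part of this CL; it assumes the default rounding mode and no fast-math compiler flags):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  // vadd would lose the sign of negative zero; vsub keeps it.
  assert(!std::signbit(-0.0 + 0.0));  // -0.0 + 0.0 == +0.0
  assert(std::signbit(-0.0 - 0.0));   // -0.0 - 0.0 == -0.0

  // Arithmetic quiets a signalling NaN, which is what makes "x - 0.0" work
  // as a cheap canonicalization step.
  volatile double snan = std::numeric_limits<double>::signaling_NaN();
  double quieted = snan - 0.0;
  uint64_t bits;
  std::memcpy(&bits, &quieted, sizeof bits);
  assert((bits & 0x0008000000000000ull) != 0);  // quiet bit (mantissa bit 51) is set
  return 0;
}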
979 | 961 |
980 | 962 |
981 void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, | 963 void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, |
982 const SwVfpRegister src2, | 964 const SwVfpRegister src2, |
983 const Condition cond) { | 965 const Condition cond) { |
984 // Compare and move FPSCR flags to the normal condition flags. | 966 // Compare and move FPSCR flags to the normal condition flags. |
985 VFPCompareAndLoadFlags(src1, src2, pc, cond); | 967 VFPCompareAndLoadFlags(src1, src2, pc, cond); |
986 } | 968 } |
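Passing pc as the flags register is the ARM idiom for moving FPSCR.NZCV into the APSR, so ordinary integer condition codes apply after a VFP compare. A rough sketch of what VFPCompareAndLoadFlags is assumed to expand to (the body is not shown in this diff; treat it as illustrative, not authoritative):

void VFPCompareAndLoadFlagsSketch(MacroAssembler* masm, SwVfpRegister a,
                                  SwVfpRegister b, Register flags,
                                  Condition cond) {
  masm->vcmp(a, b, cond);   // result goes to FPSCR.{N,Z,C,V}
  masm->vmrs(flags, cond);  // flags == pc encodes VMRS APSR_nzcv, FPSCR
}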
(...skipping 1420 matching lines...)
2407 JumpIfSmi(value_reg, &smi_value); | 2389 JumpIfSmi(value_reg, &smi_value); |
2408 | 2390 |
2409 // Ensure that the object is a heap number | 2391 // Ensure that the object is a heap number |
2410 CheckMap(value_reg, | 2392 CheckMap(value_reg, |
2411 scratch1, | 2393 scratch1, |
2412 isolate()->factory()->heap_number_map(), | 2394 isolate()->factory()->heap_number_map(), |
2413 fail, | 2395 fail, |
2414 DONT_DO_SMI_CHECK); | 2396 DONT_DO_SMI_CHECK); |
2415 | 2397 |
2416 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 2398 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
2417 // Force a canonical NaN. | |
2418 if (emit_debug_code()) { | |
2419 vmrs(ip); | |
2420 tst(ip, Operand(kVFPDefaultNaNModeControlBit)); | |
2421 Assert(ne, kDefaultNaNModeNotSet); | |
2422 } | |
2423 VFPCanonicalizeNaN(double_scratch); | 2399 VFPCanonicalizeNaN(double_scratch); |
2424 b(&store); | 2400 b(&store); |
2425 | 2401 |
2426 bind(&smi_value); | 2402 bind(&smi_value); |
2427 SmiToDouble(double_scratch, value_reg); | 2403 SmiToDouble(double_scratch, value_reg); |
2428 | 2404 |
2429 bind(&store); | 2405 bind(&store); |
2430 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg)); | 2406 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg)); |
2431 vstr(double_scratch, | 2407 vstr(double_scratch, |
2432 FieldMemOperand(scratch1, | 2408 FieldMemOperand(scratch1, |
(...skipping 1537 matching lines...)
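For context on the store sequence above (presumably StoreNumberToDoubleElements), a self-contained C++ sketch of its control flow; TaggedNumber and the field names are illustrative stand-ins, not V8 types:

#include <cstdint>

// Hypothetical stand-in for a tagged JS number: either a smi (int) or a boxed
// double, mirroring the two paths the assembler code above takes.
struct TaggedNumber {
  bool is_smi;
  int32_t smi;
  double heap_number;
};

double NumberToStorableDouble(const TaggedNumber& value) {
  if (value.is_smi) {
    return static_cast<double>(value.smi);  // SmiToDouble path
  }
  // In the real code, CheckMap bails out to 'fail' for non-HeapNumbers here.
  double d = value.heap_number;             // vldr from HeapNumber::kValueOffset
  return d - 0.0;                           // VFPCanonicalizeNaN: quiets signalling NaNs
}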
3970 } | 3946 } |
3971 } | 3947 } |
3972 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); | 3948 if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift)); |
3973 add(result, result, Operand(dividend, LSR, 31)); | 3949 add(result, result, Operand(dividend, LSR, 31)); |
3974 } | 3950 } |
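The tail above is the standard signed-division-by-constant sequence: multiply by a precomputed magic number (from src/base/division-by-constant.h, included at the top of the file), arithmetic-shift the high word, then add the dividend's sign bit so negative quotients truncate toward zero. A minimal host-side sketch of the same arithmetic (parameter names are illustrative; it omits the add/subtract-dividend fix-ups that the elided lines above apparently handle):

#include <cstdint>

// 'multiplier' and 'shift' play the role of mag.multiplier and mag.shift in
// the snippet above; the real values come from src/base/division-by-constant.h.
int32_t TruncatingDivSketch(int32_t dividend, int32_t multiplier, int shift) {
  int64_t product = static_cast<int64_t>(dividend) * multiplier;
  int32_t result = static_cast<int32_t>(product >> 32);  // high word, like smull
  if (shift > 0) result >>= shift;                        // ASR mag.shift
  // LSR #31 extracts the dividend's sign bit; adding it rounds the quotient
  // toward zero instead of toward negative infinity.
  result += static_cast<uint32_t>(dividend) >> 31;
  return result;
}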
3975 | 3951 |
3976 } // namespace internal | 3952 } // namespace internal |
3977 } // namespace v8 | 3953 } // namespace v8 |
3978 | 3954 |
3979 #endif // V8_TARGET_ARCH_ARM | 3955 #endif // V8_TARGET_ARCH_ARM |