OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "v8.h" | 5 #include "v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_ARM64 | 7 #if V8_TARGET_ARCH_ARM64 |
8 | 8 |
9 #include "bootstrapper.h" | 9 #include "bootstrapper.h" |
10 #include "codegen.h" | 10 #include "codegen.h" |
(...skipping 1204 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1215 Abort(kTheCurrentStackPointerIsBelowCsp); | 1215 Abort(kTheCurrentStackPointerIsBelowCsp); |
1216 | 1216 |
1217 bind(&ok); | 1217 bind(&ok); |
1218 // Restore StackPointer(). | 1218 // Restore StackPointer(). |
1219 sub(StackPointer(), csp, StackPointer()); | 1219 sub(StackPointer(), csp, StackPointer()); |
1220 } | 1220 } |
1221 } | 1221 } |
1222 } | 1222 } |
1223 | 1223 |
1224 | 1224 |
| 1225 void MacroAssembler::AssertFPCRState(Register fpcr) { |
| 1226 if (emit_debug_code()) { |
| 1227 Label unexpected_mode, done; |
| 1228 UseScratchRegisterScope temps(this); |
| 1229 if (fpcr.IsNone()) { |
| 1230 fpcr = temps.AcquireX(); |
| 1231 Mrs(fpcr, FPCR); |
| 1232 } |
| 1233 |
 | 1234 // Settings overridden by ConfigureFPCR(): |
| 1235 // - Assert that default-NaN mode is set. |
| 1236 Tbz(fpcr, DN_offset, &unexpected_mode); |
| 1237 |
| 1238 // Settings left to their default values: |
| 1239 // - Assert that flush-to-zero is not set. |
| 1240 Tbnz(fpcr, FZ_offset, &unexpected_mode); |
| 1241 // - Assert that the rounding mode is nearest-with-ties-to-even. |
| 1242 STATIC_ASSERT(FPTieEven == 0); |
| 1243 Tst(fpcr, RMode_mask); |
| 1244 B(eq, &done); |
| 1245 |
| 1246 Bind(&unexpected_mode); |
| 1247 Abort(kUnexpectedFPCRMode); |
| 1248 |
| 1249 Bind(&done); |
| 1250 } |
| 1251 } |
| 1252 |
| 1253 |
| 1254 void MacroAssembler::ConfigureFPCR() { |
| 1255 UseScratchRegisterScope temps(this); |
| 1256 Register fpcr = temps.AcquireX(); |
| 1257 Mrs(fpcr, FPCR); |
| 1258 |
| 1259 // If necessary, enable default-NaN mode. The default values of the other FPCR |
| 1260 // options should be suitable, and AssertFPCRState will verify that. |
| 1261 Label no_write_required; |
| 1262 Tbnz(fpcr, DN_offset, &no_write_required); |
| 1263 |
| 1264 Orr(fpcr, fpcr, DN_mask); |
| 1265 Msr(FPCR, fpcr); |
| 1266 |
| 1267 Bind(&no_write_required); |
| 1268 AssertFPCRState(fpcr); |
| 1269 } |
| 1270 |
| 1271 |
| 1272 void MacroAssembler::CanonicalizeNaN(const FPRegister& dst, |
| 1273 const FPRegister& src) { |
| 1274 AssertFPCRState(); |
| 1275 |
| 1276 // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except |
| 1277 // for NaNs, which become the default NaN. We use fsub rather than fadd |
| 1278 // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0. |
| 1279 Fsub(dst, src, fp_zero); |
| 1280 } |
| 1281 |
| 1282 |
1225 void MacroAssembler::LoadRoot(CPURegister destination, | 1283 void MacroAssembler::LoadRoot(CPURegister destination, |
1226 Heap::RootListIndex index) { | 1284 Heap::RootListIndex index) { |
1227 // TODO(jbramley): Most root values are constants, and can be synthesized | 1285 // TODO(jbramley): Most root values are constants, and can be synthesized |
1228 // without a load. Refer to the ARM back end for details. | 1286 // without a load. Refer to the ARM back end for details. |
1229 Ldr(destination, MemOperand(root, index << kPointerSizeLog2)); | 1287 Ldr(destination, MemOperand(root, index << kPointerSizeLog2)); |
1230 } | 1288 } |
1231 | 1289 |
1232 | 1290 |
1233 void MacroAssembler::StoreRoot(Register source, | 1291 void MacroAssembler::StoreRoot(Register source, |
1234 Heap::RootListIndex index) { | 1292 Heap::RootListIndex index) { |
(...skipping 2646 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3881 } | 3939 } |
3882 | 3940 |
3883 | 3941 |
3884 // Note: The ARM version of this clobbers elements_reg, but this version does | 3942 // Note: The ARM version of this clobbers elements_reg, but this version does |
3885 // not. Some uses of this in ARM64 assume that elements_reg will be preserved. | 3943 // not. Some uses of this in ARM64 assume that elements_reg will be preserved. |
3886 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, | 3944 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, |
3887 Register key_reg, | 3945 Register key_reg, |
3888 Register elements_reg, | 3946 Register elements_reg, |
3889 Register scratch1, | 3947 Register scratch1, |
3890 FPRegister fpscratch1, | 3948 FPRegister fpscratch1, |
3891 FPRegister fpscratch2, | |
3892 Label* fail, | 3949 Label* fail, |
3893 int elements_offset) { | 3950 int elements_offset) { |
3894 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); | 3951 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); |
3895 Label store_num; | 3952 Label store_num; |
3896 | 3953 |
3897 // Speculatively convert the smi to a double - all smis can be exactly | 3954 // Speculatively convert the smi to a double - all smis can be exactly |
3898 // represented as a double. | 3955 // represented as a double. |
3899 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag); | 3956 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag); |
3900 | 3957 |
3901 // If value_reg is a smi, we're done. | 3958 // If value_reg is a smi, we're done. |
3902 JumpIfSmi(value_reg, &store_num); | 3959 JumpIfSmi(value_reg, &store_num); |
3903 | 3960 |
3904 // Ensure that the object is a heap number. | 3961 // Ensure that the object is a heap number. |
3905 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), | 3962 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), |
3906 fail, DONT_DO_SMI_CHECK); | 3963 fail, DONT_DO_SMI_CHECK); |
3907 | 3964 |
3908 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 3965 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
3909 Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); | |
3910 | 3966 |
3911 // Check for NaN by comparing the number to itself: NaN comparison will | 3967 // Canonicalize NaNs. |
3912 // report unordered, indicated by the overflow flag being set. | 3968 CanonicalizeNaN(fpscratch1); |
3913 Fcmp(fpscratch1, fpscratch1); | |
3914 Fcsel(fpscratch1, fpscratch2, fpscratch1, vs); | |
3915 | 3969 |
3916 // Store the result. | 3970 // Store the result. |
3917 Bind(&store_num); | 3971 Bind(&store_num); |
3918 Add(scratch1, elements_reg, | 3972 Add(scratch1, elements_reg, |
3919 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2)); | 3973 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2)); |
3920 Str(fpscratch1, | 3974 Str(fpscratch1, |
3921 FieldMemOperand(scratch1, | 3975 FieldMemOperand(scratch1, |
3922 FixedDoubleArray::kHeaderSize - elements_offset)); | 3976 FixedDoubleArray::kHeaderSize - elements_offset)); |
3923 } | 3977 } |
3924 | 3978 |
(...skipping 1290 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5215 } | 5269 } |
5216 } | 5270 } |
5217 | 5271 |
5218 | 5272 |
5219 #undef __ | 5273 #undef __ |
5220 | 5274 |
5221 | 5275 |
5222 } } // namespace v8::internal | 5276 } } // namespace v8::internal |
5223 | 5277 |
5224 #endif // V8_TARGET_ARCH_ARM64 | 5278 #endif // V8_TARGET_ARCH_ARM64 |
OLD | NEW |