OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "v8.h" | 5 #include "v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_ARM64 | 7 #if V8_TARGET_ARCH_ARM64 |
8 | 8 |
9 #include "bootstrapper.h" | 9 #include "bootstrapper.h" |
10 #include "codegen.h" | 10 #include "codegen.h" |
(...skipping 1204 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1215 Abort(kTheCurrentStackPointerIsBelowCsp); | 1215 Abort(kTheCurrentStackPointerIsBelowCsp); |
1216 | 1216 |
1217 bind(&ok); | 1217 bind(&ok); |
1218 // Restore StackPointer(). | 1218 // Restore StackPointer(). |
1219 sub(StackPointer(), csp, StackPointer()); | 1219 sub(StackPointer(), csp, StackPointer()); |
1220 } | 1220 } |
1221 } | 1221 } |
1222 } | 1222 } |
1223 | 1223 |
1224 | 1224 |
1225 void MacroAssembler::AssertFPCRState(Register fpcr) { | |
1226 if (emit_debug_code()) { | |
1227 Label unexpected_mode, done; | |
1228 UseScratchRegisterScope temps(this); | |
1229 if (fpcr.IsNone()) { | |
1230 fpcr = temps.AcquireX(); | |
1231 Mrs(fpcr, FPCR); | |
1232 } | |
1233 | |
1234 // Assert that default-NaN mode is set. | |
1235 Tbz(fpcr, DN_offset, &unexpected_mode); | |
1236 | |
1237 // Assert that flush-to-zero is not set. | |
1238 Tbnz(fpcr, FZ_offset, &unexpected_mode); | |
1239 | |
1240 // Assert that the rounding mode is nearest-with-ties-to-even. | |
1241 STATIC_ASSERT(FPTieEven == 0); | |
1242 Tst(fpcr, RMode_mask); | |
1243 B(eq, &done); | |
1244 | |
1245 Bind(&unexpected_mode); | |
1246 Abort(kUnexpectedFPCRMode); | |
1247 | |
1248 Bind(&done); | |
1249 } | |
1250 } | |
1251 | |
1252 | |
1253 void MacroAssembler::ConfigureFPCR() { | |
1254 UseScratchRegisterScope temps(this); | |
1255 Register fpcr = temps.AcquireX(); | |
1256 Mrs(fpcr, FPCR); | |
1257 | |
1258 // If necessary, enable default-NaN mode. The default values of the other FPCR | |
1259 // options should be suitable. | |
1260 Label no_write_required; | |
1261 Tbnz(fpcr, DN_offset, &no_write_required); | |
1262 | |
1263 Orr(fpcr, fpcr, DN_mask); | |
1264 Msr(FPCR, fpcr); | |
ulan
2014/05/02 09:10:33
AssertFPCRState also checks flush-to-zero and roun
jbramley
2014/05/02 09:32:05
The flush-to-zero and rounding mode settings are l
| |
1265 | |
1266 Bind(&no_write_required); | |
1267 AssertFPCRState(fpcr); | |
1268 } | |
1269 | |
1270 | |
1271 void MacroAssembler::CanonicalizeNaN(const FPRegister& dst, | |
1272 const FPRegister& src) { | |
1273 AssertFPCRState(); | |
1274 | |
1275 // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except |
ulan
2014/05/02 09:10:33
s/expect/except
jbramley
2014/05/02 09:32:05
Done.
| |
1276 // for NaNs, which become the default NaN. We use fsub rather than fadd | |
1277 // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0. | |
1278 Fsub(dst, src, fp_zero); | |
1279 } | |
1280 | |
1281 | |
1225 void MacroAssembler::LoadRoot(CPURegister destination, | 1282 void MacroAssembler::LoadRoot(CPURegister destination, |
1226 Heap::RootListIndex index) { | 1283 Heap::RootListIndex index) { |
1227 // TODO(jbramley): Most root values are constants, and can be synthesized | 1284 // TODO(jbramley): Most root values are constants, and can be synthesized |
1228 // without a load. Refer to the ARM back end for details. | 1285 // without a load. Refer to the ARM back end for details. |
1229 Ldr(destination, MemOperand(root, index << kPointerSizeLog2)); | 1286 Ldr(destination, MemOperand(root, index << kPointerSizeLog2)); |
1230 } | 1287 } |
1231 | 1288 |
1232 | 1289 |
1233 void MacroAssembler::StoreRoot(Register source, | 1290 void MacroAssembler::StoreRoot(Register source, |
1234 Heap::RootListIndex index) { | 1291 Heap::RootListIndex index) { |
(...skipping 2646 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3881 } | 3938 } |
3882 | 3939 |
3883 | 3940 |
3884 // Note: The ARM version of this clobbers elements_reg, but this version does | 3941 // Note: The ARM version of this clobbers elements_reg, but this version does |
3885 // not. Some uses of this in ARM64 assume that elements_reg will be preserved. | 3942 // not. Some uses of this in ARM64 assume that elements_reg will be preserved. |
3886 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, | 3943 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, |
3887 Register key_reg, | 3944 Register key_reg, |
3888 Register elements_reg, | 3945 Register elements_reg, |
3889 Register scratch1, | 3946 Register scratch1, |
3890 FPRegister fpscratch1, | 3947 FPRegister fpscratch1, |
3891 FPRegister fpscratch2, | |
3892 Label* fail, | 3948 Label* fail, |
3893 int elements_offset) { | 3949 int elements_offset) { |
3894 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); | 3950 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); |
3895 Label store_num; | 3951 Label store_num; |
3896 | 3952 |
3897 // Speculatively convert the smi to a double - all smis can be exactly | 3953 // Speculatively convert the smi to a double - all smis can be exactly |
3898 // represented as a double. | 3954 // represented as a double. |
3899 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag); | 3955 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag); |
3900 | 3956 |
3901 // If value_reg is a smi, we're done. | 3957 // If value_reg is a smi, we're done. |
3902 JumpIfSmi(value_reg, &store_num); | 3958 JumpIfSmi(value_reg, &store_num); |
3903 | 3959 |
3904 // Ensure that the object is a heap number. | 3960 // Ensure that the object is a heap number. |
3905 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), | 3961 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), |
3906 fail, DONT_DO_SMI_CHECK); | 3962 fail, DONT_DO_SMI_CHECK); |
3907 | 3963 |
3908 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 3964 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
3909 Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); | |
3910 | 3965 |
3911 // Check for NaN by comparing the number to itself: NaN comparison will | 3966 // Canonicalize NaNs. |
3912 // report unordered, indicated by the overflow flag being set. | 3967 CanonicalizeNaN(fpscratch1); |
3913 Fcmp(fpscratch1, fpscratch1); | |
3914 Fcsel(fpscratch1, fpscratch2, fpscratch1, vs); | |
3915 | 3968 |
3916 // Store the result. | 3969 // Store the result. |
3917 Bind(&store_num); | 3970 Bind(&store_num); |
3918 Add(scratch1, elements_reg, | 3971 Add(scratch1, elements_reg, |
3919 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2)); | 3972 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2)); |
3920 Str(fpscratch1, | 3973 Str(fpscratch1, |
3921 FieldMemOperand(scratch1, | 3974 FieldMemOperand(scratch1, |
3922 FixedDoubleArray::kHeaderSize - elements_offset)); | 3975 FixedDoubleArray::kHeaderSize - elements_offset)); |
3923 } | 3976 } |
3924 | 3977 |
(...skipping 1290 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
5215 } | 5268 } |
5216 } | 5269 } |
5217 | 5270 |
5218 | 5271 |
5219 #undef __ | 5272 #undef __ |
5220 | 5273 |
5221 | 5274 |
5222 } } // namespace v8::internal | 5275 } } // namespace v8::internal |
5223 | 5276 |
5224 #endif // V8_TARGET_ARCH_ARM64 | 5277 #endif // V8_TARGET_ARCH_ARM64 |
OLD | NEW |