OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
15 // | 15 // |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #if V8_TARGET_ARCH_A64 | 30 #if V8_TARGET_ARCH_ARM64 |
31 | 31 |
32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
34 #include "regexp-macro-assembler.h" | 34 #include "regexp-macro-assembler.h" |
35 #include "stub-cache.h" | 35 #include "stub-cache.h" |
36 | 36 |
37 namespace v8 { | 37 namespace v8 { |
38 namespace internal { | 38 namespace internal { |
39 | 39 |
40 | 40 |
(...skipping 1189 matching lines...)
1230 // exponent == -0.5: The result is +INFINITY. | 1230 // exponent == -0.5: The result is +INFINITY. |
1231 // (base < 0) && base.isFinite(): The result is NaN. | 1231 // (base < 0) && base.isFinite(): The result is NaN. |
1232 // | 1232 // |
1233 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except | 1233 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except |
1234 // where base is -INFINITY or -0. | 1234 // where base is -INFINITY or -0. |
1235 | 1235 |
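As a side note, the mismatch between a plain square root and the Math.pow special cases listed above can be reproduced in ordinary C++ (a standalone sketch, not part of this patch, assuming IEEE-754 / C99 Annex F semantics and no fast-math flags):

    #include <cmath>
    #include <cstdio>

    int main() {
      // sqrt(-0.0) is -0.0, but Math.pow(-0, 0.5) must be +0.
      std::printf("sqrt(-0.0): %g, signbit=%d\n",
                  std::sqrt(-0.0), std::signbit(std::sqrt(-0.0)));
      // sqrt(-inf) is NaN, but Math.pow(-inf, 0.5) must be +inf.
      std::printf("sqrt(-inf): %g\n", std::sqrt(-INFINITY));
      // Negative finite bases agree: both give NaN, so Fsqrt already
      // handles that case and only -0 and -inf need the fix-ups below.
      std::printf("sqrt(-4.0): %g\n", std::sqrt(-4.0));
      return 0;
    }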
1236 // Add +0 to base. This has no effect other than turning -0 into +0. | 1236 // Add +0 to base. This has no effect other than turning -0 into +0. |
1237 __ Fadd(base_double, base_double, fp_zero); | 1237 __ Fadd(base_double, base_double, fp_zero); |
1238 // The operation -0+0 results in +0 in all cases except where the | 1238 // The operation -0+0 results in +0 in all cases except where the |
1239 // FPCR rounding mode is 'round towards minus infinity' (RM). The | 1239 // FPCR rounding mode is 'round towards minus infinity' (RM). The |
1240 // A64 simulator does not currently simulate FPCR (where the rounding | 1240 // ARM64 simulator does not currently simulate FPCR (where the rounding |
1241 // mode is set), so test the operation with some debug code. | 1241 // mode is set), so test the operation with some debug code. |
1242 if (masm->emit_debug_code()) { | 1242 if (masm->emit_debug_code()) { |
1243 UseScratchRegisterScope temps(masm); | 1243 UseScratchRegisterScope temps(masm); |
1244 Register temp = temps.AcquireX(); | 1244 Register temp = temps.AcquireX(); |
1245 __ Fneg(scratch0_double, fp_zero); | 1245 __ Fneg(scratch0_double, fp_zero); |
1246 // Verify that we correctly generated +0.0 and -0.0. | 1246 // Verify that we correctly generated +0.0 and -0.0. |
1247 // bits(+0.0) = 0x0000000000000000 | 1247 // bits(+0.0) = 0x0000000000000000 |
1248 // bits(-0.0) = 0x8000000000000000 | 1248 // bits(-0.0) = 0x8000000000000000 |
1249 __ Fmov(temp, fp_zero); | 1249 __ Fmov(temp, fp_zero); |
1250 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero); | 1250 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero); |
1251 __ Fmov(temp, scratch0_double); | 1251 __ Fmov(temp, scratch0_double); |
1252 __ Eor(temp, temp, kDSignMask); | 1252 __ Eor(temp, temp, kDSignMask); |
1253 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero); | 1253 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero); |
1254 // Check that -0.0 + 0.0 == +0.0. | 1254 // Check that -0.0 + 0.0 == +0.0. |
1255 __ Fadd(scratch0_double, scratch0_double, fp_zero); | 1255 __ Fadd(scratch0_double, scratch0_double, fp_zero); |
1256 __ Fmov(temp, scratch0_double); | 1256 __ Fmov(temp, scratch0_double); |
1257 __ CheckRegisterIsClear(temp, kExpectedPositiveZero); | 1257 __ CheckRegisterIsClear(temp, kExpectedPositiveZero); |
1258 } | 1258 } |
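The property the debug code verifies can also be demonstrated on the host with a minimal sketch (not part of the patch; it assumes the usual IEEE-754 environment, <cfenv> rounding-mode control, and no fast-math optimisation): adding +0.0 turns -0.0 into +0.0 in every rounding mode except round-towards-minus-infinity, which is exactly the FPCR mode the comment above is concerned with.

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    int main() {
      volatile double neg_zero = -0.0;  // volatile: keep the sum out of the constant folder.
      // Default round-to-nearest: -0.0 + +0.0 == +0.0 (sign bit clear).
      std::printf("signbit(-0.0 + 0.0) = %d\n", std::signbit(neg_zero + 0.0));
      // Round towards minus infinity: the same sum is -0.0 (sign bit set),
      // so the Fadd trick would silently fail in that mode.
      std::fesetround(FE_DOWNWARD);
      std::printf("signbit(-0.0 + 0.0) = %d (FE_DOWNWARD)\n", std::signbit(neg_zero + 0.0));
      std::fesetround(FE_TONEAREST);
      return 0;
    }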
1259 | 1259 |
1260 // If base is -INFINITY, make it +INFINITY. | 1260 // If base is -INFINITY, make it +INFINITY. |
1261 // * Calculate base - base: All infinities will become NaNs since both | 1261 // * Calculate base - base: All infinities will become NaNs since both |
1262 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64. | 1262 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64. |
1263 // * If the result is NaN, calculate abs(base). | 1263 // * If the result is NaN, calculate abs(base). |
1264 __ Fsub(scratch0_double, base_double, base_double); | 1264 __ Fsub(scratch0_double, base_double, base_double); |
1265 __ Fcmp(scratch0_double, 0.0); | 1265 __ Fcmp(scratch0_double, 0.0); |
1266 __ Fabs(scratch1_double, base_double); | 1266 __ Fabs(scratch1_double, base_double); |
1267 __ Fcsel(base_double, scratch1_double, base_double, vs); | 1267 __ Fcsel(base_double, scratch1_double, base_double, vs); |
1268 | 1268 |
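The Fsub/Fcmp/Fcsel sequence above is a branchless form of a simple idea: x - x is zero for every finite x but NaN for +/-infinity, so a NaN difference flags exactly the values whose absolute value should be taken. A portable C++ sketch of the same filter (illustrative only, not part of the patch; FlipNegativeInfinity is a made-up name):

    #include <cmath>
    #include <cstdio>

    // Map -inf to +inf and leave every finite base (and NaN) unchanged.
    static double FlipNegativeInfinity(double base) {
      double diff = base - base;  // NaN iff base is +/-inf (or already NaN).
      return std::isnan(diff) ? std::fabs(base) : base;
    }

    int main() {
      std::printf("%g\n", FlipNegativeInfinity(-INFINITY));  // inf
      std::printf("%g\n", FlipNegativeInfinity(-0.5));       // -0.5
      std::printf("%g\n", FlipNegativeInfinity(42.0));       // 42
      return 0;
    }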
1269 // Calculate the square root of base. | 1269 // Calculate the square root of base. |
1270 __ Fsqrt(result_double, base_double); | 1270 __ Fsqrt(result_double, base_double); |
1271 __ Fcmp(exponent_double, 0.0); | 1271 __ Fcmp(exponent_double, 0.0); |
1272 __ B(ge, &done); // Finish now for exponents of 0.5. | 1272 __ B(ge, &done); // Finish now for exponents of 0.5. |
(...skipping 119 matching lines...)
1392 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 1392 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
1393 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 1393 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
1394 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); | 1394 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); |
1395 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); | 1395 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); |
1396 BinaryOpICStub::GenerateAheadOfTime(isolate); | 1396 BinaryOpICStub::GenerateAheadOfTime(isolate); |
1397 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); | 1397 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); |
1398 } | 1398 } |
1399 | 1399 |
1400 | 1400 |
1401 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 1401 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
1402 // Floating-point code doesn't get special handling in A64, so there's | 1402 // Floating-point code doesn't get special handling in ARM64, so there's |
1403 // nothing to do here. | 1403 // nothing to do here. |
1404 USE(isolate); | 1404 USE(isolate); |
1405 } | 1405 } |
1406 | 1406 |
1407 | 1407 |
1408 static void JumpIfOOM(MacroAssembler* masm, | 1408 static void JumpIfOOM(MacroAssembler* masm, |
1409 Register value, | 1409 Register value, |
1410 Register scratch, | 1410 Register scratch, |
1411 Label* oom_label) { | 1411 Label* oom_label) { |
1412 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); | 1412 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); |
(...skipping 3215 matching lines...)
4628 } | 4628 } |
4629 | 4629 |
4630 // Tail call into the stub that handles binary operations with allocation | 4630 // Tail call into the stub that handles binary operations with allocation |
4631 // sites. | 4631 // sites. |
4632 BinaryOpWithAllocationSiteStub stub(state_); | 4632 BinaryOpWithAllocationSiteStub stub(state_); |
4633 __ TailCallStub(&stub); | 4633 __ TailCallStub(&stub); |
4634 } | 4634 } |
4635 | 4635 |
4636 | 4636 |
4637 bool CodeStub::CanUseFPRegisters() { | 4637 bool CodeStub::CanUseFPRegisters() { |
4638 // FP registers always available on A64. | 4638 // FP registers always available on ARM64. |
4639 return true; | 4639 return true; |
4640 } | 4640 } |
4641 | 4641 |
4642 | 4642 |
4643 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { | 4643 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { |
4644 // We need some extra registers for this stub, they have been allocated | 4644 // We need some extra registers for this stub, they have been allocated |
4645 // but we need to save them before using them. | 4645 // but we need to save them before using them. |
4646 regs_.Save(masm); | 4646 regs_.Save(masm); |
4647 | 4647 |
4648 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | 4648 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
(...skipping 277 matching lines...)
4926 // Save all kCallerSaved registers (including lr), since this can be called | 4926 // Save all kCallerSaved registers (including lr), since this can be called |
4927 // from anywhere. | 4927 // from anywhere. |
4928 // TODO(jbramley): What about FP registers? | 4928 // TODO(jbramley): What about FP registers? |
4929 __ PushCPURegList(kCallerSaved); | 4929 __ PushCPURegList(kCallerSaved); |
4930 ASSERT(kCallerSaved.IncludesAliasOf(lr)); | 4930 ASSERT(kCallerSaved.IncludesAliasOf(lr)); |
4931 const int kNumSavedRegs = kCallerSaved.Count(); | 4931 const int kNumSavedRegs = kCallerSaved.Count(); |
4932 | 4932 |
4933 // Compute the function's address as the first argument. | 4933 // Compute the function's address as the first argument. |
4934 __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart); | 4934 __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart); |
4935 | 4935 |
4936 #if V8_HOST_ARCH_A64 | 4936 #if V8_HOST_ARCH_ARM64 |
4937 uintptr_t entry_hook = | 4937 uintptr_t entry_hook = |
4938 reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook()); | 4938 reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook()); |
4939 __ Mov(x10, entry_hook); | 4939 __ Mov(x10, entry_hook); |
4940 #else | 4940 #else |
4941 // Under the simulator we need to indirect the entry hook through a trampoline | 4941 // Under the simulator we need to indirect the entry hook through a trampoline |
4942 // function at a known address. | 4942 // function at a known address. |
4943 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); | 4943 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); |
4944 __ Mov(x10, Operand(ExternalReference(&dispatcher, | 4944 __ Mov(x10, Operand(ExternalReference(&dispatcher, |
4945 ExternalReference::BUILTIN_CALL, | 4945 ExternalReference::BUILTIN_CALL, |
4946 masm->isolate()))); | 4946 masm->isolate()))); |
(...skipping 772 matching lines...)
5719 spill_offset, | 5719 spill_offset, |
5720 MemOperand(fp, 6 * kPointerSize), | 5720 MemOperand(fp, 6 * kPointerSize), |
5721 NULL); | 5721 NULL); |
5722 } | 5722 } |
5723 | 5723 |
5724 | 5724 |
5725 #undef __ | 5725 #undef __ |
5726 | 5726 |
5727 } } // namespace v8::internal | 5727 } } // namespace v8::internal |
5728 | 5728 |
5729 #endif // V8_TARGET_ARCH_A64 | 5729 #endif // V8_TARGET_ARCH_ARM64 |