OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
15 // | 15 // |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #if V8_TARGET_ARCH_A64 | 30 #if V8_TARGET_ARCH_ARM64 |
31 | 31 |
32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
33 #include "codegen.h" | 33 #include "codegen.h" |
34 #include "cpu-profiler.h" | 34 #include "cpu-profiler.h" |
35 #include "debug.h" | 35 #include "debug.h" |
36 #include "isolate-inl.h" | 36 #include "isolate-inl.h" |
37 #include "runtime.h" | 37 #include "runtime.h" |
38 | 38 |
39 namespace v8 { | 39 namespace v8 { |
40 namespace internal { | 40 namespace internal { |
(...skipping 1806 matching lines...)
1847 SmiTag(scratch1, length); | 1847 SmiTag(scratch1, length); |
1848 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); | 1848 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); |
1849 | 1849 |
1850 Mov(scratch2, String::kEmptyHashField); | 1850 Mov(scratch2, String::kEmptyHashField); |
1851 Str(scratch1, FieldMemOperand(string, String::kLengthOffset)); | 1851 Str(scratch1, FieldMemOperand(string, String::kLengthOffset)); |
1852 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset)); | 1852 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset)); |
1853 } | 1853 } |
1854 | 1854 |
1855 | 1855 |
1856 int MacroAssembler::ActivationFrameAlignment() { | 1856 int MacroAssembler::ActivationFrameAlignment() { |
1857 #if V8_HOST_ARCH_A64 | 1857 #if V8_HOST_ARCH_ARM64 |
1858 // Running on the real platform. Use the alignment as mandated by the local | 1858 // Running on the real platform. Use the alignment as mandated by the local |
1859 // environment. | 1859 // environment. |
1860 // Note: This will break if we ever start generating snapshots on one ARM | 1860 // Note: This will break if we ever start generating snapshots on one ARM |
1861 // platform for another ARM platform with a different alignment. | 1861 // platform for another ARM platform with a different alignment. |
1862 return OS::ActivationFrameAlignment(); | 1862 return OS::ActivationFrameAlignment(); |
1863 #else // V8_HOST_ARCH_A64 | 1863 #else // V8_HOST_ARCH_ARM64 |
1864 // If we are using the simulator then we should always align to the expected | 1864 // If we are using the simulator then we should always align to the expected |
1865 // alignment. As the simulator is used to generate snapshots we do not know | 1865 // alignment. As the simulator is used to generate snapshots we do not know |
1866 // if the target platform will need alignment, so this is controlled from a | 1866 // if the target platform will need alignment, so this is controlled from a |
1867 // flag. | 1867 // flag. |
1868 return FLAG_sim_stack_alignment; | 1868 return FLAG_sim_stack_alignment; |
1869 #endif // V8_HOST_ARCH_A64 | 1869 #endif // V8_HOST_ARCH_ARM64 |
1870 } | 1870 } |
1871 | 1871 |
1872 | 1872 |
1873 void MacroAssembler::CallCFunction(ExternalReference function, | 1873 void MacroAssembler::CallCFunction(ExternalReference function, |
1874 int num_of_reg_args) { | 1874 int num_of_reg_args) { |
1875 CallCFunction(function, num_of_reg_args, 0); | 1875 CallCFunction(function, num_of_reg_args, 0); |
1876 } | 1876 } |
1877 | 1877 |
1878 | 1878 |
1879 void MacroAssembler::CallCFunction(ExternalReference function, | 1879 void MacroAssembler::CallCFunction(ExternalReference function, |
(...skipping 1365 matching lines...)
3245 // Assert that result actually contains top on entry. | 3245 // Assert that result actually contains top on entry. |
3246 Ldr(scratch3, MemOperand(top_address)); | 3246 Ldr(scratch3, MemOperand(top_address)); |
3247 Cmp(result, scratch3); | 3247 Cmp(result, scratch3); |
3248 Check(eq, kUnexpectedAllocationTop); | 3248 Check(eq, kUnexpectedAllocationTop); |
3249 } | 3249 } |
3250 // Load the allocation limit. 'result' already contains the allocation top. | 3250 // Load the allocation limit. 'result' already contains the allocation top. |
3251 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | 3251 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
3252 } | 3252 } |
3253 | 3253 |
3254 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 3254 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
3255 // the same alignment on A64. | 3255 // the same alignment on ARM64. |
3256 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 3256 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
3257 | 3257 |
3258 // Calculate new top and bail out if new space is exhausted. | 3258 // Calculate new top and bail out if new space is exhausted. |
3259 Adds(scratch3, result, object_size); | 3259 Adds(scratch3, result, object_size); |
3260 B(vs, gc_required); | 3260 B(vs, gc_required); |
3261 Cmp(scratch3, allocation_limit); | 3261 Cmp(scratch3, allocation_limit); |
3262 B(hi, gc_required); | 3262 B(hi, gc_required); |
3263 Str(scratch3, MemOperand(top_address)); | 3263 Str(scratch3, MemOperand(top_address)); |
3264 | 3264 |
3265 // Tag the object if requested. | 3265 // Tag the object if requested. |
(...skipping 51 matching lines...)
3317 // Assert that result actually contains top on entry. | 3317 // Assert that result actually contains top on entry. |
3318 Ldr(scratch3, MemOperand(top_address)); | 3318 Ldr(scratch3, MemOperand(top_address)); |
3319 Cmp(result, scratch3); | 3319 Cmp(result, scratch3); |
3320 Check(eq, kUnexpectedAllocationTop); | 3320 Check(eq, kUnexpectedAllocationTop); |
3321 } | 3321 } |
3322 // Load the allocation limit. 'result' already contains the allocation top. | 3322 // Load the allocation limit. 'result' already contains the allocation top. |
3323 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | 3323 Ldr(allocation_limit, MemOperand(top_address, limit - top)); |
3324 } | 3324 } |
3325 | 3325 |
3326 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 3326 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
3327 // the same alignment on A64. | 3327 // the same alignment on ARM64. |
3328 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 3328 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
3329 | 3329 |
3330 // Calculate new top and bail out if new space is exhausted | 3330 // Calculate new top and bail out if new space is exhausted |
3331 if ((flags & SIZE_IN_WORDS) != 0) { | 3331 if ((flags & SIZE_IN_WORDS) != 0) { |
3332 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2)); | 3332 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2)); |
3333 } else { | 3333 } else { |
3334 Adds(scratch3, result, object_size); | 3334 Adds(scratch3, result, object_size); |
3335 } | 3335 } |
3336 | 3336 |
3337 if (emit_debug_code()) { | 3337 if (emit_debug_code()) { |
(...skipping 494 matching lines...)
3832 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 3832 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); |
3833 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 3833 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
3834 // If cond==ls, set cond=hi, otherwise compare. | 3834 // If cond==ls, set cond=hi, otherwise compare. |
3835 Ccmp(scratch, | 3835 Ccmp(scratch, |
3836 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi); | 3836 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi); |
3837 B(hi, fail); | 3837 B(hi, fail); |
3838 } | 3838 } |
3839 | 3839 |
3840 | 3840 |
3841 // Note: The ARM version of this clobbers elements_reg, but this version does | 3841 // Note: The ARM version of this clobbers elements_reg, but this version does |
3842 // not. Some uses of this in A64 assume that elements_reg will be preserved. | 3842 // not. Some uses of this in ARM64 assume that elements_reg will be preserved. |
3843 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, | 3843 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, |
3844 Register key_reg, | 3844 Register key_reg, |
3845 Register elements_reg, | 3845 Register elements_reg, |
3846 Register scratch1, | 3846 Register scratch1, |
3847 FPRegister fpscratch1, | 3847 FPRegister fpscratch1, |
3848 FPRegister fpscratch2, | 3848 FPRegister fpscratch2, |
3849 Label* fail, | 3849 Label* fail, |
3850 int elements_offset) { | 3850 int elements_offset) { |
3851 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); | 3851 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); |
3852 Label store_num; | 3852 Label store_num; |
(...skipping 1165 matching lines...)
5018 // When the stub is called, the sequence is replaced with the young sequence | 5018 // When the stub is called, the sequence is replaced with the young sequence |
5019 // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the | 5019 // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the |
5020 // stub jumps to &start, stored in x0. The young sequence does not call the | 5020 // stub jumps to &start, stored in x0. The young sequence does not call the |
5021 // stub so there is no infinite loop here. | 5021 // stub so there is no infinite loop here. |
5022 // | 5022 // |
5023 // A branch (br) is used rather than a call (blr) because this code replaces | 5023 // A branch (br) is used rather than a call (blr) because this code replaces |
5024 // the frame setup code that would normally preserve lr. | 5024 // the frame setup code that would normally preserve lr. |
5025 __ LoadLiteral(ip0, kCodeAgeStubEntryOffset); | 5025 __ LoadLiteral(ip0, kCodeAgeStubEntryOffset); |
5026 __ adr(x0, &start); | 5026 __ adr(x0, &start); |
5027 __ br(ip0); | 5027 __ br(ip0); |
5028 // IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up | 5028 // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up |
5029 // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences. | 5029 // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences. |
5030 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset); | 5030 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset); |
5031 if (stub) { | 5031 if (stub) { |
5032 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start())); | 5032 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start())); |
5033 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); | 5033 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize); |
5034 } | 5034 } |
5035 } | 5035 } |
5036 | 5036 |
5037 | 5037 |
5038 bool MacroAssembler::IsYoungSequence(byte* sequence) { | 5038 bool MacroAssembler::IsYoungSequence(byte* sequence) { |
(...skipping 122 matching lines...)
5161 } | 5161 } |
5162 } | 5162 } |
5163 } | 5163 } |
5164 | 5164 |
5165 | 5165 |
5166 #undef __ | 5166 #undef __ |
5167 | 5167 |
5168 | 5168 |
5169 } } // namespace v8::internal | 5169 } } // namespace v8::internal |
5170 | 5170 |
5171 #endif // V8_TARGET_ARCH_A64 | 5171 #endif // V8_TARGET_ARCH_ARM64 |