OLD | NEW |
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/compiler.h" | 9 #include "vm/compiler.h" |
10 #include "vm/dart_entry.h" | 10 #include "vm/dart_entry.h" |
(...skipping 712 matching lines...)
723 | 723 |
724 Heap::Space space = Heap::kNew; | 724 Heap::Space space = Heap::kNew; |
725 __ ldr(R8, Address(THR, Thread::heap_offset())); | 725 __ ldr(R8, Address(THR, Thread::heap_offset())); |
726 | 726 |
727 // Calculate and align allocation size. | 727 // Calculate and align allocation size. |
728 // Load new object start and calculate next object start. | 728 // Load new object start and calculate next object start. |
729 // R1: array element type. | 729 // R1: array element type. |
730 // R2: array length as Smi. | 730 // R2: array length as Smi. |
731 // R8: heap. | 731 // R8: heap. |
732 __ LoadFromOffset(R0, R8, Heap::TopOffset(space)); | 732 __ LoadFromOffset(R0, R8, Heap::TopOffset(space)); |
733 intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; | 733 intptr_t fixed_size_plus_alignment_padding = |
734 __ LoadImmediate(R3, fixed_size); | 734 sizeof(RawArray) + kObjectAlignment - 1; |
| 735 __ LoadImmediate(R3, fixed_size_plus_alignment_padding); |
735 __ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi. | 736 __ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi. |
736 ASSERT(kSmiTagShift == 1); | 737 ASSERT(kSmiTagShift == 1); |
737 __ andi(R3, R3, Immediate(~(kObjectAlignment - 1))); | 738 __ andi(R3, R3, Immediate(~(kObjectAlignment - 1))); |
738 // R0: potential new object start. | 739 // R0: potential new object start. |
739 // R3: object size in bytes. | 740 // R3: object size in bytes. |
740 __ adds(R7, R3, Operand(R0)); | 741 __ adds(R7, R3, Operand(R0)); |
741 __ b(&slow_case, CS); // Branch if unsigned overflow. | 742 __ b(&slow_case, CS); // Branch if unsigned overflow. |
742 | 743 |
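For reference, a minimal plain-C++ sketch of the align-up computation above (a hypothetical helper, not VM code). It assumes kObjectAlignment is a power of two and that R2 holds the length Smi-tagged (shifted left by kSmiTagShift == 1), so the LSL 2 scales it to length * 8 bytes of payload:

  #include <cstdint>

  // Sketch of the size computation done by the stub, assuming 8-byte elements
  // and a power-of-two object_alignment (kObjectAlignment).
  inline uint64_t ComputeArrayAllocationSize(uint64_t smi_length,        // length << kSmiTagShift
                                             uint64_t header_size,       // sizeof(RawArray)
                                             uint64_t object_alignment) {
    uint64_t fixed_size_plus_alignment_padding = header_size + object_alignment - 1;
    // (smi_length << 2) == untagged_length << 3 == untagged_length * 8 bytes.
    uint64_t unrounded = fixed_size_plus_alignment_padding + (smi_length << 2);
    // Masking with ~(alignment - 1) rounds down; the padding added above makes
    // the net effect a round up to the next alignment boundary.
    return unrounded & ~(object_alignment - 1);
  }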
743 // Check if the allocation fits into the remaining space. | 744 // Check if the allocation fits into the remaining space. |
744 // R0: potential new object start. | 745 // R0: potential new object start. |
(...skipping 224 matching lines...)
969 // Called for inline allocation of contexts. | 970 // Called for inline allocation of contexts. |
970 // Input: | 971 // Input: |
971 // R1: number of context variables. | 972 // R1: number of context variables. |
972 // Output: | 973 // Output: |
973 // R0: new allocated RawContext object. | 974 // R0: new allocated RawContext object. |
974 void StubCode::GenerateAllocateContextStub(Assembler* assembler) { | 975 void StubCode::GenerateAllocateContextStub(Assembler* assembler) { |
975 if (FLAG_inline_alloc) { | 976 if (FLAG_inline_alloc) { |
976 Label slow_case; | 977 Label slow_case; |
977 // First compute the rounded instance size. | 978 // First compute the rounded instance size. |
978 // R1: number of context variables. | 979 // R1: number of context variables. |
979 intptr_t fixed_size = sizeof(RawContext) + kObjectAlignment - 1; | 980 intptr_t fixed_size_plus_alignment_padding = |
980 __ LoadImmediate(R2, fixed_size); | 981 sizeof(RawContext) + kObjectAlignment - 1; |
| 982 __ LoadImmediate(R2, fixed_size_plus_alignment_padding); |
981 __ add(R2, R2, Operand(R1, LSL, 3)); | 983 __ add(R2, R2, Operand(R1, LSL, 3)); |
982 ASSERT(kSmiTagShift == 1); | 984 ASSERT(kSmiTagShift == 1); |
983 __ andi(R2, R2, Immediate(~(kObjectAlignment - 1))); | 985 __ andi(R2, R2, Immediate(~(kObjectAlignment - 1))); |
984 | 986 |
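The context size computation above follows the same round-up pattern; a short sketch, assuming R1 holds an untagged variable count and each context slot is one 64-bit word (hence the LSL 3). This is an illustrative helper, not the VM's code:

  // Same rounding technique applied to contexts (hypothetical helper).
  inline uint64_t ComputeContextAllocationSize(uint64_t num_context_variables,  // untagged count
                                               uint64_t header_size,            // sizeof(RawContext)
                                               uint64_t object_alignment) {
    uint64_t size = header_size + object_alignment - 1 + (num_context_variables << 3);
    return size & ~(object_alignment - 1);  // round up to the alignment boundary
  }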
985 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, R4, &slow_case)); | 987 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, R4, &slow_case)); |
986 // Now allocate the object. | 988 // Now allocate the object. |
987 // R1: number of context variables. | 989 // R1: number of context variables. |
988 // R2: object size. | 990 // R2: object size. |
989 const intptr_t cid = kContextCid; | 991 const intptr_t cid = kContextCid; |
990 Heap::Space space = Heap::kNew; | 992 Heap::Space space = Heap::kNew; |
(...skipping 1317 matching lines...)
2308 | 2310 |
2309 // Called from switchable IC calls. | 2311 // Called from switchable IC calls. |
2310 // R0: receiver | 2312 // R0: receiver |
2311 // R5: SingleTargetCache | 2313 // R5: SingleTargetCache |
2312 // Passed to target: | 2314 // Passed to target: |
2313 // CODE_REG: target Code object | 2315 // CODE_REG: target Code object |
2314 void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) { | 2316 void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) { |
2315 Label miss; | 2317 Label miss; |
2316 __ LoadClassIdMayBeSmi(R1, R0); | 2318 __ LoadClassIdMayBeSmi(R1, R0); |
2317 __ ldr(R2, FieldAddress(R5, SingleTargetCache::lower_limit_offset()), | 2319 __ ldr(R2, FieldAddress(R5, SingleTargetCache::lower_limit_offset()), |
2318 kUnsignedWord); | 2320 kUnsignedHalfword); |
2319 __ ldr(R3, FieldAddress(R5, SingleTargetCache::upper_limit_offset()), | 2321 __ ldr(R3, FieldAddress(R5, SingleTargetCache::upper_limit_offset()), |
2320 kUnsignedWord); | 2322 kUnsignedHalfword); |
2321 | 2323 |
2322 __ cmp(R1, Operand(R2)); | 2324 __ cmp(R1, Operand(R2)); |
2323 __ b(&miss, LT); | 2325 __ b(&miss, LT); |
2324 __ cmp(R1, Operand(R3)); | 2326 __ cmp(R1, Operand(R3)); |
2325 __ b(&miss, GT); | 2327 __ b(&miss, GT); |
2326 | 2328 |
2327 __ ldr(R1, FieldAddress(R5, SingleTargetCache::entry_point_offset())); | 2329 __ ldr(R1, FieldAddress(R5, SingleTargetCache::entry_point_offset())); |
2328 __ ldr(CODE_REG, FieldAddress(R5, SingleTargetCache::target_offset())); | 2330 __ ldr(CODE_REG, FieldAddress(R5, SingleTargetCache::target_offset())); |
2329 __ br(R1); | 2331 __ br(R1); |
2330 | 2332 |
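A hedged C++ sketch of the fast path above: the stub compares the receiver's class id against the [lower, upper] range held by the SingleTargetCache, where the limits are 16-bit fields (hence the switch to kUnsignedHalfword loads), and jumps to the cached entry point on a hit, falling through to the miss path otherwise. The struct and field names below are illustrative, not the VM's actual layout:

  #include <cstdint>

  // Illustrative stand-in for the relevant SingleTargetCache fields.
  struct SingleTargetCacheSketch {
    uint16_t  lower_limit;   // inclusive lower bound on class id
    uint16_t  upper_limit;   // inclusive upper bound on class id
    uintptr_t entry_point;   // cached target entry point
  };

  // Returns the address to jump to: the cached target on a hit,
  // otherwise the miss handler.
  inline uintptr_t SingleTargetLookup(uint32_t receiver_cid,
                                      const SingleTargetCacheSketch& cache,
                                      uintptr_t miss_handler) {
    if (receiver_cid < cache.lower_limit || receiver_cid > cache.upper_limit) {
      return miss_handler;  // out of range: take the miss path
    }
    return cache.entry_point;  // in range: tail-call the cached target
  }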
(...skipping 43 matching lines...)
2374 } | 2376 } |
2375 | 2377 |
2376 | 2378 |
2377 void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) { | 2379 void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) { |
2378 __ brk(0); | 2380 __ brk(0); |
2379 } | 2381 } |
2380 | 2382 |
2381 } // namespace dart | 2383 } // namespace dart |
2382 | 2384 |
2383 #endif // defined TARGET_ARCH_ARM64 | 2385 #endif // defined TARGET_ARCH_ARM64 |