OLD | NEW |
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // NOLINT | 5 #include "vm/globals.h" // NOLINT |
6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/cpu.h" | 9 #include "vm/cpu.h" |
10 #include "vm/longjump.h" | 10 #include "vm/longjump.h" |
11 #include "vm/runtime_entry.h" | 11 #include "vm/runtime_entry.h" |
12 #include "vm/simulator.h" | 12 #include "vm/simulator.h" |
13 #include "vm/stack_frame.h" | 13 #include "vm/stack_frame.h" |
14 #include "vm/stub_code.h" | 14 #include "vm/stub_code.h" |
15 | 15 |
16 namespace dart { | 16 namespace dart { |
17 | 17 |
18 DECLARE_FLAG(bool, check_code_pointer); | 18 DECLARE_FLAG(bool, check_code_pointer); |
19 DECLARE_FLAG(bool, inline_alloc); | 19 DECLARE_FLAG(bool, inline_alloc); |
20 | 20 |
21 DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches"); | 21 DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches"); |
22 | 22 |
23 | 23 |
24 Assembler::Assembler(bool use_far_branches) | 24 Assembler::Assembler(bool use_far_branches) |
25 : buffer_(), | 25 : buffer_(), |
26 prologue_offset_(-1), | 26 prologue_offset_(-1), |
27 has_single_entry_point_(true), | 27 has_single_entry_point_(true), |
28 use_far_branches_(use_far_branches), | 28 use_far_branches_(use_far_branches), |
29 comments_(), | 29 comments_(), |
30 constant_pool_allowed_(false) { | 30 constant_pool_allowed_(false) {} |
31 } | |
32 | 31 |
33 | 32 |
34 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { | 33 void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) { |
35 ASSERT(Utils::IsAligned(data, 4)); | 34 ASSERT(Utils::IsAligned(data, 4)); |
36 ASSERT(Utils::IsAligned(length, 4)); | 35 ASSERT(Utils::IsAligned(length, 4)); |
37 const uword end = data + length; | 36 const uword end = data + length; |
38 while (data < end) { | 37 while (data < end) { |
39 *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction; | 38 *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction; |
40 data += 4; | 39 data += 4; |
41 } | 40 } |
42 } | 41 } |
43 | 42 |
44 | 43 |
45 void Assembler::Emit(int32_t value) { | 44 void Assembler::Emit(int32_t value) { |
46 AssemblerBuffer::EnsureCapacity ensured(&buffer_); | 45 AssemblerBuffer::EnsureCapacity ensured(&buffer_); |
47 buffer_.Emit<int32_t>(value); | 46 buffer_.Emit<int32_t>(value); |
48 } | 47 } |
49 | 48 |
50 | 49 |
51 static const char* cpu_reg_names[kNumberOfCpuRegisters] = { | 50 static const char* cpu_reg_names[kNumberOfCpuRegisters] = { |
52 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", | 51 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", |
53 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", | 52 "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21", |
54 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", | 53 "r22", "r23", "r24", "ip0", "ip1", "pp", "ctx", "fp", "lr", "r31", |
55 "r24", "ip0", "ip1", "pp", "ctx", "fp", "lr", "r31", | |
56 }; | 54 }; |
57 | 55 |
58 | 56 |
59 const char* Assembler::RegisterName(Register reg) { | 57 const char* Assembler::RegisterName(Register reg) { |
60 ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters)); | 58 ASSERT((0 <= reg) && (reg < kNumberOfCpuRegisters)); |
61 return cpu_reg_names[reg]; | 59 return cpu_reg_names[reg]; |
62 } | 60 } |
63 | 61 |
64 | 62 |
65 static const char* fpu_reg_names[kNumberOfFpuRegisters] = { | 63 static const char* fpu_reg_names[kNumberOfFpuRegisters] = { |
66 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", | 64 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", |
67 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", | 65 "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", |
68 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", | 66 "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", |
69 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", | |
70 }; | 67 }; |
71 | 68 |
72 | 69 |
73 const char* Assembler::FpuRegisterName(FpuRegister reg) { | 70 const char* Assembler::FpuRegisterName(FpuRegister reg) { |
74 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); | 71 ASSERT((0 <= reg) && (reg < kNumberOfFpuRegisters)); |
75 return fpu_reg_names[reg]; | 72 return fpu_reg_names[reg]; |
76 } | 73 } |
77 | 74 |
78 | 75 |
79 void Assembler::Bind(Label* label) { | 76 void Assembler::Bind(Label* label) { |
(...skipping 49 matching lines...) |
129 const int32_t far_branch = | 126 const int32_t far_branch = |
130 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize); | 127 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize); |
131 | 128 |
132 // Grab the link to the next branch. | 129 // Grab the link to the next branch. |
133 const int32_t next = DecodeImm26BranchOffset(far_branch); | 130 const int32_t next = DecodeImm26BranchOffset(far_branch); |
134 | 131 |
135 // Re-target the guarding branch and flip the conditional sense. | 132 // Re-target the guarding branch and flip the conditional sense. |
136 int32_t encoded_guard_branch = | 133 int32_t encoded_guard_branch = |
137 EncodeImm19BranchOffset(dest, guard_branch); | 134 EncodeImm19BranchOffset(dest, guard_branch); |
138 const Condition c = DecodeImm19BranchCondition(encoded_guard_branch); | 135 const Condition c = DecodeImm19BranchCondition(encoded_guard_branch); |
139 encoded_guard_branch = EncodeImm19BranchCondition( | 136 encoded_guard_branch = |
140 InvertCondition(c), encoded_guard_branch); | 137 EncodeImm19BranchCondition(InvertCondition(c), encoded_guard_branch); |
141 | 138 |
142 // Write back the re-encoded instructions. The far branch becomes a nop. | 139 // Write back the re-encoded instructions. The far branch becomes a nop. |
143 buffer_.Store<int32_t>( | 140 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, |
144 position + 0 * Instr::kInstrSize, encoded_guard_branch); | 141 encoded_guard_branch); |
145 buffer_.Store<int32_t>( | 142 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, |
146 position + 1 * Instr::kInstrSize, Instr::kNopInstruction); | 143 Instr::kNopInstruction); |
147 label->position_ = next; | 144 label->position_ = next; |
148 } else { | 145 } else { |
149 const int32_t next = buffer_.Load<int32_t>(position); | 146 const int32_t next = buffer_.Load<int32_t>(position); |
150 const int32_t encoded = EncodeImm19BranchOffset(dest, next); | 147 const int32_t encoded = EncodeImm19BranchOffset(dest, next); |
151 buffer_.Store<int32_t>(position, encoded); | 148 buffer_.Store<int32_t>(position, encoded); |
152 label->position_ = DecodeImm19BranchOffset(next); | 149 label->position_ = DecodeImm19BranchOffset(next); |
153 } | 150 } |
154 } | 151 } |
155 label->BindTo(bound_pc); | 152 label->BindTo(bound_pc); |
156 } | 153 } |
(...skipping 20 matching lines...) |
177 int count = 0; | 174 int count = 0; |
178 do { | 175 do { |
179 count++; | 176 count++; |
180 } while (value >>= 1); | 177 } while (value >>= 1); |
181 return width - count; | 178 return width - count; |
182 } | 179 } |
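This is the tail of what appears to be a leading-zero count: the do-while measures the bit length of value, and width - count is then the number of unused high-order bits. A zero-value guard presumably sits in the elided lines above, since the do-while alone would report width - 1 for zero. A minimal host-side sketch, with that guard assumed:

    #include <cassert>
    #include <cstdint>

    // Leading zeros of 'value' within a 'width'-bit field. The explicit
    // zero check mirrors the guard assumed to live in the elided lines.
    static int CountLeadingZerosSketch(uint64_t value, int width) {
      if (value == 0) return width;
      int count = 0;  // becomes the bit length of value
      do {
        count++;
      } while (value >>= 1);
      return width - count;
    }

    int main() {
      assert(CountLeadingZerosSketch(1, 64) == 63);
      assert(CountLeadingZerosSketch(0xff, 32) == 24);
      assert(CountLeadingZerosSketch(0, 64) == 64);
      return 0;
    }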
183 | 180 |
184 | 181 |
185 static int CountOneBits(uint64_t value, int width) { | 182 static int CountOneBits(uint64_t value, int width) { |
186 // Mask out unused bits to ensure that they are not counted. | 183 // Mask out unused bits to ensure that they are not counted. |
187 value &= (0xffffffffffffffffUL >> (64-width)); | 184 value &= (0xffffffffffffffffUL >> (64 - width)); |
188 | 185 |
189 value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555); | 186 value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555); |
190 value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333); | 187 value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333); |
191 value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f); | 188 value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f); |
192 value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff); | 189 value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff); |
193 value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff); | 190 value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff); |
194 value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff); | 191 value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff); |
195 | 192 |
196 return value; | 193 return value; |
197 } | 194 } |
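CountOneBits is the classic SWAR popcount: each line adds adjacent bit fields pairwise, doubling the field width from 1 bit up to 64, so the final value is the population count of the masked input. A standalone cross-check against a naive bit loop (a sketch, not part of this CL):

    #include <cassert>
    #include <cstdint>

    // Same pairwise-sum popcount as CountOneBits, with the same
    // width masking, checked against a one-bit-at-a-time reference.
    static int PopcountSwar(uint64_t v, int width) {
      v &= 0xffffffffffffffffUL >> (64 - width);
      v = ((v >> 1) & 0x5555555555555555) + (v & 0x5555555555555555);
      v = ((v >> 2) & 0x3333333333333333) + (v & 0x3333333333333333);
      v = ((v >> 4) & 0x0f0f0f0f0f0f0f0f) + (v & 0x0f0f0f0f0f0f0f0f);
      v = ((v >> 8) & 0x00ff00ff00ff00ff) + (v & 0x00ff00ff00ff00ff);
      v = ((v >> 16) & 0x0000ffff0000ffff) + (v & 0x0000ffff0000ffff);
      v = ((v >> 32) & 0x00000000ffffffff) + (v & 0x00000000ffffffff);
      return static_cast<int>(v);
    }

    static int PopcountNaive(uint64_t v, int width) {
      v &= 0xffffffffffffffffUL >> (64 - width);
      int n = 0;
      for (; v != 0; v >>= 1) n += v & 1;
      return n;
    }

    int main() {
      const uint64_t vals[] = {0, 1, 0xf0f0, 0xdeadbeefcafebabe};
      for (uint64_t v : vals) {
        assert(PopcountSwar(v, 64) == PopcountNaive(v, 64));
        assert(PopcountSwar(v, 32) == PopcountNaive(v, 32));
      }
      return 0;
    }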
(...skipping 177 matching lines...) |
375 // If the raw smi does not fit into a 32-bit signed int, then we'll keep | 372 // If the raw smi does not fit into a 32-bit signed int, then we'll keep |
376 // the raw value in the object pool. | 373 // the raw value in the object pool. |
377 return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw())); | 374 return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw())); |
378 } | 375 } |
379 ASSERT(object.IsNotTemporaryScopedHandle()); | 376 ASSERT(object.IsNotTemporaryScopedHandle()); |
380 ASSERT(object.IsOld()); | 377 ASSERT(object.IsOld()); |
381 return true; | 378 return true; |
382 } | 379 } |
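Context for the smi branch: a smi stores its payload shifted left by the tag size (assumed to be one bit here), so the raw tagged bits fit a 32-bit signed immediate only for small payloads; wider smis are cheaper to fetch from the object pool than to materialize with a mov/movk sequence. A hedged sketch of the predicate, with the tag size an assumption:

    #include <cassert>
    #include <cstdint>

    // Sketch of the smi branch of CanLoadFromObjectPool. kSmiTagSize
    // is assumed to be 1 (tag bit 0 clear for smis).
    static bool SmiNeedsPool(int64_t payload) {
      const int kSmiTagSize = 1;                   // assumption
      const int64_t raw = payload << kSmiTagSize;  // raw tagged bits
      const bool fits_int32 = raw >= INT32_MIN && raw <= INT32_MAX;
      return !fits_int32;  // too wide: keep the raw value in the pool
    }

    int main() {
      assert(!SmiNeedsPool(0));
      assert(!SmiNeedsPool(INT64_C(1) << 29));  // raw = 1 << 30, fits
      assert(SmiNeedsPool(INT64_C(1) << 31));   // raw = 1 << 32, pool
      return 0;
    }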
383 | 380 |
384 | 381 |
385 void Assembler::LoadNativeEntry(Register dst, | 382 void Assembler::LoadNativeEntry(Register dst, const ExternalLabel* label) { |
386 const ExternalLabel* label) { | |
387 const int32_t offset = ObjectPool::element_offset( | 383 const int32_t offset = ObjectPool::element_offset( |
388 object_pool_wrapper_.FindNativeEntry(label, kNotPatchable)); | 384 object_pool_wrapper_.FindNativeEntry(label, kNotPatchable)); |
389 LoadWordFromPoolOffset(dst, offset); | 385 LoadWordFromPoolOffset(dst, offset); |
390 } | 386 } |
391 | 387 |
392 | 388 |
393 void Assembler::LoadIsolate(Register dst) { | 389 void Assembler::LoadIsolate(Register dst) { |
394 ldr(dst, Address(THR, Thread::isolate_offset())); | 390 ldr(dst, Address(THR, Thread::isolate_offset())); |
395 } | 391 } |
396 | 392 |
(...skipping 337 matching lines...) |
734 Operand::Immediate) { | 730 Operand::Immediate) { |
735 cmn(rn, op); | 731 cmn(rn, op); |
736 } else { | 732 } else { |
737 ASSERT(rn != TMP2); | 733 ASSERT(rn != TMP2); |
738 LoadImmediate(TMP2, imm); | 734 LoadImmediate(TMP2, imm); |
739 cmp(rn, Operand(TMP2)); | 735 cmp(rn, Operand(TMP2)); |
740 } | 736 } |
741 } | 737 } |
742 | 738 |
743 | 739 |
744 void Assembler::LoadFromOffset( | 740 void Assembler::LoadFromOffset(Register dest, |
745 Register dest, Register base, int32_t offset, OperandSize sz) { | 741 Register base, |
| 742 int32_t offset, |
| 743 OperandSize sz) { |
746 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { | 744 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { |
747 ldr(dest, Address(base, offset, Address::Offset, sz), sz); | 745 ldr(dest, Address(base, offset, Address::Offset, sz), sz); |
748 } else { | 746 } else { |
749 ASSERT(base != TMP2); | 747 ASSERT(base != TMP2); |
750 AddImmediate(TMP2, base, offset); | 748 AddImmediate(TMP2, base, offset); |
751 ldr(dest, Address(TMP2), sz); | 749 ldr(dest, Address(TMP2), sz); |
752 } | 750 } |
753 } | 751 } |
754 | 752 |
755 | 753 |
(...skipping 12 matching lines...) |
768 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) { | 766 if (Address::CanHoldOffset(offset, Address::Offset, kQWord)) { |
769 fldrq(dest, Address(base, offset, Address::Offset, kQWord)); | 767 fldrq(dest, Address(base, offset, Address::Offset, kQWord)); |
770 } else { | 768 } else { |
771 ASSERT(base != TMP2); | 769 ASSERT(base != TMP2); |
772 AddImmediate(TMP2, base, offset); | 770 AddImmediate(TMP2, base, offset); |
773 fldrq(dest, Address(TMP2)); | 771 fldrq(dest, Address(TMP2)); |
774 } | 772 } |
775 } | 773 } |
776 | 774 |
777 | 775 |
778 void Assembler::StoreToOffset( | 776 void Assembler::StoreToOffset(Register src, |
779 Register src, Register base, int32_t offset, OperandSize sz) { | 777 Register base, |
| 778 int32_t offset, |
| 779 OperandSize sz) { |
780 ASSERT(base != TMP2); | 780 ASSERT(base != TMP2); |
781 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { | 781 if (Address::CanHoldOffset(offset, Address::Offset, sz)) { |
782 str(src, Address(base, offset, Address::Offset, sz), sz); | 782 str(src, Address(base, offset, Address::Offset, sz), sz); |
783 } else { | 783 } else { |
784 ASSERT(src != TMP2); | 784 ASSERT(src != TMP2); |
785 AddImmediate(TMP2, base, offset); | 785 AddImmediate(TMP2, base, offset); |
786 str(src, Address(TMP2), sz); | 786 str(src, Address(TMP2), sz); |
787 } | 787 } |
788 } | 788 } |
789 | 789 |
(...skipping 35 matching lines...) |
825 | 825 |
826 | 826 |
827 void Assembler::VRSqrts(VRegister vd, VRegister vn) { | 827 void Assembler::VRSqrts(VRegister vd, VRegister vn) { |
828 ASSERT(vd != VTMP); | 828 ASSERT(vd != VTMP); |
829 ASSERT(vn != VTMP); | 829 ASSERT(vn != VTMP); |
830 | 830 |
831 // Reciprocal square root estimate. | 831 // Reciprocal square root estimate. |
832 vrsqrtes(vd, vn); | 832 vrsqrtes(vd, vn); |
833 // 2 Newton-Raphson steps. xn+1 = xn * (3 - V1*xn^2) / 2. | 833 // 2 Newton-Raphson steps. xn+1 = xn * (3 - V1*xn^2) / 2. |
834 // First step. | 834 // First step. |
835 vmuls(VTMP, vd, vd); // VTMP <- xn^2 | 835 vmuls(VTMP, vd, vd); // VTMP <- xn^2 |
836 vrsqrtss(VTMP, vn, VTMP); // VTMP <- (3 - V1*VTMP) / 2. | 836 vrsqrtss(VTMP, vn, VTMP); // VTMP <- (3 - V1*VTMP) / 2. |
837 vmuls(vd, vd, VTMP); // xn+1 <- xn * VTMP | 837 vmuls(vd, vd, VTMP); // xn+1 <- xn * VTMP |
838 // Second step. | 838 // Second step. |
839 vmuls(VTMP, vd, vd); | 839 vmuls(VTMP, vd, vd); |
840 vrsqrtss(VTMP, vn, VTMP); | 840 vrsqrtss(VTMP, vn, VTMP); |
841 vmuls(vd, vd, VTMP); | 841 vmuls(vd, vd, VTMP); |
842 } | 842 } |
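The recurrence in the comment is Newton-Raphson for 1/sqrt(v): x_{n+1} = x_n * (3 - v * x_n^2) / 2, with vrsqrtes supplying the coarse seed and vrsqrtss producing the (3 - v*x^2)/2 factor in a single instruction. A scalar C++ model of the same two refinement steps, substituting a crude bit-trick seed for vrsqrtes:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Two Newton-Raphson steps of x <- x * (3 - v*x*x) / 2, converging
    // to 1/sqrt(v). The bit-trick seed stands in for vrsqrtes.
    static float RecipSqrt(float v) {
      uint32_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      bits = 0x5f3759df - (bits >> 1);    // rough seed (assumption)
      float x;
      std::memcpy(&x, &bits, sizeof(x));
      x = x * (3.0f - v * x * x) * 0.5f;  // vmuls/vrsqrtss/vmuls, step 1
      x = x * (3.0f - v * x * x) * 0.5f;  // step 2
      return x;
    }

    int main() {
      const float vals[] = {0.25f, 2.0f, 9.0f, 1.0e4f};
      for (float v : vals) {
        const float want = 1.0f / std::sqrt(v);
        assert(std::fabs(RecipSqrt(v) - want) <= 1.0e-4f * want);
      }
      return 0;
    }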
843 | 843 |
844 | 844 |
845 | |
846 // Store into object. | 845 // Store into object. |
847 // Preserves object and value registers. | 846 // Preserves object and value registers. |
848 void Assembler::StoreIntoObjectFilterNoSmi(Register object, | 847 void Assembler::StoreIntoObjectFilterNoSmi(Register object, |
849 Register value, | 848 Register value, |
850 Label* no_update) { | 849 Label* no_update) { |
851 COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) && | 850 COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) && |
852 (kOldObjectAlignmentOffset == 0)); | 851 (kOldObjectAlignmentOffset == 0)); |
853 | 852 |
854 // Write-barrier triggers if the value is in the new space (has bit set) and | 853 // Write-barrier triggers if the value is in the new space (has bit set) and |
855 // the object is in the old space (has bit cleared). | 854 // the object is in the old space (has bit cleared). |
(...skipping 17 matching lines...) |
873 tsti(TMP, Immediate(kNewObjectAlignmentOffset)); | 872 tsti(TMP, Immediate(kNewObjectAlignmentOffset)); |
874 b(no_update, EQ); | 873 b(no_update, EQ); |
875 } | 874 } |
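The filter leans on heap layout: new-space objects sit at addresses with the kNewObjectAlignmentOffset bit set and old-space objects with it clear, which the COMPILE_ASSERT pins down. The elided instructions presumably compute value & ~object into TMP, so the tsti/b(EQ) pair falls through to the barrier only when the value is new and the object is old. A host-side model of that predicate, with the layout constants assumed:

    #include <cassert>
    #include <cstdint>

    // Assumed layout: untagged addresses are double-word aligned, and
    // the word-size bit is set in new space, clear in old space.
    static const uintptr_t kWordSize = 8;
    static const uintptr_t kNewObjectAlignmentOffset = kWordSize;

    static bool BarrierNeeded(uintptr_t object_addr, uintptr_t value_addr) {
      // value & ~object keeps the alignment bit only when the value is
      // in new space AND the object is not; EQ (bit clear) skips it.
      return ((value_addr & ~object_addr) & kNewObjectAlignmentOffset) != 0;
    }

    int main() {
      const uintptr_t old_obj = 0x10000;              // bit clear: old
      const uintptr_t new_obj = 0x20000 | kWordSize;  // bit set: new
      assert(BarrierNeeded(old_obj, new_obj));   // new value, old object
      assert(!BarrierNeeded(new_obj, new_obj));  // new into new: scanned
      assert(!BarrierNeeded(old_obj, old_obj));  // old into old: skip
      return 0;
    }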
876 | 875 |
877 | 876 |
878 void Assembler::StoreIntoObjectOffset(Register object, | 877 void Assembler::StoreIntoObjectOffset(Register object, |
879 int32_t offset, | 878 int32_t offset, |
880 Register value, | 879 Register value, |
881 bool can_value_be_smi) { | 880 bool can_value_be_smi) { |
882 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { | 881 if (Address::CanHoldOffset(offset - kHeapObjectTag)) { |
883 StoreIntoObject( | 882 StoreIntoObject(object, FieldAddress(object, offset), value, |
884 object, FieldAddress(object, offset), value, can_value_be_smi); | 883 can_value_be_smi); |
885 } else { | 884 } else { |
886 AddImmediate(TMP, object, offset - kHeapObjectTag); | 885 AddImmediate(TMP, object, offset - kHeapObjectTag); |
887 StoreIntoObject(object, Address(TMP), value, can_value_be_smi); | 886 StoreIntoObject(object, Address(TMP), value, can_value_be_smi); |
888 } | 887 } |
889 } | 888 } |
890 | 889 |
891 | 890 |
892 void Assembler::StoreIntoObject(Register object, | 891 void Assembler::StoreIntoObject(Register object, |
893 const Address& dest, | 892 const Address& dest, |
894 Register value, | 893 Register value, |
(...skipping 74 matching lines...) |
969 } else { | 968 } else { |
970 AddImmediate(TMP, object, offset - kHeapObjectTag); | 969 AddImmediate(TMP, object, offset - kHeapObjectTag); |
971 StoreIntoObjectNoBarrier(object, Address(TMP), value); | 970 StoreIntoObjectNoBarrier(object, Address(TMP), value); |
972 } | 971 } |
973 } | 972 } |
974 | 973 |
975 | 974 |
976 void Assembler::LoadClassId(Register result, Register object) { | 975 void Assembler::LoadClassId(Register result, Register object) { |
977 ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32); | 976 ASSERT(RawObject::kClassIdTagPos == kBitsPerInt32); |
978 ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32); | 977 ASSERT(RawObject::kClassIdTagSize == kBitsPerInt32); |
979 const intptr_t class_id_offset = Object::tags_offset() + | 978 const intptr_t class_id_offset = |
980 RawObject::kClassIdTagPos / kBitsPerByte; | 979 Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte; |
981 LoadFromOffset(result, object, class_id_offset - kHeapObjectTag, | 980 LoadFromOffset(result, object, class_id_offset - kHeapObjectTag, |
982 kUnsignedWord); | 981 kUnsignedWord); |
983 } | 982 } |
984 | 983 |
985 | 984 |
986 void Assembler::LoadClassById(Register result, Register class_id) { | 985 void Assembler::LoadClassById(Register result, Register class_id) { |
987 ASSERT(result != class_id); | 986 ASSERT(result != class_id); |
988 LoadIsolate(result); | 987 LoadIsolate(result); |
989 const intptr_t offset = | 988 const intptr_t offset = |
990 Isolate::class_table_offset() + ClassTable::table_offset(); | 989 Isolate::class_table_offset() + ClassTable::table_offset(); |
(...skipping 292 matching lines...) |
1283 intptr_t table_offset = | 1282 intptr_t table_offset = |
1284 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 1283 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |
1285 ldr(temp_reg, Address(temp_reg, table_offset)); | 1284 ldr(temp_reg, Address(temp_reg, table_offset)); |
1286 AddImmediate(temp_reg, temp_reg, state_offset); | 1285 AddImmediate(temp_reg, temp_reg, state_offset); |
1287 ldr(temp_reg, Address(temp_reg, 0)); | 1286 ldr(temp_reg, Address(temp_reg, 0)); |
1288 tsti(temp_reg, Immediate(ClassHeapStats::TraceAllocationMask())); | 1287 tsti(temp_reg, Immediate(ClassHeapStats::TraceAllocationMask())); |
1289 b(trace, NE); | 1288 b(trace, NE); |
1290 } | 1289 } |
1291 | 1290 |
1292 | 1291 |
1293 void Assembler::UpdateAllocationStats(intptr_t cid, | 1292 void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) { |
1294 Heap::Space space) { | |
1295 ASSERT(cid > 0); | 1293 ASSERT(cid > 0); |
1296 intptr_t counter_offset = | 1294 intptr_t counter_offset = |
1297 ClassTable::CounterOffsetFor(cid, space == Heap::kNew); | 1295 ClassTable::CounterOffsetFor(cid, space == Heap::kNew); |
1298 LoadIsolate(TMP2); | 1296 LoadIsolate(TMP2); |
1299 intptr_t table_offset = | 1297 intptr_t table_offset = |
1300 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 1298 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |
1301 ldr(TMP, Address(TMP2, table_offset)); | 1299 ldr(TMP, Address(TMP2, table_offset)); |
1302 AddImmediate(TMP2, TMP, counter_offset); | 1300 AddImmediate(TMP2, TMP, counter_offset); |
1303 ldr(TMP, Address(TMP2, 0)); | 1301 ldr(TMP, Address(TMP2, 0)); |
1304 AddImmediate(TMP, TMP, 1); | 1302 AddImmediate(TMP, TMP, 1); |
1305 str(TMP, Address(TMP2, 0)); | 1303 str(TMP, Address(TMP2, 0)); |
1306 } | 1304 } |
1307 | 1305 |
1308 | 1306 |
1309 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, | 1307 void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, |
1310 Register size_reg, | 1308 Register size_reg, |
1311 Heap::Space space) { | 1309 Heap::Space space) { |
1312 ASSERT(cid > 0); | 1310 ASSERT(cid > 0); |
1313 const uword class_offset = ClassTable::ClassOffsetFor(cid); | 1311 const uword class_offset = ClassTable::ClassOffsetFor(cid); |
1314 const uword count_field_offset = (space == Heap::kNew) ? | 1312 const uword count_field_offset = |
1315 ClassHeapStats::allocated_since_gc_new_space_offset() : | 1313 (space == Heap::kNew) |
1316 ClassHeapStats::allocated_since_gc_old_space_offset(); | 1314 ? ClassHeapStats::allocated_since_gc_new_space_offset() |
1317 const uword size_field_offset = (space == Heap::kNew) ? | 1315 : ClassHeapStats::allocated_since_gc_old_space_offset(); |
1318 ClassHeapStats::allocated_size_since_gc_new_space_offset() : | 1316 const uword size_field_offset = |
1319 ClassHeapStats::allocated_size_since_gc_old_space_offset(); | 1317 (space == Heap::kNew) |
| 1318 ? ClassHeapStats::allocated_size_since_gc_new_space_offset() |
| 1319 : ClassHeapStats::allocated_size_since_gc_old_space_offset(); |
1320 LoadIsolate(TMP2); | 1320 LoadIsolate(TMP2); |
1321 intptr_t table_offset = | 1321 intptr_t table_offset = |
1322 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); | 1322 Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid); |
1323 ldr(TMP, Address(TMP2, table_offset)); | 1323 ldr(TMP, Address(TMP2, table_offset)); |
1324 AddImmediate(TMP2, TMP, class_offset); | 1324 AddImmediate(TMP2, TMP, class_offset); |
1325 ldr(TMP, Address(TMP2, count_field_offset)); | 1325 ldr(TMP, Address(TMP2, count_field_offset)); |
1326 AddImmediate(TMP, TMP, 1); | 1326 AddImmediate(TMP, TMP, 1); |
1327 str(TMP, Address(TMP2, count_field_offset)); | 1327 str(TMP, Address(TMP2, count_field_offset)); |
1328 ldr(TMP, Address(TMP2, size_field_offset)); | 1328 ldr(TMP, Address(TMP2, size_field_offset)); |
1329 add(TMP, TMP, Operand(size_reg)); | 1329 add(TMP, TMP, Operand(size_reg)); |
(...skipping 23 matching lines...) |
1353 ldr(TMP, Address(temp_reg, Heap::EndOffset(space))); | 1353 ldr(TMP, Address(temp_reg, Heap::EndOffset(space))); |
1354 CompareRegisters(TMP, instance_reg); | 1354 CompareRegisters(TMP, instance_reg); |
1355 // fail if heap end unsigned less than or equal to instance_reg. | 1355 // fail if heap end unsigned less than or equal to instance_reg. |
1356 b(failure, LS); | 1356 b(failure, LS); |
1357 | 1357 |
1358 // Successfully allocated the object, now update top to point to | 1358 // Successfully allocated the object, now update top to point to |
1359 // next object start and store the class in the class field of object. | 1359 // next object start and store the class in the class field of object. |
1360 str(instance_reg, Address(temp_reg, Heap::TopOffset(space))); | 1360 str(instance_reg, Address(temp_reg, Heap::TopOffset(space))); |
1361 | 1361 |
1362 ASSERT(instance_size >= kHeapObjectTag); | 1362 ASSERT(instance_size >= kHeapObjectTag); |
1363 AddImmediate( | 1363 AddImmediate(instance_reg, instance_reg, -instance_size + kHeapObjectTag); |
1364 instance_reg, instance_reg, -instance_size + kHeapObjectTag); | |
1365 NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), space)); | 1364 NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), space)); |
1366 | 1365 |
1367 uword tags = 0; | 1366 uword tags = 0; |
1368 tags = RawObject::SizeTag::update(instance_size, tags); | 1367 tags = RawObject::SizeTag::update(instance_size, tags); |
1369 ASSERT(cls.id() != kIllegalCid); | 1368 ASSERT(cls.id() != kIllegalCid); |
1370 tags = RawObject::ClassIdTag::update(cls.id(), tags); | 1369 tags = RawObject::ClassIdTag::update(cls.id(), tags); |
1371 LoadImmediate(TMP, tags); | 1370 LoadImmediate(TMP, tags); |
1372 StoreFieldToOffset(TMP, instance_reg, Object::tags_offset()); | 1371 StoreFieldToOffset(TMP, instance_reg, Object::tags_offset()); |
1373 } else { | 1372 } else { |
1374 b(failure); | 1373 b(failure); |
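The fast path above is plain bump allocation: the prospective new top (computed into instance_reg in the elided lines) is compared against the heap end, committed as the new top, then rewound by instance_size and tagged to yield the object pointer, before a tags word combining size and class id is stored into the header. A simplified sketch of that flow, with hypothetical names and an assumed tag value:

    #include <cassert>
    #include <cstdint>

    // Hypothetical bump-pointer fast path mirroring TryAllocate:
    // bail to the slow path if the new top passes the end, otherwise
    // commit it and return the old top plus the heap-object tag.
    struct Scavenger { uintptr_t top, end; };
    static const uintptr_t kHeapObjectTag = 1;  // assumption

    static bool TryAllocateSketch(Scavenger* heap, uintptr_t size,
                                  uintptr_t* tagged_out) {
      const uintptr_t new_top = heap->top + size;
      if (heap->end <= new_top) return false;  // b(failure, LS) analogue
      heap->top = new_top;                     // store new top
      *tagged_out = new_top - size + kHeapObjectTag;  // rewind + tag
      return true;
    }

    int main() {
      Scavenger heap = {0x1000, 0x2000};
      uintptr_t obj;
      assert(TryAllocateSketch(&heap, 64, &obj));
      assert(obj == 0x1000 + kHeapObjectTag);
      assert(heap.top == 0x1040);
      assert(!TryAllocateSketch(&heap, 0x10000, &obj));  // slow path
      return 0;
    }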
(...skipping 45 matching lines...) |
1420 b(failure); | 1419 b(failure); |
1421 } | 1420 } |
1422 } | 1421 } |
1423 | 1422 |
1424 | 1423 |
1425 Address Assembler::ElementAddressForIntIndex(bool is_external, | 1424 Address Assembler::ElementAddressForIntIndex(bool is_external, |
1426 intptr_t cid, | 1425 intptr_t cid, |
1427 intptr_t index_scale, | 1426 intptr_t index_scale, |
1428 Register array, | 1427 Register array, |
1429 intptr_t index) const { | 1428 intptr_t index) const { |
1430 const int64_t offset = index * index_scale + | 1429 const int64_t offset = |
| 1430 index * index_scale + |
1431 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | 1431 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
1432 ASSERT(Utils::IsInt(32, offset)); | 1432 ASSERT(Utils::IsInt(32, offset)); |
1433 const OperandSize size = Address::OperandSizeFor(cid); | 1433 const OperandSize size = Address::OperandSizeFor(cid); |
1434 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size)); | 1434 ASSERT(Address::CanHoldOffset(offset, Address::Offset, size)); |
1435 return Address(array, static_cast<int32_t>(offset), Address::Offset, size); | 1435 return Address(array, static_cast<int32_t>(offset), Address::Offset, size); |
1436 } | 1436 } |
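Everything here folds into a single immediate: index * index_scale plus, for internal typed data, the payload's displacement inside the boxed object less the heap-object tag. A worked example with a stand-in data offset (the real value comes from Instance::DataOffsetFor(cid)):

    #include <cassert>
    #include <cstdint>

    // Worked example of the offset formula; kDataOffset is a
    // hypothetical stand-in for Instance::DataOffsetFor(cid).
    int main() {
      const int64_t kDataOffset = 16;  // assumption for illustration
      const int64_t kHeapObjectTag = 1;
      const int64_t index = 3, index_scale = 8;  // e.g. 8-byte elements
      const bool is_external = false;
      const int64_t offset =
          index * index_scale +
          (is_external ? 0 : (kDataOffset - kHeapObjectTag));
      assert(offset == 3 * 8 + 15);  // 39, folded into the address
      return 0;
    }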
1437 | 1437 |
1438 | 1438 |
1439 void Assembler::LoadElementAddressForIntIndex(Register address, | 1439 void Assembler::LoadElementAddressForIntIndex(Register address, |
1440 bool is_external, | 1440 bool is_external, |
1441 intptr_t cid, | 1441 intptr_t cid, |
1442 intptr_t index_scale, | 1442 intptr_t index_scale, |
1443 Register array, | 1443 Register array, |
1444 intptr_t index) { | 1444 intptr_t index) { |
1445 const int64_t offset = index * index_scale + | 1445 const int64_t offset = |
| 1446 index * index_scale + |
1446 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); | 1447 (is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag)); |
1447 AddImmediate(address, array, offset); | 1448 AddImmediate(address, array, offset); |
1448 } | 1449 } |
1449 | 1450 |
1450 | 1451 |
1451 Address Assembler::ElementAddressForRegIndex(bool is_load, | 1452 Address Assembler::ElementAddressForRegIndex(bool is_load, |
1452 bool is_external, | 1453 bool is_external, |
1453 intptr_t cid, | 1454 intptr_t cid, |
1454 intptr_t index_scale, | 1455 intptr_t index_scale, |
1455 Register array, | 1456 Register array, |
(...skipping 37 matching lines...) |
1493 add(address, array, Operand(index, ASR, 1)); | 1494 add(address, array, Operand(index, ASR, 1)); |
1494 } else { | 1495 } else { |
1495 add(address, array, Operand(index, LSL, shift)); | 1496 add(address, array, Operand(index, LSL, shift)); |
1496 } | 1497 } |
1497 if (offset != 0) { | 1498 if (offset != 0) { |
1498 AddImmediate(address, address, offset); | 1499 AddImmediate(address, address, offset); |
1499 } | 1500 } |
1500 } | 1501 } |
1501 | 1502 |
1502 | 1503 |
1503 void Assembler::LoadUnaligned(Register dst, Register addr, Register tmp, | 1504 void Assembler::LoadUnaligned(Register dst, |
| 1505 Register addr, |
| 1506 Register tmp, |
1504 OperandSize sz) { | 1507 OperandSize sz) { |
1505 ASSERT(dst != addr); | 1508 ASSERT(dst != addr); |
1506 ldr(dst, Address(addr, 0), kUnsignedByte); | 1509 ldr(dst, Address(addr, 0), kUnsignedByte); |
1507 if (sz == kHalfword) { | 1510 if (sz == kHalfword) { |
1508 ldr(tmp, Address(addr, 1), kByte); | 1511 ldr(tmp, Address(addr, 1), kByte); |
1509 orr(dst, dst, Operand(tmp, LSL, 8)); | 1512 orr(dst, dst, Operand(tmp, LSL, 8)); |
1510 return; | 1513 return; |
1511 } | 1514 } |
1512 ldr(tmp, Address(addr, 1), kUnsignedByte); | 1515 ldr(tmp, Address(addr, 1), kUnsignedByte); |
1513 orr(dst, dst, Operand(tmp, LSL, 8)); | 1516 orr(dst, dst, Operand(tmp, LSL, 8)); |
(...skipping 20 matching lines...) |
1534 orr(dst, dst, Operand(tmp, LSL, 48)); | 1537 orr(dst, dst, Operand(tmp, LSL, 48)); |
1535 ldr(tmp, Address(addr, 7), kUnsignedByte); | 1538 ldr(tmp, Address(addr, 7), kUnsignedByte); |
1536 orr(dst, dst, Operand(tmp, LSL, 56)); | 1539 orr(dst, dst, Operand(tmp, LSL, 56)); |
1537 if (sz == kDoubleWord) { | 1540 if (sz == kDoubleWord) { |
1538 return; | 1541 return; |
1539 } | 1542 } |
1540 UNIMPLEMENTED(); | 1543 UNIMPLEMENTED(); |
1541 } | 1544 } |
1542 | 1545 |
1543 | 1546 |
1544 void Assembler::StoreUnaligned(Register src, Register addr, Register tmp, | 1547 void Assembler::StoreUnaligned(Register src, |
| 1548 Register addr, |
| 1549 Register tmp, |
1545 OperandSize sz) { | 1550 OperandSize sz) { |
1546 str(src, Address(addr, 0), kUnsignedByte); | 1551 str(src, Address(addr, 0), kUnsignedByte); |
1547 LsrImmediate(tmp, src, 8); | 1552 LsrImmediate(tmp, src, 8); |
1548 str(tmp, Address(addr, 1), kUnsignedByte); | 1553 str(tmp, Address(addr, 1), kUnsignedByte); |
1549 if ((sz == kHalfword) || (sz == kUnsignedHalfword)) { | 1554 if ((sz == kHalfword) || (sz == kUnsignedHalfword)) { |
1550 return; | 1555 return; |
1551 } | 1556 } |
1552 LsrImmediate(tmp, src, 16); | 1557 LsrImmediate(tmp, src, 16); |
1553 str(tmp, Address(addr, 2), kUnsignedByte); | 1558 str(tmp, Address(addr, 2), kUnsignedByte); |
1554 LsrImmediate(tmp, src, 24); | 1559 LsrImmediate(tmp, src, 24); |
(...skipping 11 matching lines...) |
1566 str(tmp, Address(addr, 7), kUnsignedByte); | 1571 str(tmp, Address(addr, 7), kUnsignedByte); |
1567 if (sz == kDoubleWord) { | 1572 if (sz == kDoubleWord) { |
1568 return; | 1573 return; |
1569 } | 1574 } |
1570 UNIMPLEMENTED(); | 1575 UNIMPLEMENTED(); |
1571 } | 1576 } |
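Both helpers synthesize the access one byte at a time in little-endian order: LoadUnaligned ORs each byte in at its shift position (loading only the top byte of a signed size as kByte so it sign-extends), and StoreUnaligned peels bytes off with logical shift rights. A host-side round trip of the 32-bit case (a sketch of the emitted sequence, not the assembler API):

    #include <cassert>
    #include <cstdint>

    // Byte-wise unaligned 32-bit load/store, little-endian, mirroring
    // the ldr/orr and LsrImmediate/str sequences above.
    static uint32_t LoadUnaligned32(const uint8_t* p) {
      uint32_t v = p[0];
      v |= static_cast<uint32_t>(p[1]) << 8;
      v |= static_cast<uint32_t>(p[2]) << 16;
      v |= static_cast<uint32_t>(p[3]) << 24;
      return v;
    }

    static void StoreUnaligned32(uint8_t* p, uint32_t v) {
      p[0] = v & 0xff;
      p[1] = (v >> 8) & 0xff;   // LsrImmediate(tmp, src, 8); str byte
      p[2] = (v >> 16) & 0xff;
      p[3] = (v >> 24) & 0xff;
    }

    int main() {
      uint8_t buf[8] = {0};
      StoreUnaligned32(buf + 1, 0xdeadbeef);  // deliberately misaligned
      assert(LoadUnaligned32(buf + 1) == 0xdeadbeef);
      return 0;
    }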
1572 | 1577 |
1573 } // namespace dart | 1578 } // namespace dart |
1574 | 1579 |
1575 #endif // defined TARGET_ARCH_ARM64 | 1580 #endif // defined TARGET_ARCH_ARM64 |