OLD | NEW |
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/code_generator.h" | 9 #include "vm/code_generator.h" |
10 #include "vm/compiler.h" | 10 #include "vm/compiler.h" |
11 #include "vm/dart_entry.h" | 11 #include "vm/dart_entry.h" |
12 #include "vm/flow_graph_compiler.h" | 12 #include "vm/flow_graph_compiler.h" |
13 #include "vm/heap.h" | 13 #include "vm/heap.h" |
14 #include "vm/instructions.h" | 14 #include "vm/instructions.h" |
15 #include "vm/object_store.h" | 15 #include "vm/object_store.h" |
16 #include "vm/stack_frame.h" | 16 #include "vm/stack_frame.h" |
17 #include "vm/stub_code.h" | 17 #include "vm/stub_code.h" |
18 #include "vm/tags.h" | 18 #include "vm/tags.h" |
19 | 19 |
20 #define __ assembler-> | 20 #define __ assembler-> |
21 | 21 |
22 namespace dart { | 22 namespace dart { |
23 | 23 |
24 DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects."); | 24 DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects."); |
25 DEFINE_FLAG(bool, use_slow_path, false, | 25 DEFINE_FLAG(bool, |
26 "Set to true for debugging & verifying the slow paths."); | 26 use_slow_path, |
| 27 false, |
| 28 "Set to true for debugging & verifying the slow paths."); |
27 DECLARE_FLAG(bool, trace_optimized_ic_calls); | 29 DECLARE_FLAG(bool, trace_optimized_ic_calls); |
28 | 30 |
29 // Input parameters: | 31 // Input parameters: |
30 // LR : return address. | 32 // LR : return address. |
31 // SP : address of last argument in argument array. | 33 // SP : address of last argument in argument array. |
32 // SP + 8*R4 - 8 : address of first argument in argument array. | 34 // SP + 8*R4 - 8 : address of first argument in argument array. |
33 // SP + 8*R4 : address of return value. | 35 // SP + 8*R4 : address of return value. |
34 // R5 : address of the runtime function to call. | 36 // R5 : address of the runtime function to call. |
35 // R4 : number of arguments to the call. | 37 // R4 : number of arguments to the call. |
36 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { | 38 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { |
37 const intptr_t thread_offset = NativeArguments::thread_offset(); | 39 const intptr_t thread_offset = NativeArguments::thread_offset(); |
38 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | 40 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); |
39 const intptr_t argv_offset = NativeArguments::argv_offset(); | 41 const intptr_t argv_offset = NativeArguments::argv_offset(); |
40 const intptr_t retval_offset = NativeArguments::retval_offset(); | 42 const intptr_t retval_offset = NativeArguments::retval_offset(); |
41 | 43 |
42 __ SetPrologueOffset(); | 44 __ SetPrologueOffset(); |
43 __ Comment("CallToRuntimeStub"); | 45 __ Comment("CallToRuntimeStub"); |
44 __ EnterStubFrame(); | 46 __ EnterStubFrame(); |
45 | 47 |
46 // Save exit frame information to enable stack walking as we are about | 48 // Save exit frame information to enable stack walking as we are about |
47 // to transition to Dart VM C++ code. | 49 // to transition to Dart VM C++ code. |
48 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset()); | 50 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset()); |
49 | 51 |
50 #if defined(DEBUG) | 52 #if defined(DEBUG) |
51 { Label ok; | 53 { |
| 54 Label ok; |
52 // Check that we are always entering from Dart code. | 55 // Check that we are always entering from Dart code. |
53 __ LoadFromOffset(R8, THR, Thread::vm_tag_offset()); | 56 __ LoadFromOffset(R8, THR, Thread::vm_tag_offset()); |
54 __ CompareImmediate(R8, VMTag::kDartTagId); | 57 __ CompareImmediate(R8, VMTag::kDartTagId); |
55 __ b(&ok, EQ); | 58 __ b(&ok, EQ); |
56 __ Stop("Not coming from Dart code."); | 59 __ Stop("Not coming from Dart code."); |
57 __ Bind(&ok); | 60 __ Bind(&ok); |
58 } | 61 } |
59 #endif | 62 #endif |
60 | 63 |
61 // Mark that the thread is executing VM code. | 64 // Mark that the thread is executing VM code. |
(...skipping 17 matching lines...) Expand all Loading... |
79 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | 82 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. |
80 ASSERT(argc_tag_offset == 1 * kWordSize); | 83 ASSERT(argc_tag_offset == 1 * kWordSize); |
81 __ mov(R1, R4); // Set argc in NativeArguments. | 84 __ mov(R1, R4); // Set argc in NativeArguments. |
82 | 85 |
83 ASSERT(argv_offset == 2 * kWordSize); | 86 ASSERT(argv_offset == 2 * kWordSize); |
84 __ add(R2, ZR, Operand(R4, LSL, 3)); | 87 __ add(R2, ZR, Operand(R4, LSL, 3)); |
85 __ add(R2, FP, Operand(R2)); // Compute argv. | 88 __ add(R2, FP, Operand(R2)); // Compute argv. |
86 // Set argv in NativeArguments. | 89 // Set argv in NativeArguments. |
87 __ AddImmediate(R2, R2, kParamEndSlotFromFp * kWordSize); | 90 __ AddImmediate(R2, R2, kParamEndSlotFromFp * kWordSize); |
88 | 91 |
89 ASSERT(retval_offset == 3 * kWordSize); | 92 ASSERT(retval_offset == 3 * kWordSize); |
90 __ AddImmediate(R3, R2, kWordSize); | 93 __ AddImmediate(R3, R2, kWordSize); |
91 | 94 |
92 __ StoreToOffset(R0, SP, thread_offset); | 95 __ StoreToOffset(R0, SP, thread_offset); |
93 __ StoreToOffset(R1, SP, argc_tag_offset); | 96 __ StoreToOffset(R1, SP, argc_tag_offset); |
94 __ StoreToOffset(R2, SP, argv_offset); | 97 __ StoreToOffset(R2, SP, argv_offset); |
95 __ StoreToOffset(R3, SP, retval_offset); | 98 __ StoreToOffset(R3, SP, retval_offset); |
96 __ mov(R0, SP); // Pass the pointer to the NativeArguments. | 99 __ mov(R0, SP); // Pass the pointer to the NativeArguments. |
97 | 100 |
98 // We are entering runtime code, so the C stack pointer must be restored from | 101 // We are entering runtime code, so the C stack pointer must be restored from |
99 // the stack limit to the top of the stack. We cache the stack limit address | 102 // the stack limit to the top of the stack. We cache the stack limit address |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
145 const intptr_t argv_offset = NativeArguments::argv_offset(); | 148 const intptr_t argv_offset = NativeArguments::argv_offset(); |
146 const intptr_t retval_offset = NativeArguments::retval_offset(); | 149 const intptr_t retval_offset = NativeArguments::retval_offset(); |
147 | 150 |
148 __ EnterStubFrame(); | 151 __ EnterStubFrame(); |
149 | 152 |
150 // Save exit frame information to enable stack walking as we are about | 153 // Save exit frame information to enable stack walking as we are about |
151 // to transition to native code. | 154 // to transition to native code. |
152 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset()); | 155 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset()); |
153 | 156 |
154 #if defined(DEBUG) | 157 #if defined(DEBUG) |
155 { Label ok; | 158 { |
| 159 Label ok; |
156 // Check that we are always entering from Dart code. | 160 // Check that we are always entering from Dart code. |
157 __ LoadFromOffset(R6, THR, Thread::vm_tag_offset()); | 161 __ LoadFromOffset(R6, THR, Thread::vm_tag_offset()); |
158 __ CompareImmediate(R6, VMTag::kDartTagId); | 162 __ CompareImmediate(R6, VMTag::kDartTagId); |
159 __ b(&ok, EQ); | 163 __ b(&ok, EQ); |
160 __ Stop("Not coming from Dart code."); | 164 __ Stop("Not coming from Dart code."); |
161 __ Bind(&ok); | 165 __ Bind(&ok); |
162 } | 166 } |
163 #endif | 167 #endif |
164 | 168 |
165 // Mark that the thread is executing native code. | 169 // Mark that the thread is executing native code. |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
238 const intptr_t argv_offset = NativeArguments::argv_offset(); | 242 const intptr_t argv_offset = NativeArguments::argv_offset(); |
239 const intptr_t retval_offset = NativeArguments::retval_offset(); | 243 const intptr_t retval_offset = NativeArguments::retval_offset(); |
240 | 244 |
241 __ EnterStubFrame(); | 245 __ EnterStubFrame(); |
242 | 246 |
243 // Save exit frame information to enable stack walking as we are about | 247 // Save exit frame information to enable stack walking as we are about |
244 // to transition to native code. | 248 // to transition to native code. |
245 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset()); | 249 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset()); |
246 | 250 |
247 #if defined(DEBUG) | 251 #if defined(DEBUG) |
248 { Label ok; | 252 { |
| 253 Label ok; |
249 // Check that we are always entering from Dart code. | 254 // Check that we are always entering from Dart code. |
250 __ LoadFromOffset(R6, THR, Thread::vm_tag_offset()); | 255 __ LoadFromOffset(R6, THR, Thread::vm_tag_offset()); |
251 __ CompareImmediate(R6, VMTag::kDartTagId); | 256 __ CompareImmediate(R6, VMTag::kDartTagId); |
252 __ b(&ok, EQ); | 257 __ b(&ok, EQ); |
253 __ Stop("Not coming from Dart code."); | 258 __ Stop("Not coming from Dart code."); |
254 __ Bind(&ok); | 259 __ Bind(&ok); |
255 } | 260 } |
256 #endif | 261 #endif |
257 | 262 |
258 // Mark that the thread is executing native code. | 263 // Mark that the thread is executing native code. |
(...skipping 213 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
472 __ str(r, Address(SP, -1 * kWordSize, Address::PreIndex)); | 477 __ str(r, Address(SP, -1 * kWordSize, Address::PreIndex)); |
473 } | 478 } |
474 } | 479 } |
475 | 480 |
476 for (intptr_t reg_idx = kNumberOfVRegisters - 1; reg_idx >= 0; reg_idx--) { | 481 for (intptr_t reg_idx = kNumberOfVRegisters - 1; reg_idx >= 0; reg_idx--) { |
477 VRegister vreg = static_cast<VRegister>(reg_idx); | 482 VRegister vreg = static_cast<VRegister>(reg_idx); |
478 __ PushQuad(vreg); | 483 __ PushQuad(vreg); |
479 } | 484 } |
480 | 485 |
481 __ mov(R0, SP); // Pass address of saved registers block. | 486 __ mov(R0, SP); // Pass address of saved registers block. |
482 bool is_lazy = (kind == kLazyDeoptFromReturn) || | 487 bool is_lazy = |
483 (kind == kLazyDeoptFromThrow); | 488 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow); |
484 __ LoadImmediate(R1, is_lazy ? 1 : 0); | 489 __ LoadImmediate(R1, is_lazy ? 1 : 0); |
485 __ ReserveAlignedFrameSpace(0); | 490 __ ReserveAlignedFrameSpace(0); |
486 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); | 491 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); |
487 // Result (R0) is stack-size (FP - SP) in bytes. | 492 // Result (R0) is stack-size (FP - SP) in bytes. |
488 | 493 |
489 if (kind == kLazyDeoptFromReturn) { | 494 if (kind == kLazyDeoptFromReturn) { |
490 // Restore result into R1 temporarily. | 495 // Restore result into R1 temporarily. |
491 __ LoadFromOffset(R1, FP, saved_result_slot_from_fp * kWordSize); | 496 __ LoadFromOffset(R1, FP, saved_result_slot_from_fp * kWordSize); |
492 } else if (kind == kLazyDeoptFromThrow) { | 497 } else if (kind == kLazyDeoptFromThrow) { |
493 // Restore result into R1 temporarily. | 498 // Restore result into R1 temporarily. |
(...skipping 237 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
731 __ add(R0, R0, Operand(kHeapObjectTag)); | 736 __ add(R0, R0, Operand(kHeapObjectTag)); |
732 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R3, space)); | 737 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R3, space)); |
733 | 738 |
734 // R0: new object start as a tagged pointer. | 739 // R0: new object start as a tagged pointer. |
735 // R1: array element type. | 740 // R1: array element type. |
736 // R2: array length as Smi. | 741 // R2: array length as Smi. |
737 // R3: array size. | 742 // R3: array size. |
738 // R7: new object end address. | 743 // R7: new object end address. |
739 | 744 |
740 // Store the type argument field. | 745 // Store the type argument field. |
741 __ StoreIntoObjectOffsetNoBarrier( | 746 __ StoreIntoObjectOffsetNoBarrier(R0, Array::type_arguments_offset(), R1); |
742 R0, Array::type_arguments_offset(), R1); | |
743 | 747 |
744 // Set the length field. | 748 // Set the length field. |
745 __ StoreIntoObjectOffsetNoBarrier(R0, Array::length_offset(), R2); | 749 __ StoreIntoObjectOffsetNoBarrier(R0, Array::length_offset(), R2); |
746 | 750 |
747 // Calculate the size tag. | 751 // Calculate the size tag. |
748 // R0: new object start as a tagged pointer. | 752 // R0: new object start as a tagged pointer. |
749 // R2: array length as Smi. | 753 // R2: array length as Smi. |
750 // R3: array size. | 754 // R3: array size. |
751 // R7: new object end address. | 755 // R7: new object end address. |
752 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | 756 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; |
(...skipping 260 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1013 // R0: new object. | 1017 // R0: new object. |
1014 // R1: number of context variables. | 1018 // R1: number of context variables. |
1015 __ LoadObject(R2, Object::null_object()); | 1019 __ LoadObject(R2, Object::null_object()); |
1016 __ StoreFieldToOffset(R2, R0, Context::parent_offset()); | 1020 __ StoreFieldToOffset(R2, R0, Context::parent_offset()); |
1017 | 1021 |
1018 // Initialize the context variables. | 1022 // Initialize the context variables. |
1019 // R0: new object. | 1023 // R0: new object. |
1020 // R1: number of context variables. | 1024 // R1: number of context variables. |
1021 // R2: raw null. | 1025 // R2: raw null. |
1022 Label loop, done; | 1026 Label loop, done; |
1023 __ AddImmediate( | 1027 __ AddImmediate(R3, R0, Context::variable_offset(0) - kHeapObjectTag); |
1024 R3, R0, Context::variable_offset(0) - kHeapObjectTag); | |
1025 __ Bind(&loop); | 1028 __ Bind(&loop); |
1026 __ subs(R1, R1, Operand(1)); | 1029 __ subs(R1, R1, Operand(1)); |
1027 __ b(&done, MI); | 1030 __ b(&done, MI); |
1028 __ str(R2, Address(R3, R1, UXTX, Address::Scaled)); | 1031 __ str(R2, Address(R3, R1, UXTX, Address::Scaled)); |
1029 __ b(&loop, NE); // Loop if R1 not zero. | 1032 __ b(&loop, NE); // Loop if R1 not zero. |
1030 __ Bind(&done); | 1033 __ Bind(&done); |
1031 | 1034 |
1032 // Done allocating and initializing the context. | 1035 // Done allocating and initializing the context. |
1033 // R0: new object. | 1036 // R0: new object. |
1034 __ ret(); | 1037 __ ret(); |
(...skipping 142 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1177 | 1180 |
1178 // R0: raw null. | 1181 // R0: raw null. |
1179 // R2: new object start. | 1182 // R2: new object start. |
1180 // R3: next object start. | 1183 // R3: next object start. |
1181 // R1: new object type arguments (if is_cls_parameterized). | 1184 // R1: new object type arguments (if is_cls_parameterized). |
1182 // First try inlining the initialization without a loop. | 1185 // First try inlining the initialization without a loop. |
1183 if (instance_size < (kInlineInstanceSize * kWordSize)) { | 1186 if (instance_size < (kInlineInstanceSize * kWordSize)) { |
1184 // Check if the object contains any non-header fields. | 1187 // Check if the object contains any non-header fields. |
1185 // Small objects are initialized using a consecutive set of writes. | 1188 // Small objects are initialized using a consecutive set of writes. |
1186 for (intptr_t current_offset = Instance::NextFieldOffset(); | 1189 for (intptr_t current_offset = Instance::NextFieldOffset(); |
1187 current_offset < instance_size; | 1190 current_offset < instance_size; current_offset += kWordSize) { |
1188 current_offset += kWordSize) { | |
1189 __ StoreToOffset(R0, R2, current_offset); | 1191 __ StoreToOffset(R0, R2, current_offset); |
1190 } | 1192 } |
1191 } else { | 1193 } else { |
1192 __ AddImmediate(R4, R2, Instance::NextFieldOffset()); | 1194 __ AddImmediate(R4, R2, Instance::NextFieldOffset()); |
1193 // Loop until the whole object is initialized. | 1195 // Loop until the whole object is initialized. |
1194 // R0: raw null. | 1196 // R0: raw null. |
1195 // R2: new object. | 1197 // R2: new object. |
1196 // R3: next object start. | 1198 // R3: next object start. |
1197 // R4: next word to be initialized. | 1199 // R4: next word to be initialized. |
1198 // R1: new object type arguments (if is_cls_parameterized). | 1200 // R1: new object type arguments (if is_cls_parameterized). |
(...skipping 18 matching lines...) Expand all Loading... |
1217 // R0: new object. | 1219 // R0: new object. |
1218 __ ret(); | 1220 __ ret(); |
1219 | 1221 |
1220 __ Bind(&slow_case); | 1222 __ Bind(&slow_case); |
1221 } | 1223 } |
1222 // If is_cls_parameterized: | 1224 // If is_cls_parameterized: |
1223 // R1: new object type arguments. | 1225 // R1: new object type arguments. |
1224 // Create a stub frame as we are pushing some objects on the stack before | 1226 // Create a stub frame as we are pushing some objects on the stack before |
1225 // calling into the runtime. | 1227 // calling into the runtime. |
1226 __ EnterStubFrame(); // Uses pool pointer to pass cls to runtime. | 1228 __ EnterStubFrame(); // Uses pool pointer to pass cls to runtime. |
1227 __ Push(ZR); // Result slot. | 1229 __ Push(ZR); // Result slot. |
1228 __ PushObject(cls); // Push class of object to be allocated. | 1230 __ PushObject(cls); // Push class of object to be allocated. |
1229 if (is_cls_parameterized) { | 1231 if (is_cls_parameterized) { |
1230 // Push type arguments. | 1232 // Push type arguments. |
1231 __ Push(R1); | 1233 __ Push(R1); |
1232 } else { | 1234 } else { |
1233 // Push null type arguments. | 1235 // Push null type arguments. |
1234 __ PushObject(Object::null_object()); | 1236 __ PushObject(Object::null_object()); |
1235 } | 1237 } |
1236 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. | 1238 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. |
1237 __ Drop(2); // Pop arguments. | 1239 __ Drop(2); // Pop arguments. |
1238 __ Pop(R0); // Pop result (newly allocated object). | 1240 __ Pop(R0); // Pop result (newly allocated object). |
1239 // R0: new object | 1241 // R0: new object |
1240 // Restore the frame pointer. | 1242 // Restore the frame pointer. |
1241 __ LeaveStubFrame(); | 1243 __ LeaveStubFrame(); |
1242 __ ret(); | 1244 __ ret(); |
1243 } | 1245 } |
1244 | 1246 |
1245 | 1247 |
1246 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function | 1248 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function |
1247 // from the entry code of a dart function after an error in passed argument | 1249 // from the entry code of a dart function after an error in passed argument |
(...skipping 29 matching lines...) Expand all Loading... |
1277 | 1279 |
1278 // R6: function object. | 1280 // R6: function object. |
1279 // R5: inline cache data object. | 1281 // R5: inline cache data object. |
1280 // Cannot use function object from ICData as it may be the inlined | 1282 // Cannot use function object from ICData as it may be the inlined |
1281 // function and not the top-scope function. | 1283 // function and not the top-scope function. |
1282 void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) { | 1284 void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) { |
1283 Register ic_reg = R5; | 1285 Register ic_reg = R5; |
1284 Register func_reg = R6; | 1286 Register func_reg = R6; |
1285 if (FLAG_trace_optimized_ic_calls) { | 1287 if (FLAG_trace_optimized_ic_calls) { |
1286 __ EnterStubFrame(); | 1288 __ EnterStubFrame(); |
1287 __ Push(R6); // Preserve. | 1289 __ Push(R6); // Preserve. |
1288 __ Push(R5); // Preserve. | 1290 __ Push(R5); // Preserve. |
1289 __ Push(ic_reg); // Argument. | 1291 __ Push(ic_reg); // Argument. |
1290 __ Push(func_reg); // Argument. | 1292 __ Push(func_reg); // Argument. |
1291 __ CallRuntime(kTraceICCallRuntimeEntry, 2); | 1293 __ CallRuntime(kTraceICCallRuntimeEntry, 2); |
1292 __ Drop(2); // Discard argument; | 1294 __ Drop(2); // Discard argument; |
1293 __ Pop(R5); // Restore. | 1295 __ Pop(R5); // Restore. |
1294 __ Pop(R6); // Restore. | 1296 __ Pop(R6); // Restore. |
1295 __ LeaveStubFrame(); | 1297 __ LeaveStubFrame(); |
1296 } | 1298 } |
1297 __ LoadFieldFromOffset( | 1299 __ LoadFieldFromOffset(R7, func_reg, Function::usage_counter_offset(), kWord); |
1298 R7, func_reg, Function::usage_counter_offset(), kWord); | |
1299 __ add(R7, R7, Operand(1)); | 1300 __ add(R7, R7, Operand(1)); |
1300 __ StoreFieldToOffset( | 1301 __ StoreFieldToOffset(R7, func_reg, Function::usage_counter_offset(), kWord); |
1301 R7, func_reg, Function::usage_counter_offset(), kWord); | |
1302 } | 1302 } |
1303 | 1303 |
1304 | 1304 |
1305 // Loads function into 'temp_reg'. | 1305 // Loads function into 'temp_reg'. |
1306 void StubCode::GenerateUsageCounterIncrement(Assembler* assembler, | 1306 void StubCode::GenerateUsageCounterIncrement(Assembler* assembler, |
1307 Register temp_reg) { | 1307 Register temp_reg) { |
1308 if (FLAG_optimization_counter_threshold >= 0) { | 1308 if (FLAG_optimization_counter_threshold >= 0) { |
1309 Register ic_reg = R5; | 1309 Register ic_reg = R5; |
1310 Register func_reg = temp_reg; | 1310 Register func_reg = temp_reg; |
1311 ASSERT(temp_reg == R6); | 1311 ASSERT(temp_reg == R6); |
1312 __ Comment("Increment function counter"); | 1312 __ Comment("Increment function counter"); |
1313 __ LoadFieldFromOffset(func_reg, ic_reg, ICData::owner_offset()); | 1313 __ LoadFieldFromOffset(func_reg, ic_reg, ICData::owner_offset()); |
1314 __ LoadFieldFromOffset( | 1314 __ LoadFieldFromOffset(R7, func_reg, Function::usage_counter_offset(), |
1315 R7, func_reg, Function::usage_counter_offset(), kWord); | 1315 kWord); |
1316 __ AddImmediate(R7, R7, 1); | 1316 __ AddImmediate(R7, R7, 1); |
1317 __ StoreFieldToOffset( | 1317 __ StoreFieldToOffset(R7, func_reg, Function::usage_counter_offset(), |
1318 R7, func_reg, Function::usage_counter_offset(), kWord); | 1318 kWord); |
1319 } | 1319 } |
1320 } | 1320 } |
1321 | 1321 |
1322 | 1322 |
1323 // Note: R5 must be preserved. | 1323 // Note: R5 must be preserved. |
1324 // Attempt a quick Smi operation for known operations ('kind'). The ICData | 1324 // Attempt a quick Smi operation for known operations ('kind'). The ICData |
1325 // must have been primed with a Smi/Smi check that will be used for counting | 1325 // must have been primed with a Smi/Smi check that will be used for counting |
1326 // the invocations. | 1326 // the invocations. |
1327 static void EmitFastSmiOp(Assembler* assembler, | 1327 static void EmitFastSmiOp(Assembler* assembler, |
1328 Token::Kind kind, | 1328 Token::Kind kind, |
1329 intptr_t num_args, | 1329 intptr_t num_args, |
1330 Label* not_smi_or_overflow) { | 1330 Label* not_smi_or_overflow) { |
1331 __ Comment("Fast Smi op"); | 1331 __ Comment("Fast Smi op"); |
1332 __ ldr(R0, Address(SP, + 0 * kWordSize)); // Right. | 1332 __ ldr(R0, Address(SP, +0 * kWordSize)); // Right. |
1333 __ ldr(R1, Address(SP, + 1 * kWordSize)); // Left. | 1333 __ ldr(R1, Address(SP, +1 * kWordSize)); // Left. |
1334 __ orr(TMP, R0, Operand(R1)); | 1334 __ orr(TMP, R0, Operand(R1)); |
1335 __ tsti(TMP, Immediate(kSmiTagMask)); | 1335 __ tsti(TMP, Immediate(kSmiTagMask)); |
1336 __ b(not_smi_or_overflow, NE); | 1336 __ b(not_smi_or_overflow, NE); |
1337 switch (kind) { | 1337 switch (kind) { |
1338 case Token::kADD: { | 1338 case Token::kADD: { |
1339 __ adds(R0, R1, Operand(R0)); // Adds. | 1339 __ adds(R0, R1, Operand(R0)); // Adds. |
1340 __ b(not_smi_or_overflow, VS); // Branch if overflow. | 1340 __ b(not_smi_or_overflow, VS); // Branch if overflow. |
1341 break; | 1341 break; |
1342 } | 1342 } |
1343 case Token::kSUB: { | 1343 case Token::kSUB: { |
1344 __ subs(R0, R1, Operand(R0)); // Subtract. | 1344 __ subs(R0, R1, Operand(R0)); // Subtract. |
1345 __ b(not_smi_or_overflow, VS); // Branch if overflow. | 1345 __ b(not_smi_or_overflow, VS); // Branch if overflow. |
1346 break; | 1346 break; |
1347 } | 1347 } |
1348 case Token::kEQ: { | 1348 case Token::kEQ: { |
1349 __ CompareRegisters(R0, R1); | 1349 __ CompareRegisters(R0, R1); |
1350 __ LoadObject(R0, Bool::True()); | 1350 __ LoadObject(R0, Bool::True()); |
1351 __ LoadObject(R1, Bool::False()); | 1351 __ LoadObject(R1, Bool::False()); |
1352 __ csel(R0, R1, R0, NE); | 1352 __ csel(R0, R1, R0, NE); |
1353 break; | 1353 break; |
1354 } | 1354 } |
1355 default: UNIMPLEMENTED(); | 1355 default: |
| 1356 UNIMPLEMENTED(); |
1356 } | 1357 } |
1357 | 1358 |
1358 // R5: IC data object (preserved). | 1359 // R5: IC data object (preserved). |
1359 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset()); | 1360 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset()); |
1360 // R6: ic_data_array with check entries: classes and target functions. | 1361 // R6: ic_data_array with check entries: classes and target functions. |
1361 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag); | 1362 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag); |
1362 // R6: points directly to the first ic data array element. | 1363 // R6: points directly to the first ic data array element. |
1363 #if defined(DEBUG) | 1364 #if defined(DEBUG) |
1364 // Check that first entry is for Smi/Smi. | 1365 // Check that first entry is for Smi/Smi. |
1365 Label error, ok; | 1366 Label error, ok; |
1366 const intptr_t imm_smi_cid = reinterpret_cast<intptr_t>(Smi::New(kSmiCid)); | 1367 const intptr_t imm_smi_cid = reinterpret_cast<intptr_t>(Smi::New(kSmiCid)); |
1367 __ ldr(R1, Address(R6, 0)); | 1368 __ ldr(R1, Address(R6, 0)); |
1368 __ CompareImmediate(R1, imm_smi_cid); | 1369 __ CompareImmediate(R1, imm_smi_cid); |
1369 __ b(&error, NE); | 1370 __ b(&error, NE); |
1370 __ ldr(R1, Address(R6, kWordSize)); | 1371 __ ldr(R1, Address(R6, kWordSize)); |
1371 __ CompareImmediate(R1, imm_smi_cid); | 1372 __ CompareImmediate(R1, imm_smi_cid); |
1372 __ b(&ok, EQ); | 1373 __ b(&ok, EQ); |
(...skipping 26 matching lines...) Expand all Loading... |
1399 // - Match found -> jump to target. | 1400 // - Match found -> jump to target. |
1400 // - Match not found -> jump to IC miss. | 1401 // - Match not found -> jump to IC miss. |
1401 void StubCode::GenerateNArgsCheckInlineCacheStub( | 1402 void StubCode::GenerateNArgsCheckInlineCacheStub( |
1402 Assembler* assembler, | 1403 Assembler* assembler, |
1403 intptr_t num_args, | 1404 intptr_t num_args, |
1404 const RuntimeEntry& handle_ic_miss, | 1405 const RuntimeEntry& handle_ic_miss, |
1405 Token::Kind kind, | 1406 Token::Kind kind, |
1406 bool optimized) { | 1407 bool optimized) { |
1407 ASSERT(num_args > 0); | 1408 ASSERT(num_args > 0); |
1408 #if defined(DEBUG) | 1409 #if defined(DEBUG) |
1409 { Label ok; | 1410 { |
| 1411 Label ok; |
1410 // Check that the IC data array has NumArgsTested() == num_args. | 1412 // Check that the IC data array has NumArgsTested() == num_args. |
1411 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. | 1413 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. |
1412 __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag, | 1414 __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag, |
1413 kUnsignedWord); | 1415 kUnsignedWord); |
1414 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. | 1416 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. |
1415 __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask())); | 1417 __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask())); |
1416 __ CompareImmediate(R6, num_args); | 1418 __ CompareImmediate(R6, num_args); |
1417 __ b(&ok, EQ); | 1419 __ b(&ok, EQ); |
1418 __ Stop("Incorrect stub for IC data"); | 1420 __ Stop("Incorrect stub for IC data"); |
1419 __ Bind(&ok); | 1421 __ Bind(&ok); |
1420 } | 1422 } |
1421 #endif // DEBUG | 1423 #endif // DEBUG |
1422 | 1424 |
1423 Label stepping, done_stepping; | 1425 Label stepping, done_stepping; |
1424 if (FLAG_support_debugger && !optimized) { | 1426 if (FLAG_support_debugger && !optimized) { |
1425 __ Comment("Check single stepping"); | 1427 __ Comment("Check single stepping"); |
1426 __ LoadIsolate(R6); | 1428 __ LoadIsolate(R6); |
1427 __ LoadFromOffset( | 1429 __ LoadFromOffset(R6, R6, Isolate::single_step_offset(), kUnsignedByte); |
1428 R6, R6, Isolate::single_step_offset(), kUnsignedByte); | |
1429 __ CompareRegisters(R6, ZR); | 1430 __ CompareRegisters(R6, ZR); |
1430 __ b(&stepping, NE); | 1431 __ b(&stepping, NE); |
1431 __ Bind(&done_stepping); | 1432 __ Bind(&done_stepping); |
1432 } | 1433 } |
1433 | 1434 |
1434 Label not_smi_or_overflow; | 1435 Label not_smi_or_overflow; |
1435 if (kind != Token::kILLEGAL) { | 1436 if (kind != Token::kILLEGAL) { |
1436 EmitFastSmiOp(assembler, | 1437 EmitFastSmiOp(assembler, kind, num_args, ¬_smi_or_overflow); |
1437 kind, | |
1438 num_args, | |
1439 ¬_smi_or_overflow); | |
1440 } | 1438 } |
1441 __ Bind(¬_smi_or_overflow); | 1439 __ Bind(¬_smi_or_overflow); |
1442 | 1440 |
1443 __ Comment("Extract ICData initial values and receiver cid"); | 1441 __ Comment("Extract ICData initial values and receiver cid"); |
1444 // Load arguments descriptor into R4. | 1442 // Load arguments descriptor into R4. |
1445 __ LoadFieldFromOffset(R4, R5, ICData::arguments_descriptor_offset()); | 1443 __ LoadFieldFromOffset(R4, R5, ICData::arguments_descriptor_offset()); |
1446 // Loop that checks if there is an IC data match. | 1444 // Loop that checks if there is an IC data match. |
1447 Label loop, update, test, found; | 1445 Label loop, update, test, found; |
1448 // R5: IC data object (preserved). | 1446 // R5: IC data object (preserved). |
1449 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset()); | 1447 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset()); |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1489 } | 1487 } |
1490 __ Bind(&update); | 1488 __ Bind(&update); |
1491 // Reload receiver class ID. It has not been destroyed when num_args == 1. | 1489 // Reload receiver class ID. It has not been destroyed when num_args == 1. |
1492 if (num_args > 1) { | 1490 if (num_args > 1) { |
1493 __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled)); | 1491 __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled)); |
1494 __ LoadTaggedClassIdMayBeSmi(R0, R0); | 1492 __ LoadTaggedClassIdMayBeSmi(R0, R0); |
1495 } | 1493 } |
1496 | 1494 |
1497 const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize; | 1495 const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize; |
1498 __ AddImmediate(R6, R6, entry_size); // Next entry. | 1496 __ AddImmediate(R6, R6, entry_size); // Next entry. |
1499 __ ldr(R1, Address(R6)); // Next class ID. | 1497 __ ldr(R1, Address(R6)); // Next class ID. |
1500 | 1498 |
1501 __ Bind(&test); | 1499 __ Bind(&test); |
1502 __ CompareImmediate(R1, Smi::RawValue(kIllegalCid)); // Done? | 1500 __ CompareImmediate(R1, Smi::RawValue(kIllegalCid)); // Done? |
1503 __ b(&loop, NE); | 1501 __ b(&loop, NE); |
1504 | 1502 |
1505 __ Comment("IC miss"); | 1503 __ Comment("IC miss"); |
1506 // Compute address of arguments. | 1504 // Compute address of arguments. |
1507 // R7: argument_count - 1 (untagged). | 1505 // R7: argument_count - 1 (untagged). |
1508 // R7 <- SP + (R7 << 3) | 1506 // R7 <- SP + (R7 << 3) |
1509 __ add(R7, SP, Operand(R7, UXTX, 3)); // R7 is Untagged. | 1507 __ add(R7, SP, Operand(R7, UXTX, 3)); // R7 is Untagged. |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1582 // LR: return address. | 1580 // LR: return address. |
1583 // R5: inline cache data object. | 1581 // R5: inline cache data object. |
1584 // Inline cache data object structure: | 1582 // Inline cache data object structure: |
1585 // 0: function-name | 1583 // 0: function-name |
1586 // 1: N, number of arguments checked. | 1584 // 1: N, number of arguments checked. |
1587 // 2 .. (length - 1): group of checks, each check containing: | 1585 // 2 .. (length - 1): group of checks, each check containing: |
1588 // - N classes. | 1586 // - N classes. |
1589 // - 1 target function. | 1587 // - 1 target function. |
1590 void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) { | 1588 void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) { |
1591 GenerateUsageCounterIncrement(assembler, R6); | 1589 GenerateUsageCounterIncrement(assembler, R6); |
1592 GenerateNArgsCheckInlineCacheStub(assembler, 1, | 1590 GenerateNArgsCheckInlineCacheStub( |
1593 kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); | 1591 assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); |
1594 } | 1592 } |
1595 | 1593 |
1596 | 1594 |
1597 void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) { | 1595 void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) { |
1598 GenerateUsageCounterIncrement(assembler, R6); | 1596 GenerateUsageCounterIncrement(assembler, R6); |
1599 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1597 GenerateNArgsCheckInlineCacheStub(assembler, 2, |
1600 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL); | 1598 kInlineCacheMissHandlerTwoArgsRuntimeEntry, |
| 1599 Token::kILLEGAL); |
1601 } | 1600 } |
1602 | 1601 |
1603 | 1602 |
1604 void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) { | 1603 void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) { |
1605 GenerateUsageCounterIncrement(assembler, R6); | 1604 GenerateUsageCounterIncrement(assembler, R6); |
1606 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1605 GenerateNArgsCheckInlineCacheStub( |
1607 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD); | 1606 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD); |
1608 } | 1607 } |
1609 | 1608 |
1610 | 1609 |
1611 void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) { | 1610 void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) { |
1612 GenerateUsageCounterIncrement(assembler, R6); | 1611 GenerateUsageCounterIncrement(assembler, R6); |
1613 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1612 GenerateNArgsCheckInlineCacheStub( |
1614 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB); | 1613 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB); |
1615 } | 1614 } |
1616 | 1615 |
1617 | 1616 |
1618 void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) { | 1617 void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) { |
1619 GenerateUsageCounterIncrement(assembler, R6); | 1618 GenerateUsageCounterIncrement(assembler, R6); |
1620 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1619 GenerateNArgsCheckInlineCacheStub( |
1621 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ); | 1620 assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ); |
1622 } | 1621 } |
1623 | 1622 |
1624 | 1623 |
1625 void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub( | 1624 void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub( |
1626 Assembler* assembler) { | 1625 Assembler* assembler) { |
1627 GenerateOptimizedUsageCounterIncrement(assembler); | 1626 GenerateOptimizedUsageCounterIncrement(assembler); |
1628 GenerateNArgsCheckInlineCacheStub(assembler, 1, | 1627 GenerateNArgsCheckInlineCacheStub(assembler, 1, |
1629 kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, | 1628 kInlineCacheMissHandlerOneArgRuntimeEntry, |
1630 true /* optimized */); | 1629 Token::kILLEGAL, true /* optimized */); |
1631 } | 1630 } |
1632 | 1631 |
1633 | 1632 |
1634 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub( | 1633 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub( |
1635 Assembler* assembler) { | 1634 Assembler* assembler) { |
1636 GenerateOptimizedUsageCounterIncrement(assembler); | 1635 GenerateOptimizedUsageCounterIncrement(assembler); |
1637 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1636 GenerateNArgsCheckInlineCacheStub(assembler, 2, |
1638 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL, | 1637 kInlineCacheMissHandlerTwoArgsRuntimeEntry, |
1639 true /* optimized */); | 1638 Token::kILLEGAL, true /* optimized */); |
1640 } | 1639 } |
1641 | 1640 |
1642 | 1641 |
1643 void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) { | 1642 void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) { |
1644 GenerateUsageCounterIncrement(assembler, R6); | 1643 GenerateUsageCounterIncrement(assembler, R6); |
1645 #if defined(DEBUG) | 1644 #if defined(DEBUG) |
1646 { Label ok; | 1645 { |
| 1646 Label ok; |
1647 // Check that the IC data array has NumArgsTested() == 0. | 1647 // Check that the IC data array has NumArgsTested() == 0. |
1648 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. | 1648 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. |
1649 __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag, | 1649 __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag, |
1650 kUnsignedWord); | 1650 kUnsignedWord); |
1651 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. | 1651 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. |
1652 __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask())); | 1652 __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask())); |
1653 __ CompareImmediate(R6, 0); | 1653 __ CompareImmediate(R6, 0); |
1654 __ b(&ok, EQ); | 1654 __ b(&ok, EQ); |
1655 __ Stop("Incorrect IC data for unoptimized static call"); | 1655 __ Stop("Incorrect IC data for unoptimized static call"); |
1656 __ Bind(&ok); | 1656 __ Bind(&ok); |
1657 } | 1657 } |
1658 #endif // DEBUG | 1658 #endif // DEBUG |
1659 | 1659 |
1660 // Check single stepping. | 1660 // Check single stepping. |
1661 Label stepping, done_stepping; | 1661 Label stepping, done_stepping; |
1662 if (FLAG_support_debugger) { | 1662 if (FLAG_support_debugger) { |
1663 __ LoadIsolate(R6); | 1663 __ LoadIsolate(R6); |
1664 __ LoadFromOffset( | 1664 __ LoadFromOffset(R6, R6, Isolate::single_step_offset(), kUnsignedByte); |
1665 R6, R6, Isolate::single_step_offset(), kUnsignedByte); | |
1666 __ CompareImmediate(R6, 0); | 1665 __ CompareImmediate(R6, 0); |
1667 __ b(&stepping, NE); | 1666 __ b(&stepping, NE); |
1668 __ Bind(&done_stepping); | 1667 __ Bind(&done_stepping); |
1669 } | 1668 } |
1670 | 1669 |
1671 // R5: IC data object (preserved). | 1670 // R5: IC data object (preserved). |
1672 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset()); | 1671 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset()); |
1673 // R6: ic_data_array with entries: target functions and count. | 1672 // R6: ic_data_array with entries: target functions and count. |
1674 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag); | 1673 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag); |
1675 // R6: points directly to the first ic data array element. | 1674 // R6: points directly to the first ic data array element. |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1709 | 1708 |
1710 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { | 1709 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { |
1711 GenerateUsageCounterIncrement(assembler, R6); | 1710 GenerateUsageCounterIncrement(assembler, R6); |
1712 GenerateNArgsCheckInlineCacheStub( | 1711 GenerateNArgsCheckInlineCacheStub( |
1713 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); | 1712 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL); |
1714 } | 1713 } |
1715 | 1714 |
1716 | 1715 |
1717 void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) { | 1716 void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) { |
1718 GenerateUsageCounterIncrement(assembler, R6); | 1717 GenerateUsageCounterIncrement(assembler, R6); |
1719 GenerateNArgsCheckInlineCacheStub(assembler, 2, | 1718 GenerateNArgsCheckInlineCacheStub( |
1720 kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL); | 1719 assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL); |
1721 } | 1720 } |
1722 | 1721 |
1723 | 1722 |
1724 // Stub for compiling a function and jumping to the compiled code. | 1723 // Stub for compiling a function and jumping to the compiled code. |
1725 // R5: IC-Data (for methods). | 1724 // R5: IC-Data (for methods). |
1726 // R4: Arguments descriptor. | 1725 // R4: Arguments descriptor. |
1727 // R0: Function. | 1726 // R0: Function. |
1728 void StubCode::GenerateLazyCompileStub(Assembler* assembler) { | 1727 void StubCode::GenerateLazyCompileStub(Assembler* assembler) { |
1729 // Preserve arg desc. and IC data object. | 1728 // Preserve arg desc. and IC data object. |
1730 __ EnterStubFrame(); | 1729 __ EnterStubFrame(); |
(...skipping 30 matching lines...) Expand all Loading... |
1761 __ EnterStubFrame(); | 1760 __ EnterStubFrame(); |
1762 __ Push(ZR); // Space for result. | 1761 __ Push(ZR); // Space for result. |
1763 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); | 1762 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); |
1764 __ Pop(CODE_REG); | 1763 __ Pop(CODE_REG); |
1765 __ LeaveStubFrame(); | 1764 __ LeaveStubFrame(); |
1766 __ LoadFieldFromOffset(R0, CODE_REG, Code::entry_point_offset()); | 1765 __ LoadFieldFromOffset(R0, CODE_REG, Code::entry_point_offset()); |
1767 __ br(R0); | 1766 __ br(R0); |
1768 } | 1767 } |
1769 | 1768 |
// Called only from unoptimized code. All relevant registers have been saved.
// Fast path: if the isolate is not single-stepping, returns immediately.
// Slow path: calls the single-step runtime handler, then returns.
void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
  // Check single stepping.
  Label stepping, done_stepping;
  __ LoadIsolate(R1);
  // Load the isolate's single_step byte flag; non-zero means stepping.
  __ LoadFromOffset(R1, R1, Isolate::single_step_offset(), kUnsignedByte);
  __ CompareImmediate(R1, 0);
  __ b(&stepping, NE);
  __ Bind(&done_stepping);

  // Common fast-path exit (also the resume point after the runtime call).
  __ ret();

  __ Bind(&stepping);
  __ EnterStubFrame();
  // Notify the debugger; takes no arguments and returns nothing.
  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
  __ LeaveStubFrame();
  __ b(&done_stepping);
}
1790 | 1787 |
1791 | 1788 |
// Used to check class and type arguments. Arguments passed in registers:
// LR: return address.
// R0: instance (must be preserved).
// R1: instantiator type arguments or NULL.
// R2: cache array.
// Result in R1: null -> not found, otherwise result (true or false).
//
// The cache is a linear array of entries, each kTestEntryLength words long;
// n (1..3) selects how many fields of each entry must match before the
// cached result is taken.
static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
  ASSERT((1 <= n) && (n <= 3));
  if (n > 1) {
    // Get instance type arguments.
    __ LoadClass(R3, R0);
    // Compute instance type arguments into R4.
    Label has_no_type_arguments;
    __ LoadObject(R4, Object::null_object());
    // Field offset (in words) of the type-arguments slot for this class;
    // Class::kNoTypeArguments means the class is not generic.
    __ LoadFieldFromOffset(
        R5, R3, Class::type_arguments_field_offset_in_words_offset(), kWord);
    __ CompareImmediate(R5, Class::kNoTypeArguments);
    __ b(&has_no_type_arguments, EQ);
    // Word offset -> byte offset (<< 3 on 64-bit), then load the slot.
    __ add(R5, R0, Operand(R5, LSL, 3));
    __ LoadFieldFromOffset(R4, R5, 0);
    __ Bind(&has_no_type_arguments);
  }
  __ LoadClassId(R3, R0);
  // R0: instance.
  // R1: instantiator type arguments or NULL.
  // R2: SubtypeTestCache.
  // R3: instance class id.
  // R4: instance type arguments (null if none), used only if n > 1.
  __ LoadFieldFromOffset(R2, R2, SubtypeTestCache::cache_offset());
  __ AddImmediate(R2, R2, Array::data_offset() - kHeapObjectTag);

  Label loop, found, not_found, next_iteration;
  // R2: entry start.
  // R3: instance class id.
  // R4: instance type arguments.
  __ SmiTag(R3);
  // Closures are keyed by their function rather than by class id.
  __ CompareImmediate(R3, Smi::RawValue(kClosureCid));
  __ b(&loop, NE);
  __ LoadFieldFromOffset(R3, R0, Closure::function_offset());
  // R3: instance class id as Smi or function.
  __ Bind(&loop);
  __ LoadFromOffset(R5, R2,
                    kWordSize * SubtypeTestCache::kInstanceClassIdOrFunction);
  // A null key terminates the cache array.
  __ CompareObject(R5, Object::null_object());
  __ b(&not_found, EQ);
  __ CompareRegisters(R5, R3);
  if (n == 1) {
    __ b(&found, EQ);
  } else {
    __ b(&next_iteration, NE);
    __ LoadFromOffset(R5, R2,
                      kWordSize * SubtypeTestCache::kInstanceTypeArguments);
    __ CompareRegisters(R5, R4);
    if (n == 2) {
      __ b(&found, EQ);
    } else {
      __ b(&next_iteration, NE);
      __ LoadFromOffset(
          R5, R2, kWordSize * SubtypeTestCache::kInstantiatorTypeArguments);
      __ CompareRegisters(R5, R1);
      __ b(&found, EQ);
    }
  }
  __ Bind(&next_iteration);
  // Advance to the next cache entry.
  __ AddImmediate(R2, R2, kWordSize * SubtypeTestCache::kTestEntryLength);
  __ b(&loop);
  // Fall through to not found.
  __ Bind(&not_found);
  __ LoadObject(R1, Object::null_object());
  __ ret();

  __ Bind(&found);
  __ LoadFromOffset(R1, R2, kWordSize * SubtypeTestCache::kTestResult);
  __ ret();
}
(...skipping 433 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2301 } | 2297 } |
2302 | 2298 |
2303 | 2299 |
// NOTE(review): emits only a breakpoint instruction — presumably this stub's
// address is used as a frame marker and is never meant to execute; confirm
// against the deoptimizer/stack-walker code that references it.
void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
  __ brk(0);  // Trap immediately if control ever reaches this stub.
}
2307 | 2303 |
2308 } // namespace dart | 2304 } // namespace dart |
2309 | 2305 |
2310 #endif // defined TARGET_ARCH_ARM64 | 2306 #endif // defined TARGET_ARCH_ARM64 |
OLD | NEW |