Chromium Code Reviews

Side by Side Diff: runtime/vm/stub_code_arm.cc

Issue 1419223003: Re-assign registers on ARM so PP and CODE_REG are below R7 (FP on iOS). (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 5 years, 1 month ago
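Note on the renaming throughout this file: iOS reserves R7 as the frame pointer, so moving PP and CODE_REG below R7 pushes the stubs' scratch registers upward. As far as the visible hunks show, the mapping is mechanical:

    // Register re-assignments in this patch (old -> new):
    //   R5 -> R9   (ICData / runtime entrypoint / general scratch)
    //   R6 -> R8   (general scratch)
    // R7 is left free to serve as FP on iOS.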
OLD | NEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" 5 #include "vm/globals.h"
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/code_generator.h" 9 #include "vm/code_generator.h"
10 #include "vm/cpu.h" 10 #include "vm/cpu.h"
(...skipping 17 matching lines...)
28 DECLARE_FLAG(bool, trace_optimized_ic_calls); 28 DECLARE_FLAG(bool, trace_optimized_ic_calls);
29 DECLARE_FLAG(int, optimization_counter_threshold); 29 DECLARE_FLAG(int, optimization_counter_threshold);
30 DECLARE_FLAG(bool, support_debugger); 30 DECLARE_FLAG(bool, support_debugger);
31 DECLARE_FLAG(bool, lazy_dispatchers); 31 DECLARE_FLAG(bool, lazy_dispatchers);
32 32
33 // Input parameters: 33 // Input parameters:
34 // LR : return address. 34 // LR : return address.
35 // SP : address of last argument in argument array. 35 // SP : address of last argument in argument array.
36 // SP + 4*R4 - 4 : address of first argument in argument array. 36 // SP + 4*R4 - 4 : address of first argument in argument array.
37 // SP + 4*R4 : address of return value. 37 // SP + 4*R4 : address of return value.
38 // R5 : address of the runtime function to call. 38 // R9 : address of the runtime function to call.
39 // R4 : number of arguments to the call. 39 // R4 : number of arguments to the call.
40 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { 40 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
41 const intptr_t thread_offset = NativeArguments::thread_offset(); 41 const intptr_t thread_offset = NativeArguments::thread_offset();
42 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 42 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
43 const intptr_t argv_offset = NativeArguments::argv_offset(); 43 const intptr_t argv_offset = NativeArguments::argv_offset();
44 const intptr_t retval_offset = NativeArguments::retval_offset(); 44 const intptr_t retval_offset = NativeArguments::retval_offset();
45 45
46 __ EnterStubFrame(); 46 __ EnterStubFrame();
47 47
48 // Save exit frame information to enable stack walking as we are about 48 // Save exit frame information to enable stack walking as we are about
49 // to transition to Dart VM C++ code. 49 // to transition to Dart VM C++ code.
50 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); 50 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
51 51
52 #if defined(DEBUG) 52 #if defined(DEBUG)
53 { Label ok; 53 { Label ok;
54 // Check that we are always entering from Dart code. 54 // Check that we are always entering from Dart code.
55 __ LoadFromOffset(kWord, R6, THR, Thread::vm_tag_offset()); 55 __ LoadFromOffset(kWord, R8, THR, Thread::vm_tag_offset());
56 __ CompareImmediate(R6, VMTag::kDartTagId); 56 __ CompareImmediate(R8, VMTag::kDartTagId);
57 __ b(&ok, EQ); 57 __ b(&ok, EQ);
58 __ Stop("Not coming from Dart code."); 58 __ Stop("Not coming from Dart code.");
59 __ Bind(&ok); 59 __ Bind(&ok);
60 } 60 }
61 #endif 61 #endif
62 62
63 // Mark that the thread is executing VM code. 63 // Mark that the thread is executing VM code.
64 __ StoreToOffset(kWord, R5, THR, Thread::vm_tag_offset()); 64 __ StoreToOffset(kWord, R9, THR, Thread::vm_tag_offset());
65 65
66 // Reserve space for arguments and align frame before entering C++ world. 66 // Reserve space for arguments and align frame before entering C++ world.
67 // NativeArguments are passed in registers. 67 // NativeArguments are passed in registers.
68 ASSERT(sizeof(NativeArguments) == 4 * kWordSize); 68 ASSERT(sizeof(NativeArguments) == 4 * kWordSize);
69 __ ReserveAlignedFrameSpace(0); 69 __ ReserveAlignedFrameSpace(0);
70 70
71 // Pass NativeArguments structure by value and call runtime. 71 // Pass NativeArguments structure by value and call runtime.
72 // Registers R0, R1, R2, and R3 are used. 72 // Registers R0, R1, R2, and R3 are used.
73 73
74 ASSERT(thread_offset == 0 * kWordSize); 74 ASSERT(thread_offset == 0 * kWordSize);
75 // Set thread in NativeArgs. 75 // Set thread in NativeArgs.
76 __ mov(R0, Operand(THR)); 76 __ mov(R0, Operand(THR));
77 77
78 // There are no runtime calls to closures, so we do not need to set the tag 78 // There are no runtime calls to closures, so we do not need to set the tag
79 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 79 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
80 ASSERT(argc_tag_offset == 1 * kWordSize); 80 ASSERT(argc_tag_offset == 1 * kWordSize);
81 __ mov(R1, Operand(R4)); // Set argc in NativeArguments. 81 __ mov(R1, Operand(R4)); // Set argc in NativeArguments.
82 82
83 ASSERT(argv_offset == 2 * kWordSize); 83 ASSERT(argv_offset == 2 * kWordSize);
84 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv. 84 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv.
85 // Set argv in NativeArguments. 85 // Set argv in NativeArguments.
86 __ AddImmediate(R2, kParamEndSlotFromFp * kWordSize); 86 __ AddImmediate(R2, kParamEndSlotFromFp * kWordSize);
87 87
88 ASSERT(retval_offset == 3 * kWordSize); 88 ASSERT(retval_offset == 3 * kWordSize);
89 __ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument. 89 __ add(R3, R2, Operand(kWordSize)); // Retval is next to 1st argument.
90 90
91 // Call runtime or redirection via simulator. 91 // Call runtime or redirection via simulator.
92 __ blx(R5); 92 __ blx(R9);
93 93
94 // Mark that the thread is executing Dart code. 94 // Mark that the thread is executing Dart code.
95 __ LoadImmediate(R2, VMTag::kDartTagId); 95 __ LoadImmediate(R2, VMTag::kDartTagId);
96 __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset()); 96 __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
97 97
98 // Reset exit frame information in Isolate structure. 98 // Reset exit frame information in Isolate structure.
99 __ LoadImmediate(R2, 0); 99 __ LoadImmediate(R2, 0);
100 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 100 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
101 101
102 __ LeaveStubFrame(); 102 __ LeaveStubFrame();
(...skipping 16 matching lines...)
119 // Call the runtime leaf function. R0 already contains the parameter. 119 // Call the runtime leaf function. R0 already contains the parameter.
120 __ CallRuntime(kPrintStopMessageRuntimeEntry, 1); 120 __ CallRuntime(kPrintStopMessageRuntimeEntry, 1);
121 __ LeaveCallRuntimeFrame(); 121 __ LeaveCallRuntimeFrame();
122 __ Ret(); 122 __ Ret();
123 } 123 }
124 124
125 125
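A note on the NativeArguments marshalling in the runtime-call stub above: the four ASSERTs pin the struct to exactly four words, which is what lets the stub pass it entirely in R0-R3. A minimal sketch of that layout (field types are assumptions inferred from the surrounding code, not the VM's actual declaration):

    // Hypothetical four-word view of NativeArguments on 32-bit ARM.
    struct NativeArgumentsSketch {
      Thread* thread;     // word 0 -> R0
      intptr_t argc_tag;  // word 1 -> R1 (argument count plus kind bits)
      RawObject** argv;   // word 2 -> R2 (address of first argument)
      RawObject** retval; // word 3 -> R3 (slot next to first argument)
    };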
126 // Input parameters: 126 // Input parameters:
127 // LR : return address. 127 // LR : return address.
128 // SP : address of return value. 128 // SP : address of return value.
129 // R5 : address of the native function to call. 129 // R9 : address of the native function to call.
130 // R2 : address of first argument in argument array. 130 // R2 : address of first argument in argument array.
131 // R1 : argc_tag including number of arguments and function kind. 131 // R1 : argc_tag including number of arguments and function kind.
132 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) { 132 void StubCode::GenerateCallNativeCFunctionStub(Assembler* assembler) {
133 const intptr_t thread_offset = NativeArguments::thread_offset(); 133 const intptr_t thread_offset = NativeArguments::thread_offset();
134 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 134 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
135 const intptr_t argv_offset = NativeArguments::argv_offset(); 135 const intptr_t argv_offset = NativeArguments::argv_offset();
136 const intptr_t retval_offset = NativeArguments::retval_offset(); 136 const intptr_t retval_offset = NativeArguments::retval_offset();
137 137
138 __ EnterStubFrame(); 138 __ EnterStubFrame();
139 139
140 // Save exit frame information to enable stack walking as we are about 140 // Save exit frame information to enable stack walking as we are about
141 // to transition to native code. 141 // to transition to native code.
142 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); 142 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
143 143
144 #if defined(DEBUG) 144 #if defined(DEBUG)
145 { Label ok; 145 { Label ok;
146 // Check that we are always entering from Dart code. 146 // Check that we are always entering from Dart code.
147 __ LoadFromOffset(kWord, R6, THR, Thread::vm_tag_offset()); 147 __ LoadFromOffset(kWord, R8, THR, Thread::vm_tag_offset());
148 __ CompareImmediate(R6, VMTag::kDartTagId); 148 __ CompareImmediate(R8, VMTag::kDartTagId);
149 __ b(&ok, EQ); 149 __ b(&ok, EQ);
150 __ Stop("Not coming from Dart code."); 150 __ Stop("Not coming from Dart code.");
151 __ Bind(&ok); 151 __ Bind(&ok);
152 } 152 }
153 #endif 153 #endif
154 154
155 // Mark that the thread is executing native code. 155 // Mark that the thread is executing native code.
156 __ StoreToOffset(kWord, R5, THR, Thread::vm_tag_offset()); 156 __ StoreToOffset(kWord, R9, THR, Thread::vm_tag_offset());
157 157
158 // Reserve space for the native arguments structure passed on the stack (the 158 // Reserve space for the native arguments structure passed on the stack (the
159 // outgoing pointer parameter to the native arguments structure is passed in 159 // outgoing pointer parameter to the native arguments structure is passed in
160 // R0) and align frame before entering the C++ world. 160 // R0) and align frame before entering the C++ world.
161 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); 161 __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
162 162
163 // Initialize NativeArguments structure and call native function. 163 // Initialize NativeArguments structure and call native function.
164 // Registers R0, R1, R2, and R3 are used. 164 // Registers R0, R1, R2, and R3 are used.
165 165
166 ASSERT(thread_offset == 0 * kWordSize); 166 ASSERT(thread_offset == 0 * kWordSize);
(...skipping 11 matching lines...)
178 ASSERT(retval_offset == 3 * kWordSize); 178 ASSERT(retval_offset == 3 * kWordSize);
179 // Set retval in NativeArgs. 179 // Set retval in NativeArgs.
180 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize)); 180 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
181 181
182 // Passing the structure by value as in runtime calls would require changing 182 // Passing the structure by value as in runtime calls would require changing
183 // Dart API for native functions. 183 // Dart API for native functions.
184 // For now, space is reserved on the stack and we pass a pointer to it. 184 // For now, space is reserved on the stack and we pass a pointer to it.
185 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); 185 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
186 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. 186 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments.
187 187
188 __ mov(R1, Operand(R5)); // Pass the function entrypoint to call. 188 __ mov(R1, Operand(R9)); // Pass the function entrypoint to call.
189 189
190 // Call native function invocation wrapper or redirection via simulator. 190 // Call native function invocation wrapper or redirection via simulator.
191 __ ldr(LR, Address(THR, Thread::native_call_wrapper_entry_point_offset())); 191 __ ldr(LR, Address(THR, Thread::native_call_wrapper_entry_point_offset()));
192 __ blx(LR); 192 __ blx(LR);
193 193
194 // Mark that the thread is executing Dart code. 194 // Mark that the thread is executing Dart code.
195 __ LoadImmediate(R2, VMTag::kDartTagId); 195 __ LoadImmediate(R2, VMTag::kDartTagId);
196 __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset()); 196 __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
197 197
198 // Reset exit frame information in Isolate structure. 198 // Reset exit frame information in Isolate structure.
199 __ LoadImmediate(R2, 0); 199 __ LoadImmediate(R2, 0);
200 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 200 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
201 201
202 __ LeaveStubFrame(); 202 __ LeaveStubFrame();
203 __ Ret(); 203 __ Ret();
204 } 204 }
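The stm(IA, SP, ...) above spills R0-R3 into the reserved slots in ascending address order, rebuilding the NativeArguments struct in memory so that its address can be handed over in R0; the Dart API for natives takes a pointer rather than the by-value form used for runtime calls. A sketch of the handoff, assuming a wrapper signature of (NativeArguments*, entrypoint) — an inference from the register setup, not a documented API:

    // Hypothetical C++ equivalent of the stm + mov + blx sequence.
    void CallThroughWrapper(void (*wrapper)(NativeArguments*, uword),
                            NativeArguments args, uword native_entry) {
      NativeArguments on_stack = args;   // stm(IA, SP, R0..R3)
      wrapper(&on_stack, native_entry);  // R0 = &on_stack, R1 = entrypoint
    }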
205 205
206 206
207 // Input parameters: 207 // Input parameters:
208 // LR : return address. 208 // LR : return address.
209 // SP : address of return value. 209 // SP : address of return value.
210 // R5 : address of the native function to call. 210 // R9 : address of the native function to call.
211 // R2 : address of first argument in argument array. 211 // R2 : address of first argument in argument array.
212 // R1 : argc_tag including number of arguments and function kind. 212 // R1 : argc_tag including number of arguments and function kind.
213 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) { 213 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
214 const intptr_t thread_offset = NativeArguments::thread_offset(); 214 const intptr_t thread_offset = NativeArguments::thread_offset();
215 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 215 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
216 const intptr_t argv_offset = NativeArguments::argv_offset(); 216 const intptr_t argv_offset = NativeArguments::argv_offset();
217 const intptr_t retval_offset = NativeArguments::retval_offset(); 217 const intptr_t retval_offset = NativeArguments::retval_offset();
218 218
219 __ EnterStubFrame(); 219 __ EnterStubFrame();
220 220
221 // Save exit frame information to enable stack walking as we are about 221 // Save exit frame information to enable stack walking as we are about
222 // to transition to native code. 222 // to transition to native code.
223 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset()); 223 __ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
224 224
225 #if defined(DEBUG) 225 #if defined(DEBUG)
226 { Label ok; 226 { Label ok;
227 // Check that we are always entering from Dart code. 227 // Check that we are always entering from Dart code.
228 __ LoadFromOffset(kWord, R6, THR, Thread::vm_tag_offset()); 228 __ LoadFromOffset(kWord, R8, THR, Thread::vm_tag_offset());
229 __ CompareImmediate(R6, VMTag::kDartTagId); 229 __ CompareImmediate(R8, VMTag::kDartTagId);
230 __ b(&ok, EQ); 230 __ b(&ok, EQ);
231 __ Stop("Not coming from Dart code."); 231 __ Stop("Not coming from Dart code.");
232 __ Bind(&ok); 232 __ Bind(&ok);
233 } 233 }
234 #endif 234 #endif
235 235
236 // Mark that the thread is executing native code. 236 // Mark that the thread is executing native code.
237 __ StoreToOffset(kWord, R5, THR, Thread::vm_tag_offset()); 237 __ StoreToOffset(kWord, R9, THR, Thread::vm_tag_offset());
238 238
239 // Reserve space for the native arguments structure passed on the stack (the 239 // Reserve space for the native arguments structure passed on the stack (the
240 // outgoing pointer parameter to the native arguments structure is passed in 240 // outgoing pointer parameter to the native arguments structure is passed in
241 // R0) and align frame before entering the C++ world. 241 // R0) and align frame before entering the C++ world.
242 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); 242 __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
243 243
244 // Initialize NativeArguments structure and call native function. 244 // Initialize NativeArguments structure and call native function.
245 // Registers R0, R1, R2, and R3 are used. 245 // Registers R0, R1, R2, and R3 are used.
246 246
247 ASSERT(thread_offset == 0 * kWordSize); 247 ASSERT(thread_offset == 0 * kWordSize);
(...skipping 12 matching lines...)
260 // Set retval in NativeArgs. 260 // Set retval in NativeArgs.
261 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize)); 261 __ add(R3, FP, Operand(kCallerSpSlotFromFp * kWordSize));
262 262
263 // Passing the structure by value as in runtime calls would require changing 263 // Passing the structure by value as in runtime calls would require changing
264 // Dart API for native functions. 264 // Dart API for native functions.
265 // For now, space is reserved on the stack and we pass a pointer to it. 265 // For now, space is reserved on the stack and we pass a pointer to it.
266 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3)); 266 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
267 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments. 267 __ mov(R0, Operand(SP)); // Pass the pointer to the NativeArguments.
268 268
269 // Call native function or redirection via simulator. 269 // Call native function or redirection via simulator.
270 __ blx(R5); 270 __ blx(R9);
271 271
272 // Mark that the thread is executing Dart code. 272 // Mark that the thread is executing Dart code.
273 __ LoadImmediate(R2, VMTag::kDartTagId); 273 __ LoadImmediate(R2, VMTag::kDartTagId);
274 __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset()); 274 __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
275 275
276 // Reset exit frame information in Isolate structure. 276 // Reset exit frame information in Isolate structure.
277 __ LoadImmediate(R2, 0); 277 __ LoadImmediate(R2, 0);
278 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 278 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
279 279
280 __ LeaveStubFrame(); 280 __ LeaveStubFrame();
(...skipping 249 matching lines...)
530 Label* call_target_function) { 530 Label* call_target_function) {
531 __ Comment("NoSuchMethodDispatch"); 531 __ Comment("NoSuchMethodDispatch");
532 // When lazily generated invocation dispatchers are disabled, the 532 // When lazily generated invocation dispatchers are disabled, the
533 // miss-handler may return null. 533 // miss-handler may return null.
534 __ CompareObject(R0, Object::null_object()); 534 __ CompareObject(R0, Object::null_object());
535 __ b(call_target_function, NE); 535 __ b(call_target_function, NE);
536 __ EnterStubFrame(); 536 __ EnterStubFrame();
537 // Load the receiver. 537 // Load the receiver.
538 __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 538 __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
539 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi. 539 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
540 __ ldr(R6, Address(IP, kParamEndSlotFromFp * kWordSize)); 540 __ ldr(R8, Address(IP, kParamEndSlotFromFp * kWordSize));
541 __ PushObject(Object::null_object()); 541 __ PushObject(Object::null_object());
542 __ Push(R6); 542 __ Push(R8);
543 __ Push(R5); 543 __ Push(R9);
544 __ Push(R4); 544 __ Push(R4);
545 // R2: Smi-tagged arguments array length. 545 // R2: Smi-tagged arguments array length.
546 PushArgumentsArray(assembler); 546 PushArgumentsArray(assembler);
547 const intptr_t kNumArgs = 4; 547 const intptr_t kNumArgs = 4;
548 __ CallRuntime(kInvokeNoSuchMethodDispatcherRuntimeEntry, kNumArgs); 548 __ CallRuntime(kInvokeNoSuchMethodDispatcherRuntimeEntry, kNumArgs);
549 __ Drop(4); 549 __ Drop(4);
550 __ Pop(R0); // Return value. 550 __ Pop(R0); // Return value.
551 __ LeaveStubFrame(); 551 __ LeaveStubFrame();
552 __ Ret(); 552 __ Ret();
553 } 553 }
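The receiver load above leans on Smi tagging: R2 holds the argument count as a Smi, i.e. already shifted left by one, so a further LSL 1 scales it to count * kWordSize in a single operand. A sketch of the address arithmetic under the usual 32-bit constants (kSmiTagShift == 1, kWordSize == 4):

    // Address of the receiver, given FP and the Smi-tagged argument count.
    uword ReceiverAddress(uword fp, intptr_t smi_count) {
      // (count << 1) << 1 == count * kWordSize
      return fp + (smi_count << 1) + kParamEndSlotFromFp * kWordSize;
    }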
554 554
555 555
556 void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) { 556 void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
557 __ EnterStubFrame(); 557 __ EnterStubFrame();
558 558
559 // Load the receiver. 559 // Load the receiver.
560 __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 560 __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
561 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi. 561 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
562 __ ldr(R6, Address(IP, kParamEndSlotFromFp * kWordSize)); 562 __ ldr(R8, Address(IP, kParamEndSlotFromFp * kWordSize));
563 563
564 // Preserve IC data and arguments descriptor. 564 // Preserve IC data and arguments descriptor.
565 __ PushList((1 << R4) | (1 << R5)); 565 __ PushList((1 << R4) | (1 << R9));
566 566
567 // Push space for the return value.
568 // Push the receiver.
569 // Push IC data object.
570 // Push arguments descriptor array.
571 __ LoadObject(IP, Object::null_object()); 567 __ LoadObject(IP, Object::null_object());
572 __ PushList((1 << R4) | (1 << R5) | (1 << R6) | (1 << IP)); 568 __ Push(IP); // result
569 __ Push(R8); // receiver
570 __ Push(R9); // ICData
571 __ Push(R4); // arguments descriptor
573 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3); 572 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3);
574 // Remove arguments. 573 // Remove arguments.
575 __ Drop(3); 574 __ Drop(3);
576 __ Pop(R0); // Get result into R0 (target function). 575 __ Pop(R0); // Get result into R0 (target function).
577 576
578 // Restore IC data and arguments descriptor. 577 // Restore IC data and arguments descriptor.
579 __ PopList((1 << R4) | (1 << R5)); 578 __ PopList((1 << R4) | (1 << R9));
580 579
581 __ RestoreCodePointer(); 580 __ RestoreCodePointer();
582 __ LeaveStubFrame(); 581 __ LeaveStubFrame();
583 582
584 if (!FLAG_lazy_dispatchers) { 583 if (!FLAG_lazy_dispatchers) {
585 Label call_target_function; 584 Label call_target_function;
586 GenerateDispatcherCode(assembler, &call_target_function); 585 GenerateDispatcherCode(assembler, &call_target_function);
587 __ Bind(&call_target_function); 586 __ Bind(&call_target_function);
588 } 587 }
589 588
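A likely reason the single PushList earlier in this stub became four separate pushes: ARM's store-multiple instructions take a register mask and always place the lowest-numbered register at the lowest address, regardless of the order the registers are named. A sketch of the constraint:

    // Old: PushList((1 << R4) | (1 << R5) | (1 << R6) | (1 << IP));
    //   register-number order R4 < R5 < R6 < IP happened to match the
    //   required stack order (descriptor, ICData, receiver, result slot).
    // New: receiver is R8 and ICData is R9, so a single mask would store
    //   them swapped; the pushes must be sequenced explicitly.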
(...skipping 30 matching lines...)
620 const intptr_t max_len = 619 const intptr_t max_len =
621 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements)); 620 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements));
622 __ CompareImmediate(R3, max_len); 621 __ CompareImmediate(R3, max_len);
623 __ b(&slow_case, GT); 622 __ b(&slow_case, GT);
624 623
625 const intptr_t cid = kArrayCid; 624 const intptr_t cid = kArrayCid;
626 __ MaybeTraceAllocation(cid, R4, &slow_case, 625 __ MaybeTraceAllocation(cid, R4, &slow_case,
627 /* inline_isolate = */ false); 626 /* inline_isolate = */ false);
628 627
629 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; 628 const intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1;
630 __ LoadImmediate(R5, fixed_size); 629 __ LoadImmediate(R9, fixed_size);
631 __ add(R5, R5, Operand(R3, LSL, 1)); // R3 is a Smi. 630 __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi.
632 ASSERT(kSmiTagShift == 1); 631 ASSERT(kSmiTagShift == 1);
633 __ bic(R5, R5, Operand(kObjectAlignment - 1)); 632 __ bic(R9, R9, Operand(kObjectAlignment - 1));
634 633
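The three instructions above compute the rounded allocation size in one register: start from the fixed header size plus alignment slack, add length * kWordSize (the Smi-tagged length shifted left once more), then clear the low bits. The same computation in C++, as an illustration:

    // Allocation size for an array of smi_len (Smi-tagged) elements.
    intptr_t ArrayAllocationSize(intptr_t smi_len) {
      intptr_t size = sizeof(RawArray) + kObjectAlignment - 1;
      size += smi_len << 1;                   // len * kWordSize (len is a Smi)
      return size & ~(kObjectAlignment - 1);  // bic clears the low bits
    }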
635 // R5: Allocation size. 634 // R9: Allocation size.
636 Heap::Space space = Heap::SpaceForAllocation(cid); 635 Heap::Space space = Heap::SpaceForAllocation(cid);
637 __ LoadIsolate(R6); 636 __ LoadIsolate(R8);
638 __ ldr(R6, Address(R6, Isolate::heap_offset())); 637 __ ldr(R8, Address(R8, Isolate::heap_offset()));
639 // Potential new object start. 638 // Potential new object start.
640 __ ldr(R0, Address(R6, Heap::TopOffset(space))); 639 __ ldr(R0, Address(R8, Heap::TopOffset(space)));
641 __ adds(R7, R0, Operand(R5)); // Potential next object start. 640 __ adds(R7, R0, Operand(R9)); // Potential next object start.
642 __ b(&slow_case, CS); // Branch if unsigned overflow. 641 __ b(&slow_case, CS); // Branch if unsigned overflow.
643 642
644 // Check if the allocation fits into the remaining space. 643 // Check if the allocation fits into the remaining space.
645 // R0: potential new object start. 644 // R0: potential new object start.
646 // R7: potential next object start. 645 // R7: potential next object start.
647 // R5: allocation size. 646 // R9: allocation size.
648 __ ldr(R3, Address(R6, Heap::EndOffset(space))); 647 __ ldr(R3, Address(R8, Heap::EndOffset(space)));
649 __ cmp(R7, Operand(R3)); 648 __ cmp(R7, Operand(R3));
650 __ b(&slow_case, CS); 649 __ b(&slow_case, CS);
651 650
652 // Successfully allocated the object(s), now update top to point to 651 // Successfully allocated the object(s), now update top to point to
653 // next object start and initialize the object. 652 // next object start and initialize the object.
654 __ LoadAllocationStatsAddress(R3, cid, /* inline_isolate = */ false); 653 __ LoadAllocationStatsAddress(R3, cid, /* inline_isolate = */ false);
655 __ str(R7, Address(R6, Heap::TopOffset(space))); 654 __ str(R7, Address(R8, Heap::TopOffset(space)));
656 __ add(R0, R0, Operand(kHeapObjectTag)); 655 __ add(R0, R0, Operand(kHeapObjectTag));
657 656
658 // Initialize the tags. 657 // Initialize the tags.
659 // R0: new object start as a tagged pointer. 658 // R0: new object start as a tagged pointer.
660 // R3: allocation stats address. 659 // R3: allocation stats address.
661 // R7: new object end address. 660 // R7: new object end address.
662 // R5: allocation size. 661 // R9: allocation size.
663 { 662 {
664 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; 663 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
665 664
666 __ CompareImmediate(R5, RawObject::SizeTag::kMaxSizeTag); 665 __ CompareImmediate(R9, RawObject::SizeTag::kMaxSizeTag);
667 __ mov(R6, Operand(R5, LSL, shift), LS); 666 __ mov(R8, Operand(R9, LSL, shift), LS);
668 __ mov(R6, Operand(0), HI); 667 __ mov(R8, Operand(0), HI);
669 668
670 // Get the class index and insert it into the tags. 669 // Get the class index and insert it into the tags.
671 // R6: size and bit tags. 670 // R8: size and bit tags.
672 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); 671 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
673 __ orr(R6, R6, Operand(TMP)); 672 __ orr(R8, R8, Operand(TMP));
674 __ str(R6, FieldAddress(R0, Array::tags_offset())); // Store tags. 673 __ str(R8, FieldAddress(R0, Array::tags_offset())); // Store tags.
675 } 674 }
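The conditional moves in the scope above compute the size field of the tag word branch-free: the LS move keeps the shifted size, the HI move zeroes it when the object is too large for the size tag. The equivalent host-side computation, for orientation:

    // Tag word for a new object of 'size' bytes with class id 'cid'.
    uword MakeTags(intptr_t size, intptr_t cid) {
      const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
      uword tags = (size <= RawObject::SizeTag::kMaxSizeTag)
                       ? static_cast<uword>(size) << shift  // mov ..., LS
                       : 0;                                 // mov ..., HI
      return tags | RawObject::ClassIdTag::encode(cid);     // orr with TMP
    }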
676 675
677 // R0: new object start as a tagged pointer. 676 // R0: new object start as a tagged pointer.
678 // R7: new object end address. 677 // R7: new object end address.
679 // Store the type argument field. 678 // Store the type argument field.
680 __ InitializeFieldNoBarrier(R0, 679 __ InitializeFieldNoBarrier(R0,
681 FieldAddress(R0, Array::type_arguments_offset()), 680 FieldAddress(R0, Array::type_arguments_offset()),
682 R1); 681 R1);
683 682
684 // Set the length field. 683 // Set the length field.
685 __ InitializeFieldNoBarrier(R0, 684 __ InitializeFieldNoBarrier(R0,
686 FieldAddress(R0, Array::length_offset()), 685 FieldAddress(R0, Array::length_offset()),
687 R2); 686 R2);
688 687
689 // Initialize all array elements to raw_null. 688 // Initialize all array elements to raw_null.
690 // R0: new object start as a tagged pointer. 689 // R0: new object start as a tagged pointer.
691 // R3: allocation stats address. 690 // R3: allocation stats address.
692 // R4, R5: null 691 // R8, R9: null
693 // R6: iterator which initially points to the start of the variable 692 // R4: iterator which initially points to the start of the variable
694 // data area to be initialized. 693 // data area to be initialized.
695 // R7: new object end address. 694 // R7: new object end address.
696 // R5: allocation size. 695 // R9: allocation size.
697 __ IncrementAllocationStatsWithSize(R3, R5, space); 696 __ IncrementAllocationStatsWithSize(R3, R9, space);
698 697
699 __ LoadObject(R4, Object::null_object()); 698 __ LoadObject(R8, Object::null_object());
700 __ mov(R5, Operand(R4)); 699 __ mov(R9, Operand(R8));
701 __ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag); 700 __ AddImmediate(R4, R0, sizeof(RawArray) - kHeapObjectTag);
702 __ InitializeFieldsNoBarrier(R0, R6, R7, R4, R5); 701 __ InitializeFieldsNoBarrier(R0, R4, R7, R8, R9);
703 __ Ret(); // Returns the newly allocated object in R0. 702 __ Ret(); // Returns the newly allocated object in R0.
704 // Unable to allocate the array using the fast inline code, just call 703 // Unable to allocate the array using the fast inline code, just call
705 // into the runtime. 704 // into the runtime.
706 __ Bind(&slow_case); 705 __ Bind(&slow_case);
707 706
708 // Create a stub frame as we are pushing some objects on the stack before 707 // Create a stub frame as we are pushing some objects on the stack before
709 // calling into the runtime. 708 // calling into the runtime.
710 __ EnterStubFrame(); 709 __ EnterStubFrame();
711 __ LoadObject(IP, Object::null_object()); 710 __ LoadObject(IP, Object::null_object());
712 // Set up space on stack for return value. 711 // Set up space on stack for return value.
(...skipping 34 matching lines...)
747 } else { 746 } else {
748 __ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize)); 747 __ sub(SP, SP, Operand(kAbiPreservedFpuRegCount * kFpuRegisterSize));
749 } 748 }
750 749
751 // Set up THR, which caches the current thread in Dart code. 750 // Set up THR, which caches the current thread in Dart code.
752 if (THR != R3) { 751 if (THR != R3) {
753 __ mov(THR, Operand(R3)); 752 __ mov(THR, Operand(R3));
754 } 753 }
755 754
756 // Save the current VMTag on the stack. 755 // Save the current VMTag on the stack.
757 __ LoadFromOffset(kWord, R5, THR, Thread::vm_tag_offset()); 756 __ LoadFromOffset(kWord, R9, THR, Thread::vm_tag_offset());
758 __ Push(R5); 757 __ Push(R9);
759 758
760 // Mark that the thread is executing Dart code. 759 // Mark that the thread is executing Dart code.
761 __ LoadImmediate(R5, VMTag::kDartTagId); 760 __ LoadImmediate(R9, VMTag::kDartTagId);
762 __ StoreToOffset(kWord, R5, THR, Thread::vm_tag_offset()); 761 __ StoreToOffset(kWord, R9, THR, Thread::vm_tag_offset());
763 762
764 // Save top resource and top exit frame info. Use R4-6 as temporary registers. 763 // Save top resource and top exit frame info. Use R9, R4 and R8 as temporary registers.
765 // StackFrameIterator reads the top exit frame info saved in this frame. 764 // StackFrameIterator reads the top exit frame info saved in this frame.
766 __ LoadFromOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); 765 __ LoadFromOffset(kWord, R9, THR, Thread::top_exit_frame_info_offset());
767 __ LoadFromOffset(kWord, R4, THR, Thread::top_resource_offset()); 766 __ LoadFromOffset(kWord, R4, THR, Thread::top_resource_offset());
768 __ LoadImmediate(R6, 0); 767 __ LoadImmediate(R8, 0);
769 __ StoreToOffset(kWord, R6, THR, Thread::top_resource_offset()); 768 __ StoreToOffset(kWord, R8, THR, Thread::top_resource_offset());
770 __ StoreToOffset(kWord, R6, THR, Thread::top_exit_frame_info_offset()); 769 __ StoreToOffset(kWord, R8, THR, Thread::top_exit_frame_info_offset());
771 770
772 // kExitLinkSlotFromEntryFp must be kept in sync with the code below. 771 // kExitLinkSlotFromEntryFp must be kept in sync with the code below.
773 __ Push(R4); 772 __ Push(R4);
774 ASSERT(kExitLinkSlotFromEntryFp == -27); 773 ASSERT(kExitLinkSlotFromEntryFp == -27);
775 __ Push(R5); 774 __ Push(R9);
776 775
777 // Load arguments descriptor array into R4, which is passed to Dart code. 776 // Load arguments descriptor array into R4, which is passed to Dart code.
778 __ ldr(R4, Address(R1, VMHandles::kOffsetOfRawPtrInHandle)); 777 __ ldr(R4, Address(R1, VMHandles::kOffsetOfRawPtrInHandle));
779 778
780 // Load number of arguments into R5. 779 // Load number of arguments into R9.
781 __ ldr(R5, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 780 __ ldr(R9, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
782 __ SmiUntag(R5); 781 __ SmiUntag(R9);
783 782
784 // Compute address of 'arguments array' data area into R2. 783 // Compute address of 'arguments array' data area into R2.
785 __ ldr(R2, Address(R2, VMHandles::kOffsetOfRawPtrInHandle)); 784 __ ldr(R2, Address(R2, VMHandles::kOffsetOfRawPtrInHandle));
786 __ AddImmediate(R2, R2, Array::data_offset() - kHeapObjectTag); 785 __ AddImmediate(R2, R2, Array::data_offset() - kHeapObjectTag);
787 786
788 // Set up arguments for the Dart call. 787 // Set up arguments for the Dart call.
789 Label push_arguments; 788 Label push_arguments;
790 Label done_push_arguments; 789 Label done_push_arguments;
791 __ CompareImmediate(R5, 0); // check if there are arguments. 790 __ CompareImmediate(R9, 0); // check if there are arguments.
792 __ b(&done_push_arguments, EQ); 791 __ b(&done_push_arguments, EQ);
793 __ LoadImmediate(R1, 0); 792 __ LoadImmediate(R1, 0);
794 __ Bind(&push_arguments); 793 __ Bind(&push_arguments);
795 __ ldr(R3, Address(R2)); 794 __ ldr(R3, Address(R2));
796 __ Push(R3); 795 __ Push(R3);
797 __ AddImmediate(R2, kWordSize); 796 __ AddImmediate(R2, kWordSize);
798 __ AddImmediate(R1, 1); 797 __ AddImmediate(R1, 1);
799 __ cmp(R1, Operand(R5)); 798 __ cmp(R1, Operand(R9));
800 __ b(&push_arguments, LT); 799 __ b(&push_arguments, LT);
801 __ Bind(&done_push_arguments); 800 __ Bind(&done_push_arguments);
802 801
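The loop above pushes the arguments onto the Dart stack one word at a time, with R1 as the induction variable and R9 holding the untagged count. Its shape in C++ terms:

    // Sketch of the push_arguments loop.
    for (intptr_t i = 0; i < argc; ++i) {  // cmp R1, R9; b &push_arguments, LT
      Push(args[i]);                       // ldr R3, [R2]; Push(R3); R2 += 4
    }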
803 // Call the Dart code entrypoint. 802 // Call the Dart code entrypoint.
804 __ LoadImmediate(PP, 0); // GC safe value into PP. 803 __ LoadImmediate(PP, 0); // GC safe value into PP.
805 __ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle)); 804 __ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
806 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset())); 805 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
807 __ blx(R0); // R4 is the arguments descriptor array. 806 __ blx(R0); // R4 is the arguments descriptor array.
808 807
809 // Get rid of arguments pushed on the stack. 808 // Get rid of arguments pushed on the stack.
810 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize); 809 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);
811 810
812 // Restore the saved top exit frame info and top resource back into the 811 // Restore the saved top exit frame info and top resource back into the
813 // Isolate structure. Uses R5 as a temporary register for this. 812 // Isolate structure. Uses R9 as a temporary register for this.
814 __ Pop(R5); 813 __ Pop(R9);
815 __ StoreToOffset(kWord, R5, THR, Thread::top_exit_frame_info_offset()); 814 __ StoreToOffset(kWord, R9, THR, Thread::top_exit_frame_info_offset());
816 __ Pop(R5); 815 __ Pop(R9);
817 __ StoreToOffset(kWord, R5, THR, Thread::top_resource_offset()); 816 __ StoreToOffset(kWord, R9, THR, Thread::top_resource_offset());
818 817
819 // Restore the current VMTag from the stack. 818 // Restore the current VMTag from the stack.
820 __ Pop(R4); 819 __ Pop(R4);
821 __ StoreToOffset(kWord, R4, THR, Thread::vm_tag_offset()); 820 __ StoreToOffset(kWord, R4, THR, Thread::vm_tag_offset());
822 821
823 // Restore C++ ABI callee-saved registers. 822 // Restore C++ ABI callee-saved registers.
824 if (TargetCPUFeatures::vfp_supported()) { 823 if (TargetCPUFeatures::vfp_supported()) {
825 // Restore FPU registers. 2 D registers per Q register. 824 // Restore FPU registers. 2 D registers per Q register.
826 __ vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount); 825 __ vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
827 } else { 826 } else {
(...skipping 18 matching lines...)
846 if (FLAG_inline_alloc) { 845 if (FLAG_inline_alloc) {
847 Label slow_case; 846 Label slow_case;
848 // First compute the rounded instance size. 847 // First compute the rounded instance size.
849 // R1: number of context variables. 848 // R1: number of context variables.
850 intptr_t fixed_size = sizeof(RawContext) + kObjectAlignment - 1; 849 intptr_t fixed_size = sizeof(RawContext) + kObjectAlignment - 1;
851 __ LoadImmediate(R2, fixed_size); 850 __ LoadImmediate(R2, fixed_size);
852 __ add(R2, R2, Operand(R1, LSL, 2)); 851 __ add(R2, R2, Operand(R1, LSL, 2));
853 ASSERT(kSmiTagShift == 1); 852 ASSERT(kSmiTagShift == 1);
854 __ bic(R2, R2, Operand(kObjectAlignment - 1)); 853 __ bic(R2, R2, Operand(kObjectAlignment - 1));
855 854
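Same rounding idiom as in the array stub, except R1 here is an untagged variable count, so LSL 2 scales by kWordSize directly. As a sketch:

    // Rounded Context allocation size for num_vars context variables.
    intptr_t ContextAllocationSize(intptr_t num_vars) {
      intptr_t size = sizeof(RawContext) + kObjectAlignment - 1;
      size += num_vars << 2;                  // num_vars * kWordSize
      return size & ~(kObjectAlignment - 1);
    }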
856 __ MaybeTraceAllocation(kContextCid, R4, &slow_case, 855 __ MaybeTraceAllocation(kContextCid, R8, &slow_case,
857 /* inline_isolate = */ false); 856 /* inline_isolate = */ false);
858 // Now allocate the object. 857 // Now allocate the object.
859 // R1: number of context variables. 858 // R1: number of context variables.
860 // R2: object size. 859 // R2: object size.
861 const intptr_t cid = kContextCid; 860 const intptr_t cid = kContextCid;
862 Heap::Space space = Heap::SpaceForAllocation(cid); 861 Heap::Space space = Heap::SpaceForAllocation(cid);
863 __ LoadIsolate(R5); 862 __ LoadIsolate(R9);
864 __ ldr(R5, Address(R5, Isolate::heap_offset())); 863 __ ldr(R9, Address(R9, Isolate::heap_offset()));
865 __ ldr(R0, Address(R5, Heap::TopOffset(space))); 864 __ ldr(R0, Address(R9, Heap::TopOffset(space)));
866 __ add(R3, R2, Operand(R0)); 865 __ add(R3, R2, Operand(R0));
867 // Check if the allocation fits into the remaining space. 866 // Check if the allocation fits into the remaining space.
868 // R0: potential new object. 867 // R0: potential new object.
869 // R1: number of context variables. 868 // R1: number of context variables.
870 // R2: object size. 869 // R2: object size.
871 // R3: potential next object start. 870 // R3: potential next object start.
872 // R5: heap. 871 // R9: heap.
873 __ ldr(IP, Address(R5, Heap::EndOffset(space))); 872 __ ldr(IP, Address(R9, Heap::EndOffset(space)));
874 __ cmp(R3, Operand(IP)); 873 __ cmp(R3, Operand(IP));
875 if (FLAG_use_slow_path) { 874 if (FLAG_use_slow_path) {
876 __ b(&slow_case); 875 __ b(&slow_case);
877 } else { 876 } else {
878 __ b(&slow_case, CS); // Branch if unsigned higher or equal. 877 __ b(&slow_case, CS); // Branch if unsigned higher or equal.
879 } 878 }
880 879
881 // Successfully allocated the object, now update top to point to 880 // Successfully allocated the object, now update top to point to
882 // next object start and initialize the object. 881 // next object start and initialize the object.
883 // R0: new object start (untagged). 882 // R0: new object start (untagged).
884 // R1: number of context variables. 883 // R1: number of context variables.
885 // R2: object size. 884 // R2: object size.
886 // R3: next object start. 885 // R3: next object start.
887 // R5: heap. 886 // R9: heap.
888 __ LoadAllocationStatsAddress(R6, cid, /* inline_isolate = */ false); 887 __ LoadAllocationStatsAddress(R4, cid, /* inline_isolate = */ false);
889 __ str(R3, Address(R5, Heap::TopOffset(space))); 888 __ str(R3, Address(R9, Heap::TopOffset(space)));
890 __ add(R0, R0, Operand(kHeapObjectTag)); 889 __ add(R0, R0, Operand(kHeapObjectTag));
891 890
892 // Calculate the size tag. 891 // Calculate the size tag.
893 // R0: new object (tagged). 892 // R0: new object (tagged).
894 // R1: number of context variables. 893 // R1: number of context variables.
895 // R2: object size. 894 // R2: object size.
896 // R3: next object start. 895 // R3: next object start.
897 // R6: allocation stats address. 896 // R4: allocation stats address.
898 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; 897 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
899 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); 898 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag);
900 // If no size tag overflow, shift R2 left, else set R2 to zero. 899 // If no size tag overflow, shift R2 left, else set R2 to zero.
901 __ mov(R5, Operand(R2, LSL, shift), LS); 900 __ mov(R9, Operand(R2, LSL, shift), LS);
902 __ mov(R5, Operand(0), HI); 901 __ mov(R9, Operand(0), HI);
903 902
904 // Get the class index and insert it into the tags. 903 // Get the class index and insert it into the tags.
905 // R5: size and bit tags. 904 // R9: size and bit tags.
906 __ LoadImmediate(IP, RawObject::ClassIdTag::encode(cid)); 905 __ LoadImmediate(IP, RawObject::ClassIdTag::encode(cid));
907 __ orr(R5, R5, Operand(IP)); 906 __ orr(R9, R9, Operand(IP));
908 __ str(R5, FieldAddress(R0, Context::tags_offset())); 907 __ str(R9, FieldAddress(R0, Context::tags_offset()));
909 908
910 // Set up the number of context variables field. 909 // Set up the number of context variables field.
911 // R0: new object. 910 // R0: new object.
912 // R1: number of context variables as integer value (not object). 911 // R1: number of context variables as integer value (not object).
913 // R2: object size. 912 // R2: object size.
914 // R3: next object start. 913 // R3: next object start.
915 // R6: allocation stats address. 914 // R4: allocation stats address.
916 __ str(R1, FieldAddress(R0, Context::num_variables_offset())); 915 __ str(R1, FieldAddress(R0, Context::num_variables_offset()));
917 916
918 // Set up the parent field. 917 // Set up the parent field.
919 // R0: new object. 918 // R0: new object.
920 // R1: number of context variables. 919 // R1: number of context variables.
921 // R2: object size. 920 // R2: object size.
922 // R3: next object start. 921 // R3: next object start.
923 // R6: allocation stats address. 922 // R4: allocation stats address.
924 __ LoadObject(R4, Object::null_object()); 923 __ LoadObject(R8, Object::null_object());
925 __ InitializeFieldNoBarrier(R0, FieldAddress(R0, Context::parent_offset()), 924 __ InitializeFieldNoBarrier(R0, FieldAddress(R0, Context::parent_offset()),
926 R4); 925 R8);
927 926
928 // Initialize the context variables. 927 // Initialize the context variables.
929 // R0: new object. 928 // R0: new object.
930 // R1: number of context variables. 929 // R1: number of context variables.
931 // R2: object size. 930 // R2: object size.
932 // R3: next object start. 931 // R3: next object start.
933 // R4, R5: raw null. 932 // R8, R9: raw null.
934 // R6: allocation stats address. 933 // R4: allocation stats address.
935 Label loop; 934 Label loop;
936 __ AddImmediate(R7, R0, Context::variable_offset(0) - kHeapObjectTag); 935 __ AddImmediate(R7, R0, Context::variable_offset(0) - kHeapObjectTag);
937 __ InitializeFieldsNoBarrier(R0, R7, R3, R4, R5); 936 __ InitializeFieldsNoBarrier(R0, R7, R3, R8, R9);
938 __ IncrementAllocationStatsWithSize(R6, R2, space); 937 __ IncrementAllocationStatsWithSize(R4, R2, space);
939 938
940 // Done allocating and initializing the context. 939 // Done allocating and initializing the context.
941 // R0: new object. 940 // R0: new object.
942 __ Ret(); 941 __ Ret();
943 942
944 __ Bind(&slow_case); 943 __ Bind(&slow_case);
945 } 944 }
946 // Create a stub frame as we are pushing some objects on the stack before 945 // Create a stub frame as we are pushing some objects on the stack before
947 // calling into the runtime. 946 // calling into the runtime.
948 __ EnterStubFrame(); 947 __ EnterStubFrame();
(...skipping 103 matching lines...)
1052 const int kInlineInstanceSize = 12; 1051 const int kInlineInstanceSize = 12;
1053 const intptr_t instance_size = cls.instance_size(); 1052 const intptr_t instance_size = cls.instance_size();
1054 ASSERT(instance_size > 0); 1053 ASSERT(instance_size > 0);
1055 Isolate* isolate = Isolate::Current(); 1054 Isolate* isolate = Isolate::Current();
1056 if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) && 1055 if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) &&
1057 !cls.TraceAllocation(isolate)) { 1056 !cls.TraceAllocation(isolate)) {
1058 Label slow_case; 1057 Label slow_case;
1059 // Allocate the object and update top to point to 1058 // Allocate the object and update top to point to
1060 // next object start and initialize the allocated object. 1059 // next object start and initialize the allocated object.
1061 Heap::Space space = Heap::SpaceForAllocation(cls.id()); 1060 Heap::Space space = Heap::SpaceForAllocation(cls.id());
1062 __ ldr(R5, Address(THR, Thread::heap_offset())); 1061 __ ldr(R9, Address(THR, Thread::heap_offset()));
1063 __ ldr(R0, Address(R5, Heap::TopOffset(space))); 1062 __ ldr(R0, Address(R9, Heap::TopOffset(space)));
1064 __ AddImmediate(R1, R0, instance_size); 1063 __ AddImmediate(R1, R0, instance_size);
1065 // Check if the allocation fits into the remaining space. 1064 // Check if the allocation fits into the remaining space.
1066 // R0: potential new object start. 1065 // R0: potential new object start.
1067 // R1: potential next object start. 1066 // R1: potential next object start.
1068 // R5: heap. 1067 // R9: heap.
1069 __ ldr(IP, Address(R5, Heap::EndOffset(space))); 1068 __ ldr(IP, Address(R9, Heap::EndOffset(space)));
1070 __ cmp(R1, Operand(IP)); 1069 __ cmp(R1, Operand(IP));
1071 if (FLAG_use_slow_path) { 1070 if (FLAG_use_slow_path) {
1072 __ b(&slow_case); 1071 __ b(&slow_case);
1073 } else { 1072 } else {
1074 __ b(&slow_case, CS); // Unsigned higher or equal. 1073 __ b(&slow_case, CS); // Unsigned higher or equal.
1075 } 1074 }
1076 __ str(R1, Address(R5, Heap::TopOffset(space))); 1075 __ str(R1, Address(R9, Heap::TopOffset(space)));
1077 1076
1078 // Load the address of the allocation stats table. We split up the load 1077 // Load the address of the allocation stats table. We split up the load
1079 // and the increment so that the dependent load is not too nearby. 1078 // and the increment so that the dependent load is not too nearby.
1080 __ LoadAllocationStatsAddress(R5, cls.id(), /* inline_isolate = */ false); 1079 __ LoadAllocationStatsAddress(R9, cls.id(), /* inline_isolate = */ false);
1081 1080
1082 // R0: new object start. 1081 // R0: new object start.
1083 // R1: next object start. 1082 // R1: next object start.
1084 // R5: allocation stats table. 1083 // R9: allocation stats table.
1085 // Set the tags. 1084 // Set the tags.
1086 uword tags = 0; 1085 uword tags = 0;
1087 tags = RawObject::SizeTag::update(instance_size, tags); 1086 tags = RawObject::SizeTag::update(instance_size, tags);
1088 ASSERT(cls.id() != kIllegalCid); 1087 ASSERT(cls.id() != kIllegalCid);
1089 tags = RawObject::ClassIdTag::update(cls.id(), tags); 1088 tags = RawObject::ClassIdTag::update(cls.id(), tags);
1090 __ LoadImmediate(R2, tags); 1089 __ LoadImmediate(R2, tags);
1091 __ str(R2, Address(R0, Instance::tags_offset())); 1090 __ str(R2, Address(R0, Instance::tags_offset()));
1092 __ add(R0, R0, Operand(kHeapObjectTag)); 1091 __ add(R0, R0, Operand(kHeapObjectTag));
1093 1092
1094 // Initialize the remaining words of the object. 1093 // Initialize the remaining words of the object.
1095 __ LoadObject(R2, Object::null_object()); 1094 __ LoadObject(R2, Object::null_object());
1096 1095
1097 // R2: raw null. 1096 // R2: raw null.
1098 // R0: new object (tagged). 1097 // R0: new object (tagged).
1099 // R1: next object start. 1098 // R1: next object start.
1100 // R5: allocation stats table. 1099 // R9: allocation stats table.
1101 // First try inlining the initialization without a loop. 1100 // First try inlining the initialization without a loop.
1102 if (instance_size < (kInlineInstanceSize * kWordSize)) { 1101 if (instance_size < (kInlineInstanceSize * kWordSize)) {
1103 // Small objects are initialized using a consecutive set of writes. 1102 // Small objects are initialized using a consecutive set of writes.
1104 intptr_t begin_offset = Instance::NextFieldOffset() - kHeapObjectTag; 1103 intptr_t begin_offset = Instance::NextFieldOffset() - kHeapObjectTag;
1105 intptr_t end_offset = instance_size - kHeapObjectTag; 1104 intptr_t end_offset = instance_size - kHeapObjectTag;
1106 // Save one move if less than two fields. 1105 // Save one move if less than two fields.
1107 if ((end_offset - begin_offset) >= (2 * kWordSize)) { 1106 if ((end_offset - begin_offset) >= (2 * kWordSize)) {
1108 __ mov(R3, Operand(R2)); 1107 __ mov(R3, Operand(R2));
1109 } 1108 }
1110 __ InitializeFieldsNoBarrierUnrolled(R0, R0, begin_offset, end_offset, 1109 __ InitializeFieldsNoBarrierUnrolled(R0, R0, begin_offset, end_offset,
1111 R2, R3); 1110 R2, R3);
1112 } else { 1111 } else {
1113 // There are more than kInlineInstanceSize (12) fields. 1112 // There are more than kInlineInstanceSize (12) fields.
1114 __ add(R4, R0, Operand(Instance::NextFieldOffset() - kHeapObjectTag)); 1113 __ add(R4, R0, Operand(Instance::NextFieldOffset() - kHeapObjectTag));
1115 __ mov(R3, Operand(R2)); 1114 __ mov(R3, Operand(R2));
1116 // Loop until the whole object is initialized. 1115 // Loop until the whole object is initialized.
1117 // R2: raw null. 1116 // R2: raw null.
1118 // R3: raw null. 1117 // R3: raw null.
1119 // R0: new object (tagged). 1118 // R0: new object (tagged).
1120 // R1: next object start. 1119 // R1: next object start.
1121 // R4: next word to be initialized. 1120 // R4: next word to be initialized.
1122 // R5: allocation stats table. 1121 // R9: allocation stats table.
1123 __ InitializeFieldsNoBarrier(R0, R4, R1, R2, R3); 1122 __ InitializeFieldsNoBarrier(R0, R4, R1, R2, R3);
1124 } 1123 }
1125 if (is_cls_parameterized) { 1124 if (is_cls_parameterized) {
1126 // Set the type arguments in the new object. 1125 // Set the type arguments in the new object.
1127 __ ldr(R4, Address(SP, 0)); 1126 __ ldr(R4, Address(SP, 0));
1128 FieldAddress type_args(R0, cls.type_arguments_field_offset()); 1127 FieldAddress type_args(R0, cls.type_arguments_field_offset());
1129 __ InitializeFieldNoBarrier(R0, type_args, R4); 1128 __ InitializeFieldNoBarrier(R0, type_args, R4);
1130 } 1129 }
1131 1130
1132 // Done allocating and initializing the instance. 1131 // Done allocating and initializing the instance.
1133 // R0: new object (tagged). 1132 // R0: new object (tagged).
1134 // R5: allocation stats table. 1133 // R9: allocation stats table.
1135 1134
1136 // Update allocation stats. 1135 // Update allocation stats.
1137 __ IncrementAllocationStats(R5, cls.id(), space); 1136 __ IncrementAllocationStats(R9, cls.id(), space);
1138 1137
1139 // R0: new object (tagged). 1138 // R0: new object (tagged).
1140 __ Ret(); 1139 __ Ret();
1141 1140
1142 __ Bind(&slow_case); 1141 __ Bind(&slow_case);
1143 } 1142 }
1144 if (is_cls_parameterized) { 1143 if (is_cls_parameterized) {
1145 // Load the type arguments. 1144 // Load the type arguments.
1146 __ ldr(R4, Address(SP, 0)); 1145 __ ldr(R4, Address(SP, 0));
1147 } 1146 }
(...skipping 28 matching lines...)
1176 // Input parameters: 1175 // Input parameters:
1177 // LR : return address. 1176 // LR : return address.
1178 // SP : address of last argument. 1177 // SP : address of last argument.
1179 // R4: arguments descriptor array. 1178 // R4: arguments descriptor array.
1180 void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) { 1179 void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
1181 __ EnterStubFrame(); 1180 __ EnterStubFrame();
1182 1181
1183 // Load the receiver. 1182 // Load the receiver.
1184 __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 1183 __ ldr(R2, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
1185 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi. 1184 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
1186 __ ldr(R6, Address(IP, kParamEndSlotFromFp * kWordSize)); 1185 __ ldr(R8, Address(IP, kParamEndSlotFromFp * kWordSize));
1187 1186
1188 // Push space for the return value. 1187 // Push space for the return value.
1189 // Push the receiver. 1188 // Push the receiver.
1190 // Push arguments descriptor array. 1189 // Push arguments descriptor array.
1191 __ LoadObject(IP, Object::null_object()); 1190 __ LoadObject(IP, Object::null_object());
1192 __ PushList((1 << R4) | (1 << R6) | (1 << IP)); 1191 __ PushList((1 << R4) | (1 << R8) | (1 << IP));
1193 1192
1194 // R2: Smi-tagged arguments array length. 1193 // R2: Smi-tagged arguments array length.
1195 PushArgumentsArray(assembler); 1194 PushArgumentsArray(assembler);
1196 1195
1197 const intptr_t kNumArgs = 3; 1196 const intptr_t kNumArgs = 3;
1198 __ CallRuntime(kInvokeClosureNoSuchMethodRuntimeEntry, kNumArgs); 1197 __ CallRuntime(kInvokeClosureNoSuchMethodRuntimeEntry, kNumArgs);
1199 // noSuchMethod on closures always throws an error, so it will never return. 1198 // noSuchMethod on closures always throws an error, so it will never return.
1200 __ bkpt(0); 1199 __ bkpt(0);
1201 } 1200 }
1202 1201
1203 1202
1204 // R6: function object. 1203 // R8: function object.
1205 // R5: inline cache data object. 1204 // R9: inline cache data object.
1206 // Cannot use function object from ICData as it may be the inlined 1205 // Cannot use function object from ICData as it may be the inlined
1207 // function and not the top-scope function. 1206 // function and not the top-scope function.
1208 void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) { 1207 void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) {
1209 Register ic_reg = R5; 1208 Register ic_reg = R9;
1210 Register func_reg = R6; 1209 Register func_reg = R8;
1211 if (FLAG_trace_optimized_ic_calls) { 1210 if (FLAG_trace_optimized_ic_calls) {
1212 __ EnterStubFrame(); 1211 __ EnterStubFrame();
1213 __ PushList((1 << R5) | (1 << R6)); // Preserve. 1212 __ PushList((1 << R9) | (1 << R8)); // Preserve.
1214 __ Push(ic_reg); // Argument. 1213 __ Push(ic_reg); // Argument.
1215 __ Push(func_reg); // Argument. 1214 __ Push(func_reg); // Argument.
1216 __ CallRuntime(kTraceICCallRuntimeEntry, 2); 1215 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
1217 __ Drop(2); // Discard arguments. 1216 __ Drop(2); // Discard arguments.
1218 __ PopList((1 << R5) | (1 << R6)); // Restore. 1217 __ PopList((1 << R9) | (1 << R8)); // Restore.
1219 __ LeaveStubFrame(); 1218 __ LeaveStubFrame();
1220 } 1219 }
1221 __ ldr(R7, FieldAddress(func_reg, Function::usage_counter_offset())); 1220 __ ldr(R7, FieldAddress(func_reg, Function::usage_counter_offset()));
1222 __ add(R7, R7, Operand(1)); 1221 __ add(R7, R7, Operand(1));
1223 __ str(R7, FieldAddress(func_reg, Function::usage_counter_offset())); 1222 __ str(R7, FieldAddress(func_reg, Function::usage_counter_offset()));
1224 } 1223 }
1225 1224
1226 1225
1227 // Loads function into 'temp_reg'. 1226 // Loads function into 'temp_reg'.
1228 void StubCode::GenerateUsageCounterIncrement(Assembler* assembler, 1227 void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
1229 Register temp_reg) { 1228 Register temp_reg) {
1230 if (FLAG_optimization_counter_threshold >= 0) { 1229 if (FLAG_optimization_counter_threshold >= 0) {
1231 Register ic_reg = R5; 1230 Register ic_reg = R9;
1232 Register func_reg = temp_reg; 1231 Register func_reg = temp_reg;
1233 ASSERT(temp_reg == R6); 1232 ASSERT(temp_reg == R8);
1234 __ Comment("Increment function counter"); 1233 __ Comment("Increment function counter");
1235 __ ldr(func_reg, FieldAddress(ic_reg, ICData::owner_offset())); 1234 __ ldr(func_reg, FieldAddress(ic_reg, ICData::owner_offset()));
1236 __ ldr(R7, FieldAddress(func_reg, Function::usage_counter_offset())); 1235 __ ldr(R7, FieldAddress(func_reg, Function::usage_counter_offset()));
1237 __ add(R7, R7, Operand(1)); 1236 __ add(R7, R7, Operand(1));
1238 __ str(R7, FieldAddress(func_reg, Function::usage_counter_offset())); 1237 __ str(R7, FieldAddress(func_reg, Function::usage_counter_offset()));
1239 } 1238 }
1240 } 1239 }
1241 1240
1242 1241
1243 // Note: R5 must be preserved. 1242 // Note: R9 must be preserved.
1244 // Attempt a quick Smi operation for known operations ('kind'). The ICData 1243 // Attempt a quick Smi operation for known operations ('kind'). The ICData
1245 // must have been primed with a Smi/Smi check that will be used for counting 1244 // must have been primed with a Smi/Smi check that will be used for counting
1246 // the invocations. 1245 // the invocations.
1247 static void EmitFastSmiOp(Assembler* assembler, 1246 static void EmitFastSmiOp(Assembler* assembler,
1248 Token::Kind kind, 1247 Token::Kind kind,
1249 intptr_t num_args, 1248 intptr_t num_args,
1250 Label* not_smi_or_overflow) { 1249 Label* not_smi_or_overflow) {
1251 __ Comment("Fast Smi op"); 1250 __ Comment("Fast Smi op");
1252 __ ldr(R0, Address(SP, 0 * kWordSize)); 1251 __ ldr(R0, Address(SP, 0 * kWordSize));
1253 __ ldr(R1, Address(SP, 1 * kWordSize)); 1252 __ ldr(R1, Address(SP, 1 * kWordSize));
(...skipping 12 matching lines...)
1266 break; 1265 break;
1267 } 1266 }
1268 case Token::kEQ: { 1267 case Token::kEQ: {
1269 __ cmp(R0, Operand(R1)); 1268 __ cmp(R0, Operand(R1));
1270 __ LoadObject(R0, Bool::True(), EQ); 1269 __ LoadObject(R0, Bool::True(), EQ);
1271 __ LoadObject(R0, Bool::False(), NE); 1270 __ LoadObject(R0, Bool::False(), NE);
1272 break; 1271 break;
1273 } 1272 }
1274 default: UNIMPLEMENTED(); 1273 default: UNIMPLEMENTED();
1275 } 1274 }
1276 // R5: IC data object (preserved). 1275 // R9: IC data object (preserved).
1277 __ ldr(R6, FieldAddress(R5, ICData::ic_data_offset())); 1276 __ ldr(R8, FieldAddress(R9, ICData::ic_data_offset()));
1278 // R6: ic_data_array with check entries: classes and target functions. 1277 // R8: ic_data_array with check entries: classes and target functions.
1279 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag); 1278 __ AddImmediate(R8, R8, Array::data_offset() - kHeapObjectTag);
1280 // R6: points directly to the first ic data array element. 1279 // R8: points directly to the first ic data array element.
1281 #if defined(DEBUG) 1280 #if defined(DEBUG)
1282 // Check that first entry is for Smi/Smi. 1281 // Check that first entry is for Smi/Smi.
1283 Label error, ok; 1282 Label error, ok;
1284 const intptr_t imm_smi_cid = reinterpret_cast<intptr_t>(Smi::New(kSmiCid)); 1283 const intptr_t imm_smi_cid = reinterpret_cast<intptr_t>(Smi::New(kSmiCid));
1285 __ ldr(R1, Address(R6, 0)); 1284 __ ldr(R1, Address(R8, 0));
1286 __ CompareImmediate(R1, imm_smi_cid); 1285 __ CompareImmediate(R1, imm_smi_cid);
1287 __ b(&error, NE); 1286 __ b(&error, NE);
1288 __ ldr(R1, Address(R6, kWordSize)); 1287 __ ldr(R1, Address(R8, kWordSize));
1289 __ CompareImmediate(R1, imm_smi_cid); 1288 __ CompareImmediate(R1, imm_smi_cid);
1290 __ b(&ok, EQ); 1289 __ b(&ok, EQ);
1291 __ Bind(&error); 1290 __ Bind(&error);
1292 __ Stop("Incorrect IC data"); 1291 __ Stop("Incorrect IC data");
1293 __ Bind(&ok); 1292 __ Bind(&ok);
1294 #endif 1293 #endif
1295 if (FLAG_optimization_counter_threshold >= 0) { 1294 if (FLAG_optimization_counter_threshold >= 0) {
1296 // Update counter. 1295 // Update counter.
1297 const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize; 1296 const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
1298 __ LoadFromOffset(kWord, R1, R6, count_offset); 1297 __ LoadFromOffset(kWord, R1, R8, count_offset);
1299 __ adds(R1, R1, Operand(Smi::RawValue(1))); 1298 __ adds(R1, R1, Operand(Smi::RawValue(1)));
1300 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow. 1299 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow.
1301 __ StoreIntoSmiField(Address(R6, count_offset), R1); 1300 __ StoreIntoSmiField(Address(R8, count_offset), R1);
1302 } 1301 }
1303 __ Ret(); 1302 __ Ret();
1304 } 1303 }
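
For reference, a hedged C++ model of the visible parts of this fast path: Smi payloads are stored shifted left by one (tag bit 0), so the kEQ case can compare the tagged words directly, and the count update saturates at Smi::kMaxValue on overflow rather than wrapping. The helper names and the GCC/Clang overflow builtin below are illustrative stand-ins, not VM API:

    #include <cstdint>
    #include <limits>

    typedef intptr_t word;
    const word kSmiMaxValue = std::numeric_limits<word>::max() >> 1;

    inline word SmiRaw(word value) { return value << 1; }  // Smi::RawValue

    // Token::kEQ case: tagged Smis compare as plain words.
    bool FastSmiEqual(word left_tagged, word right_tagged) {
      return left_tagged == right_tagged;  // cmp R0, R1; LoadObject True/False
    }

    // Count update: 'adds' plus the VS-conditional LoadImmediate amounts to a
    // saturating increment of the tagged counter.
    word BumpCount(word count_tagged) {
      word result;
      if (__builtin_add_overflow(count_tagged, SmiRaw(1), &result)) {
        result = SmiRaw(kSmiMaxValue);   // clamp instead of wrapping
      }
      return result;
    }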
1305 1304
1306 1305
1307 // Generate inline cache check for 'num_args'. 1306 // Generate inline cache check for 'num_args'.
1308 // LR: return address. 1307 // LR: return address.
1309 // R5: inline cache data object. 1308 // R9: inline cache data object.
1310 // Control flow: 1309 // Control flow:
1311 // - If receiver is null -> jump to IC miss. 1310 // - If receiver is null -> jump to IC miss.
1312 // - If receiver is Smi -> load Smi class. 1311 // - If receiver is Smi -> load Smi class.
1313 // - If receiver is not-Smi -> load receiver's class. 1312 // - If receiver is not-Smi -> load receiver's class.
1314 // - Check if 'num_args' (including receiver) match any IC data group. 1313 // - Check if 'num_args' (including receiver) match any IC data group.
1315 // - Match found -> jump to target. 1314 // - Match found -> jump to target.
1316 // - Match not found -> jump to IC miss. 1315 // - Match not found -> jump to IC miss.
1317 void StubCode::GenerateNArgsCheckInlineCacheStub( 1316 void StubCode::GenerateNArgsCheckInlineCacheStub(
1318 Assembler* assembler, 1317 Assembler* assembler,
1319 intptr_t num_args, 1318 intptr_t num_args,
1320 const RuntimeEntry& handle_ic_miss, 1319 const RuntimeEntry& handle_ic_miss,
1321 Token::Kind kind, 1320 Token::Kind kind,
1322 RangeCollectionMode range_collection_mode, 1321 RangeCollectionMode range_collection_mode,
1323 bool optimized) { 1322 bool optimized) {
1324 __ CheckCodePointer(); 1323 __ CheckCodePointer();
1325 ASSERT(num_args > 0); 1324 ASSERT(num_args > 0);
1326 #if defined(DEBUG) 1325 #if defined(DEBUG)
1327 { Label ok; 1326 { Label ok;
1328 // Check that the IC data array has NumArgsTested() == num_args. 1327 // Check that the IC data array has NumArgsTested() == num_args.
1329 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. 1328 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
1330 __ ldr(R6, FieldAddress(R5, ICData::state_bits_offset())); 1329 __ ldr(R8, FieldAddress(R9, ICData::state_bits_offset()));
1331 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. 1330 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
1332 __ and_(R6, R6, Operand(ICData::NumArgsTestedMask())); 1331 __ and_(R8, R8, Operand(ICData::NumArgsTestedMask()));
1333 __ CompareImmediate(R6, num_args); 1332 __ CompareImmediate(R8, num_args);
1334 __ b(&ok, EQ); 1333 __ b(&ok, EQ);
1335 __ Stop("Incorrect stub for IC data"); 1334 __ Stop("Incorrect stub for IC data");
1336 __ Bind(&ok); 1335 __ Bind(&ok);
1337 } 1336 }
1338 #endif // DEBUG 1337 #endif // DEBUG
1339 1338
1340 Label stepping, done_stepping; 1339 Label stepping, done_stepping;
1341 if (FLAG_support_debugger && !optimized) { 1340 if (FLAG_support_debugger && !optimized) {
1342 __ Comment("Check single stepping"); 1341 __ Comment("Check single stepping");
1343 __ LoadIsolate(R6); 1342 __ LoadIsolate(R8);
1344 __ ldrb(R6, Address(R6, Isolate::single_step_offset())); 1343 __ ldrb(R8, Address(R8, Isolate::single_step_offset()));
1345 __ CompareImmediate(R6, 0); 1344 __ CompareImmediate(R8, 0);
1346 __ b(&stepping, NE); 1345 __ b(&stepping, NE);
1347 __ Bind(&done_stepping); 1346 __ Bind(&done_stepping);
1348 } 1347 }
1349 1348
1350 __ Comment("Range feedback collection"); 1349 __ Comment("Range feedback collection");
1351 Label not_smi_or_overflow; 1350 Label not_smi_or_overflow;
1352 if (range_collection_mode == kCollectRanges) { 1351 if (range_collection_mode == kCollectRanges) {
1353 ASSERT((num_args == 1) || (num_args == 2)); 1352 ASSERT((num_args == 1) || (num_args == 2));
1354 if (num_args == 2) { 1353 if (num_args == 2) {
1355 __ ldr(R0, Address(SP, 1 * kWordSize)); 1354 __ ldr(R0, Address(SP, 1 * kWordSize));
1356 __ UpdateRangeFeedback(R0, 0, R5, R1, R4, &not_smi_or_overflow); 1355 __ UpdateRangeFeedback(R0, 0, R9, R1, R4, &not_smi_or_overflow);
1357 } 1356 }
1358 1357
1359 __ ldr(R0, Address(SP, 0 * kWordSize)); 1358 __ ldr(R0, Address(SP, 0 * kWordSize));
1360 __ UpdateRangeFeedback(R0, num_args - 1, R5, R1, R4, &not_smi_or_overflow); 1359 __ UpdateRangeFeedback(R0, num_args - 1, R9, R1, R4, &not_smi_or_overflow);
1361 } 1360 }
1362 if (kind != Token::kILLEGAL) { 1361 if (kind != Token::kILLEGAL) {
1363 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow); 1362 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
1364 } 1363 }
1365 __ Bind(&not_smi_or_overflow); 1364 __ Bind(&not_smi_or_overflow);
1366 1365
1367 __ Comment("Extract ICData initial values and receiver cid"); 1366 __ Comment("Extract ICData initial values and receiver cid");
1368 // Load arguments descriptor into R4. 1367 // Load arguments descriptor into R4.
1369 __ ldr(R4, FieldAddress(R5, ICData::arguments_descriptor_offset())); 1368 __ ldr(R4, FieldAddress(R9, ICData::arguments_descriptor_offset()));
1370 // Loop that checks if there is an IC data match. 1369 // Loop that checks if there is an IC data match.
1371 Label loop, update, test, found; 1370 Label loop, update, test, found;
1372 // R5: IC data object (preserved). 1371 // R9: IC data object (preserved).
1373 __ ldr(R6, FieldAddress(R5, ICData::ic_data_offset())); 1372 __ ldr(R8, FieldAddress(R9, ICData::ic_data_offset()));
1374 // R6: ic_data_array with check entries: classes and target functions. 1373 // R8: ic_data_array with check entries: classes and target functions.
1375 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag); 1374 __ AddImmediate(R8, R8, Array::data_offset() - kHeapObjectTag);
1376 // R6: points directly to the first ic data array element. 1375 // R8: points directly to the first ic data array element.
1377 1376
1378 // Get the receiver's class ID (first read number of arguments from 1377 // Get the receiver's class ID (first read number of arguments from
1379 // arguments descriptor array and then access the receiver from the stack). 1378 // arguments descriptor array and then access the receiver from the stack).
1380 __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 1379 __ ldr(R7, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
1381 __ sub(R7, R7, Operand(Smi::RawValue(1))); 1380 __ sub(R7, R7, Operand(Smi::RawValue(1)));
1382 __ ldr(R0, Address(SP, R7, LSL, 1)); // R7 (argument_count - 1) is smi. 1381 __ ldr(R0, Address(SP, R7, LSL, 1)); // R7 (argument_count - 1) is smi.
1383 __ LoadTaggedClassIdMayBeSmi(R0, R0); 1382 __ LoadTaggedClassIdMayBeSmi(R0, R0);
1384 // R7: argument_count - 1 (smi). 1383 // R7: argument_count - 1 (smi).
1385 // R0: receiver's class ID (smi). 1384 // R0: receiver's class ID (smi).
1386 __ ldr(R1, Address(R6, 0)); // First class id (smi) to check. 1385 __ ldr(R1, Address(R8, 0)); // First class id (smi) to check.
1387 __ b(&test); 1386 __ b(&test);
1388 1387
1389 __ Comment("ICData loop"); 1388 __ Comment("ICData loop");
1390 __ Bind(&loop); 1389 __ Bind(&loop);
1391 for (int i = 0; i < num_args; i++) { 1390 for (int i = 0; i < num_args; i++) {
1392 if (i > 0) { 1391 if (i > 0) {
1393 // If not the first, load the next argument's class ID. 1392 // If not the first, load the next argument's class ID.
1394 __ AddImmediate(R0, R7, Smi::RawValue(-i)); 1393 __ AddImmediate(R0, R7, Smi::RawValue(-i));
1395 __ ldr(R0, Address(SP, R0, LSL, 1)); 1394 __ ldr(R0, Address(SP, R0, LSL, 1));
1396 __ LoadTaggedClassIdMayBeSmi(R0, R0); 1395 __ LoadTaggedClassIdMayBeSmi(R0, R0);
1397 // R0: next argument class ID (smi). 1396 // R0: next argument class ID (smi).
1398 __ LoadFromOffset(kWord, R1, R6, i * kWordSize); 1397 __ LoadFromOffset(kWord, R1, R8, i * kWordSize);
1399 // R1: next class ID to check (smi). 1398 // R1: next class ID to check (smi).
1400 } 1399 }
1401 __ cmp(R0, Operand(R1)); // Class id match? 1400 __ cmp(R0, Operand(R1)); // Class id match?
1402 if (i < (num_args - 1)) { 1401 if (i < (num_args - 1)) {
1403 __ b(&update, NE); // Continue. 1402 __ b(&update, NE); // Continue.
1404 } else { 1403 } else {
1405 // Last check, all checks before matched. 1404 // Last check, all checks before matched.
1406 __ b(&found, EQ); // Break. 1405 __ b(&found, EQ); // Break.
1407 } 1406 }
1408 } 1407 }
1409 __ Bind(&update); 1408 __ Bind(&update);
1410 // Reload receiver class ID. It has not been destroyed when num_args == 1. 1409 // Reload receiver class ID. It has not been destroyed when num_args == 1.
1411 if (num_args > 1) { 1410 if (num_args > 1) {
1412 __ ldr(R0, Address(SP, R7, LSL, 1)); 1411 __ ldr(R0, Address(SP, R7, LSL, 1));
1413 __ LoadTaggedClassIdMayBeSmi(R0, R0); 1412 __ LoadTaggedClassIdMayBeSmi(R0, R0);
1414 } 1413 }
1415 1414
1416 const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize; 1415 const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize;
1417 __ AddImmediate(R6, entry_size); // Next entry. 1416 __ AddImmediate(R8, entry_size); // Next entry.
1418 __ ldr(R1, Address(R6, 0)); // Next class ID. 1417 __ ldr(R1, Address(R8, 0)); // Next class ID.
1419 1418
1420 __ Bind(&test); 1419 __ Bind(&test);
1421 __ CompareImmediate(R1, Smi::RawValue(kIllegalCid)); // Done? 1420 __ CompareImmediate(R1, Smi::RawValue(kIllegalCid)); // Done?
1422 __ b(&loop, NE); 1421 __ b(&loop, NE);
1423 1422
1424 __ Comment("IC miss"); 1423 __ Comment("IC miss");
1425 // Compute address of arguments. 1424 // Compute address of arguments.
1426 // R7: argument_count - 1 (smi). 1425 // R7: argument_count - 1 (smi).
1427 __ add(R7, SP, Operand(R7, LSL, 1)); // R7 is Smi. 1426 __ add(R7, SP, Operand(R7, LSL, 1)); // R7 is Smi.
1428 // R7: address of receiver. 1427 // R7: address of receiver.
1429 // Create a stub frame as we are pushing some objects on the stack before 1428 // Create a stub frame as we are pushing some objects on the stack before
1430 // calling into the runtime. 1429 // calling into the runtime.
1431 __ EnterStubFrame(); 1430 __ EnterStubFrame();
1432 __ LoadObject(R0, Object::null_object()); 1431 __ LoadObject(R0, Object::null_object());
1433 // Preserve IC data object and arguments descriptor array and 1432 // Preserve IC data object and arguments descriptor array and
1434 // set up space on stack for result (target code object). 1433 // set up space on stack for result (target code object).
1435 __ PushList((1 << R0) | (1 << R4) | (1 << R5)); 1434 __ PushList((1 << R0) | (1 << R4) | (1 << R9));
1436 // Push call arguments. 1435 // Push call arguments.
1437 for (intptr_t i = 0; i < num_args; i++) { 1436 for (intptr_t i = 0; i < num_args; i++) {
1438 __ LoadFromOffset(kWord, IP, R7, -i * kWordSize); 1437 __ LoadFromOffset(kWord, IP, R7, -i * kWordSize);
1439 __ Push(IP); 1438 __ Push(IP);
1440 } 1439 }
1441 // Pass IC data object. 1440 // Pass IC data object.
1442 __ Push(R5); 1441 __ Push(R9);
1443 __ CallRuntime(handle_ic_miss, num_args + 1); 1442 __ CallRuntime(handle_ic_miss, num_args + 1);
1444 // Remove the call arguments pushed earlier, including the IC data object. 1443 // Remove the call arguments pushed earlier, including the IC data object.
1445 __ Drop(num_args + 1); 1444 __ Drop(num_args + 1);
1446 // Pop returned function object into R0. 1445 // Pop returned function object into R0.
1447 // Restore arguments descriptor array and IC data array. 1446 // Restore arguments descriptor array and IC data array.
1448 __ PopList((1 << R0) | (1 << R4) | (1 << R5)); 1447 __ PopList((1 << R0) | (1 << R4) | (1 << R9));
1449 if (range_collection_mode == kCollectRanges) { 1448 if (range_collection_mode == kCollectRanges) {
1450 __ RestoreCodePointer(); 1449 __ RestoreCodePointer();
1451 } 1450 }
1452 __ LeaveStubFrame(); 1451 __ LeaveStubFrame();
1453 Label call_target_function; 1452 Label call_target_function;
1454 if (!FLAG_lazy_dispatchers) { 1453 if (!FLAG_lazy_dispatchers) {
1455 GenerateDispatcherCode(assembler, &call_target_function); 1454 GenerateDispatcherCode(assembler, &call_target_function);
1456 } else { 1455 } else {
1457 __ b(&call_target_function); 1456 __ b(&call_target_function);
1458 } 1457 }
1459 1458
1460 __ Bind(&found); 1459 __ Bind(&found);
1461 // R6: pointer to an IC data check group. 1460 // R8: pointer to an IC data check group.
1462 const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize; 1461 const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
1463 const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize; 1462 const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
1464 __ LoadFromOffset(kWord, R0, R6, target_offset); 1463 __ LoadFromOffset(kWord, R0, R8, target_offset);
1465 1464
1466 if (FLAG_optimization_counter_threshold >= 0) { 1465 if (FLAG_optimization_counter_threshold >= 0) {
1467 __ Comment("Update caller's counter"); 1466 __ Comment("Update caller's counter");
1468 __ LoadFromOffset(kWord, R1, R6, count_offset); 1467 __ LoadFromOffset(kWord, R1, R8, count_offset);
1469 __ adds(R1, R1, Operand(Smi::RawValue(1))); 1468 __ adds(R1, R1, Operand(Smi::RawValue(1)));
1470 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow. 1469 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow.
1471 __ StoreIntoSmiField(Address(R6, count_offset), R1); 1470 __ StoreIntoSmiField(Address(R8, count_offset), R1);
1472 } 1471 }
1473 1472
1474 __ Comment("Call target"); 1473 __ Comment("Call target");
1475 __ Bind(&call_target_function); 1474 __ Bind(&call_target_function);
1476 // R0: target function. 1475 // R0: target function.
1477 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); 1476 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
1478 if (range_collection_mode == kCollectRanges) { 1477 if (range_collection_mode == kCollectRanges) {
1479 __ ldr(R1, Address(SP, 0 * kWordSize)); 1478 __ ldr(R1, Address(SP, 0 * kWordSize));
1480 if (num_args == 2) { 1479 if (num_args == 2) {
1481 __ ldr(R3, Address(SP, 1 * kWordSize)); 1480 __ ldr(R3, Address(SP, 1 * kWordSize));
1482 } 1481 }
1483 __ EnterStubFrame(); 1482 __ EnterStubFrame();
1484 if (num_args == 2) { 1483 if (num_args == 2) {
1485 __ PushList((1 << R1) | (1 << R3) | (1 << R5)); 1484 __ PushList((1 << R1) | (1 << R3) | (1 << R9));
1486 } else { 1485 } else {
1487 __ PushList((1 << R1) | (1 << R5)); 1486 __ PushList((1 << R1) | (1 << R9));
1488 } 1487 }
1489 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); 1488 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1490 __ blx(R2); 1489 __ blx(R2);
1491 1490
1492 Label done; 1491 Label done;
1493 __ ldr(R5, Address(FP, kFirstLocalSlotFromFp * kWordSize)); 1492 __ ldr(R9, Address(FP, kFirstLocalSlotFromFp * kWordSize));
1494 __ UpdateRangeFeedback(R0, 2, R5, R1, R4, &done); 1493 __ UpdateRangeFeedback(R0, 2, R9, R1, R4, &done);
1495 __ Bind(&done); 1494 __ Bind(&done);
1496 __ RestoreCodePointer(); 1495 __ RestoreCodePointer();
1497 __ LeaveStubFrame(); 1496 __ LeaveStubFrame();
1498 __ Ret(); 1497 __ Ret();
1499 } else { 1498 } else {
1500 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); 1499 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1501 __ bx(R2); 1500 __ bx(R2);
1502 } 1501 }
1503 1502
1504 if (FLAG_support_debugger && !optimized) { 1503 if (FLAG_support_debugger && !optimized) {
1505 __ Bind(&stepping); 1504 __ Bind(&stepping);
1506 __ EnterStubFrame(); 1505 __ EnterStubFrame();
1507 __ Push(R5); // Preserve IC data. 1506 __ Push(R9); // Preserve IC data.
1508 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 1507 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
1509 __ Pop(R5); 1508 __ Pop(R9);
1510 __ RestoreCodePointer(); 1509 __ RestoreCodePointer();
1511 __ LeaveStubFrame(); 1510 __ LeaveStubFrame();
1512 __ b(&done_stepping); 1511 __ b(&done_stepping);
1513 } 1512 }
1514 } 1513 }
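
Stepping back, the loop/update/test labels above implement a linear scan that a C-style model makes easier to follow. Each check group in the ic_data array is num_args class-id Smis followed by target and count words, and the scan stops at a kIllegalCid sentinel; a miss drops into the runtime, which may extend the array. All names below are illustrative:

    #include <cstddef>
    #include <cstdint>

    typedef intptr_t word;  // tagged Smi or object pointer

    // Returns the matching group's target slot on a hit, or nullptr for a miss.
    const word* ICLookup(const word* group,        // first check group
                         const word* arg_cids,     // tagged class ids of the args
                         int num_args,
                         word illegal_cid,         // Smi::RawValue(kIllegalCid)
                         size_t entry_length,      // TestEntryLengthFor(num_args)
                         size_t target_index) {    // TargetIndexFor(num_args)
      for (; group[0] != illegal_cid; group += entry_length) {
        bool match = true;
        for (int i = 0; i < num_args; i++) {
          if (group[i] != arg_cids[i]) { match = false; break; }  // cmp R0, R1
        }
        if (match) return &group[target_index];    // &found: bump count, call it
      }
      return nullptr;  // IC miss -> handle_ic_miss runtime entry
    }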
1515 1514
1516 1515
1517 // Use inline cache data array to invoke the target or continue in inline 1516 // Use inline cache data array to invoke the target or continue in inline
1518 // cache miss handler. Stub for 1-argument check (receiver class). 1517 // cache miss handler. Stub for 1-argument check (receiver class).
1519 // LR: return address. 1518 // LR: return address.
1520 // R5: inline cache data object. 1519 // R9: inline cache data object.
1521 // Inline cache data object structure: 1520 // Inline cache data object structure:
1522 // 0: function-name 1521 // 0: function-name
1523 // 1: N, number of arguments checked. 1522 // 1: N, number of arguments checked.
1524 // 2 .. (length - 1): group of checks, each check containing: 1523 // 2 .. (length - 1): group of checks, each check containing:
1525 // - N classes. 1524 // - N classes.
1526 // - 1 target function. 1525 // - 1 target function.
1527 void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) { 1526 void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
1528 GenerateUsageCounterIncrement(assembler, R6); 1527 GenerateUsageCounterIncrement(assembler, R8);
1529 GenerateNArgsCheckInlineCacheStub(assembler, 1528 GenerateNArgsCheckInlineCacheStub(assembler,
1530 1, 1529 1,
1531 kInlineCacheMissHandlerOneArgRuntimeEntry, 1530 kInlineCacheMissHandlerOneArgRuntimeEntry,
1532 Token::kILLEGAL, 1531 Token::kILLEGAL,
1533 kIgnoreRanges); 1532 kIgnoreRanges);
1534 } 1533 }
1535 1534
1536 1535
1537 void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) { 1536 void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
1538 GenerateUsageCounterIncrement(assembler, R6); 1537 GenerateUsageCounterIncrement(assembler, R8);
1539 GenerateNArgsCheckInlineCacheStub(assembler, 1538 GenerateNArgsCheckInlineCacheStub(assembler,
1540 2, 1539 2,
1541 kInlineCacheMissHandlerTwoArgsRuntimeEntry, 1540 kInlineCacheMissHandlerTwoArgsRuntimeEntry,
1542 Token::kILLEGAL, 1541 Token::kILLEGAL,
1543 kIgnoreRanges); 1542 kIgnoreRanges);
1544 } 1543 }
1545 1544
1546 1545
1547 void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) { 1546 void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
1548 GenerateUsageCounterIncrement(assembler, R6); 1547 GenerateUsageCounterIncrement(assembler, R8);
1549 GenerateNArgsCheckInlineCacheStub(assembler, 1548 GenerateNArgsCheckInlineCacheStub(assembler,
1550 2, 1549 2,
1551 kInlineCacheMissHandlerTwoArgsRuntimeEntry, 1550 kInlineCacheMissHandlerTwoArgsRuntimeEntry,
1552 Token::kADD, 1551 Token::kADD,
1553 kCollectRanges); 1552 kCollectRanges);
1554 } 1553 }
1555 1554
1556 1555
1557 void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) { 1556 void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
1558 GenerateUsageCounterIncrement(assembler, R6); 1557 GenerateUsageCounterIncrement(assembler, R8);
1559 GenerateNArgsCheckInlineCacheStub(assembler, 2, 1558 GenerateNArgsCheckInlineCacheStub(assembler, 2,
1560 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB, 1559 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB,
1561 kCollectRanges); 1560 kCollectRanges);
1562 } 1561 }
1563 1562
1564 1563
1565 void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) { 1564 void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
1566 GenerateUsageCounterIncrement(assembler, R6); 1565 GenerateUsageCounterIncrement(assembler, R8);
1567 GenerateNArgsCheckInlineCacheStub(assembler, 2, 1566 GenerateNArgsCheckInlineCacheStub(assembler, 2,
1568 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, 1567 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
1569 kIgnoreRanges); 1568 kIgnoreRanges);
1570 } 1569 }
1571 1570
1572 1571
1573 void StubCode::GenerateUnaryRangeCollectingInlineCacheStub( 1572 void StubCode::GenerateUnaryRangeCollectingInlineCacheStub(
1574 Assembler* assembler) { 1573 Assembler* assembler) {
1575 GenerateUsageCounterIncrement(assembler, R6); 1574 GenerateUsageCounterIncrement(assembler, R8);
1576 GenerateNArgsCheckInlineCacheStub(assembler, 1, 1575 GenerateNArgsCheckInlineCacheStub(assembler, 1,
1577 kInlineCacheMissHandlerOneArgRuntimeEntry, 1576 kInlineCacheMissHandlerOneArgRuntimeEntry,
1578 Token::kILLEGAL, 1577 Token::kILLEGAL,
1579 kCollectRanges); 1578 kCollectRanges);
1580 } 1579 }
1581 1580
1582 1581
1583 void StubCode::GenerateBinaryRangeCollectingInlineCacheStub( 1582 void StubCode::GenerateBinaryRangeCollectingInlineCacheStub(
1584 Assembler* assembler) { 1583 Assembler* assembler) {
1585 GenerateUsageCounterIncrement(assembler, R6); 1584 GenerateUsageCounterIncrement(assembler, R8);
1586 GenerateNArgsCheckInlineCacheStub(assembler, 2, 1585 GenerateNArgsCheckInlineCacheStub(assembler, 2,
1587 kInlineCacheMissHandlerTwoArgsRuntimeEntry, 1586 kInlineCacheMissHandlerTwoArgsRuntimeEntry,
1588 Token::kILLEGAL, 1587 Token::kILLEGAL,
1589 kCollectRanges); 1588 kCollectRanges);
1590 } 1589 }
1591 1590
1592 1591
1593 void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub( 1592 void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
1594 Assembler* assembler) { 1593 Assembler* assembler) {
1595 GenerateOptimizedUsageCounterIncrement(assembler); 1594 GenerateOptimizedUsageCounterIncrement(assembler);
1596 GenerateNArgsCheckInlineCacheStub(assembler, 1, 1595 GenerateNArgsCheckInlineCacheStub(assembler, 1,
1597 kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, 1596 kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
1598 kIgnoreRanges, true /* optimized */); 1597 kIgnoreRanges, true /* optimized */);
1599 } 1598 }
1600 1599
1601 1600
1602 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub( 1601 void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
1603 Assembler* assembler) { 1602 Assembler* assembler) {
1604 GenerateOptimizedUsageCounterIncrement(assembler); 1603 GenerateOptimizedUsageCounterIncrement(assembler);
1605 GenerateNArgsCheckInlineCacheStub(assembler, 2, 1604 GenerateNArgsCheckInlineCacheStub(assembler, 2,
1606 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL, 1605 kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
1607 kIgnoreRanges, true /* optimized */); 1606 kIgnoreRanges, true /* optimized */);
1608 } 1607 }
1609 1608
1610 1609
1611 // Intermediary stub between a static call and its target. ICData contains 1610 // Intermediary stub between a static call and its target. ICData contains
1612 // the target function and the call count. 1611 // the target function and the call count.
1613 // R5: ICData 1612 // R9: ICData
1614 void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) { 1613 void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
1615 GenerateUsageCounterIncrement(assembler, R6); 1614 GenerateUsageCounterIncrement(assembler, R8);
1616 #if defined(DEBUG) 1615 #if defined(DEBUG)
1617 { Label ok; 1616 { Label ok;
1618 // Check that the IC data array has NumArgsTested() == 0. 1617 // Check that the IC data array has NumArgsTested() == 0.
1619 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. 1618 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
1620 __ ldr(R6, FieldAddress(R5, ICData::state_bits_offset())); 1619 __ ldr(R8, FieldAddress(R9, ICData::state_bits_offset()));
1621 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. 1620 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
1622 __ and_(R6, R6, Operand(ICData::NumArgsTestedMask())); 1621 __ and_(R8, R8, Operand(ICData::NumArgsTestedMask()));
1623 __ CompareImmediate(R6, 0); 1622 __ CompareImmediate(R8, 0);
1624 __ b(&ok, EQ); 1623 __ b(&ok, EQ);
1625 __ Stop("Incorrect IC data for unoptimized static call"); 1624 __ Stop("Incorrect IC data for unoptimized static call");
1626 __ Bind(&ok); 1625 __ Bind(&ok);
1627 } 1626 }
1628 #endif // DEBUG 1627 #endif // DEBUG
1629 1628
1630 // Check single stepping. 1629 // Check single stepping.
1631 Label stepping, done_stepping; 1630 Label stepping, done_stepping;
1632 if (FLAG_support_debugger) { 1631 if (FLAG_support_debugger) {
1633 __ LoadIsolate(R6); 1632 __ LoadIsolate(R8);
1634 __ ldrb(R6, Address(R6, Isolate::single_step_offset())); 1633 __ ldrb(R8, Address(R8, Isolate::single_step_offset()));
1635 __ CompareImmediate(R6, 0); 1634 __ CompareImmediate(R8, 0);
1636 __ b(&stepping, NE); 1635 __ b(&stepping, NE);
1637 __ Bind(&done_stepping); 1636 __ Bind(&done_stepping);
1638 } 1637 }
1639 1638
1640 // R5: IC data object (preserved). 1639 // R9: IC data object (preserved).
1641 __ ldr(R6, FieldAddress(R5, ICData::ic_data_offset())); 1640 __ ldr(R8, FieldAddress(R9, ICData::ic_data_offset()));
1642 // R6: ic_data_array with entries: target functions and count. 1641 // R8: ic_data_array with entries: target functions and count.
1643 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag); 1642 __ AddImmediate(R8, R8, Array::data_offset() - kHeapObjectTag);
1644 // R6: points directly to the first ic data array element. 1643 // R8: points directly to the first ic data array element.
1645 const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize; 1644 const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize;
1646 const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize; 1645 const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize;
1647 1646
1648 if (FLAG_optimization_counter_threshold >= 0) { 1647 if (FLAG_optimization_counter_threshold >= 0) {
1649 // Increment count for this call. 1648 // Increment count for this call.
1650 __ LoadFromOffset(kWord, R1, R6, count_offset); 1649 __ LoadFromOffset(kWord, R1, R8, count_offset);
1651 __ adds(R1, R1, Operand(Smi::RawValue(1))); 1650 __ adds(R1, R1, Operand(Smi::RawValue(1)));
1652 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow. 1651 __ LoadImmediate(R1, Smi::RawValue(Smi::kMaxValue), VS); // Overflow.
1653 __ StoreIntoSmiField(Address(R6, count_offset), R1); 1652 __ StoreIntoSmiField(Address(R8, count_offset), R1);
1654 } 1653 }
1655 1654
1656 // Load arguments descriptor into R4. 1655 // Load arguments descriptor into R4.
1657 __ ldr(R4, FieldAddress(R5, ICData::arguments_descriptor_offset())); 1656 __ ldr(R4, FieldAddress(R9, ICData::arguments_descriptor_offset()));
1658 1657
1659 // Get function and call it, if possible. 1658 // Get function and call it, if possible.
1660 __ LoadFromOffset(kWord, R0, R6, target_offset); 1659 __ LoadFromOffset(kWord, R0, R8, target_offset);
1661 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); 1660 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1662 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); 1661 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
1663 __ bx(R2); 1662 __ bx(R2);
1664 1663
1665 if (FLAG_support_debugger) { 1664 if (FLAG_support_debugger) {
1666 __ Bind(&stepping); 1665 __ Bind(&stepping);
1667 __ EnterStubFrame(); 1666 __ EnterStubFrame();
1668 __ Push(R5); // Preserve IC data. 1667 __ Push(R9); // Preserve IC data.
1669 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 1668 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
1670 __ Pop(R5); 1669 __ Pop(R9);
1671 __ RestoreCodePointer(); 1670 __ RestoreCodePointer();
1672 __ LeaveStubFrame(); 1671 __ LeaveStubFrame();
1673 __ b(&done_stepping); 1672 __ b(&done_stepping);
1674 } 1673 }
1675 } 1674 }
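
Since this stub's ICData has NumArgsTested() == 0, the ic_data array holds a single check group of just a target and a count, and the whole call sequence reduces to the sketch below (hypothetical types; saturation as in the IC stubs):

    #include <cstdint>

    struct StaticCallGroup {   // one group: TargetIndexFor(0) / CountIndexFor(0)
      intptr_t target;         // target function
      intptr_t count;          // tagged Smi call count
    };

    // Bumps the count (saturating) and returns the function to tail-call.
    intptr_t StaticCallTarget(StaticCallGroup* g, intptr_t smi_max_tagged) {
      intptr_t bumped;
      if (__builtin_add_overflow(g->count, intptr_t(1) << 1, &bumped)) {
        bumped = smi_max_tagged;   // Smi::RawValue(Smi::kMaxValue)
      }
      g->count = bumped;
      return g->target;            // stub then jumps via entry_point_offset
    }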
1676 1675
1677 1676
1678 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) { 1677 void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
1679 GenerateUsageCounterIncrement(assembler, R6); 1678 GenerateUsageCounterIncrement(assembler, R8);
1680 GenerateNArgsCheckInlineCacheStub( 1679 GenerateNArgsCheckInlineCacheStub(
1681 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, 1680 assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
1682 kIgnoreRanges); 1681 kIgnoreRanges);
1683 } 1682 }
1684 1683
1685 1684
1686 void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) { 1685 void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
1687 GenerateUsageCounterIncrement(assembler, R6); 1686 GenerateUsageCounterIncrement(assembler, R8);
1688 GenerateNArgsCheckInlineCacheStub(assembler, 2, 1687 GenerateNArgsCheckInlineCacheStub(assembler, 2,
1689 kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL, 1688 kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
1690 kIgnoreRanges); 1689 kIgnoreRanges);
1691 } 1690 }
1692 1691
1693 1692
1694 // Stub for compiling a function and jumping to the compiled code. 1693 // Stub for compiling a function and jumping to the compiled code.
1695 // R5: IC-Data (for methods). 1694 // R9: IC-Data (for methods).
1696 // R4: Arguments descriptor. 1695 // R4: Arguments descriptor.
1697 // R0: Function. 1696 // R0: Function.
1698 void StubCode::GenerateLazyCompileStub(Assembler* assembler) { 1697 void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
1699 // Preserve arg desc. and IC data object. 1698 // Preserve arg desc. and IC data object.
1700 __ EnterStubFrame(); 1699 __ EnterStubFrame();
1701 __ PushList((1 << R4) | (1 << R5)); 1700 __ PushList((1 << R4) | (1 << R9));
1702 __ Push(R0); // Pass function. 1701 __ Push(R0); // Pass function.
1703 __ CallRuntime(kCompileFunctionRuntimeEntry, 1); 1702 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
1704 __ Pop(R0); // Restore argument. 1703 __ Pop(R0); // Restore argument.
1705 __ PopList((1 << R4) | (1 << R5)); // Restore arg desc. and IC data. 1704 __ PopList((1 << R4) | (1 << R9)); // Restore arg desc. and IC data.
1706 __ LeaveStubFrame(); 1705 __ LeaveStubFrame();
1707 1706
1708 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset())); 1707 __ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
1709 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset())); 1708 __ ldr(R2, FieldAddress(R0, Function::entry_point_offset()));
1710 __ bx(R2); 1709 __ bx(R2);
1711 } 1710 }
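
In effect this is a trampoline: compile, then tail-call the fresh code, with R4/R9 saved and restored around the runtime call. A hedged pseudo-C++ rendering (the CompileFunction signature is an assumption standing in for the runtime entry):

    struct Code     { void (*entry_point)(); };
    struct Function { Code* code; };

    void CompileFunction(Function* function);  // kCompileFunctionRuntimeEntry (assumed)

    void LazyCompileTrampoline(Function* function) {
      CompileFunction(function);        // fills in function->code
      function->code->entry_point();    // bx R2: tail-call the compiled code
    }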
1712 1711
1713 1712
1714 // R5: Contains an ICData. 1713 // R9: Contains an ICData.
1715 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) { 1714 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
1716 __ EnterStubFrame(); 1715 __ EnterStubFrame();
1717 __ LoadObject(R0, Object::null_object()); 1716 __ LoadObject(R0, Object::null_object());
1718 // Preserve IC data object and make room for result. 1717 // Preserve IC data object and make room for result.
1719 __ PushList((1 << R0) | (1 << R5)); 1718 __ PushList((1 << R0) | (1 << R9));
1720 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); 1719 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
1721 __ PopList((1 << R0) | (1 << R5)); 1720 __ PopList((1 << R0) | (1 << R9));
1722 __ LeaveStubFrame(); 1721 __ LeaveStubFrame();
1723 __ mov(CODE_REG, Operand(R0)); 1722 __ mov(CODE_REG, Operand(R0));
1724 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset())); 1723 __ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
1725 __ bx(R0); 1724 __ bx(R0);
1726 } 1725 }
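
The breakpoint stub defers to the debugger runtime, which hands back the code object to resume with; the IC data register survives the call. Roughly (the handler's return convention below is an assumption; the stub really receives the code through the result slot pushed as null above):

    struct Code { void (*entry_point)(); };

    Code* BreakpointRuntimeHandler();   // kBreakpointRuntimeHandlerRuntimeEntry (assumed)

    void ICCallBreakpoint() {
      Code* original = BreakpointRuntimeHandler();  // popped into R0 -> CODE_REG
      original->entry_point();                      // bx R0: resume execution
    }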
1727 1726
1728 1727
1729 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { 1728 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
1730 __ EnterStubFrame(); 1729 __ EnterStubFrame();
1731 __ LoadObject(R0, Object::null_object()); 1730 __ LoadObject(R0, Object::null_object());
(...skipping 34 matching lines...)
1766 // R2: cache array. 1765 // R2: cache array.
1767 // Result in R1: null -> not found, otherwise result (true or false). 1766 // Result in R1: null -> not found, otherwise result (true or false).
1768 static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) { 1767 static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
1769 ASSERT((1 <= n) && (n <= 3)); 1768 ASSERT((1 <= n) && (n <= 3));
1770 if (n > 1) { 1769 if (n > 1) {
1771 // Get instance type arguments. 1770 // Get instance type arguments.
1772 __ LoadClass(R3, R0, R4); 1771 __ LoadClass(R3, R0, R4);
1773 // Compute instance type arguments into R4. 1772 // Compute instance type arguments into R4.
1774 Label has_no_type_arguments; 1773 Label has_no_type_arguments;
1775 __ LoadObject(R4, Object::null_object()); 1774 __ LoadObject(R4, Object::null_object());
1776 __ ldr(R5, FieldAddress(R3, 1775 __ ldr(R9, FieldAddress(R3,
1777 Class::type_arguments_field_offset_in_words_offset())); 1776 Class::type_arguments_field_offset_in_words_offset()));
1778 __ CompareImmediate(R5, Class::kNoTypeArguments); 1777 __ CompareImmediate(R9, Class::kNoTypeArguments);
1779 __ b(&has_no_type_arguments, EQ); 1778 __ b(&has_no_type_arguments, EQ);
1780 __ add(R5, R0, Operand(R5, LSL, 2)); 1779 __ add(R9, R0, Operand(R9, LSL, 2));
1781 __ ldr(R4, FieldAddress(R5, 0)); 1780 __ ldr(R4, FieldAddress(R9, 0));
1782 __ Bind(&has_no_type_arguments); 1781 __ Bind(&has_no_type_arguments);
1783 } 1782 }
1784 __ LoadClassId(R3, R0); 1783 __ LoadClassId(R3, R0);
1785 // R0: instance. 1784 // R0: instance.
1786 // R1: instantiator type arguments or NULL. 1785 // R1: instantiator type arguments or NULL.
1787 // R2: SubtypeTestCache. 1786 // R2: SubtypeTestCache.
1788 // R3: instance class id. 1787 // R3: instance class id.
1789 // R4: instance type arguments (null if none), used only if n > 1. 1788 // R4: instance type arguments (null if none), used only if n > 1.
1790 __ ldr(R2, FieldAddress(R2, SubtypeTestCache::cache_offset())); 1789 __ ldr(R2, FieldAddress(R2, SubtypeTestCache::cache_offset()));
1791 __ AddImmediate(R2, Array::data_offset() - kHeapObjectTag); 1790 __ AddImmediate(R2, Array::data_offset() - kHeapObjectTag);
1792 1791
1793 Label loop, found, not_found, next_iteration; 1792 Label loop, found, not_found, next_iteration;
1794 // R2: entry start. 1793 // R2: entry start.
1795 // R3: instance class id. 1794 // R3: instance class id.
1796 // R4: instance type arguments. 1795 // R4: instance type arguments.
1797 __ SmiTag(R3); 1796 __ SmiTag(R3);
1798 __ Bind(&loop); 1797 __ Bind(&loop);
1799 __ ldr(R5, Address(R2, kWordSize * SubtypeTestCache::kInstanceClassId)); 1798 __ ldr(R9, Address(R2, kWordSize * SubtypeTestCache::kInstanceClassId));
1800 __ CompareObject(R5, Object::null_object()); 1799 __ CompareObject(R9, Object::null_object());
1801 __ b(&not_found, EQ); 1800 __ b(&not_found, EQ);
1802 __ cmp(R5, Operand(R3)); 1801 __ cmp(R9, Operand(R3));
1803 if (n == 1) { 1802 if (n == 1) {
1804 __ b(&found, EQ); 1803 __ b(&found, EQ);
1805 } else { 1804 } else {
1806 __ b(&next_iteration, NE); 1805 __ b(&next_iteration, NE);
1807 __ ldr(R5, 1806 __ ldr(R9,
1808 Address(R2, kWordSize * SubtypeTestCache::kInstanceTypeArguments)); 1807 Address(R2, kWordSize * SubtypeTestCache::kInstanceTypeArguments));
1809 __ cmp(R5, Operand(R4)); 1808 __ cmp(R9, Operand(R4));
1810 if (n == 2) { 1809 if (n == 2) {
1811 __ b(&found, EQ); 1810 __ b(&found, EQ);
1812 } else { 1811 } else {
1813 __ b(&next_iteration, NE); 1812 __ b(&next_iteration, NE);
1814 __ ldr(R5, Address(R2, kWordSize * 1813 __ ldr(R9, Address(R2, kWordSize *
1815 SubtypeTestCache::kInstantiatorTypeArguments)); 1814 SubtypeTestCache::kInstantiatorTypeArguments));
1816 __ cmp(R5, Operand(R1)); 1815 __ cmp(R9, Operand(R1));
1817 __ b(&found, EQ); 1816 __ b(&found, EQ);
1818 } 1817 }
1819 } 1818 }
1820 __ Bind(&next_iteration); 1819 __ Bind(&next_iteration);
1821 __ AddImmediate(R2, kWordSize * SubtypeTestCache::kTestEntryLength); 1820 __ AddImmediate(R2, kWordSize * SubtypeTestCache::kTestEntryLength);
1822 __ b(&loop); 1821 __ b(&loop);
1823 // Fall through to not found. 1822 // Fall through to not found.
1824 __ Bind(&not_found); 1823 __ Bind(&not_found);
1825 __ LoadObject(R1, Object::null_object()); 1824 __ LoadObject(R1, Object::null_object());
1826 __ Ret(); 1825 __ Ret();
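
A hedged C++ model of the cache scan above; the found path lives in the elided lines, and per the header comment it yields the cached true/false object. Fields mirror the SubtypeTestCache::k* slot indices, with null terminating the scan:

    struct STCEntry {
      const void* instance_class_id;            // kInstanceClassId (scan key)
      const void* instance_type_arguments;      // kInstanceTypeArguments
      const void* instantiator_type_arguments;  // kInstantiatorTypeArguments
      const void* test_result;                  // cached Bool (assumed slot)
    };

    // n in 1..3 selects how many keys must match, exactly as in the stub.
    const void* SubtypeTestCacheLookup(const STCEntry* e, int n,
                                       const void* cid,
                                       const void* instance_targs,
                                       const void* instantiator_targs) {
      for (; e->instance_class_id != nullptr; e++) {  // null -> not_found
        if (e->instance_class_id != cid) continue;
        if (n > 1 && e->instance_type_arguments != instance_targs) continue;
        if (n > 2 && e->instantiator_type_arguments != instantiator_targs) continue;
        return e->test_result;                        // found (elided above)
      }
      return nullptr;                                 // caller treats as not found
    }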
(...skipping 67 matching lines...)
1894 __ LoadImmediate(R2, VMTag::kDartTagId); 1893 __ LoadImmediate(R2, VMTag::kDartTagId);
1895 __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset()); 1894 __ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
1896 // Clear top exit frame. 1895 // Clear top exit frame.
1897 __ LoadImmediate(R2, 0); 1896 __ LoadImmediate(R2, 0);
1898 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset()); 1897 __ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
1899 __ bx(LR); // Jump to the exception handler code. 1898 __ bx(LR); // Jump to the exception handler code.
1900 } 1899 }
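
The visible tail restores the "running Dart code" invariants before branching to the handler; in outline (illustrative stand-ins for the Thread fields):

    #include <cstdint>

    struct Thread {
      uintptr_t vm_tag;               // Thread::vm_tag_offset()
      uintptr_t top_exit_frame_info;  // Thread::top_exit_frame_info_offset()
    };

    void JumpToHandlerTail(Thread* thread, void (*handler)(), uintptr_t dart_tag) {
      thread->vm_tag = dart_tag;           // VMTag::kDartTagId
      thread->top_exit_frame_info = 0;     // no exit frame while in Dart code
      handler();                           // bx LR into the handler code
    }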
1901 1900
1902 1901
1903 // Calls the runtime to optimize the given function. 1902 // Calls the runtime to optimize the given function.
1904 // R6: function to be reoptimized. 1903 // R8: function to be reoptimized.
1905 // R4: argument descriptor (preserved). 1904 // R4: argument descriptor (preserved).
1906 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { 1905 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
1907 __ EnterStubFrame(); 1906 __ EnterStubFrame();
1908 __ Push(R4); 1907 __ Push(R4);
1909 __ LoadObject(IP, Object::null_object()); 1908 __ LoadObject(IP, Object::null_object());
1910 __ Push(IP); // Set up space on stack for return value. 1909 __ Push(IP); // Set up space on stack for return value.
1911 __ Push(R6); 1910 __ Push(R8);
1912 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); 1911 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
1913 __ Pop(R0); // Discard argument. 1912 __ Pop(R0); // Discard argument.
1914 __ Pop(R0); // Get Code object. 1913 __ Pop(R0); // Get Code object.
1915 __ Pop(R4); // Restore argument descriptor. 1914 __ Pop(R4); // Restore argument descriptor.
1916 __ LeaveStubFrame(); 1915 __ LeaveStubFrame();
1917 __ mov(CODE_REG, Operand(R0)); 1916 __ mov(CODE_REG, Operand(R0));
1918 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset())); 1917 __ ldr(R0, FieldAddress(R0, Code::entry_point_offset()));
1919 __ bx(R0); 1918 __ bx(R0);
1920 __ bkpt(0); 1919 __ bkpt(0);
1921 } 1920 }
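
Same trampoline shape as the lazy-compile stub, but through the optimizing compiler, with R4 preserved across the call; the trailing bkpt(0) only fires if the jump somehow returns. Sketch (the OptimizeInvokedFunction signature is an assumption for the runtime entry):

    struct Code { void (*entry_point)(); };

    Code* OptimizeInvokedFunction(void* function);  // runtime entry (assumed)

    void OptimizeFunctionTrampoline(void* function) {
      Code* code = OptimizeInvokedFunction(function);  // result popped into R0
      code->entry_point();                             // bx R0
    }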
(...skipping 166 matching lines...)
2088 // Result: 2087 // Result:
2089 // R1: entry point. 2088 // R1: entry point.
2090 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) { 2089 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) {
2091 EmitMegamorphicLookup(assembler, R0, R1, R1); 2090 EmitMegamorphicLookup(assembler, R0, R1, R1);
2092 __ Ret(); 2091 __ Ret();
2093 } 2092 }
2094 2093
2095 } // namespace dart 2094 } // namespace dart
2096 2095
2097 #endif // defined TARGET_ARCH_ARM 2096 #endif // defined TARGET_ARCH_ARM