Chromium Code Reviews

Side by Side Diff: runtime/vm/stub_code_arm64.cc

Issue 1264543002: Simplify constant pool usage in arm64 code generator (by removing extra argument) (Closed)
Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: address comments Created 5 years, 4 months ago
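
The change is mechanical throughout the file: macro-assembler calls drop their trailing pool-pointer register argument (kNoPP or PP) because the assembler now tracks the pool pointer itself. A minimal before/after sketch of the interface change, using hypothetical simplified signatures rather than the real vm/assembler_arm64.h declarations:

#include <cstdint>
#include <cstdio>

enum Register : int { R2 = 2, PP = 27, kNoPP = -1 };

struct OldAssembler {
  // Before: every pool-using macro threaded the pool-pointer register
  // (or the kNoPP sentinel) through as an extra trailing argument.
  void LoadImmediate(Register dst, int64_t imm, Register pool_pointer) {
    std::printf("old: R%d <- %lld (pp arg = %d)\n",
                (int)dst, (long long)imm, (int)pool_pointer);
  }
};

struct NewAssembler {
  // After: the assembler owns the pool-pointer state, so call sites shrink.
  void LoadImmediate(Register dst, int64_t imm) {
    std::printf("new: R%d <- %lld\n", (int)dst, (long long)imm);
  }
};

int main() {
  OldAssembler old_masm;
  NewAssembler new_masm;
  old_masm.LoadImmediate(R2, 42, kNoPP);  // old call shape, left column below
  new_masm.LoadImmediate(R2, 42);         // new call shape, right column below
  return 0;
}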
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" 5 #include "vm/globals.h"
6 #if defined(TARGET_ARCH_ARM64) 6 #if defined(TARGET_ARCH_ARM64)
7 7
8 #include "vm/assembler.h" 8 #include "vm/assembler.h"
9 #include "vm/code_generator.h" 9 #include "vm/code_generator.h"
10 #include "vm/compiler.h" 10 #include "vm/compiler.h"
(...skipping 34 matching lines...)
45 45
46 __ SetPrologueOffset(); 46 __ SetPrologueOffset();
47 __ Comment("CallToRuntimeStub"); 47 __ Comment("CallToRuntimeStub");
48 __ EnterStubFrame(); 48 __ EnterStubFrame();
49 49
50 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R28)) != 0); 50 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R28)) != 0);
51 __ LoadIsolate(R28); 51 __ LoadIsolate(R28);
52 52
53 // Save exit frame information to enable stack walking as we are about 53 // Save exit frame information to enable stack walking as we are about
54 // to transition to Dart VM C++ code. 54 // to transition to Dart VM C++ code.
55 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset(), kNoPP); 55 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset());
56 56
57 #if defined(DEBUG) 57 #if defined(DEBUG)
58 { Label ok; 58 { Label ok;
59 // Check that we are always entering from Dart code. 59 // Check that we are always entering from Dart code.
60 __ LoadFromOffset(R8, R28, Isolate::vm_tag_offset(), kNoPP); 60 __ LoadFromOffset(R8, R28, Isolate::vm_tag_offset());
61 __ CompareImmediate(R8, VMTag::kDartTagId, kNoPP); 61 __ CompareImmediate(R8, VMTag::kDartTagId);
62 __ b(&ok, EQ); 62 __ b(&ok, EQ);
63 __ Stop("Not coming from Dart code."); 63 __ Stop("Not coming from Dart code.");
64 __ Bind(&ok); 64 __ Bind(&ok);
65 } 65 }
66 #endif 66 #endif
67 67
68 // Mark that the isolate is executing VM code. 68 // Mark that the isolate is executing VM code.
69 __ StoreToOffset(R5, R28, Isolate::vm_tag_offset(), kNoPP); 69 __ StoreToOffset(R5, R28, Isolate::vm_tag_offset());
70 70
71 // Reserve space for arguments and align frame before entering C++ world. 71 // Reserve space for arguments and align frame before entering C++ world.
72 // NativeArguments are passed in registers. 72 // NativeArguments are passed in registers.
73 __ Comment("align stack"); 73 __ Comment("align stack");
74 // Reserve space for arguments. 74 // Reserve space for arguments.
75 ASSERT(sizeof(NativeArguments) == 4 * kWordSize); 75 ASSERT(sizeof(NativeArguments) == 4 * kWordSize);
76 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); 76 __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
77 77
78 // Pass NativeArguments structure by value and call runtime. 78 // Pass NativeArguments structure by value and call runtime.
79 // Registers R0, R1, R2, and R3 are used. 79 // Registers R0, R1, R2, and R3 are used.
80 80
81 ASSERT(thread_offset == 0 * kWordSize); 81 ASSERT(thread_offset == 0 * kWordSize);
82 // Set thread in NativeArgs. 82 // Set thread in NativeArgs.
83 __ mov(R0, THR); 83 __ mov(R0, THR);
84 84
85 // There are no runtime calls to closures, so we do not need to set the tag 85 // There are no runtime calls to closures, so we do not need to set the tag
86 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 86 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
87 ASSERT(argc_tag_offset == 1 * kWordSize); 87 ASSERT(argc_tag_offset == 1 * kWordSize);
88 __ mov(R1, R4); // Set argc in NativeArguments. 88 __ mov(R1, R4); // Set argc in NativeArguments.
89 89
90 ASSERT(argv_offset == 2 * kWordSize); 90 ASSERT(argv_offset == 2 * kWordSize);
91 __ add(R2, ZR, Operand(R4, LSL, 3)); 91 __ add(R2, ZR, Operand(R4, LSL, 3));
92 __ add(R2, FP, Operand(R2)); // Compute argv. 92 __ add(R2, FP, Operand(R2)); // Compute argv.
93 // Set argv in NativeArguments. 93 // Set argv in NativeArguments.
94 __ AddImmediate(R2, R2, exitframe_last_param_slot_from_fp * kWordSize, kNoPP); 94 __ AddImmediate(R2, R2, exitframe_last_param_slot_from_fp * kWordSize);
95 95
96 ASSERT(retval_offset == 3 * kWordSize); 96 ASSERT(retval_offset == 3 * kWordSize);
97 __ AddImmediate(R3, R2, kWordSize, kNoPP); 97 __ AddImmediate(R3, R2, kWordSize);
98 98
99 __ StoreToOffset(R0, SP, thread_offset, kNoPP); 99 __ StoreToOffset(R0, SP, thread_offset);
100 __ StoreToOffset(R1, SP, argc_tag_offset, kNoPP); 100 __ StoreToOffset(R1, SP, argc_tag_offset);
101 __ StoreToOffset(R2, SP, argv_offset, kNoPP); 101 __ StoreToOffset(R2, SP, argv_offset);
102 __ StoreToOffset(R3, SP, retval_offset, kNoPP); 102 __ StoreToOffset(R3, SP, retval_offset);
103 __ mov(R0, SP); // Pass the pointer to the NativeArguments. 103 __ mov(R0, SP); // Pass the pointer to the NativeArguments.
104 104
105 // We are entering runtime code, so the C stack pointer must be restored from 105 // We are entering runtime code, so the C stack pointer must be restored from
106 // the stack limit to the top of the stack. We cache the stack limit address 106 // the stack limit to the top of the stack. We cache the stack limit address
107 // in a callee-saved register. 107 // in a callee-saved register.
108 __ mov(R26, CSP); 108 __ mov(R26, CSP);
109 __ mov(CSP, SP); 109 __ mov(CSP, SP);
110 110
111 __ blr(R5); 111 __ blr(R5);
112 __ Comment("CallToRuntimeStub return"); 112 __ Comment("CallToRuntimeStub return");
113 113
114 // Restore SP and CSP. 114 // Restore SP and CSP.
115 __ mov(SP, CSP); 115 __ mov(SP, CSP);
116 __ mov(CSP, R26); 116 __ mov(CSP, R26);
117 117
118 // Retval is next to 1st argument. 118 // Retval is next to 1st argument.
119 // Mark that the isolate is executing Dart code. 119 // Mark that the isolate is executing Dart code.
120 __ LoadImmediate(R2, VMTag::kDartTagId, kNoPP); 120 __ LoadImmediate(R2, VMTag::kDartTagId);
121 __ StoreToOffset(R2, R28, Isolate::vm_tag_offset(), kNoPP); 121 __ StoreToOffset(R2, R28, Isolate::vm_tag_offset());
122 122
123 // Reset exit frame information in Thread structure. 123 // Reset exit frame information in Thread structure.
124 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset(), kNoPP); 124 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
125 125
126 __ LeaveStubFrame(); 126 __ LeaveStubFrame();
127 __ ret(); 127 __ ret();
128 } 128 }
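
The ASSERTs above pin down the block this stub assembles on the stack: four words, written from R0..R3. A compile-checkable sketch of that layout, with field names inferred from the asserted offsets (the VM's actual NativeArguments class is richer than this):

#include <cstdint>

struct Thread;     // opaque in this sketch
struct RawObject;  // opaque in this sketch

struct NativeArgumentsSketch {
  Thread* thread;      // thread_offset   == 0 * kWordSize, written from R0
  intptr_t argc_tag;   // argc_tag_offset == 1 * kWordSize, written from R1
  RawObject** argv;    // argv_offset     == 2 * kWordSize, written from R2
  RawObject** retval;  // retval_offset   == 3 * kWordSize, written from R3
};

// Mirrors ASSERT(sizeof(NativeArguments) == 4 * kWordSize) on a 64-bit host,
// i.e. exactly what ReserveAlignedFrameSpace carves out above.
static_assert(sizeof(NativeArgumentsSketch) == 4 * sizeof(void*),
              "four words, matching the stub's frame reservation");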
129 129
130 130
131 void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) { 131 void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) {
132 __ Stop("GeneratePrintStopMessageStub"); 132 __ Stop("GeneratePrintStopMessageStub");
133 } 133 }
134 134
(...skipping 10 matching lines...)
145 const intptr_t argv_offset = NativeArguments::argv_offset(); 145 const intptr_t argv_offset = NativeArguments::argv_offset();
146 const intptr_t retval_offset = NativeArguments::retval_offset(); 146 const intptr_t retval_offset = NativeArguments::retval_offset();
147 147
148 __ EnterStubFrame(); 148 __ EnterStubFrame();
149 149
150 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R28)) != 0); 150 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R28)) != 0);
151 __ LoadIsolate(R28); 151 __ LoadIsolate(R28);
152 152
153 // Save exit frame information to enable stack walking as we are about 153 // Save exit frame information to enable stack walking as we are about
154 // to transition to native code. 154 // to transition to native code.
155 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset(), kNoPP); 155 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset());
156 156
157 #if defined(DEBUG) 157 #if defined(DEBUG)
158 { Label ok; 158 { Label ok;
159 // Check that we are always entering from Dart code. 159 // Check that we are always entering from Dart code.
160 __ LoadFromOffset(R6, R28, Isolate::vm_tag_offset(), kNoPP); 160 __ LoadFromOffset(R6, R28, Isolate::vm_tag_offset());
161 __ CompareImmediate(R6, VMTag::kDartTagId, kNoPP); 161 __ CompareImmediate(R6, VMTag::kDartTagId);
162 __ b(&ok, EQ); 162 __ b(&ok, EQ);
163 __ Stop("Not coming from Dart code."); 163 __ Stop("Not coming from Dart code.");
164 __ Bind(&ok); 164 __ Bind(&ok);
165 } 165 }
166 #endif 166 #endif
167 167
168 // Mark that the isolate is executing Native code. 168 // Mark that the isolate is executing Native code.
169 __ StoreToOffset(R5, R28, Isolate::vm_tag_offset(), kNoPP); 169 __ StoreToOffset(R5, R28, Isolate::vm_tag_offset());
170 170
171 // Reserve space for the native arguments structure passed on the stack (the 171 // Reserve space for the native arguments structure passed on the stack (the
172 // outgoing pointer parameter to the native arguments structure is passed in 172 // outgoing pointer parameter to the native arguments structure is passed in
173 // R0) and align frame before entering the C++ world. 173 // R0) and align frame before entering the C++ world.
174 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); 174 __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
175 175
176 // Initialize NativeArguments structure and call native function. 176 // Initialize NativeArguments structure and call native function.
177 // Registers R0, R1, R2, and R3 are used. 177 // Registers R0, R1, R2, and R3 are used.
178 178
179 ASSERT(thread_offset == 0 * kWordSize); 179 ASSERT(thread_offset == 0 * kWordSize);
180 // Set thread in NativeArgs. 180 // Set thread in NativeArgs.
181 __ mov(R0, THR); 181 __ mov(R0, THR);
182 182
183 // There are no native calls to closures, so we do not need to set the tag 183 // There are no native calls to closures, so we do not need to set the tag
184 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 184 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
185 ASSERT(argc_tag_offset == 1 * kWordSize); 185 ASSERT(argc_tag_offset == 1 * kWordSize);
186 // Set argc in NativeArguments: R1 already contains argc. 186 // Set argc in NativeArguments: R1 already contains argc.
187 187
188 ASSERT(argv_offset == 2 * kWordSize); 188 ASSERT(argv_offset == 2 * kWordSize);
189 // Set argv in NativeArguments: R2 already contains argv. 189 // Set argv in NativeArguments: R2 already contains argv.
190 190
191 // Set retval in NativeArgs. 191 // Set retval in NativeArgs.
192 ASSERT(retval_offset == 3 * kWordSize); 192 ASSERT(retval_offset == 3 * kWordSize);
193 __ AddImmediate(R3, FP, 2 * kWordSize, kNoPP); 193 __ AddImmediate(R3, FP, 2 * kWordSize);
194 194
195 // Passing the structure by value as in runtime calls would require changing 195 // Passing the structure by value as in runtime calls would require changing
196 // Dart API for native functions. 196 // Dart API for native functions.
197 // For now, space is reserved on the stack and we pass a pointer to it. 197 // For now, space is reserved on the stack and we pass a pointer to it.
198 __ StoreToOffset(R0, SP, thread_offset, kNoPP); 198 __ StoreToOffset(R0, SP, thread_offset);
199 __ StoreToOffset(R1, SP, argc_tag_offset, kNoPP); 199 __ StoreToOffset(R1, SP, argc_tag_offset);
200 __ StoreToOffset(R2, SP, argv_offset, kNoPP); 200 __ StoreToOffset(R2, SP, argv_offset);
201 __ StoreToOffset(R3, SP, retval_offset, kNoPP); 201 __ StoreToOffset(R3, SP, retval_offset);
202 __ mov(R0, SP); // Pass the pointer to the NativeArguments. 202 __ mov(R0, SP); // Pass the pointer to the NativeArguments.
203 203
204 // We are entering runtime code, so the C stack pointer must be restored from 204 // We are entering runtime code, so the C stack pointer must be restored from
205 // the stack limit to the top of the stack. We cache the stack limit address 205 // the stack limit to the top of the stack. We cache the stack limit address
206 // in a callee-saved register. 206 // in a callee-saved register.
207 __ mov(R26, CSP); 207 __ mov(R26, CSP);
208 __ mov(CSP, SP); 208 __ mov(CSP, SP);
209 209
210 __ mov(R1, R5); // Pass the function entrypoint to call. 210 __ mov(R1, R5); // Pass the function entrypoint to call.
211 // Call native function invocation wrapper or redirection via simulator. 211 // Call native function invocation wrapper or redirection via simulator.
212 #if defined(USING_SIMULATOR) 212 #if defined(USING_SIMULATOR)
213 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper); 213 uword entry = reinterpret_cast<uword>(NativeEntry::NativeCallWrapper);
214 entry = Simulator::RedirectExternalReference( 214 entry = Simulator::RedirectExternalReference(
215 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments); 215 entry, Simulator::kNativeCall, NativeEntry::kNumCallWrapperArguments);
216 __ LoadImmediate(R2, entry, kNoPP); 216 __ LoadImmediate(R2, entry);
217 __ blr(R2); 217 __ blr(R2);
218 #else 218 #else
219 __ BranchLink(&NativeEntry::NativeCallWrapperLabel(), kNoPP); 219 __ BranchLink(&NativeEntry::NativeCallWrapperLabel());
220 #endif 220 #endif
221 221
222 // Restore SP and CSP. 222 // Restore SP and CSP.
223 __ mov(SP, CSP); 223 __ mov(SP, CSP);
224 __ mov(CSP, R26); 224 __ mov(CSP, R26);
225 225
226 // Mark that the isolate is executing Dart code. 226 // Mark that the isolate is executing Dart code.
227 __ LoadImmediate(R2, VMTag::kDartTagId, kNoPP); 227 __ LoadImmediate(R2, VMTag::kDartTagId);
228 __ StoreToOffset(R2, R28, Isolate::vm_tag_offset(), kNoPP); 228 __ StoreToOffset(R2, R28, Isolate::vm_tag_offset());
229 229
230 // Reset exit frame information in Thread structure. 230 // Reset exit frame information in Thread structure.
231 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset(), kNoPP); 231 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
232 232
233 __ LeaveStubFrame(); 233 __ LeaveStubFrame();
234 __ ret(); 234 __ ret();
235 } 235 }
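
Worth noting why this stub passes a pointer while the runtime-call stub above passes the block "by value": the Dart embedding API fixes the native signature to an opaque handle. A sketch of the two function shapes; the typedef spellings follow my reading of the VM and Dart API headers, so treat them as assumptions:

#include <cstdint>

struct Thread;
struct RawObject;

struct NativeArguments {
  Thread* thread;
  intptr_t argc_tag;
  RawObject** argv;
  RawObject** retval;
};

// Runtime entries are C++ functions taking the four-word block by value.
// Under the AArch64 ABI an aggregate this large is itself lowered to a
// pointer to a copy, which is why both stubs can pass SP in R0.
typedef void (*RuntimeFunction)(NativeArguments arguments);

// Dart API natives receive an opaque handle instead; switching them to
// by-value would change the public embedding API, hence "space is reserved
// on the stack and we pass a pointer to it".
typedef void* Dart_NativeArguments;
typedef void (*Dart_NativeFunction)(Dart_NativeArguments arguments);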
236 236
237 237
238 // Input parameters: 238 // Input parameters:
239 // LR : return address. 239 // LR : return address.
240 // SP : address of return value. 240 // SP : address of return value.
241 // R5 : address of the native function to call. 241 // R5 : address of the native function to call.
242 // R2 : address of first argument in argument array. 242 // R2 : address of first argument in argument array.
243 // R1 : argc_tag including number of arguments and function kind. 243 // R1 : argc_tag including number of arguments and function kind.
244 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) { 244 void StubCode::GenerateCallBootstrapCFunctionStub(Assembler* assembler) {
245 const intptr_t thread_offset = NativeArguments::thread_offset(); 245 const intptr_t thread_offset = NativeArguments::thread_offset();
246 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); 246 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
247 const intptr_t argv_offset = NativeArguments::argv_offset(); 247 const intptr_t argv_offset = NativeArguments::argv_offset();
248 const intptr_t retval_offset = NativeArguments::retval_offset(); 248 const intptr_t retval_offset = NativeArguments::retval_offset();
249 249
250 __ EnterStubFrame(); 250 __ EnterStubFrame();
251 251
252 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R28)) != 0); 252 COMPILE_ASSERT((kAbiPreservedCpuRegs & (1 << R28)) != 0);
253 __ LoadIsolate(R28); 253 __ LoadIsolate(R28);
254 254
255 // Save exit frame information to enable stack walking as we are about 255 // Save exit frame information to enable stack walking as we are about
256 // to transition to native code. 256 // to transition to native code.
257 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset(), kNoPP); 257 __ StoreToOffset(FP, THR, Thread::top_exit_frame_info_offset());
258 258
259 #if defined(DEBUG) 259 #if defined(DEBUG)
260 { Label ok; 260 { Label ok;
261 // Check that we are always entering from Dart code. 261 // Check that we are always entering from Dart code.
262 __ LoadFromOffset(R6, R28, Isolate::vm_tag_offset(), kNoPP); 262 __ LoadFromOffset(R6, R28, Isolate::vm_tag_offset());
263 __ CompareImmediate(R6, VMTag::kDartTagId, kNoPP); 263 __ CompareImmediate(R6, VMTag::kDartTagId);
264 __ b(&ok, EQ); 264 __ b(&ok, EQ);
265 __ Stop("Not coming from Dart code."); 265 __ Stop("Not coming from Dart code.");
266 __ Bind(&ok); 266 __ Bind(&ok);
267 } 267 }
268 #endif 268 #endif
269 269
270 // Mark that the isolate is executing Native code. 270 // Mark that the isolate is executing Native code.
271 __ StoreToOffset(R5, R28, Isolate::vm_tag_offset(), kNoPP); 271 __ StoreToOffset(R5, R28, Isolate::vm_tag_offset());
272 272
273 // Reserve space for the native arguments structure passed on the stack (the 273 // Reserve space for the native arguments structure passed on the stack (the
274 // outgoing pointer parameter to the native arguments structure is passed in 274 // outgoing pointer parameter to the native arguments structure is passed in
275 // R0) and align frame before entering the C++ world. 275 // R0) and align frame before entering the C++ world.
276 __ ReserveAlignedFrameSpace(sizeof(NativeArguments)); 276 __ ReserveAlignedFrameSpace(sizeof(NativeArguments));
277 277
278 // Initialize NativeArguments structure and call native function. 278 // Initialize NativeArguments structure and call native function.
279 // Registers R0, R1, R2, and R3 are used. 279 // Registers R0, R1, R2, and R3 are used.
280 280
281 ASSERT(thread_offset == 0 * kWordSize); 281 ASSERT(thread_offset == 0 * kWordSize);
282 // Set thread in NativeArgs. 282 // Set thread in NativeArgs.
283 __ mov(R0, THR); 283 __ mov(R0, THR);
284 284
285 // There are no native calls to closures, so we do not need to set the tag 285 // There are no native calls to closures, so we do not need to set the tag
286 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. 286 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
287 ASSERT(argc_tag_offset == 1 * kWordSize); 287 ASSERT(argc_tag_offset == 1 * kWordSize);
288 // Set argc in NativeArguments: R1 already contains argc. 288 // Set argc in NativeArguments: R1 already contains argc.
289 289
290 ASSERT(argv_offset == 2 * kWordSize); 290 ASSERT(argv_offset == 2 * kWordSize);
291 // Set argv in NativeArguments: R2 already contains argv. 291 // Set argv in NativeArguments: R2 already contains argv.
292 292
293 // Set retval in NativeArgs. 293 // Set retval in NativeArgs.
294 ASSERT(retval_offset == 3 * kWordSize); 294 ASSERT(retval_offset == 3 * kWordSize);
295 __ AddImmediate(R3, FP, 2 * kWordSize, kNoPP); 295 __ AddImmediate(R3, FP, 2 * kWordSize);
296 296
297 // Passing the structure by value as in runtime calls would require changing 297 // Passing the structure by value as in runtime calls would require changing
298 // Dart API for native functions. 298 // Dart API for native functions.
299 // For now, space is reserved on the stack and we pass a pointer to it. 299 // For now, space is reserved on the stack and we pass a pointer to it.
300 __ StoreToOffset(R0, SP, thread_offset, kNoPP); 300 __ StoreToOffset(R0, SP, thread_offset);
301 __ StoreToOffset(R1, SP, argc_tag_offset, kNoPP); 301 __ StoreToOffset(R1, SP, argc_tag_offset);
302 __ StoreToOffset(R2, SP, argv_offset, kNoPP); 302 __ StoreToOffset(R2, SP, argv_offset);
303 __ StoreToOffset(R3, SP, retval_offset, kNoPP); 303 __ StoreToOffset(R3, SP, retval_offset);
304 __ mov(R0, SP); // Pass the pointer to the NativeArguments. 304 __ mov(R0, SP); // Pass the pointer to the NativeArguments.
305 305
306 // We are entering runtime code, so the C stack pointer must be restored from 306 // We are entering runtime code, so the C stack pointer must be restored from
307 // the stack limit to the top of the stack. We cache the stack limit address 307 // the stack limit to the top of the stack. We cache the stack limit address
308 // in a callee-saved register. 308 // in a callee-saved register.
309 __ mov(R26, CSP); 309 __ mov(R26, CSP);
310 __ mov(CSP, SP); 310 __ mov(CSP, SP);
311 311
312 // Call native function or redirection via simulator. 312 // Call native function or redirection via simulator.
313 __ blr(R5); 313 __ blr(R5);
314 314
315 // Restore SP and CSP. 315 // Restore SP and CSP.
316 __ mov(SP, CSP); 316 __ mov(SP, CSP);
317 __ mov(CSP, R26); 317 __ mov(CSP, R26);
318 318
319 // Mark that the isolate is executing Dart code. 319 // Mark that the isolate is executing Dart code.
320 __ LoadImmediate(R2, VMTag::kDartTagId, kNoPP); 320 __ LoadImmediate(R2, VMTag::kDartTagId);
321 __ StoreToOffset(R2, R28, Isolate::vm_tag_offset(), kNoPP); 321 __ StoreToOffset(R2, R28, Isolate::vm_tag_offset());
322 322
323 // Reset exit frame information in Thread structure. 323 // Reset exit frame information in Thread structure.
324 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset(), kNoPP); 324 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
325 325
326 __ LeaveStubFrame(); 326 __ LeaveStubFrame();
327 __ ret(); 327 __ ret();
328 } 328 }
329 329
330 330
331 // Input parameters: 331 // Input parameters:
332 // R4: arguments descriptor array. 332 // R4: arguments descriptor array.
333 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) { 333 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
334 // Create a stub frame as we are pushing some objects on the stack before 334 // Create a stub frame as we are pushing some objects on the stack before
335 // calling into the runtime. 335 // calling into the runtime.
336 __ EnterStubFrame(); 336 __ EnterStubFrame();
337 // Set up space on stack for return value and preserve arguments descriptor. 337 // Set up space on stack for return value and preserve arguments descriptor.
338 __ Push(R4); 338 __ Push(R4);
339 __ PushObject(Object::null_object(), PP); 339 __ PushObject(Object::null_object());
340 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); 340 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
341 // Get Code object result and restore arguments descriptor array. 341 // Get Code object result and restore arguments descriptor array.
342 __ Pop(R0); 342 __ Pop(R0);
343 __ Pop(R4); 343 __ Pop(R4);
344 // Remove the stub frame. 344 // Remove the stub frame.
345 __ LeaveStubFrame(); 345 __ LeaveStubFrame();
346 // Jump to the Dart function. 346 // Jump to the Dart function.
347 __ LoadFieldFromOffset(R0, R0, Code::instructions_offset(), kNoPP); 347 __ LoadFieldFromOffset(R0, R0, Code::instructions_offset());
348 __ AddImmediate(R0, R0, Instructions::HeaderSize() - kHeapObjectTag, kNoPP); 348 __ AddImmediate(R0, R0, Instructions::HeaderSize() - kHeapObjectTag);
349 __ br(R0); 349 __ br(R0);
350 } 350 }
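
The tail jump computes the entry point from a tagged Instructions pointer in one AddImmediate. A worked sketch; kHeapObjectTag is 1 for Dart heap pointers, while the header size below is a stand-in for Instructions::HeaderSize(), not its real value:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kHeapObjectTag = 1;  // heap pointers carry tag bit 1
  const intptr_t kHeaderSize = 32;    // stand-in for Instructions::HeaderSize()

  intptr_t untagged = 0x10000;                        // real object address
  intptr_t instructions = untagged + kHeapObjectTag;  // tagged pointer in R0

  // R0 = instructions + HeaderSize() - kHeapObjectTag: a single add both
  // strips the tag and skips the object header to the first instruction.
  intptr_t entry = instructions + kHeaderSize - kHeapObjectTag;
  assert(entry == untagged + kHeaderSize);
  return 0;
}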
351 351
352 352
353 // Called from a static call only when invalid code has been entered 353 // Called from a static call only when invalid code has been entered
354 // (invalid because its function was optimized or deoptimized). 354 // (invalid because its function was optimized or deoptimized).
355 // R4: arguments descriptor array. 355 // R4: arguments descriptor array.
356 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) { 356 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
357 // Create a stub frame as we are pushing some objects on the stack before 357 // Create a stub frame as we are pushing some objects on the stack before
358 // calling into the runtime. 358 // calling into the runtime.
359 __ EnterStubFrame(); 359 __ EnterStubFrame();
360 // Set up space on stack for return value and preserve arguments descriptor. 360 // Set up space on stack for return value and preserve arguments descriptor.
361 __ Push(R4); 361 __ Push(R4);
362 __ PushObject(Object::null_object(), PP); 362 __ PushObject(Object::null_object());
363 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); 363 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
364 // Get Code object result and restore arguments descriptor array. 364 // Get Code object result and restore arguments descriptor array.
365 __ Pop(R0); 365 __ Pop(R0);
366 __ Pop(R4); 366 __ Pop(R4);
367 // Remove the stub frame. 367 // Remove the stub frame.
368 __ LeaveStubFrame(); 368 __ LeaveStubFrame();
369 // Jump to the Dart function. 369 // Jump to the Dart function.
370 __ LoadFieldFromOffset(R0, R0, Code::instructions_offset(), kNoPP); 370 __ LoadFieldFromOffset(R0, R0, Code::instructions_offset());
371 __ AddImmediate(R0, R0, Instructions::HeaderSize() - kHeapObjectTag, kNoPP); 371 __ AddImmediate(R0, R0, Instructions::HeaderSize() - kHeapObjectTag);
372 __ br(R0); 372 __ br(R0);
373 } 373 }
374 374
375 375
376 // Called from object allocate instruction when the allocation stub has been 376 // Called from object allocate instruction when the allocation stub has been
377 // disabled. 377 // disabled.
378 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) { 378 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
379 __ EnterStubFrame(); 379 __ EnterStubFrame();
380 // Set up space on stack for return value. 380 // Set up space on stack for return value.
381 __ PushObject(Object::null_object(), PP); 381 __ PushObject(Object::null_object());
382 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); 382 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
383 // Get Code object result. 383 // Get Code object result.
384 __ Pop(R0); 384 __ Pop(R0);
385 // Remove the stub frame. 385 // Remove the stub frame.
386 __ LeaveStubFrame(); 386 __ LeaveStubFrame();
387 // Jump to the Dart function. 387 // Jump to the Dart function.
388 __ LoadFieldFromOffset(R0, R0, Code::instructions_offset(), kNoPP); 388 __ LoadFieldFromOffset(R0, R0, Code::instructions_offset());
389 __ AddImmediate(R0, R0, Instructions::HeaderSize() - kHeapObjectTag, kNoPP); 389 __ AddImmediate(R0, R0, Instructions::HeaderSize() - kHeapObjectTag);
390 __ br(R0); 390 __ br(R0);
391 } 391 }
392 392
393 393
394 // Input parameters: 394 // Input parameters:
395 // R2: smi-tagged argument count, may be zero. 395 // R2: smi-tagged argument count, may be zero.
396 // FP[kParamEndSlotFromFp + 1]: last argument. 396 // FP[kParamEndSlotFromFp + 1]: last argument.
397 static void PushArgumentsArray(Assembler* assembler) { 397 static void PushArgumentsArray(Assembler* assembler) {
398 // Allocate array to store arguments of caller. 398 // Allocate array to store arguments of caller.
399 __ LoadObject(R1, Object::null_object(), PP); 399 __ LoadObject(R1, Object::null_object());
400 // R1: null element type for raw Array. 400 // R1: null element type for raw Array.
401 // R2: smi-tagged argument count, may be zero. 401 // R2: smi-tagged argument count, may be zero.
402 const ExternalLabel array_label(StubCode::AllocateArrayEntryPoint()); 402 const ExternalLabel array_label(StubCode::AllocateArrayEntryPoint());
403 __ BranchLink(&array_label, PP); 403 __ BranchLink(&array_label);
404 // R0: newly allocated array. 404 // R0: newly allocated array.
405 // R2: smi-tagged argument count, may be zero (was preserved by the stub). 405 // R2: smi-tagged argument count, may be zero (was preserved by the stub).
406 __ Push(R0); // Array is in R0 and on top of stack. 406 __ Push(R0); // Array is in R0 and on top of stack.
407 __ add(R1, FP, Operand(R2, LSL, 2)); 407 __ add(R1, FP, Operand(R2, LSL, 2));
408 __ AddImmediate(R1, R1, kParamEndSlotFromFp * kWordSize, PP); 408 __ AddImmediate(R1, R1, kParamEndSlotFromFp * kWordSize);
409 __ AddImmediate(R3, R0, Array::data_offset() - kHeapObjectTag, PP); 409 __ AddImmediate(R3, R0, Array::data_offset() - kHeapObjectTag);
410 // R1: address of first argument on stack. 410 // R1: address of first argument on stack.
411 // R3: address of first argument in array. 411 // R3: address of first argument in array.
412 412
413 Label loop, loop_exit; 413 Label loop, loop_exit;
414 __ CompareRegisters(R2, ZR); 414 __ CompareRegisters(R2, ZR);
415 __ b(&loop_exit, LE); 415 __ b(&loop_exit, LE);
416 __ Bind(&loop); 416 __ Bind(&loop);
417 __ ldr(R7, Address(R1)); 417 __ ldr(R7, Address(R1));
418 __ AddImmediate(R1, R1, -kWordSize, PP); 418 __ AddImmediate(R1, R1, -kWordSize);
419 __ AddImmediate(R3, R3, kWordSize, PP); 419 __ AddImmediate(R3, R3, kWordSize);
420 __ AddImmediateSetFlags(R2, R2, -Smi::RawValue(1), PP); 420 __ AddImmediateSetFlags(R2, R2, -Smi::RawValue(1));
421 __ str(R7, Address(R3, -kWordSize)); 421 __ str(R7, Address(R3, -kWordSize));
422 __ b(&loop, GE); 422 __ b(&loop, GE);
423 __ Bind(&loop_exit); 423 __ Bind(&loop_exit);
424 } 424 }
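
A C-level model of the copy loop: R1 walks the stack arguments downward from the first argument (the highest address), R3 fills the array upward, and the Smi-tagged counter in R2 is decremented by Smi::RawValue(1) until it goes negative. A minimal sketch with plain arrays standing in for stack and array memory:

#include <cassert>
#include <cstdint>

int main() {
  const int argc = 3;
  // Model of the stack slice: the first Dart argument sits at the highest
  // address, so it is the last element here.
  int64_t stack[argc] = {/*arg3*/ 30, /*arg2*/ 20, /*arg1*/ 10};
  int64_t array[argc] = {0, 0, 0};

  int64_t* src = &stack[argc - 1];  // R1: address of first argument on stack
  int64_t* dst = &array[0];         // R3: address of first argument in array
  for (int remaining = argc; remaining > 0; remaining--) {
    *dst++ = *src--;  // the ldr R7,[R1] / str R7,[R3,-8] pattern above
  }

  assert(array[0] == 10 && array[1] == 20 && array[2] == 30);
  return 0;
}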
425 425
426 426
427 DECLARE_LEAF_RUNTIME_ENTRY(intptr_t, DeoptimizeCopyFrame, 427 DECLARE_LEAF_RUNTIME_ENTRY(intptr_t, DeoptimizeCopyFrame,
428 intptr_t deopt_reason, 428 intptr_t deopt_reason,
429 uword saved_registers_address); 429 uword saved_registers_address);
430 430
(...skipping 49 matching lines...)
480 __ PushQuad(vreg); 480 __ PushQuad(vreg);
481 } 481 }
482 482
483 __ mov(R0, SP); // Pass address of saved registers block. 483 __ mov(R0, SP); // Pass address of saved registers block.
484 __ ReserveAlignedFrameSpace(0); 484 __ ReserveAlignedFrameSpace(0);
485 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 1); 485 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 1);
486 // Result (R0) is stack-size (FP - SP) in bytes. 486 // Result (R0) is stack-size (FP - SP) in bytes.
487 487
488 if (preserve_result) { 488 if (preserve_result) {
489 // Restore result into R1 temporarily. 489 // Restore result into R1 temporarily.
490 __ LoadFromOffset(R1, FP, saved_result_slot_from_fp * kWordSize, kNoPP); 490 __ LoadFromOffset(R1, FP, saved_result_slot_from_fp * kWordSize);
491 } 491 }
492 492
493 // There is a Dart Frame on the stack. We must restore PP and leave frame. 493 // There is a Dart Frame on the stack. We must restore PP and leave frame.
494 __ LeaveDartFrame(); 494 __ LeaveDartFrame();
495 __ sub(SP, FP, Operand(R0)); 495 __ sub(SP, FP, Operand(R0));
496 496
497 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there 497 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
498 // is no need to set the correct PC marker or load PP, since they get patched. 498 // is no need to set the correct PC marker or load PP, since they get patched.
499 __ EnterFrame(0); 499 __ EnterFrame(0);
500 __ TagAndPushPPAndPcMarker(ZR); 500 __ TagAndPushPPAndPcMarker(ZR);
501 501
502 if (preserve_result) { 502 if (preserve_result) {
503 __ Push(R1); // Preserve result as first local. 503 __ Push(R1); // Preserve result as first local.
504 } 504 }
505 __ ReserveAlignedFrameSpace(0); 505 __ ReserveAlignedFrameSpace(0);
506 __ mov(R0, FP); // Pass last FP as parameter in R0. 506 __ mov(R0, FP); // Pass last FP as parameter in R0.
507 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); 507 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
508 if (preserve_result) { 508 if (preserve_result) {
509 // Restore result into R1. 509 // Restore result into R1.
510 __ LoadFromOffset(R1, FP, kFirstLocalSlotFromFp * kWordSize, kNoPP); 510 __ LoadFromOffset(R1, FP, kFirstLocalSlotFromFp * kWordSize);
511 } 511 }
512 // Code above cannot cause GC. 512 // Code above cannot cause GC.
513 // There is a Dart Frame on the stack. We must restore PP and leave frame. 513 // There is a Dart Frame on the stack. We must restore PP and leave frame.
514 __ LeaveDartFrame(); 514 __ LeaveDartFrame();
515 515
516 // Frame is fully rewritten at this point and it is safe to perform a GC. 516 // Frame is fully rewritten at this point and it is safe to perform a GC.
517 // Materialize any objects that were deferred by FillFrame because they 517 // Materialize any objects that were deferred by FillFrame because they
518 // require allocation. 518 // require allocation.
519 // Enter stub frame, loading PP. The caller's PP is not materialized yet. 519 // Enter stub frame, loading PP. The caller's PP is not materialized yet.
520 __ EnterStubFrame(); 520 __ EnterStubFrame();
(...skipping 14 matching lines...)
535 __ LeaveStubFrame(); 535 __ LeaveStubFrame();
536 // Remove materialization arguments. 536 // Remove materialization arguments.
537 __ add(SP, SP, Operand(R1)); 537 __ add(SP, SP, Operand(R1));
538 __ ret(); 538 __ ret();
539 } 539 }
540 540
541 541
542 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) { 542 void StubCode::GenerateDeoptimizeLazyStub(Assembler* assembler) {
543 // Correct return address to point just after the call that is being 543 // Correct return address to point just after the call that is being
544 // deoptimized. 544 // deoptimized.
545 __ AddImmediate(LR, LR, -CallPattern::kLengthInBytes, kNoPP); 545 __ AddImmediate(LR, LR, -CallPattern::kLengthInBytes);
546 GenerateDeoptimizationSequence(assembler, true); // Preserve R0. 546 GenerateDeoptimizationSequence(assembler, true); // Preserve R0.
547 } 547 }
548 548
549 549
550 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { 550 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
551 GenerateDeoptimizationSequence(assembler, false); // Don't preserve R0. 551 GenerateDeoptimizationSequence(assembler, false); // Don't preserve R0.
552 } 552 }
553 553
554 554
555 static void GenerateDispatcherCode(Assembler* assembler, 555 static void GenerateDispatcherCode(Assembler* assembler,
556 Label* call_target_function) { 556 Label* call_target_function) {
557 __ Comment("NoSuchMethodDispatch"); 557 __ Comment("NoSuchMethodDispatch");
558 // When lazily generated invocation dispatchers are disabled, the 558 // When lazily generated invocation dispatchers are disabled, the
559 // miss-handler may return null. 559 // miss-handler may return null.
560 __ CompareObject(R0, Object::null_object(), PP); 560 __ CompareObject(R0, Object::null_object());
561 __ b(call_target_function, NE); 561 __ b(call_target_function, NE);
562 __ EnterStubFrame(); 562 __ EnterStubFrame();
563 563
564 // Load the receiver. 564 // Load the receiver.
565 __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset(), kNoPP); 565 __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset());
566 __ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi. 566 __ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi.
567 __ LoadFromOffset(R6, TMP, kParamEndSlotFromFp * kWordSize, kNoPP); 567 __ LoadFromOffset(R6, TMP, kParamEndSlotFromFp * kWordSize);
568 __ PushObject(Object::null_object(), PP); 568 __ PushObject(Object::null_object());
569 __ Push(R6); 569 __ Push(R6);
570 __ Push(R5); 570 __ Push(R5);
571 __ Push(R4); 571 __ Push(R4);
572 // R2: Smi-tagged arguments array length. 572 // R2: Smi-tagged arguments array length.
573 PushArgumentsArray(assembler); 573 PushArgumentsArray(assembler);
574 const intptr_t kNumArgs = 4; 574 const intptr_t kNumArgs = 4;
575 __ CallRuntime(kInvokeNoSuchMethodDispatcherRuntimeEntry, kNumArgs); 575 __ CallRuntime(kInvokeNoSuchMethodDispatcherRuntimeEntry, kNumArgs);
576 __ Drop(4); 576 __ Drop(4);
577 __ Pop(R0); // Return value. 577 __ Pop(R0); // Return value.
578 __ LeaveStubFrame(); 578 __ LeaveStubFrame();
579 __ ret(); 579 __ ret();
580 } 580 }
581 581
582 582
583 void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) { 583 void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
584 __ EnterStubFrame(); 584 __ EnterStubFrame();
585 585
586 // Load the receiver. 586 // Load the receiver.
587 __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset(), kNoPP); 587 __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset());
588 __ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi. 588 __ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi.
589 __ LoadFromOffset(R6, TMP, kParamEndSlotFromFp * kWordSize, kNoPP); 589 __ LoadFromOffset(R6, TMP, kParamEndSlotFromFp * kWordSize);
590 590
591 // Preserve IC data and arguments descriptor. 591 // Preserve IC data and arguments descriptor.
592 __ Push(R5); 592 __ Push(R5);
593 __ Push(R4); 593 __ Push(R4);
594 594
595 // Push space for the return value. 595 // Push space for the return value.
596 // Push the receiver. 596 // Push the receiver.
597 // Push IC data object. 597 // Push IC data object.
598 // Push arguments descriptor array. 598 // Push arguments descriptor array.
599 __ PushObject(Object::null_object(), PP); 599 __ PushObject(Object::null_object());
600 __ Push(R6); 600 __ Push(R6);
601 __ Push(R5); 601 __ Push(R5);
602 __ Push(R4); 602 __ Push(R4);
603 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3); 603 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3);
604 // Remove arguments. 604 // Remove arguments.
605 __ Drop(3); 605 __ Drop(3);
606 __ Pop(R0); // Get result into R0 (target function). 606 __ Pop(R0); // Get result into R0 (target function).
607 607
608 // Restore IC data and arguments descriptor. 608 // Restore IC data and arguments descriptor.
609 __ Pop(R4); 609 __ Pop(R4);
610 __ Pop(R5); 610 __ Pop(R5);
611 611
612 __ LeaveStubFrame(); 612 __ LeaveStubFrame();
613 613
614 if (!FLAG_lazy_dispatchers) { 614 if (!FLAG_lazy_dispatchers) {
615 Label call_target_function; 615 Label call_target_function;
616 GenerateDispatcherCode(assembler, &call_target_function); 616 GenerateDispatcherCode(assembler, &call_target_function);
617 __ Bind(&call_target_function); 617 __ Bind(&call_target_function);
618 } 618 }
619 619
620 // Tail-call to target function. 620 // Tail-call to target function.
621 __ LoadFieldFromOffset(R2, R0, Function::instructions_offset(), kNoPP); 621 __ LoadFieldFromOffset(R2, R0, Function::instructions_offset());
622 __ AddImmediate(R2, R2, Instructions::HeaderSize() - kHeapObjectTag, PP); 622 __ AddImmediate(R2, R2, Instructions::HeaderSize() - kHeapObjectTag);
623 __ br(R2); 623 __ br(R2);
624 } 624 }
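
Both this stub and the dispatcher above locate the receiver with the same two instructions: shift the Smi-tagged count left by 2 (a Smi is the value shifted left by 1, so LSL 2 yields count * 8) and add the parameter-end offset. A worked sketch; the kParamEndSlotFromFp value is assumed for illustration:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kWordSize = 8;
  const intptr_t kParamEndSlotFromFp = 1;  // assumed value, for illustration

  intptr_t fp = 0x7000;
  intptr_t argc = 3;
  intptr_t smi_argc = argc << 1;  // Smi tagging: value << kSmiTagShift (== 1)

  // add(TMP, FP, Operand(R2, LSL, 2)), then
  // LoadFromOffset(R6, TMP, kParamEndSlotFromFp * kWordSize):
  intptr_t receiver_slot =
      fp + (smi_argc << 2) + kParamEndSlotFromFp * kWordSize;

  // The receiver is the first argument, argc slots above the param-end slot.
  assert(receiver_slot == fp + (kParamEndSlotFromFp + argc) * kWordSize);
  return 0;
}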
625 625
626 626
627 // Called for inline allocation of arrays. 627 // Called for inline allocation of arrays.
628 // Input parameters: 628 // Input parameters:
629 // LR: return address. 629 // LR: return address.
630 // R2: array length as Smi. 630 // R2: array length as Smi.
631 // R1: array element type (either NULL or an instantiated type). 631 // R1: array element type (either NULL or an instantiated type).
632 // NOTE: R2 cannot be clobbered here as the caller relies on it being saved. 632 // NOTE: R2 cannot be clobbered here as the caller relies on it being saved.
633 // The newly allocated object is returned in R0. 633 // The newly allocated object is returned in R0.
634 void StubCode::GenerateAllocateArrayStub(Assembler* assembler) { 634 void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
635 Label slow_case; 635 Label slow_case;
636 // Compute the size to be allocated; it is based on the array length 636 // Compute the size to be allocated; it is based on the array length
637 // and is computed as: 637 // and is computed as:
638 // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)). 638 // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)).
639 // Assert that length is a Smi. 639 // Assert that length is a Smi.
640 __ tsti(R2, Immediate(kSmiTagMask)); 640 __ tsti(R2, Immediate(kSmiTagMask));
641 if (FLAG_use_slow_path) { 641 if (FLAG_use_slow_path) {
642 __ b(&slow_case); 642 __ b(&slow_case);
643 } else { 643 } else {
644 __ b(&slow_case, NE); 644 __ b(&slow_case, NE);
645 } 645 }
646 __ cmp(R2, Operand(0)); 646 __ cmp(R2, Operand(0));
647 __ b(&slow_case, LT); 647 __ b(&slow_case, LT);
648 648
649 // Check for maximum allowed length. 649 // Check for maximum allowed length.
650 const intptr_t max_len = 650 const intptr_t max_len =
651 reinterpret_cast<intptr_t>(Smi::New(Array::kMaxElements)); 651 reinterpret_cast<intptr_t>(Smi::New(Array::kMaxElements));
652 __ CompareImmediate(R2, max_len, kNoPP); 652 __ CompareImmediate(R2, max_len);
653 __ b(&slow_case, GT); 653 __ b(&slow_case, GT);
654 654
655 const intptr_t cid = kArrayCid; 655 const intptr_t cid = kArrayCid;
656 __ MaybeTraceAllocation(kArrayCid, R4, kNoPP, &slow_case, 656 __ MaybeTraceAllocation(kArrayCid, R4, &slow_case,
657 /* inline_isolate = */ false); 657 /* inline_isolate = */ false);
658 658
659 Heap::Space space = Heap::SpaceForAllocation(cid); 659 Heap::Space space = Heap::SpaceForAllocation(cid);
660 __ LoadIsolate(R8); 660 __ LoadIsolate(R8);
661 __ ldr(R8, Address(R8, Isolate::heap_offset())); 661 __ ldr(R8, Address(R8, Isolate::heap_offset()));
662 662
663 // Calculate and align allocation size. 663 // Calculate and align allocation size.
664 // Load new object start and calculate next object start. 664 // Load new object start and calculate next object start.
665 // R1: array element type. 665 // R1: array element type.
666 // R2: array length as Smi. 666 // R2: array length as Smi.
667 // R8: heap. 667 // R8: heap.
668 __ LoadFromOffset(R0, R8, Heap::TopOffset(space), kNoPP); 668 __ LoadFromOffset(R0, R8, Heap::TopOffset(space));
669 intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1; 669 intptr_t fixed_size = sizeof(RawArray) + kObjectAlignment - 1;
670 __ LoadImmediate(R3, fixed_size, kNoPP); 670 __ LoadImmediate(R3, fixed_size);
671 __ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi. 671 __ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi.
672 ASSERT(kSmiTagShift == 1); 672 ASSERT(kSmiTagShift == 1);
673 __ andi(R3, R3, Immediate(~(kObjectAlignment - 1))); 673 __ andi(R3, R3, Immediate(~(kObjectAlignment - 1)));
674 // R0: potential new object start. 674 // R0: potential new object start.
675 // R3: object size in bytes. 675 // R3: object size in bytes.
676 __ adds(R7, R3, Operand(R0)); 676 __ adds(R7, R3, Operand(R0));
677 __ b(&slow_case, CS); // Branch if unsigned overflow. 677 __ b(&slow_case, CS); // Branch if unsigned overflow.
678 678
679 // Check if the allocation fits into the remaining space. 679 // Check if the allocation fits into the remaining space.
680 // R0: potential new object start. 680 // R0: potential new object start.
681 // R1: array element type. 681 // R1: array element type.
682 // R2: array length as Smi. 682 // R2: array length as Smi.
683 // R3: array size. 683 // R3: array size.
684 // R7: potential next object start. 684 // R7: potential next object start.
685 // R8: heap. 685 // R8: heap.
686 __ LoadFromOffset(TMP, R8, Heap::EndOffset(space), kNoPP); 686 __ LoadFromOffset(TMP, R8, Heap::EndOffset(space));
687 __ CompareRegisters(R7, TMP); 687 __ CompareRegisters(R7, TMP);
688 __ b(&slow_case, CS); // Branch if unsigned higher or equal. 688 __ b(&slow_case, CS); // Branch if unsigned higher or equal.
689 689
690 // Successfully allocated the object(s), now update top to point to 690 // Successfully allocated the object(s), now update top to point to
691 // next object start and initialize the object. 691 // next object start and initialize the object.
692 // R0: potential new object start. 692 // R0: potential new object start.
693 // R3: array size. 693 // R3: array size.
694 // R7: potential next object start. 694 // R7: potential next object start.
695 // R8: heap. 695 // R8: heap.
696 __ StoreToOffset(R7, R8, Heap::TopOffset(space), kNoPP); 696 __ StoreToOffset(R7, R8, Heap::TopOffset(space));
697 __ add(R0, R0, Operand(kHeapObjectTag)); 697 __ add(R0, R0, Operand(kHeapObjectTag));
698 __ UpdateAllocationStatsWithSize(cid, R3, kNoPP, space, 698 __ UpdateAllocationStatsWithSize(cid, R3, space,
699 /* inline_isolate = */ false); 699 /* inline_isolate = */ false);
700 700
701 // R0: new object start as a tagged pointer. 701 // R0: new object start as a tagged pointer.
702 // R1: array element type. 702 // R1: array element type.
703 // R2: array length as Smi. 703 // R2: array length as Smi.
704 // R3: array size. 704 // R3: array size.
705 // R7: new object end address. 705 // R7: new object end address.
706 706
707 // Store the type argument field. 707 // Store the type argument field.
708 __ StoreIntoObjectOffsetNoBarrier( 708 __ StoreIntoObjectOffsetNoBarrier(
709 R0, Array::type_arguments_offset(), R1, PP); 709 R0, Array::type_arguments_offset(), R1);
710 710
711 // Set the length field. 711 // Set the length field.
712 __ StoreIntoObjectOffsetNoBarrier(R0, Array::length_offset(), R2, PP); 712 __ StoreIntoObjectOffsetNoBarrier(R0, Array::length_offset(), R2);
713 713
714 // Calculate the size tag. 714 // Calculate the size tag.
715 // R0: new object start as a tagged pointer. 715 // R0: new object start as a tagged pointer.
716 // R2: array length as Smi. 716 // R2: array length as Smi.
717 // R3: array size. 717 // R3: array size.
718 // R7: new object end address. 718 // R7: new object end address.
719 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; 719 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
720 __ CompareImmediate(R3, RawObject::SizeTag::kMaxSizeTag, kNoPP); 720 __ CompareImmediate(R3, RawObject::SizeTag::kMaxSizeTag);
721 // If no size tag overflow, set R1 to the shifted size, else to zero. 721 // If no size tag overflow, set R1 to the shifted size, else to zero.
722 __ LslImmediate(TMP, R3, shift); 722 __ LslImmediate(TMP, R3, shift);
723 __ csel(R1, TMP, R1, LS); 723 __ csel(R1, TMP, R1, LS);
724 __ csel(R1, ZR, R1, HI); 724 __ csel(R1, ZR, R1, HI);
725 725
726 // Get the class index and insert it into the tags. 726 // Get the class index and insert it into the tags.
727 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid), kNoPP); 727 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
728 __ orr(R1, R1, Operand(TMP)); 728 __ orr(R1, R1, Operand(TMP));
729 __ StoreFieldToOffset(R1, R0, Array::tags_offset(), kNoPP); 729 __ StoreFieldToOffset(R1, R0, Array::tags_offset());
730 730
731 // Initialize all array elements to raw_null. 731 // Initialize all array elements to raw_null.
732 // R0: new object start as a tagged pointer. 732 // R0: new object start as a tagged pointer.
733 // R7: new object end address. 733 // R7: new object end address.
734 // R2: array length as Smi. 734 // R2: array length as Smi.
735 __ AddImmediate(R1, R0, Array::data_offset() - kHeapObjectTag, kNoPP); 735 __ AddImmediate(R1, R0, Array::data_offset() - kHeapObjectTag);
736 // R1: iterator which initially points to the start of the variable 736 // R1: iterator which initially points to the start of the variable
737 // data area to be initialized. 737 // data area to be initialized.
738 __ LoadObject(TMP, Object::null_object(), PP); 738 __ LoadObject(TMP, Object::null_object());
739 Label loop, done; 739 Label loop, done;
740 __ Bind(&loop); 740 __ Bind(&loop);
741 // TODO(cshapiro): StoreIntoObjectNoBarrier 741 // TODO(cshapiro): StoreIntoObjectNoBarrier
742 __ CompareRegisters(R1, R7); 742 __ CompareRegisters(R1, R7);
743 __ b(&done, CS); 743 __ b(&done, CS);
744 __ str(TMP, Address(R1)); // Store if unsigned lower. 744 __ str(TMP, Address(R1)); // Store if unsigned lower.
745 __ AddImmediate(R1, R1, kWordSize, kNoPP); 745 __ AddImmediate(R1, R1, kWordSize);
746 __ b(&loop); // Loop until R1 == R7. 746 __ b(&loop); // Loop until R1 == R7.
747 __ Bind(&done); 747 __ Bind(&done);
748 748
749 // Done allocating and initializing the array. 749 // Done allocating and initializing the array.
750 // R0: new object. 750 // R0: new object.
751 // R2: array length as Smi (preserved for the caller.) 751 // R2: array length as Smi (preserved for the caller.)
752 __ ret(); 752 __ ret();
753 753
754 // Unable to allocate the array using the fast inline code, just call 754 // Unable to allocate the array using the fast inline code, just call
755 // into the runtime. 755 // into the runtime.
756 __ Bind(&slow_case); 756 __ Bind(&slow_case);
757 // Create a stub frame as we are pushing some objects on the stack before 757 // Create a stub frame as we are pushing some objects on the stack before
758 // calling into the runtime. 758 // calling into the runtime.
759 __ EnterStubFrame(); 759 __ EnterStubFrame();
760 // Set up space on stack for return value. 760 // Set up space on stack for return value.
761 // Push array length as Smi and element type. 761 // Push array length as Smi and element type.
762 __ PushObject(Object::null_object(), PP); 762 __ PushObject(Object::null_object());
763 __ Push(R2); 763 __ Push(R2);
764 __ Push(R1); 764 __ Push(R1);
765 __ CallRuntime(kAllocateArrayRuntimeEntry, 2); 765 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
766 // Pop arguments; result is popped into R0. 766 // Pop arguments; result is popped into R0.
767 __ Pop(R1); 767 __ Pop(R1);
768 __ Pop(R2); 768 __ Pop(R2);
769 __ Pop(R0); 769 __ Pop(R0);
770 __ LeaveStubFrame(); 770 __ LeaveStubFrame();
771 __ ret(); 771 __ ret();
772 } 772 }
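
A worked model of the fast-path size computation above: kObjectAlignment - 1 is folded into fixed_size up front so a single mask rounds the total up, and shifting the Smi-tagged length left by 2 multiplies the untagged length by kWordSize. The constants are assumed for illustration (kWordSize = 8, kObjectAlignment = 16, a stand-in for sizeof(RawArray)):

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kWordSize = 8;
  const intptr_t kObjectAlignment = 16;  // assumed, 2 * kWordSize
  const intptr_t kRawArraySize = 24;     // stand-in for sizeof(RawArray)

  intptr_t length = 5;
  intptr_t smi_length = length << 1;  // R2 arrives Smi-tagged

  // LoadImmediate(R3, fixed_size) then add(R3, R3, Operand(R2, LSL, 2)):
  intptr_t size = (kRawArraySize + kObjectAlignment - 1) + (smi_length << 2);
  // andi(R3, R3, ~(kObjectAlignment - 1)) rounds down; combined with the
  // "+ kObjectAlignment - 1" above this rounds the true size up.
  size &= ~(kObjectAlignment - 1);

  intptr_t true_size = kRawArraySize + length * kWordSize;
  assert(size ==
         ((true_size + kObjectAlignment - 1) & ~(kObjectAlignment - 1)));
  return 0;
}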
(...skipping 25 matching lines...)
798 798
799 // Save the bottom 64-bits of callee-saved V registers. 799 // Save the bottom 64-bits of callee-saved V registers.
800 for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) { 800 for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) {
801 const VRegister r = static_cast<VRegister>(i); 801 const VRegister r = static_cast<VRegister>(i);
802 __ PushDouble(r); 802 __ PushDouble(r);
803 } 803 }
804 804
805 // We now load the pool pointer (PP) as we are about to invoke Dart code 805 // We now load the pool pointer (PP) as we are about to invoke Dart code
806 // and could potentially invoke intrinsic functions that need PP to be 806 // and could potentially invoke intrinsic functions that need PP to be
807 // set up. 807 // set up.
808 __ LoadPoolPointer(PP); 808 __ LoadPoolPointer();
809 809
810 // Set up THR, which caches the current thread in Dart code. 810 // Set up THR, which caches the current thread in Dart code.
811 if (THR != R3) { 811 if (THR != R3) {
812 __ mov(THR, R3); 812 __ mov(THR, R3);
813 } 813 }
814 // Load Isolate pointer into temporary register R5. 814 // Load Isolate pointer into temporary register R5.
815 __ LoadIsolate(R5); 815 __ LoadIsolate(R5);
816 816
817 // Save the current VMTag on the stack. 817 // Save the current VMTag on the stack.
818 __ LoadFromOffset(R4, R5, Isolate::vm_tag_offset(), PP); 818 __ LoadFromOffset(R4, R5, Isolate::vm_tag_offset());
819 __ Push(R4); 819 __ Push(R4);
820 820
821 // Mark that the isolate is executing Dart code. 821 // Mark that the isolate is executing Dart code.
822 __ LoadImmediate(R6, VMTag::kDartTagId, PP); 822 __ LoadImmediate(R6, VMTag::kDartTagId);
823 __ StoreToOffset(R6, R5, Isolate::vm_tag_offset(), PP); 823 __ StoreToOffset(R6, R5, Isolate::vm_tag_offset());
824 824
825 // Save top resource and top exit frame info. Use R6 as a temporary register. 825 // Save top resource and top exit frame info. Use R6 as a temporary register.
826 // StackFrameIterator reads the top exit frame info saved in this frame. 826 // StackFrameIterator reads the top exit frame info saved in this frame.
827 __ LoadFromOffset(R6, THR, Thread::top_resource_offset(), PP); 827 __ LoadFromOffset(R6, THR, Thread::top_resource_offset());
828 __ StoreToOffset(ZR, THR, Thread::top_resource_offset(), PP); 828 __ StoreToOffset(ZR, THR, Thread::top_resource_offset());
829 __ Push(R6); 829 __ Push(R6);
830 __ LoadFromOffset(R6, THR, Thread::top_exit_frame_info_offset(), PP); 830 __ LoadFromOffset(R6, THR, Thread::top_exit_frame_info_offset());
831 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset(), PP); 831 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
832 // kExitLinkSlotFromEntryFp must be kept in sync with the code below. 832 // kExitLinkSlotFromEntryFp must be kept in sync with the code below.
833 ASSERT(kExitLinkSlotFromEntryFp == -21); 833 ASSERT(kExitLinkSlotFromEntryFp == -21);
834 __ Push(R6); 834 __ Push(R6);
835 835
836 // Load arguments descriptor array into R4, which is passed to Dart code. 836 // Load arguments descriptor array into R4, which is passed to Dart code.
837 __ LoadFromOffset(R4, R1, VMHandles::kOffsetOfRawPtrInHandle, PP); 837 __ LoadFromOffset(R4, R1, VMHandles::kOffsetOfRawPtrInHandle);
838 838
839 // Load number of arguments into R5. 839 // Load number of arguments into R5.
840 __ LoadFieldFromOffset(R5, R4, ArgumentsDescriptor::count_offset(), PP); 840 __ LoadFieldFromOffset(R5, R4, ArgumentsDescriptor::count_offset());
841 __ SmiUntag(R5); 841 __ SmiUntag(R5);
842 842
843 // Compute address of 'arguments array' data area into R2. 843 // Compute address of 'arguments array' data area into R2.
844 __ LoadFromOffset(R2, R2, VMHandles::kOffsetOfRawPtrInHandle, PP); 844 __ LoadFromOffset(R2, R2, VMHandles::kOffsetOfRawPtrInHandle);
845 __ AddImmediate(R2, R2, Array::data_offset() - kHeapObjectTag, PP); 845 __ AddImmediate(R2, R2, Array::data_offset() - kHeapObjectTag);
846 846
847 // Set up arguments for the Dart call. 847 // Set up arguments for the Dart call.
848 Label push_arguments; 848 Label push_arguments;
849 Label done_push_arguments; 849 Label done_push_arguments;
850 __ cmp(R5, Operand(0)); 850 __ cmp(R5, Operand(0));
851 __ b(&done_push_arguments, EQ); // check if there are arguments. 851 __ b(&done_push_arguments, EQ); // check if there are arguments.
852 __ LoadImmediate(R1, 0, PP); 852 __ LoadImmediate(R1, 0);
853 __ Bind(&push_arguments); 853 __ Bind(&push_arguments);
854 __ ldr(R3, Address(R2)); 854 __ ldr(R3, Address(R2));
855 __ Push(R3); 855 __ Push(R3);
856 __ add(R1, R1, Operand(1)); 856 __ add(R1, R1, Operand(1));
857 __ add(R2, R2, Operand(kWordSize)); 857 __ add(R2, R2, Operand(kWordSize));
858 __ cmp(R1, Operand(R5)); 858 __ cmp(R1, Operand(R5));
859 __ b(&push_arguments, LT); 859 __ b(&push_arguments, LT);
860 __ Bind(&done_push_arguments); 860 __ Bind(&done_push_arguments);
861 861
862 // Call the Dart code entrypoint. 862 // Call the Dart code entrypoint.
863 __ blr(R0); // R4 is the arguments descriptor array. 863 __ blr(R0); // R4 is the arguments descriptor array.
864 __ Comment("InvokeDartCodeStub return"); 864 __ Comment("InvokeDartCodeStub return");
865 865
866 // Restore constant pool pointer after return. 866 // Restore constant pool pointer after return.
867 __ LoadPoolPointer(PP); 867 __ LoadPoolPointer();
868 868
869 // Get rid of arguments pushed on the stack. 869 // Get rid of arguments pushed on the stack.
870 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize, PP); 870 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);
871 871
872 __ LoadIsolate(R28); 872 __ LoadIsolate(R28);
873 873
874 // Restore the saved top exit frame info and top resource back into the 874 // Restore the saved top exit frame info and top resource back into the
875 // Thread structure. Uses R6 as a temporary register for this. 875 // Thread structure. Uses R6 as a temporary register for this.
876 __ Pop(R6); 876 __ Pop(R6);
877 __ StoreToOffset(R6, THR, Thread::top_exit_frame_info_offset(), PP); 877 __ StoreToOffset(R6, THR, Thread::top_exit_frame_info_offset());
878 __ Pop(R6); 878 __ Pop(R6);
879 __ StoreToOffset(R6, THR, Thread::top_resource_offset(), PP); 879 __ StoreToOffset(R6, THR, Thread::top_resource_offset());
880 880
881 // Restore the current VMTag from the stack. 881 // Restore the current VMTag from the stack.
882 __ Pop(R4); 882 __ Pop(R4);
883 __ StoreToOffset(R4, R28, Isolate::vm_tag_offset(), PP); 883 __ StoreToOffset(R4, R28, Isolate::vm_tag_offset());
884 884
885 // Restore the bottom 64-bits of callee-saved V registers. 885 // Restore the bottom 64-bits of callee-saved V registers.
886 for (int i = kAbiLastPreservedFpuReg; i >= kAbiFirstPreservedFpuReg; i--) { 886 for (int i = kAbiLastPreservedFpuReg; i >= kAbiFirstPreservedFpuReg; i--) {
887 const VRegister r = static_cast<VRegister>(i); 887 const VRegister r = static_cast<VRegister>(i);
888 __ PopDouble(r); 888 __ PopDouble(r);
889 } 889 }
890 890
891 // Restore C++ ABI callee-saved registers. 891 // Restore C++ ABI callee-saved registers.
892 for (int i = kAbiLastPreservedCpuReg; i >= kAbiFirstPreservedCpuReg; i--) { 892 for (int i = kAbiLastPreservedCpuReg; i >= kAbiFirstPreservedCpuReg; i--) {
893 Register r = static_cast<Register>(i); 893 Register r = static_cast<Register>(i);
894 // We use ldr instead of the Pop macro because we will be popping the PP 894 // We use ldr instead of the Pop macro because we will be popping the PP
895 // register when it is not holding a pool-pointer since we are returning to 895 // register when it is not holding a pool-pointer since we are returning to
896 // C++ code. We also skip the dart stack pointer SP, since we are still 896 // C++ code. We also skip the dart stack pointer SP, since we are still
897 // using it as the stack pointer. 897 // using it as the stack pointer.
898 __ ldr(r, Address(SP, 1 * kWordSize, Address::PostIndex)); 898 __ ldr(r, Address(SP, 1 * kWordSize, Address::PostIndex));
899 } 899 }
900 __ set_constant_pool_allowed(false);
900 901
901 // Restore the frame pointer and C stack pointer and return. 902 // Restore the frame pointer and C stack pointer and return.
902 __ LeaveFrame(); 903 __ LeaveFrame();
903 __ mov(CSP, SP); 904 __ mov(CSP, SP);
904 __ ret(); 905 __ ret();
905 } 906 }
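
Note: the push_arguments loop above is just a counted copy of the Dart arguments array onto the stack before the `blr R0`. A minimal C++ sketch of the same control flow, with a hypothetical `args`/`argc` pair standing in for R2/R5 and a vector standing in for the descending stack:

  #include <cstdint>
  #include <vector>

  // Sketch only: mirrors push_arguments/done_push_arguments. R1 counts up
  // from 0, R2 walks the array one word at a time, and each element is
  // pushed before the stub branches to the Dart entrypoint.
  void PushDartArguments(const intptr_t* args, intptr_t argc,
                         std::vector<intptr_t>* stack) {
    for (intptr_t i = 0; i < argc; i++) {  // cmp R1, R5; b(&push_arguments, LT)
      stack->push_back(args[i]);           // ldr R3, [R2]; Push(R3)
    }
  }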
906 907
907 908
908 // Called for inline allocation of contexts. 909 // Called for inline allocation of contexts.
909 // Input: 910 // Input:
910 // R1: number of context variables. 911 // R1: number of context variables.
911 // Output: 912 // Output:
912 // R0: new allocated RawContext object. 913 // R0: new allocated RawContext object.
913 void StubCode::GenerateAllocateContextStub(Assembler* assembler) { 914 void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
914 if (FLAG_inline_alloc) { 915 if (FLAG_inline_alloc) {
915 Label slow_case; 916 Label slow_case;
916 // First compute the rounded instance size. 917 // First compute the rounded instance size.
917 // R1: number of context variables. 918 // R1: number of context variables.
918 intptr_t fixed_size = sizeof(RawContext) + kObjectAlignment - 1; 919 intptr_t fixed_size = sizeof(RawContext) + kObjectAlignment - 1;
919 __ LoadImmediate(R2, fixed_size, kNoPP); 920 __ LoadImmediate(R2, fixed_size);
920 __ add(R2, R2, Operand(R1, LSL, 3)); 921 __ add(R2, R2, Operand(R1, LSL, 3));
921 ASSERT(kSmiTagShift == 1); 922 ASSERT(kSmiTagShift == 1);
922 __ andi(R2, R2, Immediate(~(kObjectAlignment - 1))); 923 __ andi(R2, R2, Immediate(~(kObjectAlignment - 1)));
923 924
924 // Now allocate the object. 925 // Now allocate the object.
925 // R1: number of context variables. 926 // R1: number of context variables.
926 // R2: object size. 927 // R2: object size.
927 const intptr_t cid = kContextCid; 928 const intptr_t cid = kContextCid;
928 Heap::Space space = Heap::SpaceForAllocation(cid); 929 Heap::Space space = Heap::SpaceForAllocation(cid);
929 __ LoadIsolate(R5); 930 __ LoadIsolate(R5);
(...skipping 16 matching lines...)
946 947
947 // Successfully allocated the object, now update top to point to 948 // Successfully allocated the object, now update top to point to
948 // next object start and initialize the object. 949 // next object start and initialize the object.
949 // R0: new object. 950 // R0: new object.
950 // R1: number of context variables. 951 // R1: number of context variables.
951 // R2: object size. 952 // R2: object size.
952 // R3: next object start. 953 // R3: next object start.
953 // R5: heap. 954 // R5: heap.
954 __ str(R3, Address(R5, Heap::TopOffset(space))); 955 __ str(R3, Address(R5, Heap::TopOffset(space)));
955 __ add(R0, R0, Operand(kHeapObjectTag)); 956 __ add(R0, R0, Operand(kHeapObjectTag));
956 __ UpdateAllocationStatsWithSize(cid, R2, kNoPP, space, 957 __ UpdateAllocationStatsWithSize(cid, R2, space,
957 /* inline_isolate = */ false); 958 /* inline_isolate = */ false);
958 959
959 // Calculate the size tag. 960 // Calculate the size tag.
960 // R0: new object. 961 // R0: new object.
961 // R1: number of context variables. 962 // R1: number of context variables.
962 // R2: object size. 963 // R2: object size.
963 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; 964 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
964 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag, kNoPP); 965 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag);
965 // If no size tag overflow, shift R2 left, else set R2 to zero. 966 // If no size tag overflow, shift R2 left, else set R2 to zero.
966 __ LslImmediate(TMP, R2, shift); 967 __ LslImmediate(TMP, R2, shift);
967 __ csel(R2, TMP, R2, LS); 968 __ csel(R2, TMP, R2, LS);
968 __ csel(R2, ZR, R2, HI); 969 __ csel(R2, ZR, R2, HI);
969 970
970 // Get the class index and insert it into the tags. 971 // Get the class index and insert it into the tags.
971 // R2: size and bit tags. 972 // R2: size and bit tags.
972 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid), kNoPP); 973 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
973 __ orr(R2, R2, Operand(TMP)); 974 __ orr(R2, R2, Operand(TMP));
974 __ StoreFieldToOffset(R2, R0, Context::tags_offset(), kNoPP); 975 __ StoreFieldToOffset(R2, R0, Context::tags_offset());
975 976
976 // Set up the number of context variables field. 977 // Set up the number of context variables field.
977 // R0: new object. 978 // R0: new object.
978 // R1: number of context variables as integer value (not object). 979 // R1: number of context variables as integer value (not object).
979 __ StoreFieldToOffset(R1, R0, Context::num_variables_offset(), kNoPP); 980 __ StoreFieldToOffset(R1, R0, Context::num_variables_offset());
980 981
981 // Setup the parent field. 982 // Setup the parent field.
982 // R0: new object. 983 // R0: new object.
983 // R1: number of context variables. 984 // R1: number of context variables.
984 __ LoadObject(R2, Object::null_object(), PP); 985 __ LoadObject(R2, Object::null_object());
985 __ StoreFieldToOffset(R2, R0, Context::parent_offset(), kNoPP); 986 __ StoreFieldToOffset(R2, R0, Context::parent_offset());
986 987
987 // Initialize the context variables. 988 // Initialize the context variables.
988 // R0: new object. 989 // R0: new object.
989 // R1: number of context variables. 990 // R1: number of context variables.
990 // R2: raw null. 991 // R2: raw null.
991 Label loop, done; 992 Label loop, done;
992 __ AddImmediate( 993 __ AddImmediate(
993 R3, R0, Context::variable_offset(0) - kHeapObjectTag, kNoPP); 994 R3, R0, Context::variable_offset(0) - kHeapObjectTag);
994 __ Bind(&loop); 995 __ Bind(&loop);
995 __ subs(R1, R1, Operand(1)); 996 __ subs(R1, R1, Operand(1));
996 __ b(&done, MI); 997 __ b(&done, MI);
997 __ str(R2, Address(R3, R1, UXTX, Address::Scaled)); 998 __ str(R2, Address(R3, R1, UXTX, Address::Scaled));
998 __ b(&loop, NE); // Loop if R1 not zero. 999 __ b(&loop, NE); // Loop if R1 not zero.
999 __ Bind(&done); 1000 __ Bind(&done);
1000 1001
1001 // Done allocating and initializing the context. 1002 // Done allocating and initializing the context.
1002 // R0: new object. 1003 // R0: new object.
1003 __ ret(); 1004 __ ret();
1004 1005
1005 __ Bind(&slow_case); 1006 __ Bind(&slow_case);
1006 } 1007 }
1007 // Create a stub frame as we are pushing some objects on the stack before 1008 // Create a stub frame as we are pushing some objects on the stack before
1008 // calling into the runtime. 1009 // calling into the runtime.
1009 __ EnterStubFrame(); 1010 __ EnterStubFrame();
1010 // Setup space on stack for return value. 1011 // Setup space on stack for return value.
1011 __ SmiTag(R1); 1012 __ SmiTag(R1);
1012 __ PushObject(Object::null_object(), PP); 1013 __ PushObject(Object::null_object());
1013 __ Push(R1); 1014 __ Push(R1);
1014 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context. 1015 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
1015 __ Drop(1); // Pop number of context variables argument. 1016 __ Drop(1); // Pop number of context variables argument.
1016 __ Pop(R0); // Pop the new context object. 1017 __ Pop(R0); // Pop the new context object.
1017 // R0: new object 1018 // R0: new object
1018 // Restore the frame pointer. 1019 // Restore the frame pointer.
1019 __ LeaveStubFrame(); 1020 __ LeaveStubFrame();
1020 __ ret(); 1021 __ ret();
1021 } 1022 }
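
Note: the "rounded instance size" computed at the top of this stub folds the round-up into the constant: fixed_size already carries the +(alignment - 1) bias, so masking the low bits rounds the total up. A sketch of the arithmetic, with placeholder values standing in for sizeof(RawContext) and kObjectAlignment:

  #include <cstdint>

  constexpr intptr_t kObjectAlignment = 16;  // assumed, for illustration
  constexpr intptr_t kRawContextSize = 24;   // assumed sizeof(RawContext)

  // Mirrors: LoadImmediate(R2, fixed_size); add R2, R2, Operand(R1, LSL, 3);
  //          andi R2, R2, ~(kObjectAlignment - 1).
  intptr_t RoundedContextSize(intptr_t num_context_variables) {
    const intptr_t fixed_size = kRawContextSize + kObjectAlignment - 1;
    const intptr_t size = fixed_size + num_context_variables * 8;  // one word each
    return size & ~(kObjectAlignment - 1);  // rounds up via the bias above
  }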
1022 1023
1023 1024
1024 DECLARE_LEAF_RUNTIME_ENTRY(void, StoreBufferBlockProcess, Isolate* isolate); 1025 DECLARE_LEAF_RUNTIME_ENTRY(void, StoreBufferBlockProcess, Isolate* isolate);
1025 1026
1026 // Helper stub to implement Assembler::StoreIntoObject. 1027 // Helper stub to implement Assembler::StoreIntoObject.
1027 // Input parameters: 1028 // Input parameters:
1028 // R0: Address being stored 1029 // R0: Address being stored
1029 void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) { 1030 void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) {
1030 Label add_to_buffer; 1031 Label add_to_buffer;
1031 // Check whether this object has already been remembered. Skip adding to the 1032 // Check whether this object has already been remembered. Skip adding to the
1032 // store buffer if the object is in the store buffer already. 1033 // store buffer if the object is in the store buffer already.
1033 __ LoadFieldFromOffset(TMP, R0, Object::tags_offset(), kNoPP); 1034 __ LoadFieldFromOffset(TMP, R0, Object::tags_offset());
1034 __ tsti(TMP, Immediate(1 << RawObject::kRememberedBit)); 1035 __ tsti(TMP, Immediate(1 << RawObject::kRememberedBit));
1035 __ b(&add_to_buffer, EQ); 1036 __ b(&add_to_buffer, EQ);
1036 __ ret(); 1037 __ ret();
1037 1038
1038 __ Bind(&add_to_buffer); 1039 __ Bind(&add_to_buffer);
1039 // Save values being destroyed. 1040 // Save values being destroyed.
1040 __ Push(R1); 1041 __ Push(R1);
1041 __ Push(R2); 1042 __ Push(R2);
1042 __ Push(R3); 1043 __ Push(R3);
1043 1044
1044 __ orri(R2, TMP, Immediate(1 << RawObject::kRememberedBit)); 1045 __ orri(R2, TMP, Immediate(1 << RawObject::kRememberedBit));
1045 __ StoreFieldToOffset(R2, R0, Object::tags_offset(), kNoPP); 1046 __ StoreFieldToOffset(R2, R0, Object::tags_offset());
1046 1047
1047 // Load the StoreBuffer block out of the thread. Then load top_ out of the 1048 // Load the StoreBuffer block out of the thread. Then load top_ out of the
1048 // StoreBufferBlock and add the address to the pointers_. 1049 // StoreBufferBlock and add the address to the pointers_.
1049 __ LoadFromOffset(R1, THR, Thread::store_buffer_block_offset(), kNoPP); 1050 __ LoadFromOffset(R1, THR, Thread::store_buffer_block_offset());
1050 __ LoadFromOffset(R2, R1, StoreBufferBlock::top_offset(), 1051 __ LoadFromOffset(R2, R1, StoreBufferBlock::top_offset(), kUnsignedWord);
1051 kNoPP, kUnsignedWord);
1052 __ add(R3, R1, Operand(R2, LSL, 3)); 1052 __ add(R3, R1, Operand(R2, LSL, 3));
1053 __ StoreToOffset(R0, R3, StoreBufferBlock::pointers_offset(), kNoPP); 1053 __ StoreToOffset(R0, R3, StoreBufferBlock::pointers_offset());
1054 1054
1055 // Increment top_ and check for overflow. 1055 // Increment top_ and check for overflow.
1056 // R2: top_. 1056 // R2: top_.
1057 // R1: StoreBufferBlock. 1057 // R1: StoreBufferBlock.
1058 Label L; 1058 Label L;
1059 __ add(R2, R2, Operand(1)); 1059 __ add(R2, R2, Operand(1));
1060 __ StoreToOffset(R2, R1, StoreBufferBlock::top_offset(), 1060 __ StoreToOffset(R2, R1, StoreBufferBlock::top_offset(), kUnsignedWord);
1061 kNoPP, kUnsignedWord); 1061 __ CompareImmediate(R2, StoreBufferBlock::kSize);
1062 __ CompareImmediate(R2, StoreBufferBlock::kSize, kNoPP);
1063 // Restore values. 1062 // Restore values.
1064 __ Pop(R3); 1063 __ Pop(R3);
1065 __ Pop(R2); 1064 __ Pop(R2);
1066 __ Pop(R1); 1065 __ Pop(R1);
1067 __ b(&L, EQ); 1066 __ b(&L, EQ);
1068 __ ret(); 1067 __ ret();
1069 1068
1070 // Handle overflow: Call the runtime leaf function. 1069 // Handle overflow: Call the runtime leaf function.
1071 __ Bind(&L); 1070 __ Bind(&L);
1072 // Setup frame, push callee-saved registers. 1071 // Setup frame, push callee-saved registers.
(...skipping 30 matching lines...)
1103 // R1: instantiated type arguments. 1102 // R1: instantiated type arguments.
1104 } 1103 }
1105 if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) && 1104 if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) &&
1106 !cls.trace_allocation()) { 1105 !cls.trace_allocation()) {
1107 Label slow_case; 1106 Label slow_case;
1108 // Allocate the object and update top to point to 1107 // Allocate the object and update top to point to
1109 // next object start and initialize the allocated object. 1108 // next object start and initialize the allocated object.
1110 // R1: instantiated type arguments (if is_cls_parameterized). 1109 // R1: instantiated type arguments (if is_cls_parameterized).
1111 Heap* heap = Isolate::Current()->heap(); 1110 Heap* heap = Isolate::Current()->heap();
1112 Heap::Space space = Heap::SpaceForAllocation(cls.id()); 1111 Heap::Space space = Heap::SpaceForAllocation(cls.id());
1113 __ LoadImmediate(R5, heap->TopAddress(space), kNoPP); 1112 __ LoadImmediate(R5, heap->TopAddress(space));
1114 __ ldr(R2, Address(R5)); 1113 __ ldr(R2, Address(R5));
1115 __ AddImmediate(R3, R2, instance_size, kNoPP); 1114 __ AddImmediate(R3, R2, instance_size);
1116 // Check if the allocation fits into the remaining space. 1115 // Check if the allocation fits into the remaining space.
1117 // R2: potential new object start. 1116 // R2: potential new object start.
1118 // R3: potential next object start. 1117 // R3: potential next object start.
1119 __ LoadImmediate(TMP, heap->EndAddress(space), kNoPP); 1118 __ LoadImmediate(TMP, heap->EndAddress(space));
1120 __ ldr(TMP, Address(TMP)); 1119 __ ldr(TMP, Address(TMP));
1121 __ CompareRegisters(R3, TMP); 1120 __ CompareRegisters(R3, TMP);
1122 if (FLAG_use_slow_path) { 1121 if (FLAG_use_slow_path) {
1123 __ b(&slow_case); 1122 __ b(&slow_case);
1124 } else { 1123 } else {
1125 __ b(&slow_case, CS); // Unsigned higher or equal. 1124 __ b(&slow_case, CS); // Unsigned higher or equal.
1126 } 1125 }
1127 __ str(R3, Address(R5)); 1126 __ str(R3, Address(R5));
1128 __ UpdateAllocationStats(cls.id(), kNoPP, space); 1127 __ UpdateAllocationStats(cls.id(), space);
1129 1128
1130 // R2: new object start. 1129 // R2: new object start.
1131 // R3: next object start. 1130 // R3: next object start.
1132 // R1: new object type arguments (if is_cls_parameterized). 1131 // R1: new object type arguments (if is_cls_parameterized).
1133 // Set the tags. 1132 // Set the tags.
1134 uword tags = 0; 1133 uword tags = 0;
1135 tags = RawObject::SizeTag::update(instance_size, tags); 1134 tags = RawObject::SizeTag::update(instance_size, tags);
1136 ASSERT(cls.id() != kIllegalCid); 1135 ASSERT(cls.id() != kIllegalCid);
1137 tags = RawObject::ClassIdTag::update(cls.id(), tags); 1136 tags = RawObject::ClassIdTag::update(cls.id(), tags);
1138 __ LoadImmediate(R0, tags, kNoPP); 1137 __ LoadImmediate(R0, tags);
1139 __ StoreToOffset(R0, R2, Instance::tags_offset(), kNoPP); 1138 __ StoreToOffset(R0, R2, Instance::tags_offset());
1140 1139
1141 // Initialize the remaining words of the object. 1140 // Initialize the remaining words of the object.
1142 __ LoadObject(R0, Object::null_object(), PP); 1141 __ LoadObject(R0, Object::null_object());
1143 1142
1144 // R0: raw null. 1143 // R0: raw null.
1145 // R2: new object start. 1144 // R2: new object start.
1146 // R3: next object start. 1145 // R3: next object start.
1147 // R1: new object type arguments (if is_cls_parameterized). 1146 // R1: new object type arguments (if is_cls_parameterized).
1148 // First try inlining the initialization without a loop. 1147 // First try inlining the initialization without a loop.
1149 if (instance_size < (kInlineInstanceSize * kWordSize)) { 1148 if (instance_size < (kInlineInstanceSize * kWordSize)) {
1150 // Check if the object contains any non-header fields. 1149 // Check if the object contains any non-header fields.
1151 // Small objects are initialized using a consecutive set of writes. 1150 // Small objects are initialized using a consecutive set of writes.
1152 for (intptr_t current_offset = Instance::NextFieldOffset(); 1151 for (intptr_t current_offset = Instance::NextFieldOffset();
1153 current_offset < instance_size; 1152 current_offset < instance_size;
1154 current_offset += kWordSize) { 1153 current_offset += kWordSize) {
1155 __ StoreToOffset(R0, R2, current_offset, kNoPP); 1154 __ StoreToOffset(R0, R2, current_offset);
1156 } 1155 }
1157 } else { 1156 } else {
1158 __ AddImmediate(R4, R2, Instance::NextFieldOffset(), kNoPP); 1157 __ AddImmediate(R4, R2, Instance::NextFieldOffset());
1159 // Loop until the whole object is initialized. 1158 // Loop until the whole object is initialized.
1160 // R0: raw null. 1159 // R0: raw null.
1161 // R2: new object. 1160 // R2: new object.
1162 // R3: next object start. 1161 // R3: next object start.
1163 // R4: next word to be initialized. 1162 // R4: next word to be initialized.
1164 // R1: new object type arguments (if is_cls_parameterized). 1163 // R1: new object type arguments (if is_cls_parameterized).
1165 Label init_loop; 1164 Label init_loop;
1166 Label done; 1165 Label done;
1167 __ Bind(&init_loop); 1166 __ Bind(&init_loop);
1168 __ CompareRegisters(R4, R3); 1167 __ CompareRegisters(R4, R3);
1169 __ b(&done, CS); 1168 __ b(&done, CS);
1170 __ str(R0, Address(R4)); 1169 __ str(R0, Address(R4));
1171 __ AddImmediate(R4, R4, kWordSize, kNoPP); 1170 __ AddImmediate(R4, R4, kWordSize);
1172 __ b(&init_loop); 1171 __ b(&init_loop);
1173 __ Bind(&done); 1172 __ Bind(&done);
1174 } 1173 }
1175 if (is_cls_parameterized) { 1174 if (is_cls_parameterized) {
1176 // R1: new object type arguments. 1175 // R1: new object type arguments.
1177 // Set the type arguments in the new object. 1176 // Set the type arguments in the new object.
1178 __ StoreToOffset(R1, R2, cls.type_arguments_field_offset(), kNoPP); 1177 __ StoreToOffset(R1, R2, cls.type_arguments_field_offset());
1179 } 1178 }
1180 // Done allocating and initializing the instance. 1179 // Done allocating and initializing the instance.
1181 // R2: new object still missing its heap tag. 1180 // R2: new object still missing its heap tag.
1182 __ add(R0, R2, Operand(kHeapObjectTag)); 1181 __ add(R0, R2, Operand(kHeapObjectTag));
1183 // R0: new object. 1182 // R0: new object.
1184 __ ret(); 1183 __ ret();
1185 1184
1186 __ Bind(&slow_case); 1185 __ Bind(&slow_case);
1187 } 1186 }
1188 // If is_cls_parameterized: 1187 // If is_cls_parameterized:
1189 // R1: new object type arguments. 1188 // R1: new object type arguments.
1190 // Create a stub frame as we are pushing some objects on the stack before 1189 // Create a stub frame as we are pushing some objects on the stack before
1191 // calling into the runtime. 1190 // calling into the runtime.
1192 __ EnterStubFrame(); // Uses pool pointer to pass cls to runtime. 1191 __ EnterStubFrame(); // Uses pool pointer to pass cls to runtime.
1193 // Setup space on stack for return value. 1192 // Setup space on stack for return value.
1194 __ PushObject(Object::null_object(), PP); 1193 __ PushObject(Object::null_object());
1195 __ PushObject(cls, PP); // Push class of object to be allocated. 1194 __ PushObject(cls); // Push class of object to be allocated.
1196 if (is_cls_parameterized) { 1195 if (is_cls_parameterized) {
1197 // Push type arguments. 1196 // Push type arguments.
1198 __ Push(R1); 1197 __ Push(R1);
1199 } else { 1198 } else {
1200 // Push null type arguments. 1199 // Push null type arguments.
1201 __ PushObject(Object::null_object(), PP); 1200 __ PushObject(Object::null_object());
1202 } 1201 }
1203 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. 1202 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object.
1204 __ Drop(2); // Pop arguments. 1203 __ Drop(2); // Pop arguments.
1205 __ Pop(R0); // Pop result (newly allocated object). 1204 __ Pop(R0); // Pop result (newly allocated object).
1206 // R0: new object 1205 // R0: new object
1207 // Restore the frame pointer. 1206 // Restore the frame pointer.
1208 __ LeaveStubFrame(); 1207 __ LeaveStubFrame();
1209 __ ret(); 1208 __ ret();
1210 *patch_code_pc_offset = assembler->CodeSize(); 1209 *patch_code_pc_offset = assembler->CodeSize();
1211 __ BranchPatchable(&StubCode::FixAllocationStubTargetLabel()); 1210 __ BranchPatchable(&StubCode::FixAllocationStubTargetLabel());
1212 } 1211 }
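
Note: the `tags` word assembled before the object is initialized packs the instance size and the class id into a single header word. A sketch of that encoding; the field positions below are assumptions standing in for RawObject::SizeTag and RawObject::ClassIdTag:

  #include <cstdint>
  using uword = uintptr_t;

  constexpr int kObjectAlignmentLog2 = 4;  // assumed
  constexpr int kSizeTagPos = 8;           // assumed field position
  constexpr int kClassIdTagPos = 16;       // assumed field position

  // Mirrors: tags = RawObject::SizeTag::update(instance_size, tags);
  //          tags = RawObject::ClassIdTag::update(cls.id(), tags).
  uword MakeTags(uword instance_size, uword cid) {
    uword tags = 0;
    // Sizes are multiples of the allocation alignment, so the low bits are
    // shifted out first; this is the kSizeTagPos - kObjectAlignmentLog2
    // shift used in the context-allocation stub above.
    tags |= (instance_size >> kObjectAlignmentLog2) << kSizeTagPos;
    tags |= cid << kClassIdTagPos;
    return tags;
  }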
1213 1212
1214 1213
1215 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function 1214 // Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
1216 // from the entry code of a Dart function after an error in the passed 1215 // from the entry code of a Dart function after an error in the passed
1217 // argument names or argument count is detected. 1216 // argument names or argument count is detected.
1218 // Input parameters: 1217 // Input parameters:
1219 // LR : return address. 1218 // LR : return address.
1220 // SP : address of last argument. 1219 // SP : address of last argument.
1221 // R4: arguments descriptor array. 1220 // R4: arguments descriptor array.
1222 void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) { 1221 void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
1223 __ EnterStubFrame(); 1222 __ EnterStubFrame();
1224 1223
1225 // Load the receiver. 1224 // Load the receiver.
1226 __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset(), kNoPP); 1225 __ LoadFieldFromOffset(R2, R4, ArgumentsDescriptor::count_offset());
1227 __ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi. 1226 __ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi.
1228 __ LoadFromOffset(R6, TMP, kParamEndSlotFromFp * kWordSize, kNoPP); 1227 __ LoadFromOffset(R6, TMP, kParamEndSlotFromFp * kWordSize);
1229 1228
1230 // Push space for the return value. 1229 // Push space for the return value.
1231 // Push the receiver. 1230 // Push the receiver.
1232 // Push arguments descriptor array. 1231 // Push arguments descriptor array.
1233 __ PushObject(Object::null_object(), PP); 1232 __ PushObject(Object::null_object());
1234 __ Push(R6); 1233 __ Push(R6);
1235 __ Push(R4); 1234 __ Push(R4);
1236 1235
1237 // R2: Smi-tagged arguments array length. 1236 // R2: Smi-tagged arguments array length.
1238 PushArgumentsArray(assembler); 1237 PushArgumentsArray(assembler);
1239 1238
1240 const intptr_t kNumArgs = 3; 1239 const intptr_t kNumArgs = 3;
1241 __ CallRuntime(kInvokeClosureNoSuchMethodRuntimeEntry, kNumArgs); 1240 __ CallRuntime(kInvokeClosureNoSuchMethodRuntimeEntry, kNumArgs);
1242 // noSuchMethod on closures always throws an error, so it will never return. 1241 // noSuchMethod on closures always throws an error, so it will never return.
1243 __ brk(0); 1242 __ brk(0);
(...skipping 13 matching lines...)
1257 __ Push(R5); // Preserve. 1256 __ Push(R5); // Preserve.
1258 __ Push(ic_reg); // Argument. 1257 __ Push(ic_reg); // Argument.
1259 __ Push(func_reg); // Argument. 1258 __ Push(func_reg); // Argument.
1260 __ CallRuntime(kTraceICCallRuntimeEntry, 2); 1259 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
1261 __ Drop(2); // Discard arguments. 1260 __ Drop(2); // Discard arguments.
1262 __ Pop(R5); // Restore. 1261 __ Pop(R5); // Restore.
1263 __ Pop(R6); // Restore. 1262 __ Pop(R6); // Restore.
1264 __ LeaveStubFrame(); 1263 __ LeaveStubFrame();
1265 } 1264 }
1266 __ LoadFieldFromOffset( 1265 __ LoadFieldFromOffset(
1267 R7, func_reg, Function::usage_counter_offset(), kNoPP, kWord); 1266 R7, func_reg, Function::usage_counter_offset(), kWord);
1268 __ add(R7, R7, Operand(1)); 1267 __ add(R7, R7, Operand(1));
1269 __ StoreFieldToOffset( 1268 __ StoreFieldToOffset(
1270 R7, func_reg, Function::usage_counter_offset(), kNoPP, kWord); 1269 R7, func_reg, Function::usage_counter_offset(), kWord);
1271 } 1270 }
1272 1271
1273 1272
1274 // Loads function into 'temp_reg'. 1273 // Loads function into 'temp_reg'.
1275 void StubCode::GenerateUsageCounterIncrement(Assembler* assembler, 1274 void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
1276 Register temp_reg) { 1275 Register temp_reg) {
1277 if (FLAG_optimization_counter_threshold >= 0) { 1276 if (FLAG_optimization_counter_threshold >= 0) {
1278 Register ic_reg = R5; 1277 Register ic_reg = R5;
1279 Register func_reg = temp_reg; 1278 Register func_reg = temp_reg;
1280 ASSERT(temp_reg == R6); 1279 ASSERT(temp_reg == R6);
1281 __ Comment("Increment function counter"); 1280 __ Comment("Increment function counter");
1282 __ LoadFieldFromOffset(func_reg, ic_reg, ICData::owner_offset(), kNoPP); 1281 __ LoadFieldFromOffset(func_reg, ic_reg, ICData::owner_offset());
1283 __ LoadFieldFromOffset( 1282 __ LoadFieldFromOffset(
1284 R7, func_reg, Function::usage_counter_offset(), kNoPP, kWord); 1283 R7, func_reg, Function::usage_counter_offset(), kWord);
1285 __ AddImmediate(R7, R7, 1, kNoPP); 1284 __ AddImmediate(R7, R7, 1);
1286 __ StoreFieldToOffset( 1285 __ StoreFieldToOffset(
1287 R7, func_reg, Function::usage_counter_offset(), kNoPP, kWord); 1286 R7, func_reg, Function::usage_counter_offset(), kWord);
1288 } 1287 }
1289 } 1288 }
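
Note: the counter bump here is a plain 32-bit read-modify-write on the function object (hence the kWord-sized load/store pair). Roughly, in C++ with a stand-in type:

  #include <cstdint>

  struct FunctionStandIn { int32_t usage_counter_; };  // stand-in layout

  // Mirrors: LoadFieldFromOffset(R7, func, usage_counter_offset(), kWord);
  //          AddImmediate(R7, R7, 1); StoreFieldToOffset(..., kWord).
  void IncrementUsageCounter(FunctionStandIn* f) {
    f->usage_counter_ += 1;  // no saturation here, unlike the IC call counts
  }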
1290 1289
1291 1290
1292 // Note: R5 must be preserved. 1291 // Note: R5 must be preserved.
1293 // Attempt a quick Smi operation for known operations ('kind'). The ICData 1292 // Attempt a quick Smi operation for known operations ('kind'). The ICData
1294 // must have been primed with a Smi/Smi check that will be used for counting 1293 // must have been primed with a Smi/Smi check that will be used for counting
1295 // the invocations. 1294 // the invocations.
1296 static void EmitFastSmiOp(Assembler* assembler, 1295 static void EmitFastSmiOp(Assembler* assembler,
1297 Token::Kind kind, 1296 Token::Kind kind,
(...skipping 16 matching lines...)
1314 __ b(not_smi_or_overflow, VS); // Branch if overflow. 1313 __ b(not_smi_or_overflow, VS); // Branch if overflow.
1315 break; 1314 break;
1316 } 1315 }
1317 case Token::kSUB: { 1316 case Token::kSUB: {
1318 __ subs(R0, R1, Operand(R0)); // Subtract. 1317 __ subs(R0, R1, Operand(R0)); // Subtract.
1319 __ b(not_smi_or_overflow, VS); // Branch if overflow. 1318 __ b(not_smi_or_overflow, VS); // Branch if overflow.
1320 break; 1319 break;
1321 } 1320 }
1322 case Token::kEQ: { 1321 case Token::kEQ: {
1323 __ CompareRegisters(R0, R1); 1322 __ CompareRegisters(R0, R1);
1324 __ LoadObject(R0, Bool::True(), PP); 1323 __ LoadObject(R0, Bool::True());
1325 __ LoadObject(R1, Bool::False(), PP); 1324 __ LoadObject(R1, Bool::False());
1326 __ csel(R0, R1, R0, NE); 1325 __ csel(R0, R1, R0, NE);
1327 break; 1326 break;
1328 } 1327 }
1329 default: UNIMPLEMENTED(); 1328 default: UNIMPLEMENTED();
1330 } 1329 }
1331 1330
1332 if (should_update_result_range) { 1331 if (should_update_result_range) {
1333 Label done; 1332 Label done;
1334 __ UpdateRangeFeedback(R0, 2, R5, R1, R6, &done); 1333 __ UpdateRangeFeedback(R0, 2, R5, R1, R6, &done);
1335 __ Bind(&done); 1334 __ Bind(&done);
1336 } 1335 }
1337 1336
1338 // R5: IC data object (preserved). 1337 // R5: IC data object (preserved).
1339 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset(), kNoPP); 1338 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset());
1340 // R6: ic_data_array with check entries: classes and target functions. 1339 // R6: ic_data_array with check entries: classes and target functions.
1341 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag, kNoPP); 1340 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag);
1342 // R6: points directly to the first ic data array element. 1341 // R6: points directly to the first ic data array element.
1343 #if defined(DEBUG) 1342 #if defined(DEBUG)
1344 // Check that first entry is for Smi/Smi. 1343 // Check that first entry is for Smi/Smi.
1345 Label error, ok; 1344 Label error, ok;
1346 const intptr_t imm_smi_cid = reinterpret_cast<intptr_t>(Smi::New(kSmiCid)); 1345 const intptr_t imm_smi_cid = reinterpret_cast<intptr_t>(Smi::New(kSmiCid));
1347 __ ldr(R1, Address(R6, 0)); 1346 __ ldr(R1, Address(R6, 0));
1348 __ CompareImmediate(R1, imm_smi_cid, kNoPP); 1347 __ CompareImmediate(R1, imm_smi_cid);
1349 __ b(&error, NE); 1348 __ b(&error, NE);
1350 __ ldr(R1, Address(R6, kWordSize)); 1349 __ ldr(R1, Address(R6, kWordSize));
1351 __ CompareImmediate(R1, imm_smi_cid, kNoPP); 1350 __ CompareImmediate(R1, imm_smi_cid);
1352 __ b(&ok, EQ); 1351 __ b(&ok, EQ);
1353 __ Bind(&error); 1352 __ Bind(&error);
1354 __ Stop("Incorrect IC data"); 1353 __ Stop("Incorrect IC data");
1355 __ Bind(&ok); 1354 __ Bind(&ok);
1356 #endif 1355 #endif
1357 if (FLAG_optimization_counter_threshold >= 0) { 1356 if (FLAG_optimization_counter_threshold >= 0) {
1358 const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize; 1357 const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
1359 // Update counter. 1358 // Update counter.
1360 __ LoadFromOffset(R1, R6, count_offset, kNoPP); 1359 __ LoadFromOffset(R1, R6, count_offset);
1361 __ adds(R1, R1, Operand(Smi::RawValue(1))); 1360 __ adds(R1, R1, Operand(Smi::RawValue(1)));
1362 __ LoadImmediate(R2, Smi::RawValue(Smi::kMaxValue), kNoPP); 1361 __ LoadImmediate(R2, Smi::RawValue(Smi::kMaxValue));
1363 __ csel(R1, R2, R1, VS); // Overflow. 1362 __ csel(R1, R2, R1, VS); // Overflow.
1364 __ StoreToOffset(R1, R6, count_offset, kNoPP); 1363 __ StoreToOffset(R1, R6, count_offset);
1365 } 1364 }
1366 1365
1367 __ ret(); 1366 __ ret();
1368 } 1367 }
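
Note: because Smis are tagged with a left shift of one and a zero tag bit (kSmiTagShift == 1), the kADD/kSUB fast paths can add the tagged words directly and use the overflow flag (the `b(..., VS)` branches) as the bailout. A sketch, with GCC/Clang's __builtin_add_overflow standing in for `adds` + `b(..., VS)`:

  #include <cstdint>

  int64_t SmiTag(int64_t v)   { return v << 1; }  // kSmiTagShift == 1
  int64_t SmiUntag(int64_t t) { return t >> 1; }

  // (a << 1) + (b << 1) == (a + b) << 1, so tagged addition is ordinary
  // machine addition; overflow means the sum no longer fits in a Smi and
  // the stub falls through to not_smi_or_overflow.
  bool FastSmiAdd(int64_t a_tagged, int64_t b_tagged, int64_t* result) {
    return !__builtin_add_overflow(a_tagged, b_tagged, result);
  }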
1369 1368
1370 1369
1371 // Generate inline cache check for 'num_args'. 1370 // Generate inline cache check for 'num_args'.
1372 // LR: return address. 1371 // LR: return address.
1373 // R5: inline cache data object. 1372 // R5: inline cache data object.
1374 // Control flow: 1373 // Control flow:
1375 // - If receiver is null -> jump to IC miss. 1374 // - If receiver is null -> jump to IC miss.
1376 // - If receiver is Smi -> load Smi class. 1375 // - If receiver is Smi -> load Smi class.
1377 // - If receiver is not-Smi -> load receiver's class. 1376 // - If receiver is not-Smi -> load receiver's class.
1378 // - Check if 'num_args' (including receiver) match any IC data group. 1377 // - Check if 'num_args' (including receiver) match any IC data group.
1379 // - Match found -> jump to target. 1378 // - Match found -> jump to target.
1380 // - Match not found -> jump to IC miss. 1379 // - Match not found -> jump to IC miss.
1381 void StubCode::GenerateNArgsCheckInlineCacheStub( 1380 void StubCode::GenerateNArgsCheckInlineCacheStub(
1382 Assembler* assembler, 1381 Assembler* assembler,
1383 intptr_t num_args, 1382 intptr_t num_args,
1384 const RuntimeEntry& handle_ic_miss, 1383 const RuntimeEntry& handle_ic_miss,
1385 Token::Kind kind, 1384 Token::Kind kind,
1386 RangeCollectionMode range_collection_mode, 1385 RangeCollectionMode range_collection_mode,
1387 bool optimized) { 1386 bool optimized) {
1388 ASSERT(num_args > 0); 1387 ASSERT(num_args > 0);
1389 #if defined(DEBUG) 1388 #if defined(DEBUG)
1390 { Label ok; 1389 { Label ok;
1391 // Check that the IC data array has NumArgsTested() == num_args. 1390 // Check that the IC data array has NumArgsTested() == num_args.
1392 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. 1391 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
1393 __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag, 1392 __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag,
1394 kNoPP, kUnsignedWord); 1393 kUnsignedWord);
1395 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. 1394 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
1396 __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask())); 1395 __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask()));
1397 __ CompareImmediate(R6, num_args, kNoPP); 1396 __ CompareImmediate(R6, num_args);
1398 __ b(&ok, EQ); 1397 __ b(&ok, EQ);
1399 __ Stop("Incorrect stub for IC data"); 1398 __ Stop("Incorrect stub for IC data");
1400 __ Bind(&ok); 1399 __ Bind(&ok);
1401 } 1400 }
1402 #endif // DEBUG 1401 #endif // DEBUG
1403 1402
1404 Label stepping, done_stepping; 1403 Label stepping, done_stepping;
1405 if (FLAG_support_debugger && !optimized) { 1404 if (FLAG_support_debugger && !optimized) {
1406 __ Comment("Check single stepping"); 1405 __ Comment("Check single stepping");
1407 __ LoadIsolate(R6); 1406 __ LoadIsolate(R6);
1408 __ LoadFromOffset( 1407 __ LoadFromOffset(
1409 R6, R6, Isolate::single_step_offset(), kNoPP, kUnsignedByte); 1408 R6, R6, Isolate::single_step_offset(), kUnsignedByte);
1410 __ CompareRegisters(R6, ZR); 1409 __ CompareRegisters(R6, ZR);
1411 __ b(&stepping, NE); 1410 __ b(&stepping, NE);
1412 __ Bind(&done_stepping); 1411 __ Bind(&done_stepping);
1413 } 1412 }
1414 1413
1415 __ Comment("Range feedback collection"); 1414 __ Comment("Range feedback collection");
1416 Label not_smi_or_overflow; 1415 Label not_smi_or_overflow;
1417 if (range_collection_mode == kCollectRanges) { 1416 if (range_collection_mode == kCollectRanges) {
1418 ASSERT((num_args == 1) || (num_args == 2)); 1417 ASSERT((num_args == 1) || (num_args == 2));
1419 if (num_args == 2) { 1418 if (num_args == 2) {
1420 __ ldr(R0, Address(SP, 1 * kWordSize)); 1419 __ ldr(R0, Address(SP, 1 * kWordSize));
1421 __ UpdateRangeFeedback(R0, 0, R5, R1, R4, &not_smi_or_overflow); 1420 __ UpdateRangeFeedback(R0, 0, R5, R1, R4, &not_smi_or_overflow);
1422 } 1421 }
1423 1422
1424 __ ldr(R0, Address(SP, 0 * kWordSize)); 1423 __ ldr(R0, Address(SP, 0 * kWordSize));
1425 __ UpdateRangeFeedback(R0, num_args - 1, R5, R1, R4, &not_smi_or_overflow); 1424 __ UpdateRangeFeedback(R0, num_args - 1, R5, R1, R4, &not_smi_or_overflow);
1426 } 1425 }
1427 if (kind != Token::kILLEGAL) { 1426 if (kind != Token::kILLEGAL) {
1428 EmitFastSmiOp(assembler, 1427 EmitFastSmiOp(assembler,
1429 kind, 1428 kind,
1430 num_args, 1429 num_args,
1431 &not_smi_or_overflow, 1430 &not_smi_or_overflow,
1432 (range_collection_mode == kCollectRanges)); 1431 (range_collection_mode == kCollectRanges));
1433 } 1432 }
1434 __ Bind(&not_smi_or_overflow); 1433 __ Bind(&not_smi_or_overflow);
1435 1434
1436 __ Comment("Extract ICData initial values and receiver cid"); 1435 __ Comment("Extract ICData initial values and receiver cid");
1437 // Load arguments descriptor into R4. 1436 // Load arguments descriptor into R4.
1438 __ LoadFieldFromOffset(R4, R5, ICData::arguments_descriptor_offset(), kNoPP); 1437 __ LoadFieldFromOffset(R4, R5, ICData::arguments_descriptor_offset());
1439 // Loop that checks if there is an IC data match. 1438 // Loop that checks if there is an IC data match.
1440 Label loop, update, test, found; 1439 Label loop, update, test, found;
1441 // R5: IC data object (preserved). 1440 // R5: IC data object (preserved).
1442 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset(), kNoPP); 1441 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset());
1443 // R6: ic_data_array with check entries: classes and target functions. 1442 // R6: ic_data_array with check entries: classes and target functions.
1444 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag, kNoPP); 1443 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag);
1445 // R6: points directly to the first ic data array element. 1444 // R6: points directly to the first ic data array element.
1446 1445
1447 // Get the receiver's class ID (first read number of arguments from 1446 // Get the receiver's class ID (first read number of arguments from
1448 // arguments descriptor array and then access the receiver from the stack). 1447 // arguments descriptor array and then access the receiver from the stack).
1449 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset(), kNoPP); 1448 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset());
1450 __ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode. 1449 __ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
1451 __ sub(R7, R7, Operand(1)); 1450 __ sub(R7, R7, Operand(1));
1452 1451
1453 // R0 <- [SP + (R7 << 3)] 1452 // R0 <- [SP + (R7 << 3)]
1454 __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled)); 1453 __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled));
1455 __ LoadTaggedClassIdMayBeSmi(R0, R0); 1454 __ LoadTaggedClassIdMayBeSmi(R0, R0);
1456 1455
1457 // R7: argument_count - 1 (untagged). 1456 // R7: argument_count - 1 (untagged).
1458 // R0: receiver's class ID (smi). 1457 // R0: receiver's class ID (smi).
1459 __ ldr(R1, Address(R6)); // First class id (smi) to check. 1458 __ ldr(R1, Address(R6)); // First class id (smi) to check.
1460 __ b(&test); 1459 __ b(&test);
1461 1460
1462 __ Comment("ICData loop"); 1461 __ Comment("ICData loop");
1463 __ Bind(&loop); 1462 __ Bind(&loop);
1464 for (int i = 0; i < num_args; i++) { 1463 for (int i = 0; i < num_args; i++) {
1465 if (i > 0) { 1464 if (i > 0) {
1466 // If not the first, load the next argument's class ID. 1465 // If not the first, load the next argument's class ID.
1467 __ AddImmediate(R0, R7, -i, kNoPP); 1466 __ AddImmediate(R0, R7, -i);
1468 // R0 <- [SP + (R0 << 3)] 1467 // R0 <- [SP + (R0 << 3)]
1469 __ ldr(R0, Address(SP, R0, UXTX, Address::Scaled)); 1468 __ ldr(R0, Address(SP, R0, UXTX, Address::Scaled));
1470 __ LoadTaggedClassIdMayBeSmi(R0, R0); 1469 __ LoadTaggedClassIdMayBeSmi(R0, R0);
1471 // R0: next argument class ID (smi). 1470 // R0: next argument class ID (smi).
1472 __ LoadFromOffset(R1, R6, i * kWordSize, kNoPP); 1471 __ LoadFromOffset(R1, R6, i * kWordSize);
1473 // R1: next class ID to check (smi). 1472 // R1: next class ID to check (smi).
1474 } 1473 }
1475 __ CompareRegisters(R0, R1); // Class id match? 1474 __ CompareRegisters(R0, R1); // Class id match?
1476 if (i < (num_args - 1)) { 1475 if (i < (num_args - 1)) {
1477 __ b(&update, NE); // Continue. 1476 __ b(&update, NE); // Continue.
1478 } else { 1477 } else {
1479 // Last check, all checks before matched. 1478 // Last check, all checks before matched.
1480 __ b(&found, EQ); // Break. 1479 __ b(&found, EQ); // Break.
1481 } 1480 }
1482 } 1481 }
1483 __ Bind(&update); 1482 __ Bind(&update);
1484 // Reload receiver class ID. It has not been destroyed when num_args == 1. 1483 // Reload receiver class ID. It has not been destroyed when num_args == 1.
1485 if (num_args > 1) { 1484 if (num_args > 1) {
1486 __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled)); 1485 __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled));
1487 __ LoadTaggedClassIdMayBeSmi(R0, R0); 1486 __ LoadTaggedClassIdMayBeSmi(R0, R0);
1488 } 1487 }
1489 1488
1490 const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize; 1489 const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize;
1491 __ AddImmediate(R6, R6, entry_size, kNoPP); // Next entry. 1490 __ AddImmediate(R6, R6, entry_size); // Next entry.
1492 __ ldr(R1, Address(R6)); // Next class ID. 1491 __ ldr(R1, Address(R6)); // Next class ID.
1493 1492
1494 __ Bind(&test); 1493 __ Bind(&test);
1495 __ CompareImmediate(R1, Smi::RawValue(kIllegalCid), kNoPP); // Done? 1494 __ CompareImmediate(R1, Smi::RawValue(kIllegalCid)); // Done?
1496 __ b(&loop, NE); 1495 __ b(&loop, NE);
1497 1496
1498 __ Comment("IC miss"); 1497 __ Comment("IC miss");
1499 // Compute address of arguments. 1498 // Compute address of arguments.
1500 // R7: argument_count - 1 (untagged). 1499 // R7: argument_count - 1 (untagged).
1501 // R7 <- SP + (R7 << 3) 1500 // R7 <- SP + (R7 << 3)
1502 __ add(R7, SP, Operand(R7, UXTX, 3)); // R7 is Untagged. 1501 __ add(R7, SP, Operand(R7, UXTX, 3)); // R7 is Untagged.
1503 // R7: address of receiver. 1502 // R7: address of receiver.
1504 // Create a stub frame as we are pushing some objects on the stack before 1503 // Create a stub frame as we are pushing some objects on the stack before
1505 // calling into the runtime. 1504 // calling into the runtime.
1506 __ EnterStubFrame(); 1505 __ EnterStubFrame();
1507 // Preserve IC data object and arguments descriptor array and 1506 // Preserve IC data object and arguments descriptor array and
1508 // setup space on stack for result (target code object). 1507 // setup space on stack for result (target code object).
1509 __ Push(R4); // Preserve arguments descriptor array. 1508 __ Push(R4); // Preserve arguments descriptor array.
1510 __ Push(R5); // Preserve IC Data. 1509 __ Push(R5); // Preserve IC Data.
1511 // Setup space on stack for the result (target code object). 1510 // Setup space on stack for the result (target code object).
1512 __ PushObject(Object::null_object(), PP); 1511 __ PushObject(Object::null_object());
1513 // Push call arguments. 1512 // Push call arguments.
1514 for (intptr_t i = 0; i < num_args; i++) { 1513 for (intptr_t i = 0; i < num_args; i++) {
1515 __ LoadFromOffset(TMP, R7, -i * kWordSize, kNoPP); 1514 __ LoadFromOffset(TMP, R7, -i * kWordSize);
1516 __ Push(TMP); 1515 __ Push(TMP);
1517 } 1516 }
1518 // Pass IC data object. 1517 // Pass IC data object.
1519 __ Push(R5); 1518 __ Push(R5);
1520 __ CallRuntime(handle_ic_miss, num_args + 1); 1519 __ CallRuntime(handle_ic_miss, num_args + 1);
1521 // Remove the call arguments pushed earlier, including the IC data object. 1520 // Remove the call arguments pushed earlier, including the IC data object.
1522 __ Drop(num_args + 1); 1521 __ Drop(num_args + 1);
1523 // Pop returned function object into R0. 1522 // Pop returned function object into R0.
1524 // Restore arguments descriptor array and IC data array. 1523 // Restore arguments descriptor array and IC data array.
1525 __ Pop(R0); // Pop returned function object into R0. 1524 __ Pop(R0); // Pop returned function object into R0.
1526 __ Pop(R5); // Restore IC Data. 1525 __ Pop(R5); // Restore IC Data.
1527 __ Pop(R4); // Restore arguments descriptor array. 1526 __ Pop(R4); // Restore arguments descriptor array.
1528 __ LeaveStubFrame(); 1527 __ LeaveStubFrame();
1529 Label call_target_function; 1528 Label call_target_function;
1530 if (!FLAG_lazy_dispatchers) { 1529 if (!FLAG_lazy_dispatchers) {
1531 GenerateDispatcherCode(assembler, &call_target_function); 1530 GenerateDispatcherCode(assembler, &call_target_function);
1532 } else { 1531 } else {
1533 __ b(&call_target_function); 1532 __ b(&call_target_function);
1534 } 1533 }
1535 1534
1536 __ Bind(&found); 1535 __ Bind(&found);
1537 __ Comment("Update caller's counter"); 1536 __ Comment("Update caller's counter");
1538 // R6: pointer to an IC data check group. 1537 // R6: pointer to an IC data check group.
1539 const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize; 1538 const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
1540 const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize; 1539 const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
1541 __ LoadFromOffset(R0, R6, target_offset, kNoPP); 1540 __ LoadFromOffset(R0, R6, target_offset);
1542 1541
1543 if (FLAG_optimization_counter_threshold >= 0) { 1542 if (FLAG_optimization_counter_threshold >= 0) {
1544 // Update counter. 1543 // Update counter.
1545 __ LoadFromOffset(R1, R6, count_offset, kNoPP); 1544 __ LoadFromOffset(R1, R6, count_offset);
1546 __ adds(R1, R1, Operand(Smi::RawValue(1))); 1545 __ adds(R1, R1, Operand(Smi::RawValue(1)));
1547 __ LoadImmediate(R2, Smi::RawValue(Smi::kMaxValue), kNoPP); 1546 __ LoadImmediate(R2, Smi::RawValue(Smi::kMaxValue));
1548 __ csel(R1, R2, R1, VS); // Overflow. 1547 __ csel(R1, R2, R1, VS); // Overflow.
1549 __ StoreToOffset(R1, R6, count_offset, kNoPP); 1548 __ StoreToOffset(R1, R6, count_offset);
1550 } 1549 }
1551 1550
1552 __ Comment("Call target"); 1551 __ Comment("Call target");
1553 __ Bind(&call_target_function); 1552 __ Bind(&call_target_function);
1554 // R0: target function. 1553 // R0: target function.
1555 __ LoadFieldFromOffset(R2, R0, Function::instructions_offset(), kNoPP); 1554 __ LoadFieldFromOffset(R2, R0, Function::instructions_offset());
1556 __ AddImmediate( 1555 __ AddImmediate(
1557 R2, R2, Instructions::HeaderSize() - kHeapObjectTag, kNoPP); 1556 R2, R2, Instructions::HeaderSize() - kHeapObjectTag);
1558 if (range_collection_mode == kCollectRanges) { 1557 if (range_collection_mode == kCollectRanges) {
1559 __ ldr(R1, Address(SP, 0 * kWordSize)); 1558 __ ldr(R1, Address(SP, 0 * kWordSize));
1560 if (num_args == 2) { 1559 if (num_args == 2) {
1561 __ ldr(R3, Address(SP, 1 * kWordSize)); 1560 __ ldr(R3, Address(SP, 1 * kWordSize));
1562 } 1561 }
1563 __ EnterStubFrame(); 1562 __ EnterStubFrame();
1564 __ Push(R5); 1563 __ Push(R5);
1565 if (num_args == 2) { 1564 if (num_args == 2) {
1566 __ Push(R3); 1565 __ Push(R3);
1567 } 1566 }
(...skipping 110 matching lines...)
1678 } 1677 }
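
Note: the ICData loop in GenerateNArgsCheckInlineCacheStub above is a linear scan over a flat array of check groups, each holding num_args class ids followed by the cached target and a call count, terminated by a Smi-encoded kIllegalCid sentinel. A single-argument sketch with stand-in types:

  #include <cstdint>

  struct ICEntryStandIn {   // stand-in for the TestEntryLengthFor(1) words
    intptr_t cid;           // receiver class id (Smi-tagged in the VM)
    void* target;           // TargetIndexFor(1) slot
    intptr_t count;         // CountIndexFor(1) slot
  };

  void* LookupInlineCache(ICEntryStandIn* entries, intptr_t receiver_cid,
                          intptr_t illegal_cid_sentinel) {
    for (ICEntryStandIn* e = entries; e->cid != illegal_cid_sentinel; e++) {
      if (e->cid == receiver_cid) {
        e->count++;          // the stub saturates this at Smi::kMaxValue
        return e->target;    // "found": jump to the cached target
      }
    }
    return nullptr;          // "IC miss": ask the runtime to resolve
  }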
1679 1678
1680 1679
1681 void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) { 1680 void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
1682 GenerateUsageCounterIncrement(assembler, R6); 1681 GenerateUsageCounterIncrement(assembler, R6);
1683 #if defined(DEBUG) 1682 #if defined(DEBUG)
1684 { Label ok; 1683 { Label ok;
1685 // Check that the IC data array has NumArgsTested() == 0. 1684 // Check that the IC data array has NumArgsTested() == 0.
1686 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'. 1685 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
1687 __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag, 1686 __ LoadFromOffset(R6, R5, ICData::state_bits_offset() - kHeapObjectTag,
1688 kNoPP, kUnsignedWord); 1687 kUnsignedWord);
1689 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed. 1688 ASSERT(ICData::NumArgsTestedShift() == 0); // No shift needed.
1690 __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask())); 1689 __ andi(R6, R6, Immediate(ICData::NumArgsTestedMask()));
1691 __ CompareImmediate(R6, 0, kNoPP); 1690 __ CompareImmediate(R6, 0);
1692 __ b(&ok, EQ); 1691 __ b(&ok, EQ);
1693 __ Stop("Incorrect IC data for unoptimized static call"); 1692 __ Stop("Incorrect IC data for unoptimized static call");
1694 __ Bind(&ok); 1693 __ Bind(&ok);
1695 } 1694 }
1696 #endif // DEBUG 1695 #endif // DEBUG
1697 1696
1698 // Check single stepping. 1697 // Check single stepping.
1699 Label stepping, done_stepping; 1698 Label stepping, done_stepping;
1700 if (FLAG_support_debugger) { 1699 if (FLAG_support_debugger) {
1701 __ LoadIsolate(R6); 1700 __ LoadIsolate(R6);
1702 __ LoadFromOffset( 1701 __ LoadFromOffset(
1703 R6, R6, Isolate::single_step_offset(), kNoPP, kUnsignedByte); 1702 R6, R6, Isolate::single_step_offset(), kUnsignedByte);
1704 __ CompareImmediate(R6, 0, kNoPP); 1703 __ CompareImmediate(R6, 0);
1705 __ b(&stepping, NE); 1704 __ b(&stepping, NE);
1706 __ Bind(&done_stepping); 1705 __ Bind(&done_stepping);
1707 } 1706 }
1708 1707
1709 // R5: IC data object (preserved). 1708 // R5: IC data object (preserved).
1710 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset(), kNoPP); 1709 __ LoadFieldFromOffset(R6, R5, ICData::ic_data_offset());
1711 // R6: ic_data_array with entries: target functions and count. 1710 // R6: ic_data_array with entries: target functions and count.
1712 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag, kNoPP); 1711 __ AddImmediate(R6, R6, Array::data_offset() - kHeapObjectTag);
1713 // R6: points directly to the first ic data array element. 1712 // R6: points directly to the first ic data array element.
1714 const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize; 1713 const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize;
1715 const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize; 1714 const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize;
1716 1715
1717 if (FLAG_optimization_counter_threshold >= 0) { 1716 if (FLAG_optimization_counter_threshold >= 0) {
1718 // Increment count for this call. 1717 // Increment count for this call.
1719 __ LoadFromOffset(R1, R6, count_offset, kNoPP); 1718 __ LoadFromOffset(R1, R6, count_offset);
1720 __ adds(R1, R1, Operand(Smi::RawValue(1))); 1719 __ adds(R1, R1, Operand(Smi::RawValue(1)));
1721 __ LoadImmediate(R2, Smi::RawValue(Smi::kMaxValue), kNoPP); 1720 __ LoadImmediate(R2, Smi::RawValue(Smi::kMaxValue));
1722 __ csel(R1, R2, R1, VS); // Overflow. 1721 __ csel(R1, R2, R1, VS); // Overflow.
1723 __ StoreToOffset(R1, R6, count_offset, kNoPP); 1722 __ StoreToOffset(R1, R6, count_offset);
1724 } 1723 }
1725 1724
1726 // Load arguments descriptor into R4. 1725 // Load arguments descriptor into R4.
1727 __ LoadFieldFromOffset(R4, R5, ICData::arguments_descriptor_offset(), kNoPP); 1726 __ LoadFieldFromOffset(R4, R5, ICData::arguments_descriptor_offset());
1728 1727
1729 // Get function and call it, if possible. 1728 // Get function and call it, if possible.
1730 __ LoadFromOffset(R0, R6, target_offset, kNoPP); 1729 __ LoadFromOffset(R0, R6, target_offset);
1731 __ LoadFieldFromOffset(R2, R0, Function::instructions_offset(), kNoPP); 1730 __ LoadFieldFromOffset(R2, R0, Function::instructions_offset());
1732 1731
1733 // R0: function. 1732 // R0: function.
1734 // R2: target instructions. 1733 // R2: target instructions.
1735 __ AddImmediate( 1734 __ AddImmediate(
1736 R2, R2, Instructions::HeaderSize() - kHeapObjectTag, kNoPP); 1735 R2, R2, Instructions::HeaderSize() - kHeapObjectTag);
1737 __ br(R2); 1736 __ br(R2);
1738 1737
1739 if (FLAG_support_debugger) { 1738 if (FLAG_support_debugger) {
1740 __ Bind(&stepping); 1739 __ Bind(&stepping);
1741 __ EnterStubFrame(); 1740 __ EnterStubFrame();
1742 __ Push(R5); // Preserve IC data. 1741 __ Push(R5); // Preserve IC data.
1743 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 1742 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
1744 __ Pop(R5); 1743 __ Pop(R5);
1745 __ LeaveStubFrame(); 1744 __ LeaveStubFrame();
1746 __ b(&done_stepping); 1745 __ b(&done_stepping);
(...skipping 26 matching lines...)
1773 __ EnterStubFrame(); 1772 __ EnterStubFrame();
1774 __ Push(R5); // Save IC Data. 1773 __ Push(R5); // Save IC Data.
1775 __ Push(R4); // Save arg. desc. 1774 __ Push(R4); // Save arg. desc.
1776 __ Push(R0); // Pass function. 1775 __ Push(R0); // Pass function.
1777 __ CallRuntime(kCompileFunctionRuntimeEntry, 1); 1776 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
1778 __ Pop(R0); // Restore argument. 1777 __ Pop(R0); // Restore argument.
1779 __ Pop(R4); // Restore arg desc. 1778 __ Pop(R4); // Restore arg desc.
1780 __ Pop(R5); // Restore IC Data. 1779 __ Pop(R5); // Restore IC Data.
1781 __ LeaveStubFrame(); 1780 __ LeaveStubFrame();
1782 1781
1783 __ LoadFieldFromOffset(R2, R0, Function::instructions_offset(), kNoPP); 1782 __ LoadFieldFromOffset(R2, R0, Function::instructions_offset());
1784 __ AddImmediate( 1783 __ AddImmediate(
1785 R2, R2, Instructions::HeaderSize() - kHeapObjectTag, kNoPP); 1784 R2, R2, Instructions::HeaderSize() - kHeapObjectTag);
1786 __ br(R2); 1785 __ br(R2);
1787 } 1786 }
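
Note: both `br(R2)` sequences above compute the branch target the same way: the tagged Instructions pointer loaded from the Function, minus the heap tag, plus the fixed header, which lands on the first machine instruction. Roughly:

  #include <cstdint>
  using uword = uintptr_t;

  // Mirrors: AddImmediate(R2, R2, Instructions::HeaderSize() - kHeapObjectTag).
  uword EntryPoint(uword tagged_instructions, uword header_size) {
    const uword kHeapObjectTag = 1;  // assumed tag value, for illustration
    return tagged_instructions - kHeapObjectTag + header_size;
  }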
1788 1787
1789 1788
1790 // R5: Contains an ICData. 1789 // R5: Contains an ICData.
1791 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) { 1790 void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
1792 __ EnterStubFrame(); 1791 __ EnterStubFrame();
1793 __ Push(R5); 1792 __ Push(R5);
1794 __ PushObject(Object::null_object(), PP); // Space for result. 1793 __ PushObject(Object::null_object()); // Space for result.
1795 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); 1794 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
1796 __ Pop(R0); 1795 __ Pop(R0);
1797 __ Pop(R5); 1796 __ Pop(R5);
1798 __ LeaveStubFrame(); 1797 __ LeaveStubFrame();
1799 __ br(R0); 1798 __ br(R0);
1800 } 1799 }
1801 1800
1802 1801
1803 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) { 1802 void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
1804 __ EnterStubFrame(); 1803 __ EnterStubFrame();
1805 __ PushObject(Object::null_object(), PP); // Space for result. 1804 __ PushObject(Object::null_object()); // Space for result.
1806 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0); 1805 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
1807 __ Pop(R0); 1806 __ Pop(R0);
1808 __ LeaveStubFrame(); 1807 __ LeaveStubFrame();
1809 __ br(R0); 1808 __ br(R0);
1810 } 1809 }
1811 1810
1812 // Called only from unoptimized code. All relevant registers have been saved. 1811 // Called only from unoptimized code. All relevant registers have been saved.
1813 void StubCode::GenerateDebugStepCheckStub( 1812 void StubCode::GenerateDebugStepCheckStub(
1814 Assembler* assembler) { 1813 Assembler* assembler) {
1815 // Check single stepping. 1814 // Check single stepping.
1816 Label stepping, done_stepping; 1815 Label stepping, done_stepping;
1817 __ LoadIsolate(R1); 1816 __ LoadIsolate(R1);
1818 __ LoadFromOffset( 1817 __ LoadFromOffset(
1819 R1, R1, Isolate::single_step_offset(), kNoPP, kUnsignedByte); 1818 R1, R1, Isolate::single_step_offset(), kUnsignedByte);
1820 __ CompareImmediate(R1, 0, kNoPP); 1819 __ CompareImmediate(R1, 0);
1821 __ b(&stepping, NE); 1820 __ b(&stepping, NE);
1822 __ Bind(&done_stepping); 1821 __ Bind(&done_stepping);
1823 1822
1824 __ ret(); 1823 __ ret();
1825 1824
1826 __ Bind(&stepping); 1825 __ Bind(&stepping);
1827 __ EnterStubFrame(); 1826 __ EnterStubFrame();
1828 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 1827 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
1829 __ LeaveStubFrame(); 1828 __ LeaveStubFrame();
1830 __ b(&done_stepping); 1829 __ b(&done_stepping);
1831 } 1830 }
1832 1831
1833 1832
1834 // Used to check class and type arguments. Arguments passed in registers: 1833 // Used to check class and type arguments. Arguments passed in registers:
1835 // LR: return address. 1834 // LR: return address.
1836 // R0: instance (must be preserved). 1835 // R0: instance (must be preserved).
1837 // R1: instantiator type arguments or NULL. 1836 // R1: instantiator type arguments or NULL.
1838 // R2: cache array. 1837 // R2: cache array.
1839 // Result in R1: null -> not found, otherwise result (true or false). 1838 // Result in R1: null -> not found, otherwise result (true or false).
1840 static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) { 1839 static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
1841 ASSERT((1 <= n) && (n <= 3)); 1840 ASSERT((1 <= n) && (n <= 3));
1842 if (n > 1) { 1841 if (n > 1) {
1843 // Get instance type arguments. 1842 // Get instance type arguments.
1844 __ LoadClass(R3, R0, kNoPP); 1843 __ LoadClass(R3, R0);
1845 // Compute instance type arguments into R4. 1844 // Compute instance type arguments into R4.
1846 Label has_no_type_arguments; 1845 Label has_no_type_arguments;
1847 __ LoadObject(R4, Object::null_object(), PP); 1846 __ LoadObject(R4, Object::null_object());
1848 __ LoadFieldFromOffset(R5, R3, 1847 __ LoadFieldFromOffset(R5, R3,
1849 Class::type_arguments_field_offset_in_words_offset(), kNoPP, kWord); 1848 Class::type_arguments_field_offset_in_words_offset(), kWord);
1850 __ CompareImmediate(R5, Class::kNoTypeArguments, kNoPP); 1849 __ CompareImmediate(R5, Class::kNoTypeArguments);
1851 __ b(&has_no_type_arguments, EQ); 1850 __ b(&has_no_type_arguments, EQ);
1852 __ add(R5, R0, Operand(R5, LSL, 3)); 1851 __ add(R5, R0, Operand(R5, LSL, 3));
1853 __ LoadFieldFromOffset(R4, R5, 0, kNoPP); 1852 __ LoadFieldFromOffset(R4, R5, 0);
1854 __ Bind(&has_no_type_arguments); 1853 __ Bind(&has_no_type_arguments);
1855 } 1854 }
1856 __ LoadClassId(R3, R0, kNoPP); 1855 __ LoadClassId(R3, R0);
1857 // R0: instance. 1856 // R0: instance.
1858 // R1: instantiator type arguments or NULL. 1857 // R1: instantiator type arguments or NULL.
1859 // R2: SubtypeTestCache. 1858 // R2: SubtypeTestCache.
1860 // R3: instance class id. 1859 // R3: instance class id.
1861 // R4: instance type arguments (null if none), used only if n > 1. 1860 // R4: instance type arguments (null if none), used only if n > 1.
1862 __ LoadFieldFromOffset(R2, R2, SubtypeTestCache::cache_offset(), kNoPP); 1861 __ LoadFieldFromOffset(R2, R2, SubtypeTestCache::cache_offset());
1863 __ AddImmediate(R2, R2, Array::data_offset() - kHeapObjectTag, kNoPP); 1862 __ AddImmediate(R2, R2, Array::data_offset() - kHeapObjectTag);
1864 1863
1865 Label loop, found, not_found, next_iteration; 1864 Label loop, found, not_found, next_iteration;
1866 // R2: entry start. 1865 // R2: entry start.
1867 // R3: instance class id. 1866 // R3: instance class id.
1868 // R4: instance type arguments. 1867 // R4: instance type arguments.
1869 __ SmiTag(R3); 1868 __ SmiTag(R3);
1870 __ Bind(&loop); 1869 __ Bind(&loop);
1871 __ LoadFromOffset( 1870 __ LoadFromOffset(
1872 R5, R2, kWordSize * SubtypeTestCache::kInstanceClassId, kNoPP); 1871 R5, R2, kWordSize * SubtypeTestCache::kInstanceClassId);
1873 __ CompareObject(R5, Object::null_object(), PP); 1872 __ CompareObject(R5, Object::null_object());
1874 __ b(&not_found, EQ); 1873 __ b(&not_found, EQ);
1875 __ CompareRegisters(R5, R3); 1874 __ CompareRegisters(R5, R3);
1876 if (n == 1) { 1875 if (n == 1) {
1877 __ b(&found, EQ); 1876 __ b(&found, EQ);
1878 } else { 1877 } else {
1879 __ b(&next_iteration, NE); 1878 __ b(&next_iteration, NE);
1880 __ LoadFromOffset( 1879 __ LoadFromOffset(
1881 R5, R2, kWordSize * SubtypeTestCache::kInstanceTypeArguments, kNoPP); 1880 R5, R2, kWordSize * SubtypeTestCache::kInstanceTypeArguments);
1882 __ CompareRegisters(R5, R4); 1881 __ CompareRegisters(R5, R4);
1883 if (n == 2) { 1882 if (n == 2) {
1884 __ b(&found, EQ); 1883 __ b(&found, EQ);
1885 } else { 1884 } else {
1886 __ b(&next_iteration, NE); 1885 __ b(&next_iteration, NE);
1887 __ LoadFromOffset(R5, R2, 1886 __ LoadFromOffset(R5, R2,
1888 kWordSize * SubtypeTestCache::kInstantiatorTypeArguments, kNoPP); 1887 kWordSize * SubtypeTestCache::kInstantiatorTypeArguments);
1889 __ CompareRegisters(R5, R1); 1888 __ CompareRegisters(R5, R1);
1890 __ b(&found, EQ); 1889 __ b(&found, EQ);
1891 } 1890 }
1892 } 1891 }
1893 __ Bind(&next_iteration); 1892 __ Bind(&next_iteration);
1894 __ AddImmediate( 1893 __ AddImmediate(
1895 R2, R2, kWordSize * SubtypeTestCache::kTestEntryLength, kNoPP); 1894 R2, R2, kWordSize * SubtypeTestCache::kTestEntryLength);
1896 __ b(&loop); 1895 __ b(&loop);
1897 // Fall through to not found. 1896 // Fall through to not found.
1898 __ Bind(&not_found); 1897 __ Bind(&not_found);
1899 __ LoadObject(R1, Object::null_object(), PP); 1898 __ LoadObject(R1, Object::null_object());
1900 __ ret(); 1899 __ ret();
1901 1900
1902 __ Bind(&found); 1901 __ Bind(&found);
1903 __ LoadFromOffset(R1, R2, kWordSize * SubtypeTestCache::kTestResult, kNoPP); 1902 __ LoadFromOffset(R1, R2, kWordSize * SubtypeTestCache::kTestResult);
1904 __ ret(); 1903 __ ret();
1905 } 1904 }
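For readers tracing the assembler macros, the loop above is a plain linear scan over fixed-size cache entries keyed on up to three values. The C++ sketch below mirrors that control flow under assumed, illustrative declarations; the struct and helper names are not the VM's actual API (entries really live in a Dart Array at kWordSize strides, and the compared values are tagged object pointers):

    #include <cstdint>

    // Illustrative entry layout: kTestEntryLength words per entry.
    struct CacheEntry {
      intptr_t instance_class_id;       // kInstanceClassId (Smi-tagged)
      intptr_t instance_type_args;      // kInstanceTypeArguments
      intptr_t instantiator_type_args;  // kInstantiatorTypeArguments
      intptr_t test_result;             // kTestResult: null, true, or false
    };

    // Returns test_result on a hit, or kNull on a miss (R1's contract above).
    // n selects how many keys participate, exactly as in the stub.
    intptr_t SubtypeTestCacheLookup(const CacheEntry* entries, int n,
                                    intptr_t kNull, intptr_t cid,
                                    intptr_t type_args,
                                    intptr_t instantiator_type_args) {
      for (const CacheEntry* e = entries; ; ++e) {
        if (e->instance_class_id == kNull) return kNull;  // end sentinel
        if (e->instance_class_id != cid) continue;
        if (n > 1 && e->instance_type_args != type_args) continue;
        if (n > 2 && e->instantiator_type_args != instantiator_type_args)
          continue;
        return e->test_result;
      }
    }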
1906 1905
1907 1906
1908 // Used to check class and type arguments. Arguments passed on stack: 1907 // Used to check class and type arguments. Arguments passed on stack:
1909 // TOS + 0: return address. 1908 // TOS + 0: return address.
1910 // TOS + 1: instantiator type arguments or NULL. 1909 // TOS + 1: instantiator type arguments or NULL.
1911 // TOS + 2: instance. 1910 // TOS + 2: instance.
1912 // TOS + 3: cache array. 1911 // TOS + 3: cache array.
1913 // Result in R1: null -> not found, otherwise result (true or false). 1912 // Result in R1: null -> not found, otherwise result (true or false).
(...skipping 43 matching lines...)
1957 ASSERT(kExceptionObjectReg == R0); 1956 ASSERT(kExceptionObjectReg == R0);
1958 ASSERT(kStackTraceObjectReg == R1); 1957 ASSERT(kStackTraceObjectReg == R1);
1959 __ mov(LR, R0); // Program counter. 1958 __ mov(LR, R0); // Program counter.
1960 __ mov(SP, R1); // Stack pointer. 1959 __ mov(SP, R1); // Stack pointer.
1961 __ mov(FP, R2); // Frame pointer. 1960 __ mov(FP, R2); // Frame pointer.
1962 __ mov(R0, R3); // Exception object. 1961 __ mov(R0, R3); // Exception object.
1963 __ mov(R1, R4); // StackTrace object. 1962 __ mov(R1, R4); // StackTrace object.
1964 __ mov(THR, R5); 1963 __ mov(THR, R5);
1965 __ LoadIsolate(R5); 1964 __ LoadIsolate(R5);
1966 // Set the tag. 1965 // Set the tag.
1967 __ LoadImmediate(R2, VMTag::kDartTagId, kNoPP); 1966 __ LoadImmediate(R2, VMTag::kDartTagId);
1968 __ StoreToOffset(R2, R5, Isolate::vm_tag_offset(), kNoPP); 1967 __ StoreToOffset(R2, R5, Isolate::vm_tag_offset());
1969 // Clear top exit frame. 1968 // Clear top exit frame.
1970 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset(), kNoPP); 1969 __ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
1971 __ ret(); // Jump to the exception handler code. 1970 __ ret(); // Jump to the exception handler code.
1972 } 1971 }
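Note the ARM64 idiom here: ret branches to LR, so loading the handler's program counter into LR turns the final ret into an indirect jump rather than a normal return. From the R0..R5 register comments, the stub's incoming arguments correspond to a C-level prototype roughly like the following; this is a hypothetical reconstruction for illustration, not the VM's actual declaration:

    #include <cstdint>
    typedef uintptr_t uword;  // assumed alias matching the VM's convention
    struct RawObject;         // opaque here
    struct Thread;            // opaque here

    extern "C" void JumpToExceptionHandler(
        uword program_counter,  // R0 -> LR; the final ret lands here
        uword stack_pointer,    // R1 -> SP
        uword frame_pointer,    // R2 -> FP
        RawObject* exception,   // R3 -> R0 (kExceptionObjectReg)
        RawObject* stacktrace,  // R4 -> R1 (kStackTraceObjectReg)
        Thread* thread);        // R5 -> THR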
1973 1972
1974 1973
1975 // Calls to the runtime to optimize the given function. 1974 // Calls to the runtime to optimize the given function.
1976 // R6: function to be re-optimized. 1975 // R6: function to be re-optimized.
1977 // R4: argument descriptor (preserved). 1976 // R4: argument descriptor (preserved).
1978 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { 1977 void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
1979 __ EnterStubFrame(); 1978 __ EnterStubFrame();
1980 __ Push(R4); 1979 __ Push(R4);
1981 // Set up space on the stack for the return value. 1980 // Set up space on the stack for the return value.
1982 __ PushObject(Object::null_object(), PP); 1981 __ PushObject(Object::null_object());
1983 __ Push(R6); 1982 __ Push(R6);
1984 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1); 1983 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
1985 __ Pop(R0); // Discard argument. 1984 __ Pop(R0); // Discard argument.
1986 __ Pop(R0); // Get Code object. 1985 __ Pop(R0); // Get Code object.
1987 __ Pop(R4); // Restore argument descriptor. 1986 __ Pop(R4); // Restore argument descriptor.
1988 __ LoadFieldFromOffset(R0, R0, Code::instructions_offset(), kNoPP); 1987 __ LoadFieldFromOffset(R0, R0, Code::instructions_offset());
1989 __ AddImmediate(R0, R0, Instructions::HeaderSize() - kHeapObjectTag, PP); 1988 __ AddImmediate(R0, R0, Instructions::HeaderSize() - kHeapObjectTag);
1990 __ LeaveStubFrame(); 1989 __ LeaveStubFrame();
1991 __ br(R0); 1990 __ br(R0);
1992 __ brk(0); 1991 __ brk(0);
1993 } 1992 }
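After the runtime call returns the optimized Code object, the stub converts it to a raw entry address and tail-branches into it via br. The untagging arithmetic at the end is equivalent to this small sketch (the parameters stand in for the VM's Instructions::HeaderSize() and kHeapObjectTag; subtracting the tag removes the low bit set on heap-object pointers):

    #include <cstdint>

    uintptr_t EntryPoint(uintptr_t tagged_instructions,
                         intptr_t header_size, intptr_t heap_object_tag) {
      // Skip the object header and drop the pointer tag to reach the code.
      return tagged_instructions + header_size - heap_object_tag;
    }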
1994 1993
1995 1994
1996 DECLARE_LEAF_RUNTIME_ENTRY(intptr_t, 1995 DECLARE_LEAF_RUNTIME_ENTRY(intptr_t,
1997 BigintCompare, 1996 BigintCompare,
1998 RawBigint* left, 1997 RawBigint* left,
1999 RawBigint* right); 1998 RawBigint* right);
2000 1999
2001 2000
2002 // Does identical check (object references are equal or not equal) with special 2001 // Does identical check (object references are equal or not equal) with special
2003 // checks for boxed numbers. 2002 // checks for boxed numbers.
2004 // Left and right are pushed on stack. 2003 // Left and right are pushed on stack.
2005 // Returns with the Zero condition flag set if equal. 2004 // Returns with the Zero condition flag set if equal.
2006 // Note: A Mint cannot contain a value that would fit in a Smi, and a 2005 // Note: A Mint cannot contain a value that would fit in a Smi, and a
2007 // Bigint cannot contain a value that fits in a Mint or a Smi. 2006 // Bigint cannot contain a value that fits in a Mint or a Smi.
2008 static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler, 2007 static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
2009 const Register left, 2008 const Register left,
2010 const Register right) { 2009 const Register right) {
2011 Label reference_compare, done, check_mint, check_bigint; 2010 Label reference_compare, done, check_mint, check_bigint;
2012 // If either argument is a Smi, do a reference compare. 2011 // If either argument is a Smi, do a reference compare.
2013 __ tsti(left, Immediate(kSmiTagMask)); 2012 __ tsti(left, Immediate(kSmiTagMask));
2014 __ b(&reference_compare, EQ); 2013 __ b(&reference_compare, EQ);
2015 __ tsti(right, Immediate(kSmiTagMask)); 2014 __ tsti(right, Immediate(kSmiTagMask));
2016 __ b(&reference_compare, EQ); 2015 __ b(&reference_compare, EQ);
2017 2016
2018 // Value compare for two doubles. 2017 // Value compare for two doubles.
2019 __ CompareClassId(left, kDoubleCid, kNoPP); 2018 __ CompareClassId(left, kDoubleCid);
2020 __ b(&check_mint, NE); 2019 __ b(&check_mint, NE);
2021 __ CompareClassId(right, kDoubleCid, kNoPP); 2020 __ CompareClassId(right, kDoubleCid);
2022 __ b(&done, NE); 2021 __ b(&done, NE);
2023 2022
2024 // Double values bitwise compare. 2023 // Double values bitwise compare.
2025 __ LoadFieldFromOffset(left, left, Double::value_offset(), kNoPP); 2024 __ LoadFieldFromOffset(left, left, Double::value_offset());
2026 __ LoadFieldFromOffset(right, right, Double::value_offset(), kNoPP); 2025 __ LoadFieldFromOffset(right, right, Double::value_offset());
2027 __ CompareRegisters(left, right); 2026 __ CompareRegisters(left, right);
2028 __ b(&done); 2027 __ b(&done);
2029 2028
2030 __ Bind(&check_mint); 2029 __ Bind(&check_mint);
2031 __ CompareClassId(left, kMintCid, kNoPP); 2030 __ CompareClassId(left, kMintCid);
2032 __ b(&check_bigint, NE); 2031 __ b(&check_bigint, NE);
2033 __ CompareClassId(right, kMintCid, kNoPP); 2032 __ CompareClassId(right, kMintCid);
2034 __ b(&done, NE); 2033 __ b(&done, NE);
2035 __ LoadFieldFromOffset(left, left, Mint::value_offset(), kNoPP); 2034 __ LoadFieldFromOffset(left, left, Mint::value_offset());
2036 __ LoadFieldFromOffset(right, right, Mint::value_offset(), kNoPP); 2035 __ LoadFieldFromOffset(right, right, Mint::value_offset());
2037 __ b(&done); 2036 __ b(&done);
2038 2037
2039 __ Bind(&check_bigint); 2038 __ Bind(&check_bigint);
2040 __ CompareClassId(left, kBigintCid, kNoPP); 2039 __ CompareClassId(left, kBigintCid);
2041 __ b(&reference_compare, NE); 2040 __ b(&reference_compare, NE);
2042 __ CompareClassId(right, kBigintCid, kNoPP); 2041 __ CompareClassId(right, kBigintCid);
2043 __ b(&done, NE); 2042 __ b(&done, NE);
2044 __ EnterStubFrame(); 2043 __ EnterStubFrame();
2045 __ ReserveAlignedFrameSpace(2 * kWordSize); 2044 __ ReserveAlignedFrameSpace(2 * kWordSize);
2046 __ StoreToOffset(left, SP, 0 * kWordSize, kNoPP); 2045 __ StoreToOffset(left, SP, 0 * kWordSize);
2047 __ StoreToOffset(right, SP, 1 * kWordSize, kNoPP); 2046 __ StoreToOffset(right, SP, 1 * kWordSize);
2048 __ CallRuntime(kBigintCompareRuntimeEntry, 2); 2047 __ CallRuntime(kBigintCompareRuntimeEntry, 2);
2049 // Result in R0, 0 means equal. 2048 // Result in R0, 0 means equal.
2050 __ LeaveStubFrame(); 2049 __ LeaveStubFrame();
2051 __ cmp(R0, Operand(0)); 2050 __ cmp(R0, Operand(0));
2052 __ b(&done); 2051 __ b(&done);
2053 2052
2054 __ Bind(&reference_compare); 2053 __ Bind(&reference_compare);
2055 __ CompareRegisters(left, right); 2054 __ CompareRegisters(left, right);
2056 __ Bind(&done); 2055 __ Bind(&done);
2057 } 2056 }
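The branch structure above amounts to a small decision tree over class ids. A minimal C++ sketch follows, assuming illustrative helpers for the class-id tests and 64-bit payload loads; none of these names are VM API, and the real stub compares raw register bits and reaches BigintCompare through a runtime entry:

    #include <cstdint>
    struct Object;  // opaque heap object

    bool IsSmi(const Object* o);                          // assumed helper
    bool HasCid(const Object* o, int cid);                // assumed helper
    int64_t ValueBits(const Object* o);                   // Double/Mint payload
    int BigintCompare(const Object* a, const Object* b);  // runtime entry

    enum { kDoubleCid = 1, kMintCid = 2, kBigintCid = 3 };  // placeholder ids

    bool IdenticalWithNumberCheck(const Object* left, const Object* right) {
      if (IsSmi(left) || IsSmi(right)) return left == right;  // reference
      if (HasCid(left, kDoubleCid))  // bitwise-equal doubles are identical
        return HasCid(right, kDoubleCid) && ValueBits(left) == ValueBits(right);
      if (HasCid(left, kMintCid))
        return HasCid(right, kMintCid) && ValueBits(left) == ValueBits(right);
      if (HasCid(left, kBigintCid))
        return HasCid(right, kBigintCid) && BigintCompare(left, right) == 0;
      return left == right;  // everything else: plain reference compare
    }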
2058 2057
2059 2058
2060 // Called only from unoptimized code. All relevant registers have been saved. 2059 // Called only from unoptimized code. All relevant registers have been saved.
2061 // LR: return address. 2060 // LR: return address.
2062 // SP + 8: left operand. 2061 // SP + 8: left operand.
2063 // SP + 0: right operand. 2062 // SP + 0: right operand.
2064 // Returns with the Zero condition flag set if equal. 2063 // Returns with the Zero condition flag set if equal.
2065 void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub( 2064 void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
2066 Assembler* assembler) { 2065 Assembler* assembler) {
2067 // Check single stepping. 2066 // Check single stepping.
2068 Label stepping, done_stepping; 2067 Label stepping, done_stepping;
2069 if (FLAG_support_debugger) { 2068 if (FLAG_support_debugger) {
2070 __ LoadIsolate(R1); 2069 __ LoadIsolate(R1);
2071 __ LoadFromOffset( 2070 __ LoadFromOffset(R1, R1, Isolate::single_step_offset(), kUnsignedByte);
2072 R1, R1, Isolate::single_step_offset(), kNoPP, kUnsignedByte); 2071 __ CompareImmediate(R1, 0);
2073 __ CompareImmediate(R1, 0, kNoPP);
2074 __ b(&stepping, NE); 2072 __ b(&stepping, NE);
2075 __ Bind(&done_stepping); 2073 __ Bind(&done_stepping);
2076 } 2074 }
2077 2075
2078 const Register left = R1; 2076 const Register left = R1;
2079 const Register right = R0; 2077 const Register right = R0;
2080 __ LoadFromOffset(left, SP, 1 * kWordSize, kNoPP); 2078 __ LoadFromOffset(left, SP, 1 * kWordSize);
2081 __ LoadFromOffset(right, SP, 0 * kWordSize, kNoPP); 2079 __ LoadFromOffset(right, SP, 0 * kWordSize);
2082 GenerateIdenticalWithNumberCheckStub(assembler, left, right); 2080 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
2083 __ ret(); 2081 __ ret();
2084 2082
2085 if (FLAG_support_debugger) { 2083 if (FLAG_support_debugger) {
2086 __ Bind(&stepping); 2084 __ Bind(&stepping);
2087 __ EnterStubFrame(); 2085 __ EnterStubFrame();
2088 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0); 2086 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2089 __ LeaveStubFrame(); 2087 __ LeaveStubFrame();
2090 __ b(&done_stepping); 2088 __ b(&done_stepping);
2091 } 2089 }
2092 } 2090 }
2093 2091
2094 2092
2095 // Called from optimized code only. 2093 // Called from optimized code only.
2096 // LR: return address. 2094 // LR: return address.
2097 // SP + 8: left operand. 2095 // SP + 8: left operand.
2098 // SP + 0: right operand. 2096 // SP + 0: right operand.
2099 // Returns with the Zero condition flag set if equal. 2097 // Returns with the Zero condition flag set if equal.
2100 void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub( 2098 void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
2101 Assembler* assembler) { 2099 Assembler* assembler) {
2102 const Register left = R1; 2100 const Register left = R1;
2103 const Register right = R0; 2101 const Register right = R0;
2104 __ LoadFromOffset(left, SP, 1 * kWordSize, kNoPP); 2102 __ LoadFromOffset(left, SP, 1 * kWordSize);
2105 __ LoadFromOffset(right, SP, 0 * kWordSize, kNoPP); 2103 __ LoadFromOffset(right, SP, 0 * kWordSize);
2106 GenerateIdenticalWithNumberCheckStub(assembler, left, right); 2104 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
2107 __ ret(); 2105 __ ret();
2108 } 2106 }
2109 2107
2110 2108
2111 void StubCode::EmitMegamorphicLookup( 2109 void StubCode::EmitMegamorphicLookup(
2112 Assembler* assembler, Register receiver, Register cache, Register target) { 2110 Assembler* assembler, Register receiver, Register cache, Register target) {
2113 ASSERT((cache != R0) && (cache != R2)); 2111 ASSERT((cache != R0) && (cache != R2));
2114 __ LoadTaggedClassIdMayBeSmi(R0, receiver); 2112 __ LoadTaggedClassIdMayBeSmi(R0, receiver);
2115 // R0: class ID of the receiver (smi). 2113 // R0: class ID of the receiver (smi).
2116 __ LoadFieldFromOffset(R2, cache, MegamorphicCache::buckets_offset(), PP); 2114 __ LoadFieldFromOffset(R2, cache, MegamorphicCache::buckets_offset());
2117 __ LoadFieldFromOffset(R1, cache, MegamorphicCache::mask_offset(), PP); 2115 __ LoadFieldFromOffset(R1, cache, MegamorphicCache::mask_offset());
2118 // R2: cache buckets array. 2116 // R2: cache buckets array.
2119 // R1: mask. 2117 // R1: mask.
2120 __ mov(R3, R0); 2118 __ mov(R3, R0);
2121 2119
2122 Label loop, update, call_target_function; 2120 Label loop, update, call_target_function;
2123 __ b(&loop); 2121 __ b(&loop);
2124 2122
2125 __ Bind(&update); 2123 __ Bind(&update);
2126 __ add(R3, R3, Operand(Smi::RawValue(1))); 2124 __ add(R3, R3, Operand(Smi::RawValue(1)));
2127 __ Bind(&loop); 2125 __ Bind(&loop);
2128 __ and_(R3, R3, Operand(R1)); 2126 __ and_(R3, R3, Operand(R1));
2129 const intptr_t base = Array::data_offset(); 2127 const intptr_t base = Array::data_offset();
2130 // R3 is smi tagged, but table entries are 16 bytes, so LSL 3. 2128 // R3 is smi tagged, but table entries are 16 bytes, so LSL 3.
2131 __ add(TMP, R2, Operand(R3, LSL, 3)); 2129 __ add(TMP, R2, Operand(R3, LSL, 3));
2132 __ LoadFieldFromOffset(R4, TMP, base, PP); 2130 __ LoadFieldFromOffset(R4, TMP, base);
2133 2131
2134 ASSERT(kIllegalCid == 0); 2132 ASSERT(kIllegalCid == 0);
2135 __ tst(R4, Operand(R4)); 2133 __ tst(R4, Operand(R4));
2136 __ b(&call_target_function, EQ); 2134 __ b(&call_target_function, EQ);
2137 __ CompareRegisters(R4, R0); 2135 __ CompareRegisters(R4, R0);
2138 __ b(&update, NE); 2136 __ b(&update, NE);
2139 2137
2140 __ Bind(&call_target_function); 2138 __ Bind(&call_target_function);
2141 // Call the target found in the cache. For a class id match, this is a 2139 // Call the target found in the cache. For a class id match, this is a
2142 // proper target for the given name and arguments descriptor. If the 2140 // proper target for the given name and arguments descriptor. If the
2143 // illegal class id was found, the target is a cache miss handler that can 2141 // illegal class id was found, the target is a cache miss handler that can
2144 // be invoked as a normal Dart function. 2142 // be invoked as a normal Dart function.
2145 __ add(TMP, R2, Operand(R3, LSL, 3)); 2143 __ add(TMP, R2, Operand(R3, LSL, 3));
2146 __ LoadFieldFromOffset(R0, TMP, base + kWordSize, PP); 2144 __ LoadFieldFromOffset(R0, TMP, base + kWordSize);
2147 __ LoadFieldFromOffset(R1, R0, Function::instructions_offset(), PP); 2145 __ LoadFieldFromOffset(R1, R0, Function::instructions_offset());
2148 // TODO(srdjan): Evaluate performance impact of moving the instruction below 2146 // TODO(srdjan): Evaluate performance impact of moving the instruction below
2149 // to the call site, instead of having it here. 2147 // to the call site, instead of having it here.
2150 __ AddImmediate(target, R1, Instructions::HeaderSize() - kHeapObjectTag, PP); 2148 __ AddImmediate(target, R1, Instructions::HeaderSize() - kHeapObjectTag);
2151 } 2149 }
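The probe sequence above is open addressing over a power-of-two table of two-word buckets, where kIllegalCid (0) marks an entry whose target is the cache-miss handler. A self-contained sketch with an assumed bucket layout follows; recall that Smi-tagged values are the raw value shifted left by one, which explains the +2 step and the >>1 untag:

    #include <cstdint>

    struct Bucket { intptr_t cid; void* target; };  // 16 bytes on 64-bit

    // probe and mask stay Smi-tagged throughout, like R3 and R1 above.
    void* MegamorphicLookup(const Bucket* buckets, intptr_t mask,
                            intptr_t tagged_cid) {
      intptr_t probe = tagged_cid;
      for (;;) {
        probe &= mask;                          // wrap within the table
        const Bucket& b = buckets[probe >> 1];  // untag to index entries
        if (b.cid == 0 /* kIllegalCid */ || b.cid == tagged_cid)
          return b.target;                      // hit, or the miss handler
        probe += 2;                             // Smi::RawValue(1) == 2
      }
    }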
2152 2150
2153 2151
2154 // Called from megamorphic calls. 2152 // Called from megamorphic calls.
2155 // R0: receiver. 2153 // R0: receiver.
2156 // R1: lookup cache. 2154 // R1: lookup cache.
2157 // Result: 2155 // Result:
2158 // R1: entry point. 2156 // R1: entry point.
2159 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) { 2157 void StubCode::GenerateMegamorphicLookupStub(Assembler* assembler) {
2160 EmitMegamorphicLookup(assembler, R0, R1, R1); 2158 EmitMegamorphicLookup(assembler, R0, R1, R1);
2161 __ ret(); 2159 __ ret();
2162 } 2160 }
2163 2161
2164 } // namespace dart 2162 } // namespace dart
2165 2163
2166 #endif // defined TARGET_ARCH_ARM64 2164 #endif // defined TARGET_ARCH_ARM64