OLD (this change deletes the file; the NEW side of the diff is empty):
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"
#if defined(TARGET_ARCH_MIPS)

#include "vm/assembler.h"
#include "vm/compiler.h"
#include "vm/dart_entry.h"
#include "vm/flow_graph_compiler.h"
#include "vm/heap.h"
#include "vm/instructions.h"
#include "vm/object_store.h"
#include "vm/runtime_entry.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/tags.h"

#define __ assembler->

namespace dart {

DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects.");
DEFINE_FLAG(bool,
            use_slow_path,
            false,
            "Set to true for debugging & verifying the slow paths.");
DECLARE_FLAG(bool, trace_optimized_ic_calls);

// Input parameters:
//   RA : return address.
//   SP : address of last argument in argument array.
//   SP + 4*S4 - 4 : address of first argument in argument array.
//   SP + 4*S4 : address of return value.
//   S5 : address of the runtime function to call.
//   S4 : number of arguments to the call.
void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) {
  const intptr_t thread_offset = NativeArguments::thread_offset();
  const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
  const intptr_t argv_offset = NativeArguments::argv_offset();
  const intptr_t retval_offset = NativeArguments::retval_offset();

  __ SetPrologueOffset();
  __ Comment("CallToRuntimeStub");
  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to Dart VM C++ code.
  __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset()));

#if defined(DEBUG)
  {
    Label ok;
    // Check that we are always entering from Dart code.
    __ lw(T0, Assembler::VMTagAddress());
    __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing VM code.
  __ sw(S5, Assembler::VMTagAddress());

  // Reserve space for arguments and align frame before entering C++ world.
  // NativeArguments are passed in registers.
  ASSERT(sizeof(NativeArguments) == 4 * kWordSize);
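  // For reference, NativeArguments is laid out roughly as the following
  // sketch (field names inferred from the offsets asserted below):
  //   struct NativeArguments {
  //     Thread* thread_;       // 0 * kWordSize
  //     intptr_t argc_tag_;    // 1 * kWordSize
  //     RawObject** argv_;     // 2 * kWordSize
  //     RawObject** retval_;   // 3 * kWordSize
  //   };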
  __ ReserveAlignedFrameSpace(4 * kWordSize);  // Reserve space for arguments.

  // Pass NativeArguments structure by value and call runtime.
  // Registers A0, A1, A2, and A3 are used.

  ASSERT(thread_offset == 0 * kWordSize);
  // Set thread in NativeArgs.
  __ mov(A0, THR);

  // There are no runtime calls to closures, so we do not need to set the tag
  // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
  ASSERT(argc_tag_offset == 1 * kWordSize);
  __ mov(A1, S4);  // Set argc in NativeArguments.

  ASSERT(argv_offset == 2 * kWordSize);
  __ sll(A2, S4, 2);
  __ addu(A2, FP, A2);  // Compute argv.
  // Set argv in NativeArguments.
  __ addiu(A2, A2, Immediate(kParamEndSlotFromFp * kWordSize));
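  // Net effect of the three instructions above:
  //   A2 = FP + (S4 + kParamEndSlotFromFp) * kWordSize,
  // i.e. the address of the first argument (one word below the return-value
  // slot, which A3 receives below).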

  // Call runtime or redirection via simulator.
  // We defensively always jalr through T9 because it is sometimes required by
  // the MIPS ABI.
  __ mov(T9, S5);
  __ jalr(T9);

  ASSERT(retval_offset == 3 * kWordSize);
  // Retval is next to 1st argument.
  __ delay_slot()->addiu(A3, A2, Immediate(kWordSize));
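  // Note that delay_slot() places the addiu in the branch delay slot of the
  // jalr above, so A3 (the retval pointer) is set before control actually
  // transfers to the runtime entry.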
  __ Comment("CallToRuntimeStub return");

  // Mark that the thread is executing Dart code.
  __ LoadImmediate(A2, VMTag::kDartTagId);
  __ sw(A2, Assembler::VMTagAddress());

  // Reset exit frame information in Isolate structure.
  __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));

  __ LeaveStubFrameAndReturn();
}


// Print the stop message.
DEFINE_LEAF_RUNTIME_ENTRY(void, PrintStopMessage, 1, const char* message) {
  OS::Print("Stop message: %s\n", message);
}
END_LEAF_RUNTIME_ENTRY


// Input parameters:
//   A0 : stop message (const char*).
// Must preserve all registers.
void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) {
  __ EnterCallRuntimeFrame(0);
  // Call the runtime leaf function. A0 already contains the parameter.
  __ CallRuntime(kPrintStopMessageRuntimeEntry, 1);
  __ LeaveCallRuntimeFrame();
  __ Ret();
}


// Input parameters:
//   RA : return address.
//   SP : address of return value.
//   T5 : address of the native function to call.
//   A2 : address of first argument in argument array.
//   A1 : argc_tag including number of arguments and function kind.
static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
                                              Address wrapper) {
  const intptr_t thread_offset = NativeArguments::thread_offset();
  const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
  const intptr_t argv_offset = NativeArguments::argv_offset();
  const intptr_t retval_offset = NativeArguments::retval_offset();

  __ SetPrologueOffset();
  __ Comment("CallNativeCFunctionStub");
  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to native code.
  __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset()));

#if defined(DEBUG)
  {
    Label ok;
    // Check that we are always entering from Dart code.
    __ lw(T0, Assembler::VMTagAddress());
    __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing native code.
  __ sw(T5, Assembler::VMTagAddress());

  // Initialize NativeArguments structure and call native function.
  // Registers A0, A1, A2, and A3 are used.

  ASSERT(thread_offset == 0 * kWordSize);
  // Set thread in NativeArgs.
  __ mov(A0, THR);

  // There are no native calls to closures, so we do not need to set the tag
  // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
  ASSERT(argc_tag_offset == 1 * kWordSize);
  // Set argc in NativeArguments: A1 already contains argc.

  ASSERT(argv_offset == 2 * kWordSize);
  // Set argv in NativeArguments: A2 already contains argv.

  ASSERT(retval_offset == 3 * kWordSize);
  // Set retval in NativeArgs.
  __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize));

  // Passing the structure by value as in runtime calls would require changing
  // Dart API for native functions.
  // For now, space is reserved on the stack and we pass a pointer to it.
  __ addiu(SP, SP, Immediate(-4 * kWordSize));
  __ sw(A3, Address(SP, 3 * kWordSize));
  __ sw(A2, Address(SP, 2 * kWordSize));
  __ sw(A1, Address(SP, 1 * kWordSize));
  __ sw(A0, Address(SP, 0 * kWordSize));
  __ mov(A0, SP);  // Pass the pointer to the NativeArguments.


  __ mov(A1, T5);  // Pass the function entrypoint.
  __ ReserveAlignedFrameSpace(2 * kWordSize);  // Just passing A0, A1.

  // Call native wrapper function or redirection via simulator.
  __ lw(T9, wrapper);
  __ jalr(T9);
  __ Comment("CallNativeCFunctionStub return");

  // Mark that the thread is executing Dart code.
  __ LoadImmediate(A2, VMTag::kDartTagId);
  __ sw(A2, Assembler::VMTagAddress());

  // Reset exit frame information in Isolate structure.
  __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));

  __ LeaveStubFrameAndReturn();
}


void StubCode::GenerateCallNoScopeNativeStub(Assembler* assembler) {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR, Thread::no_scope_native_wrapper_entry_point_offset()));
}


void StubCode::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
  GenerateCallNativeWithWrapperStub(
      assembler,
      Address(THR, Thread::auto_scope_native_wrapper_entry_point_offset()));
}


// Input parameters:
//   RA : return address.
//   SP : address of return value.
//   T5 : address of the native function to call.
//   A2 : address of first argument in argument array.
//   A1 : argc_tag including number of arguments and function kind.
void StubCode::GenerateCallBootstrapNativeStub(Assembler* assembler) {
  const intptr_t thread_offset = NativeArguments::thread_offset();
  const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset();
  const intptr_t argv_offset = NativeArguments::argv_offset();
  const intptr_t retval_offset = NativeArguments::retval_offset();

  __ SetPrologueOffset();
  __ Comment("CallNativeCFunctionStub");
  __ EnterStubFrame();

  // Save exit frame information to enable stack walking as we are about
  // to transition to native code.
  __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset()));

#if defined(DEBUG)
  {
    Label ok;
    // Check that we are always entering from Dart code.
    __ lw(T0, Assembler::VMTagAddress());
    __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok);
    __ Stop("Not coming from Dart code.");
    __ Bind(&ok);
  }
#endif

  // Mark that the thread is executing native code.
  __ sw(T5, Assembler::VMTagAddress());

  // Initialize NativeArguments structure and call native function.
  // Registers A0, A1, A2, and A3 are used.

  ASSERT(thread_offset == 0 * kWordSize);
  // Set thread in NativeArgs.
  __ mov(A0, THR);

  // There are no native calls to closures, so we do not need to set the tag
  // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_.
  ASSERT(argc_tag_offset == 1 * kWordSize);
  // Set argc in NativeArguments: A1 already contains argc.

  ASSERT(argv_offset == 2 * kWordSize);
  // Set argv in NativeArguments: A2 already contains argv.

  ASSERT(retval_offset == 3 * kWordSize);
  // Set retval in NativeArgs.
  __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize));

  // Passing the structure by value as in runtime calls would require changing
  // Dart API for native functions.
  // For now, space is reserved on the stack and we pass a pointer to it.
  __ addiu(SP, SP, Immediate(-4 * kWordSize));
  __ sw(A3, Address(SP, 3 * kWordSize));
  __ sw(A2, Address(SP, 2 * kWordSize));
  __ sw(A1, Address(SP, 1 * kWordSize));
  __ sw(A0, Address(SP, 0 * kWordSize));
  __ mov(A0, SP);  // Pass the pointer to the NativeArguments.

  __ ReserveAlignedFrameSpace(kWordSize);  // Just passing A0.

  // Call native function or redirection via simulator.

  // We defensively always jalr through T9 because it is sometimes required by
  // the MIPS ABI.
  __ mov(T9, T5);
  __ jalr(T9);
  __ Comment("CallNativeCFunctionStub return");

  // Mark that the thread is executing Dart code.
  __ LoadImmediate(A2, VMTag::kDartTagId);
  __ sw(A2, Assembler::VMTagAddress());

  // Reset exit frame information in Isolate structure.
  __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));

  __ LeaveStubFrameAndReturn();
}


// Input parameters:
//   S4: arguments descriptor array.
void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) {
  __ Comment("CallStaticFunctionStub");
  __ EnterStubFrame();
  // Set up space on stack for return value and preserve arguments descriptor.

  __ addiu(SP, SP, Immediate(-2 * kWordSize));
  __ sw(S4, Address(SP, 1 * kWordSize));
  __ sw(ZR, Address(SP, 0 * kWordSize));

  __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
  __ Comment("CallStaticFunctionStub return");

  // Get Code object result and restore arguments descriptor array.
  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
  __ lw(S4, Address(SP, 1 * kWordSize));
  __ addiu(SP, SP, Immediate(2 * kWordSize));

  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));

  // Remove the stub frame as we are about to jump to the dart function.
  __ LeaveStubFrameAndReturn(T0);
}


// Called from a static call only when invalid code has been entered (invalid
// because its function was optimized or deoptimized).
// S4: arguments descriptor array.
void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) {
  // Load code pointer to this stub from the thread:
  // the one that is passed in is not correct - it points to the code object
  // that needs to be replaced.
  __ lw(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset()));
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  // Set up space on stack for return value and preserve arguments descriptor.
  __ addiu(SP, SP, Immediate(-2 * kWordSize));
  __ sw(S4, Address(SP, 1 * kWordSize));
  __ sw(ZR, Address(SP, 0 * kWordSize));
  __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
  // Get Code object result and restore arguments descriptor array.
  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
  __ lw(S4, Address(SP, 1 * kWordSize));
  __ addiu(SP, SP, Immediate(2 * kWordSize));

  // Jump to the dart function.
  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));

  // Remove the stub frame.
  __ LeaveStubFrameAndReturn(T0);
}


// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) {
  // Load code pointer to this stub from the thread:
  // the one that is passed in is not correct - it points to the code object
  // that needs to be replaced.
  __ lw(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset()));
  __ EnterStubFrame();
  // Set up space on stack for return value.
  __ addiu(SP, SP, Immediate(-1 * kWordSize));
  __ sw(ZR, Address(SP, 0 * kWordSize));
  __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
  // Get Code object result.
  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
  __ addiu(SP, SP, Immediate(1 * kWordSize));

  // Jump to the dart function.
  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));

  // Remove the stub frame.
  __ LeaveStubFrameAndReturn(T0);
}


// Input parameters:
//   A1: Smi-tagged argument count, may be zero.
//   FP[kParamEndSlotFromFp + 1]: Last argument.
static void PushArgumentsArray(Assembler* assembler) {
  __ Comment("PushArgumentsArray");
  // Allocate array to store arguments of caller.
  __ LoadObject(A0, Object::null_object());
  // A0: Null element type for raw Array.
  // A1: Smi-tagged argument count, may be zero.
  __ BranchLink(*StubCode::AllocateArray_entry());
  __ Comment("PushArgumentsArray return");
  // V0: newly allocated array.
  // A1: Smi-tagged argument count, may be zero (was preserved by the stub).
  __ Push(V0);  // Array is in V0 and on top of stack.
  __ sll(T1, A1, 1);
  __ addu(T1, FP, T1);
  __ AddImmediate(T1, kParamEndSlotFromFp * kWordSize);
  // T1: address of first argument on stack.
  // T2: address of first argument in array.

  Label loop, loop_exit;
  __ blez(A1, &loop_exit);
  __ delay_slot()->addiu(T2, V0,
                         Immediate(Array::data_offset() - kHeapObjectTag));
  __ Bind(&loop);
  __ lw(T3, Address(T1));
  __ addiu(A1, A1, Immediate(-Smi::RawValue(1)));
  __ addiu(T1, T1, Immediate(-kWordSize));
  __ addiu(T2, T2, Immediate(kWordSize));
  __ bgez(A1, &loop);
  __ delay_slot()->sw(T3, Address(T2, -kWordSize));
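  // The copy above walks the caller's stack downward (T1 -= kWordSize) while
  // filling the array upward (T2 += kWordSize); the sw in the bgez delay slot
  // stores through the already-incremented T2, hence the -kWordSize offset.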
  __ Bind(&loop_exit);
}


// Used by eager and lazy deoptimization. Preserve result in V0 if necessary.
// This stub translates an optimized frame into an unoptimized frame. The
// optimized frame can contain values in registers and on the stack; the
// unoptimized frame contains all values on the stack.
// Deoptimization occurs in following steps:
// - Push all registers that can contain values.
// - Call C routine to copy the stack and saved registers into temporary buffer.
// - Adjust caller's frame to correct unoptimized frame size.
// - Fill the unoptimized frame.
// - Materialize objects that require allocation (e.g. Double instances).
// GC can occur only after frame is fully rewritten.
// Stack after EnterFrame(...) below:
//   +------------------+
//   | Saved PP         | <- TOS
//   +------------------+
//   | Saved CODE_REG   |
//   +------------------+
//   | Saved FP         | <- FP of stub
//   +------------------+
//   | Saved LR         |  (deoptimization point)
//   +------------------+
//   | Saved CODE_REG   |
//   +------------------+
//   | ...              | <- SP of optimized frame
//
// Parts of this code cannot trigger GC; other parts can.
static void GenerateDeoptimizationSequence(Assembler* assembler,
                                           DeoptStubKind kind) {
  const intptr_t kPushedRegistersSize =
      kNumberOfCpuRegisters * kWordSize + kNumberOfFRegisters * kWordSize;

  __ SetPrologueOffset();
  __ Comment("GenerateDeoptimizationSequence");
  // DeoptimizeCopyFrame expects a Dart frame.
  __ EnterStubFrame(kPushedRegistersSize);

  // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
  // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
  const intptr_t saved_result_slot_from_fp =
      kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0);
  const intptr_t saved_exception_slot_from_fp =
      kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0);
  const intptr_t saved_stacktrace_slot_from_fp =
      kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V1);
  // Result in V0 is preserved as part of pushing all registers below.

  // Push registers in their enumeration order: lowest register number at
  // lowest address.
  for (int i = 0; i < kNumberOfCpuRegisters; i++) {
    const int slot = kNumberOfCpuRegisters - i;
    Register reg = static_cast<Register>(i);
    if (reg == CODE_REG) {
      // Save the original value of CODE_REG pushed before invoking this stub
      // instead of the value used to call this stub.
      COMPILE_ASSERT(TMP < CODE_REG);  // Assert TMP is pushed first.
      __ lw(TMP, Address(FP, kCallerSpSlotFromFp * kWordSize));
      __ sw(TMP, Address(SP, kPushedRegistersSize - slot * kWordSize));
    } else {
      __ sw(reg, Address(SP, kPushedRegistersSize - slot * kWordSize));
    }
  }
  for (int i = 0; i < kNumberOfFRegisters; i++) {
    // These go below the CPU registers.
    const int slot = kNumberOfCpuRegisters + kNumberOfFRegisters - i;
    FRegister reg = static_cast<FRegister>(i);
    __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize));
  }

  __ mov(A0, SP);  // Pass address of saved registers block.
  bool is_lazy =
      (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
  __ LoadImmediate(A1, is_lazy ? 1 : 0);
  __ ReserveAlignedFrameSpace(1 * kWordSize);
  __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2);
  // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address.

  if (kind == kLazyDeoptFromReturn) {
    // Restore result into T1 temporarily.
    __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize));
  } else if (kind == kLazyDeoptFromThrow) {
    // Restore result into T1 temporarily.
    __ lw(T1, Address(FP, saved_exception_slot_from_fp * kWordSize));
    __ lw(T2, Address(FP, saved_stacktrace_slot_from_fp * kWordSize));
  }

  __ RestoreCodePointer();
  __ LeaveDartFrame();
  __ subu(SP, FP, V0);

  // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
  // is no need to set the correct PC marker or load PP, since they get patched.
  __ EnterStubFrame();

  __ mov(A0, FP);  // Get last FP address.
  if (kind == kLazyDeoptFromReturn) {
    __ Push(T1);  // Preserve result as first local.
  } else if (kind == kLazyDeoptFromThrow) {
    __ Push(T1);  // Preserve exception as first local.
    __ Push(T2);  // Preserve stacktrace as second local.
  }
  __ ReserveAlignedFrameSpace(1 * kWordSize);
  __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);  // Pass last FP in A0.
  if (kind == kLazyDeoptFromReturn) {
    // Restore result into T1.
    __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
  } else if (kind == kLazyDeoptFromThrow) {
    // Restore result into T1.
    __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize));
    __ lw(T2, Address(FP, (kFirstLocalSlotFromFp - 1) * kWordSize));
  }
  // Code above cannot cause GC.
  __ RestoreCodePointer();
  __ LeaveStubFrame();

  // Frame is fully rewritten at this point and it is safe to perform a GC.
  // Materialize any objects that were deferred by FillFrame because they
  // require allocation.
  // Enter stub frame, loading PP. The caller's PP is not materialized yet.
  __ EnterStubFrame();
  if (kind == kLazyDeoptFromReturn) {
    __ Push(T1);  // Preserve result, it will be GC-d here.
  } else if (kind == kLazyDeoptFromThrow) {
    __ Push(T1);  // Preserve exception, it will be GC-d here.
    __ Push(T2);  // Preserve stacktrace, it will be GC-d here.
  }
  __ PushObject(Smi::ZoneHandle());  // Space for the result.
  __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
  // Result tells stub how many bytes to remove from the expression stack
  // of the bottom-most frame. They were used as materialization arguments.
  __ Pop(T1);
  if (kind == kLazyDeoptFromReturn) {
    __ Pop(V0);  // Restore result.
  } else if (kind == kLazyDeoptFromThrow) {
    __ Pop(V1);  // Restore stacktrace.
    __ Pop(V0);  // Restore exception.
  }
  __ LeaveStubFrame();
  // Remove materialization arguments.
  __ SmiUntag(T1);
  __ addu(SP, SP, T1);
  // The caller is responsible for emitting the return instruction.
}

// V0: result, must be preserved
void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) {
  // Push zap value instead of CODE_REG for lazy deopt.
  __ LoadImmediate(TMP, kZapCodeReg);
  __ Push(TMP);
  // Return address for "call" to deopt stub.
  __ LoadImmediate(RA, kZapReturnAddress);
  __ lw(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset()));
  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
  __ Ret();
}


// V0: exception, must be preserved
// V1: stacktrace, must be preserved
void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) {
  // Push zap value instead of CODE_REG for lazy deopt.
  __ LoadImmediate(TMP, kZapCodeReg);
  __ Push(TMP);
  // Return address for "call" to deopt stub.
  __ LoadImmediate(RA, kZapReturnAddress);
  __ lw(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset()));
  GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
  __ Ret();
}


void StubCode::GenerateDeoptimizeStub(Assembler* assembler) {
  GenerateDeoptimizationSequence(assembler, kEagerDeopt);
  __ Ret();
}


static void GenerateDispatcherCode(Assembler* assembler,
                                   Label* call_target_function) {
  __ Comment("NoSuchMethodDispatch");
  // When lazily generated invocation dispatchers are disabled, the
  // miss-handler may return null.
  __ BranchNotEqual(T0, Object::null_object(), call_target_function);
  __ EnterStubFrame();
  // Load the receiver.
  __ lw(A1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
  __ sll(TMP, A1, 1);  // A1 is a Smi.
  __ addu(TMP, FP, TMP);
  __ lw(T6, Address(TMP, kParamEndSlotFromFp * kWordSize));
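  // A1 holds the Smi-tagged count (count << 1), so sll by 1 more yields
  // count * 4 == count * kWordSize; FP plus that offset (plus the
  // kParamEndSlotFromFp adjustment) addresses the receiver, i.e. the first
  // argument.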

  // Push space for the return value.
  // Push the receiver.
  // Push ICData/MegamorphicCache object.
  // Push arguments descriptor array.
  // Push original arguments array.
  __ addiu(SP, SP, Immediate(-4 * kWordSize));
  __ sw(ZR, Address(SP, 3 * kWordSize));
  __ sw(T6, Address(SP, 2 * kWordSize));
  __ sw(S5, Address(SP, 1 * kWordSize));
  __ sw(S4, Address(SP, 0 * kWordSize));

  // Adjust arguments count.
  __ lw(TMP, FieldAddress(S4, ArgumentsDescriptor::type_args_len_offset()));
  Label args_count_ok;
  __ BranchEqual(TMP, Immediate(0), &args_count_ok);
  __ AddImmediate(A1, A1, Smi::RawValue(1));  // Include the type arguments.
  __ Bind(&args_count_ok);

  // A1: Smi-tagged arguments array length.
  PushArgumentsArray(assembler);
  const intptr_t kNumArgs = 4;
  __ CallRuntime(kInvokeNoSuchMethodDispatcherRuntimeEntry, kNumArgs);
  __ lw(V0, Address(SP, 4 * kWordSize));  // Return value.
  __ addiu(SP, SP, Immediate(5 * kWordSize));
  __ LeaveStubFrame();
  __ Ret();
}


void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) {
  __ EnterStubFrame();

  // Load the receiver.
  __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
  __ sll(T2, T2, 1);  // T2 is a Smi.
  __ addu(TMP, FP, T2);
  __ lw(T6, Address(TMP, kParamEndSlotFromFp * kWordSize));

  // Preserve IC data and arguments descriptor.
  __ addiu(SP, SP, Immediate(-6 * kWordSize));
  __ sw(S5, Address(SP, 5 * kWordSize));
  __ sw(S4, Address(SP, 4 * kWordSize));

  // Push space for the return value.
  // Push the receiver.
  // Push IC data object.
  // Push arguments descriptor array.
  __ sw(ZR, Address(SP, 3 * kWordSize));
  __ sw(T6, Address(SP, 2 * kWordSize));
  __ sw(S5, Address(SP, 1 * kWordSize));
  __ sw(S4, Address(SP, 0 * kWordSize));

  __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3);

  __ lw(T0, Address(SP, 3 * kWordSize));  // Get result function.
  __ lw(S4, Address(SP, 4 * kWordSize));  // Restore argument descriptor.
  __ lw(S5, Address(SP, 5 * kWordSize));  // Restore IC data.
  __ addiu(SP, SP, Immediate(6 * kWordSize));

  __ RestoreCodePointer();
  __ LeaveStubFrame();

  if (!FLAG_lazy_dispatchers) {
    Label call_target_function;
    GenerateDispatcherCode(assembler, &call_target_function);
    __ Bind(&call_target_function);
  }

  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ lw(T2, FieldAddress(T0, Function::entry_point_offset()));
  __ jr(T2);
}


// Called for inline allocation of arrays.
// Input parameters:
//   RA: return address.
//   A1: Array length as Smi (must be preserved).
//   A0: array element type (either NULL or an instantiated type).
// NOTE: A1 cannot be clobbered here as the caller relies on it being saved.
// The newly allocated object is returned in V0.
void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
  __ Comment("AllocateArrayStub");
  Label slow_case;
  // Compute the size to be allocated, it is based on the array length
  // and is computed as:
  // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)).
  __ mov(T3, A1);  // Array length.

  // Check that length is a positive Smi.
  __ andi(CMPRES1, T3, Immediate(kSmiTagMask));
  if (FLAG_use_slow_path) {
    __ b(&slow_case);
  } else {
    __ bne(CMPRES1, ZR, &slow_case);
  }
  __ bltz(T3, &slow_case);

  // Check for maximum allowed length.
  const intptr_t max_len =
      reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements));
  __ BranchUnsignedGreater(T3, Immediate(max_len), &slow_case);

  const intptr_t cid = kArrayCid;
  NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, T4, &slow_case));

  const intptr_t fixed_size_plus_alignment_padding =
      sizeof(RawArray) + kObjectAlignment - 1;
  __ LoadImmediate(T2, fixed_size_plus_alignment_padding);
  __ sll(T3, T3, 1);  // T3 is a Smi.
  __ addu(T2, T2, T3);
  ASSERT(kSmiTagShift == 1);
  __ LoadImmediate(T3, ~(kObjectAlignment - 1));
  __ and_(T2, T2, T3);
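  // Worked example of the rounding above (illustrative): a Smi length L is
  // stored as L << 1, so one more sll by 1 gives L * 4 == L * kWordSize on
  // MIPS32; adding sizeof(RawArray) + kObjectAlignment - 1 and masking with
  // ~(kObjectAlignment - 1) rounds the total up to the object alignment.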

  // T2: Allocation size.

  Heap::Space space = Heap::kNew;
  __ lw(T3, Address(THR, Thread::heap_offset()));
  // Potential new object start.
  __ lw(T0, Address(T3, Heap::TopOffset(space)));

  __ addu(T1, T0, T2);  // Potential next object start.
  __ BranchUnsignedLess(T1, T0, &slow_case);  // Branch on unsigned overflow.

  // Check if the allocation fits into the remaining space.
  // T0: potential new object start.
  // T1: potential next object start.
  // T2: allocation size.
  // T3: heap.
  __ lw(T4, Address(T3, Heap::EndOffset(space)));
  __ BranchUnsignedGreaterEqual(T1, T4, &slow_case);

  // Successfully allocated the object(s), now update top to point to
  // next object start and initialize the object.
  // T3: heap.
  __ sw(T1, Address(T3, Heap::TopOffset(space)));
  __ addiu(T0, T0, Immediate(kHeapObjectTag));
  NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T4, space));

  // Initialize the tags.
  // T0: new object start as a tagged pointer.
  // T1: new object end address.
  // T2: allocation size.
  {
    Label overflow, done;
    const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;

    __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag),
                             &overflow);
    __ b(&done);
    __ delay_slot()->sll(T2, T2, shift);
    __ Bind(&overflow);
    __ mov(T2, ZR);
    __ Bind(&done);

    // Get the class index and insert it into the tags.
    // T2: size and bit tags.
    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
    __ or_(T2, T2, TMP);
    __ sw(T2, FieldAddress(T0, Array::tags_offset()));  // Store tags.
  }

  // T0: new object start as a tagged pointer.
  // T1: new object end address.
  // Store the type argument field.
  __ StoreIntoObjectNoBarrier(
      T0, FieldAddress(T0, Array::type_arguments_offset()), A0);

  // Set the length field.
  __ StoreIntoObjectNoBarrier(T0, FieldAddress(T0, Array::length_offset()), A1);

  __ LoadObject(T7, Object::null_object());
  // Initialize all array elements to raw_null.
  // T0: new object start as a tagged pointer.
  // T1: new object end address.
  // T2: iterator which initially points to the start of the variable
  // data area to be initialized.
  // T7: null.
  __ AddImmediate(T2, T0, sizeof(RawArray) - kHeapObjectTag);

  Label done;
  Label init_loop;
  __ Bind(&init_loop);
  __ BranchUnsignedGreaterEqual(T2, T1, &done);
  __ sw(T7, Address(T2, 0));
  __ b(&init_loop);
  __ delay_slot()->addiu(T2, T2, Immediate(kWordSize));
  __ Bind(&done);

  __ Ret();  // Returns the newly allocated object in V0.
  __ delay_slot()->mov(V0, T0);

  // Unable to allocate the array using the fast inline code, just call
  // into the runtime.
  __ Bind(&slow_case);
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  // Set up space on stack for return value.
  // Push array length as Smi and element type.
  __ addiu(SP, SP, Immediate(-3 * kWordSize));
  __ sw(ZR, Address(SP, 2 * kWordSize));
  __ sw(A1, Address(SP, 1 * kWordSize));
  __ sw(A0, Address(SP, 0 * kWordSize));
  __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
  __ Comment("AllocateArrayStub return");
  // Pop arguments; result is popped into V0.
  __ lw(V0, Address(SP, 2 * kWordSize));
  __ lw(A1, Address(SP, 1 * kWordSize));
  __ lw(A0, Address(SP, 0 * kWordSize));
  __ addiu(SP, SP, Immediate(3 * kWordSize));

  __ LeaveStubFrameAndReturn();
}


// Called when invoking Dart code from C++ (VM code).
// Input parameters:
//   RA : points to return address.
//   A0 : code object of the Dart function to call.
//   A1 : arguments descriptor array.
//   A2 : arguments array.
//   A3 : current thread.
void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
  // Save frame pointer coming in.
  __ Comment("InvokeDartCodeStub");
  __ EnterFrame();

  // Push code object to PC marker slot.
  __ lw(TMP, Address(A3, Thread::invoke_dart_code_stub_offset()));
  __ Push(TMP);

  // Save new context and C++ ABI callee-saved registers.

  // The saved vm tag, top resource, and top exit frame info.
  const intptr_t kPreservedSlots = 3;
  const intptr_t kPreservedRegSpace =
      kWordSize *
      (kAbiPreservedCpuRegCount + kAbiPreservedFpuRegCount + kPreservedSlots);

  __ addiu(SP, SP, Immediate(-kPreservedRegSpace));
  for (int i = S0; i <= S7; i++) {
    Register r = static_cast<Register>(i);
    const intptr_t slot = i - S0 + kPreservedSlots;
    __ sw(r, Address(SP, slot * kWordSize));
  }

  for (intptr_t i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg;
       i++) {
    FRegister r = static_cast<FRegister>(i);
    const intptr_t slot = kAbiPreservedCpuRegCount + kPreservedSlots + i -
                          kAbiFirstPreservedFpuReg;
    __ swc1(r, Address(SP, slot * kWordSize));
  }
  // We now load the pool pointer (PP) with a GC-safe value as we are about
  // to invoke Dart code.
  __ LoadImmediate(PP, 0);

  // Set up THR, which caches the current thread in Dart code.
  if (THR != A3) {
    __ mov(THR, A3);
  }

  // Save the current VMTag on the stack.
  __ lw(T1, Assembler::VMTagAddress());
  __ sw(T1, Address(SP, 2 * kWordSize));

  // Mark that the thread is executing Dart code.
  __ LoadImmediate(T0, VMTag::kDartTagId);
  __ sw(T0, Assembler::VMTagAddress());

  // Save top resource and top exit frame info. Use T0 as a temporary register.
  // StackFrameIterator reads the top exit frame info saved in this frame.
  __ lw(T0, Address(THR, Thread::top_resource_offset()));
  __ sw(ZR, Address(THR, Thread::top_resource_offset()));
  __ sw(T0, Address(SP, 1 * kWordSize));
  __ lw(T0, Address(THR, Thread::top_exit_frame_info_offset()));
  __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));
  // kExitLinkSlotFromEntryFp must be kept in sync with the code below.
  ASSERT(kExitLinkSlotFromEntryFp == -24);
  __ sw(T0, Address(SP, 0 * kWordSize));
  // After the call, the stack pointer is restored to this location.
  // Pushed S0-7, F20-31, T0, T0, T1 = 23.
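  // That count breaks down as 8 CPU registers (S0..S7) + 12 FPU registers
  // (F20..F31) + 3 slots (vm tag, top resource, top exit frame info) = 23
  // words; together with the PC marker pushed above, the exit link ends up
  // 24 words below the entry FP, matching the assert above.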

  // Load arguments descriptor array into S4, which is passed to Dart code.
  __ lw(S4, Address(A1, VMHandles::kOffsetOfRawPtrInHandle));

  // Load number of arguments into S5.
  __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
  __ SmiUntag(T1);

  // Compute address of 'arguments array' data area into A2.
  __ lw(A2, Address(A2, VMHandles::kOffsetOfRawPtrInHandle));

  // Set up arguments for the Dart call.
  Label push_arguments;
  Label done_push_arguments;
  __ beq(T1, ZR, &done_push_arguments);  // check if there are arguments.
  __ delay_slot()->addiu(A2, A2,
                         Immediate(Array::data_offset() - kHeapObjectTag));
  __ mov(A1, ZR);
  __ Bind(&push_arguments);
  __ lw(A3, Address(A2));
  __ Push(A3);
  __ addiu(A1, A1, Immediate(1));
  __ BranchSignedLess(A1, T1, &push_arguments);
  __ delay_slot()->addiu(A2, A2, Immediate(kWordSize));

  __ Bind(&done_push_arguments);

  // Call the Dart code entrypoint.
  // We are calling into Dart code, here, so there is no need to call through
  // T9 to match the ABI.
  __ lw(CODE_REG, Address(A0, VMHandles::kOffsetOfRawPtrInHandle));
  __ lw(A0, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jalr(A0);  // S4 is the arguments descriptor array.
  __ Comment("InvokeDartCodeStub return");

  // Get rid of arguments pushed on the stack.
  __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize);


  // Restore the current VMTag from the stack.
  __ lw(T1, Address(SP, 2 * kWordSize));
  __ sw(T1, Assembler::VMTagAddress());

  // Restore the saved top resource and top exit frame info back into the
  // Isolate structure. Uses T0 as a temporary register for this.
  __ lw(T0, Address(SP, 1 * kWordSize));
  __ sw(T0, Address(THR, Thread::top_resource_offset()));
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ sw(T0, Address(THR, Thread::top_exit_frame_info_offset()));

  // Restore C++ ABI callee-saved registers.
  for (int i = S0; i <= S7; i++) {
    Register r = static_cast<Register>(i);
    const intptr_t slot = i - S0 + kPreservedSlots;
    __ lw(r, Address(SP, slot * kWordSize));
  }

  for (intptr_t i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg;
       i++) {
    FRegister r = static_cast<FRegister>(i);
    const intptr_t slot = kAbiPreservedCpuRegCount + kPreservedSlots + i -
                          kAbiFirstPreservedFpuReg;
    __ lwc1(r, Address(SP, slot * kWordSize));
  }

  __ addiu(SP, SP, Immediate(kPreservedRegSpace));

  // Restore the frame pointer and return.
  __ LeaveFrameAndReturn();
}


// Called for inline allocation of contexts.
// Input:
//   T1: number of context variables.
// Output:
//   V0: new allocated RawContext object.
void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
  __ Comment("AllocateContext");
  if (FLAG_inline_alloc) {
    Label slow_case;
    // First compute the rounded instance size.
    // T1: number of context variables.
    intptr_t fixed_size_plus_alignment_padding =
        sizeof(RawContext) + kObjectAlignment - 1;
    __ LoadImmediate(T2, fixed_size_plus_alignment_padding);
    __ sll(T0, T1, 2);
    __ addu(T2, T2, T0);
    ASSERT(kSmiTagShift == 1);
    __ LoadImmediate(T0, ~(kObjectAlignment - 1));
    __ and_(T2, T2, T0);

    NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, T4, &slow_case));
    // Now allocate the object.
    // T1: number of context variables.
    // T2: object size.
    const intptr_t cid = kContextCid;
    Heap::Space space = Heap::kNew;
    __ lw(T5, Address(THR, Thread::heap_offset()));
    __ lw(V0, Address(T5, Heap::TopOffset(space)));
    __ addu(T3, T2, V0);

    // Check if the allocation fits into the remaining space.
    // V0: potential new object.
    // T1: number of context variables.
    // T2: object size.
    // T3: potential next object start.
    // T5: heap.
    __ lw(CMPRES1, Address(T5, Heap::EndOffset(space)));
    if (FLAG_use_slow_path) {
      __ b(&slow_case);
    } else {
      __ BranchUnsignedGreaterEqual(T3, CMPRES1, &slow_case);
    }

    // Successfully allocated the object, now update top to point to
    // next object start and initialize the object.
    // V0: new object.
    // T1: number of context variables.
    // T2: object size.
    // T3: next object start.
    // T5: heap.
    __ sw(T3, Address(T5, Heap::TopOffset(space)));
    __ addiu(V0, V0, Immediate(kHeapObjectTag));
    NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T5, space));

    // Calculate the size tag.
    // V0: new object.
    // T1: number of context variables.
    // T2: object size.
    const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
    __ LoadImmediate(TMP, RawObject::SizeTag::kMaxSizeTag);
    __ sltu(CMPRES1, TMP, T2);  // CMPRES1 = T2 > TMP ? 1 : 0.
    __ movn(T2, ZR, CMPRES1);   // T2 = CMPRES1 != 0 ? 0 : T2.
    __ sll(TMP, T2, shift);     // TMP = T2 << shift.
    __ movz(T2, TMP, CMPRES1);  // T2 = CMPRES1 == 0 ? TMP : T2.
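    // The sltu/movn/movz sequence is a branchless select: if the object size
    // exceeds kMaxSizeTag, the size tag is forced to zero (the overflow
    // sentinel); otherwise T2 becomes the shifted size tag.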

    // Get the class index and insert it into the tags.
    // T2: size and bit tags.
    __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid));
    __ or_(T2, T2, TMP);
    __ sw(T2, FieldAddress(V0, Context::tags_offset()));

    // Set up the number of context variables field.
    // V0: new object.
    // T1: number of context variables as integer value (not object).
    __ sw(T1, FieldAddress(V0, Context::num_variables_offset()));

    __ LoadObject(T7, Object::null_object());

    // Initialize the context variables.
    // V0: new object.
    // T1: number of context variables.
    Label loop, loop_exit;
    __ blez(T1, &loop_exit);
    // Set up the parent field.
    __ delay_slot()->sw(T7, FieldAddress(V0, Context::parent_offset()));
    __ AddImmediate(T3, V0, Context::variable_offset(0) - kHeapObjectTag);
    __ sll(T1, T1, 2);
    __ Bind(&loop);
    __ addiu(T1, T1, Immediate(-kWordSize));
    __ addu(T4, T3, T1);
    __ bgtz(T1, &loop);
    __ delay_slot()->sw(T7, Address(T4));
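    // T1 now counts down in word-sized steps: each iteration stores null at
    // T3 + T1, so the variables are initialized from the last one down to
    // variable 0, whose store happens in the final bgtz delay slot.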
    __ Bind(&loop_exit);

    // Done allocating and initializing the context.
    // V0: new object.
    __ Ret();

    __ Bind(&slow_case);
  }
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  // Set up space on stack for return value.
  __ SmiTag(T1);
  __ addiu(SP, SP, Immediate(-2 * kWordSize));
  __ LoadObject(TMP, Object::null_object());
  __ sw(TMP, Address(SP, 1 * kWordSize));  // Store null.
  __ sw(T1, Address(SP, 0 * kWordSize));
  __ CallRuntime(kAllocateContextRuntimeEntry, 1);  // Allocate context.
  __ lw(V0, Address(SP, 1 * kWordSize));  // Get the new context.
  __ addiu(SP, SP, Immediate(2 * kWordSize));  // Pop argument and return.

  // V0: new object
  // Restore the frame pointer.
  __ LeaveStubFrameAndReturn();
}


// Helper stub to implement Assembler::StoreIntoObject.
// Input parameters:
//   T0: Address (i.e. object) being stored into.
void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) {
  // Save values being destroyed.
  __ Comment("UpdateStoreBufferStub");
  __ addiu(SP, SP, Immediate(-3 * kWordSize));
  __ sw(T3, Address(SP, 2 * kWordSize));
  __ sw(T2, Address(SP, 1 * kWordSize));
  __ sw(T1, Address(SP, 0 * kWordSize));

  Label add_to_buffer;
  // Check whether this object has already been remembered. Skip adding to the
  // store buffer if the object is in the store buffer already.
  // Spilled: T1, T2, T3.
  // T0: Address being stored.
  __ lw(T2, FieldAddress(T0, Object::tags_offset()));
  __ andi(CMPRES1, T2, Immediate(1 << RawObject::kRememberedBit));
  __ beq(CMPRES1, ZR, &add_to_buffer);
  __ lw(T1, Address(SP, 0 * kWordSize));
  __ lw(T2, Address(SP, 1 * kWordSize));
  __ lw(T3, Address(SP, 2 * kWordSize));
  __ addiu(SP, SP, Immediate(3 * kWordSize));
  __ Ret();

  __ Bind(&add_to_buffer);
  // Atomically set the remembered bit of the object header.
  Label retry;
  __ Bind(&retry);
  __ ll(T2, FieldAddress(T0, Object::tags_offset()));
  __ ori(T2, T2, Immediate(1 << RawObject::kRememberedBit));
  __ sc(T2, FieldAddress(T0, Object::tags_offset()));
  // T2 = 1 on success, 0 on failure.
  __ beq(T2, ZR, &retry);
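  // ll/sc form an atomic read-modify-write: sc only commits the store if no
  // other agent wrote the location since the ll, so the retry loop guarantees
  // the remembered bit is set without losing concurrent tag updates.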

  // Load the StoreBuffer block out of the thread. Then load top_ out of the
  // StoreBufferBlock and add the address to the pointers_.
  __ lw(T1, Address(THR, Thread::store_buffer_block_offset()));
  __ lw(T2, Address(T1, StoreBufferBlock::top_offset()));
  __ sll(T3, T2, 2);
  __ addu(T3, T1, T3);
  __ sw(T0, Address(T3, StoreBufferBlock::pointers_offset()));

  // Increment top_ and check for overflow.
  // T2: top_
  // T1: StoreBufferBlock
  Label L;
  __ addiu(T2, T2, Immediate(1));
  __ sw(T2, Address(T1, StoreBufferBlock::top_offset()));
  __ addiu(CMPRES1, T2, Immediate(-StoreBufferBlock::kSize));
  // Restore values.
  __ lw(T1, Address(SP, 0 * kWordSize));
  __ lw(T2, Address(SP, 1 * kWordSize));
  __ lw(T3, Address(SP, 2 * kWordSize));
  __ beq(CMPRES1, ZR, &L);
  __ delay_slot()->addiu(SP, SP, Immediate(3 * kWordSize));
  __ Ret();

  // Handle overflow: Call the runtime leaf function.
  __ Bind(&L);
  // Set up frame, push callee-saved registers.

  __ EnterCallRuntimeFrame(1 * kWordSize);
  __ mov(A0, THR);
  __ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1);
  __ Comment("UpdateStoreBufferStub return");
  // Restore callee-saved registers, tear down frame.
  __ LeaveCallRuntimeFrame();
  __ Ret();
}


// Called for inline allocation of objects.
// Input parameters:
//   RA : return address.
//   SP + 0 : type arguments object (only if class is parameterized).
void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
                                              const Class& cls) {
  __ Comment("AllocationStubForClass");
  // The generated code is different if the class is parameterized.
  const bool is_cls_parameterized = cls.NumTypeArguments() > 0;
  ASSERT(!is_cls_parameterized ||
         (cls.type_arguments_field_offset() != Class::kNoTypeArguments));
  // kInlineInstanceSize is a constant used as a threshold for determining
  // when the object initialization should be done as a loop or as
  // straight line code.
  const int kInlineInstanceSize = 12;
  const intptr_t instance_size = cls.instance_size();
  ASSERT(instance_size > 0);
  if (is_cls_parameterized) {
    __ lw(T1, Address(SP, 0 * kWordSize));
    // T1: type arguments.
  }
  Isolate* isolate = Isolate::Current();
  if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) &&
      !cls.TraceAllocation(isolate)) {
    Label slow_case;
    // Allocate the object and update top to point to
    // next object start and initialize the allocated object.
    // T1: instantiated type arguments (if is_cls_parameterized).
    Heap::Space space = Heap::kNew;
    __ lw(T5, Address(THR, Thread::heap_offset()));
    __ lw(T2, Address(T5, Heap::TopOffset(space)));
    __ LoadImmediate(T4, instance_size);
    __ addu(T3, T2, T4);
    // Check if the allocation fits into the remaining space.
    // T2: potential new object start.
    // T3: potential next object start.
    // T5: heap.
    __ lw(CMPRES1, Address(T5, Heap::EndOffset(space)));
    if (FLAG_use_slow_path) {
      __ b(&slow_case);
    } else {
      __ BranchUnsignedGreaterEqual(T3, CMPRES1, &slow_case);
    }
    // Successfully allocated the object(s), now update top to point to
    // next object start and initialize the object.
    __ sw(T3, Address(T5, Heap::TopOffset(space)));
    NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id(), T5, space));

    // T2: new object start.
    // T3: next object start.
    // T1: new object type arguments (if is_cls_parameterized).
    // Set the tags.
    uword tags = 0;
    tags = RawObject::SizeTag::update(instance_size, tags);
    ASSERT(cls.id() != kIllegalCid);
    tags = RawObject::ClassIdTag::update(cls.id(), tags);
    __ LoadImmediate(T0, tags);
    __ sw(T0, Address(T2, Instance::tags_offset()));

    __ LoadObject(T7, Object::null_object());

    // Initialize the remaining words of the object.
    // T2: new object start.
    // T3: next object start.
    // T1: new object type arguments (if is_cls_parameterized).
    // First try inlining the initialization without a loop.
    if (instance_size < (kInlineInstanceSize * kWordSize)) {
      // Check if the object contains any non-header fields.
      // Small objects are initialized using a consecutive set of writes.
      for (intptr_t current_offset = Instance::NextFieldOffset();
           current_offset < instance_size; current_offset += kWordSize) {
        __ sw(T7, Address(T2, current_offset));
      }
    } else {
      __ addiu(T4, T2, Immediate(Instance::NextFieldOffset()));
      // Loop until the whole object is initialized.
      // T2: new object.
      // T3: next object start.
      // T4: next word to be initialized.
      // T1: new object type arguments (if is_cls_parameterized).
      Label loop, loop_exit;
      __ BranchUnsignedGreaterEqual(T4, T3, &loop_exit);
      __ Bind(&loop);
      __ addiu(T4, T4, Immediate(kWordSize));
      __ bne(T4, T3, &loop);
      __ delay_slot()->sw(T7, Address(T4, -kWordSize));
      __ Bind(&loop_exit);
    }
    if (is_cls_parameterized) {
      // T1: new object type arguments.
      // Set the type arguments in the new object.
      __ sw(T1, Address(T2, cls.type_arguments_field_offset()));
    }
    // Done allocating and initializing the instance.
    // T2: new object still missing its heap tag.
    __ Ret();
    __ delay_slot()->addiu(V0, T2, Immediate(kHeapObjectTag));

    __ Bind(&slow_case);
  }
  // If is_cls_parameterized:
  //   T1: new object type arguments (instantiated or not).
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();  // Uses pool pointer to pass cls to runtime.
  __ LoadObject(TMP, cls);

  __ addiu(SP, SP, Immediate(-3 * kWordSize));
  // Space on stack for return value.
  __ LoadObject(T7, Object::null_object());
  __ sw(T7, Address(SP, 2 * kWordSize));
  __ sw(TMP, Address(SP, 1 * kWordSize));  // Class of object to be allocated.

  if (is_cls_parameterized) {
    // Push type arguments of object to be allocated and of instantiator.
    __ sw(T1, Address(SP, 0 * kWordSize));
  } else {
    // Push null type arguments.
    __ sw(T7, Address(SP, 0 * kWordSize));
  }
  __ CallRuntime(kAllocateObjectRuntimeEntry, 2);  // Allocate object.
  __ Comment("AllocationStubForClass return");
  // Pop result (newly allocated object).
  __ lw(V0, Address(SP, 2 * kWordSize));
  __ addiu(SP, SP, Immediate(3 * kWordSize));  // Pop arguments.
  // V0: new object
  // Restore the frame pointer and return.
  __ LeaveStubFrameAndReturn(RA);
}


// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
// from the entry code of a dart function after an error in the passed
// argument name or number has been detected.
// Input parameters:
//   RA : return address.
//   SP : address of last argument.
//   S4 : arguments descriptor array.
void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
  __ EnterStubFrame();

  // Load the receiver.
  __ lw(A1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
  __ sll(TMP, A1, 1);  // A1 is a Smi.
  __ addu(TMP, FP, TMP);
  __ lw(T6, Address(TMP, kParamEndSlotFromFp * kWordSize));

  // Push space for the return value.
  // Push the receiver.
  // Push arguments descriptor array.
  const intptr_t kNumArgs = 3;
  __ addiu(SP, SP, Immediate(-kNumArgs * kWordSize));
  __ sw(ZR, Address(SP, 2 * kWordSize));
  __ sw(T6, Address(SP, 1 * kWordSize));
  __ sw(S4, Address(SP, 0 * kWordSize));

  // Adjust arguments count.
  __ lw(TMP, FieldAddress(S4, ArgumentsDescriptor::type_args_len_offset()));
  Label args_count_ok;
  __ BranchEqual(TMP, Immediate(0), &args_count_ok);
  __ AddImmediate(A1, A1, Smi::RawValue(1));  // Include the type arguments.
  __ Bind(&args_count_ok);

  // A1: Smi-tagged arguments array length.
  PushArgumentsArray(assembler);

  __ CallRuntime(kInvokeClosureNoSuchMethodRuntimeEntry, kNumArgs);
  // noSuchMethod on closures always throws an error, so it will never return.
  __ break_(0);
}


// T0: function object.
// S5: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) {
  __ Comment("OptimizedUsageCounterIncrement");
  Register ic_reg = S5;
  Register func_reg = T0;
  if (FLAG_trace_optimized_ic_calls) {
    __ EnterStubFrame();
    __ addiu(SP, SP, Immediate(-4 * kWordSize));
    __ sw(T0, Address(SP, 3 * kWordSize));
    __ sw(S5, Address(SP, 2 * kWordSize));
    __ sw(ic_reg, Address(SP, 1 * kWordSize));    // Argument.
    __ sw(func_reg, Address(SP, 0 * kWordSize));  // Argument.
    __ CallRuntime(kTraceICCallRuntimeEntry, 2);
    __ lw(S5, Address(SP, 2 * kWordSize));
    __ lw(T0, Address(SP, 3 * kWordSize));
1355 __ addiu(SP, SP, Immediate(4 * kWordSize)); // Discard argument; | |
1356 __ LeaveStubFrame(); | |
1357 } | |
1358 __ lw(T7, FieldAddress(func_reg, Function::usage_counter_offset())); | |
1359 __ addiu(T7, T7, Immediate(1)); | |
1360 __ sw(T7, FieldAddress(func_reg, Function::usage_counter_offset())); | |
1361 } | |


// Loads the function into 'temp_reg'.
void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
                                             Register temp_reg) {
  if (FLAG_optimization_counter_threshold >= 0) {
    __ Comment("UsageCounterIncrement");
    Register ic_reg = S5;
    Register func_reg = temp_reg;
    ASSERT(temp_reg == T0);
    __ Comment("Increment function counter");
    __ lw(func_reg, FieldAddress(ic_reg, ICData::owner_offset()));
    __ lw(T1, FieldAddress(func_reg, Function::usage_counter_offset()));
    __ addiu(T1, T1, Immediate(1));
    __ sw(T1, FieldAddress(func_reg, Function::usage_counter_offset()));
  }
}


// Note: S5 must be preserved.
// Attempt a quick Smi operation for known operations ('kind'). The ICData
// must have been primed with a Smi/Smi check that will be used for counting
// the invocations.
static void EmitFastSmiOp(Assembler* assembler,
                          Token::Kind kind,
                          intptr_t num_args,
                          Label* not_smi_or_overflow) {
  __ Comment("Fast Smi op");
  ASSERT(num_args == 2);
  __ lw(T0, Address(SP, 0 * kWordSize));  // Right.
  __ lw(T1, Address(SP, 1 * kWordSize));  // Left.
  __ or_(CMPRES1, T0, T1);
  __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, not_smi_or_overflow);
  switch (kind) {
    case Token::kADD: {
      __ AdduDetectOverflow(V0, T1, T0, CMPRES1);  // Add.
      __ bltz(CMPRES1, not_smi_or_overflow);       // Branch on overflow.
      break;
    }
    case Token::kSUB: {
      __ SubuDetectOverflow(V0, T1, T0, CMPRES1);  // Subtract.
      __ bltz(CMPRES1, not_smi_or_overflow);       // Branch on overflow.
      break;
    }
    case Token::kEQ: {
      Label true_label, done;
      __ beq(T1, T0, &true_label);
      __ LoadObject(V0, Bool::False());
      __ b(&done);
      __ Bind(&true_label);
      __ LoadObject(V0, Bool::True());
      __ Bind(&done);
      break;
    }
    default:
      UNIMPLEMENTED();
  }
  // S5: IC data object (preserved).
  __ lw(T0, FieldAddress(S5, ICData::ic_data_offset()));
  // T0: ic_data_array with check entries: classes and target functions.
  __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag);
  // T0: points directly to the first ic data array element.
#if defined(DEBUG)
  // Check that the first entry is for Smi/Smi.
  Label error, ok;
  const int32_t imm_smi_cid = reinterpret_cast<int32_t>(Smi::New(kSmiCid));
  __ lw(T4, Address(T0));
  __ BranchNotEqual(T4, Immediate(imm_smi_cid), &error);
  __ lw(T4, Address(T0, kWordSize));
  __ BranchEqual(T4, Immediate(imm_smi_cid), &ok);
  __ Bind(&error);
  __ Stop("Incorrect IC data");
  __ Bind(&ok);
#endif
  if (FLAG_optimization_counter_threshold >= 0) {
    // Update counter, ignore overflow.
    const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
    __ lw(T4, Address(T0, count_offset));
    __ AddImmediate(T4, T4, Smi::RawValue(1));
    __ sw(T4, Address(T0, count_offset));
  }

  __ Ret();
}
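
// A minimal C-style sketch of the combined Smi check at the top of
// EmitFastSmiOp (illustrative only; it assumes the VM's usual encoding of
// kSmiTag == 0 with kSmiTagMask == 1):
//
//   bool BothAreSmis(uword left, uword right) {
//     // ORing merges the tag bits; a set low bit means at least one
//     // operand is a tagged heap object rather than a Smi.
//     return ((left | right) & kSmiTagMask) == 0;
//   }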


// Generate inline cache check for 'num_args'.
// RA: return address.
// S5: inline cache data object.
// Control flow:
// - If receiver is null -> jump to IC miss.
// - If receiver is Smi -> load Smi class.
// - If receiver is not-Smi -> load receiver's class.
// - Check if the 'num_args' class ids (including the receiver's) match any
//   IC data group.
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCode::GenerateNArgsCheckInlineCacheStub(
    Assembler* assembler,
    intptr_t num_args,
    const RuntimeEntry& handle_ic_miss,
    Token::Kind kind,
    bool optimized) {
  __ Comment("NArgsCheckInlineCacheStub");
  ASSERT(num_args == 1 || num_args == 2);
#if defined(DEBUG)
  {
    Label ok;
    // Check that the IC data array has NumArgsTested() == num_args.
    // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
    __ lw(T0, FieldAddress(S5, ICData::state_bits_offset()));
    ASSERT(ICData::NumArgsTestedShift() == 0);  // No shift needed.
    __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask()));
    __ BranchEqual(T0, Immediate(num_args), &ok);
    __ Stop("Incorrect stub for IC data");
    __ Bind(&ok);
  }
#endif  // DEBUG

  Label stepping, done_stepping;
  if (FLAG_support_debugger && !optimized) {
    __ Comment("Check single stepping");
    __ LoadIsolate(T0);
    __ lbu(T0, Address(T0, Isolate::single_step_offset()));
    __ BranchNotEqual(T0, Immediate(0), &stepping);
    __ Bind(&done_stepping);
  }

  Label not_smi_or_overflow;
  if (kind != Token::kILLEGAL) {
    EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
  }
  __ Bind(&not_smi_or_overflow);

  __ Comment("Extract ICData initial values and receiver cid");
  // Load the arguments descriptor into S4.
  __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset()));
  // Preserve return address, since RA is needed for subroutine call.
  __ mov(T2, RA);
  // Loop that checks if there is an IC data match.
  Label loop, found, miss;
  // S5: IC data object (preserved).
  __ lw(T0, FieldAddress(S5, ICData::ic_data_offset()));
  // T0: ic_data_array with check entries: classes and target functions.
  __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag);
  // T0: points directly to the first ic data array element.

  // Get the receiver's class ID (first read number of arguments from
  // arguments descriptor array and then access the receiver from the stack).
  __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
  __ sll(T5, T1, 1);  // T1 is a Smi; T5 becomes argument_count * kWordSize.
  __ addu(T5, T5, SP);
  __ lw(T3, Address(T5, -kWordSize));
  __ LoadTaggedClassIdMayBeSmi(T3, T3);

  if (num_args == 2) {
    __ lw(T5, Address(T5, -2 * kWordSize));
    __ LoadTaggedClassIdMayBeSmi(T5, T5);
  }

  const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize;
  // T1: argument_count (smi).
  // T3: receiver's class ID (smi).
  // T5: first argument's class ID (smi).

  // The generic variant (kind == Token::kILLEGAL) is generated only once, so
  // it is unrolled more than the specialized variants.
  const bool optimize = kind == Token::kILLEGAL;

  __ Comment("ICData loop");
  __ Bind(&loop);
  for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
    __ lw(T4, Address(T0, 0));
    if (num_args == 1) {
      __ beq(T3, T4, &found);  // IC hit.
    } else {
      ASSERT(num_args == 2);
      Label update;
      __ bne(T3, T4, &update);  // Continue.
      __ lw(T4, Address(T0, kWordSize));
      __ beq(T5, T4, &found);  // IC hit.
      __ Bind(&update);
    }

    __ AddImmediate(T0, entry_size);  // Next entry.
    if (unroll == 0) {
      __ BranchNotEqual(T4, Immediate(Smi::RawValue(kIllegalCid)),
                        &loop);  // Done?
    } else {
      __ BranchEqual(T4, Immediate(Smi::RawValue(kIllegalCid)),
                     &miss);  // Done?
    }
  }

  __ Bind(&miss);
  __ Comment("IC miss");
  // Restore return address.
  __ mov(RA, T2);

  // Compute address of arguments (first read number of arguments from
  // arguments descriptor array and then compute address on the stack).
  // T1: argument_count (smi).
  __ addiu(T1, T1, Immediate(Smi::RawValue(-1)));
  __ sll(T1, T1, 1);  // T1 is a Smi.
  __ addu(T1, SP, T1);
  // T1: address of receiver.
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  // Preserve the IC data object and the arguments descriptor array, and
  // set up space on the stack for the result (target function).
  int num_slots = num_args + 4;
  __ addiu(SP, SP, Immediate(-num_slots * kWordSize));
  __ sw(S5, Address(SP, (num_slots - 1) * kWordSize));
  __ sw(S4, Address(SP, (num_slots - 2) * kWordSize));
  __ sw(ZR, Address(SP, (num_slots - 3) * kWordSize));
  // Push call arguments.
  for (intptr_t i = 0; i < num_args; i++) {
    __ lw(TMP, Address(T1, -i * kWordSize));
    __ sw(TMP, Address(SP, (num_slots - i - 4) * kWordSize));
  }
  // Pass IC data object.
  __ sw(S5, Address(SP, (num_slots - num_args - 4) * kWordSize));
  __ CallRuntime(handle_ic_miss, num_args + 1);
  __ Comment("NArgsCheckInlineCacheStub return");
  // Pop returned function object into T3.
  // Restore arguments descriptor array and IC data array.
  __ lw(T3, Address(SP, (num_slots - 3) * kWordSize));
  __ lw(S4, Address(SP, (num_slots - 2) * kWordSize));
  __ lw(S5, Address(SP, (num_slots - 1) * kWordSize));
  // Remove the call arguments pushed earlier, including the IC data object
  // and the arguments descriptor array.
  __ addiu(SP, SP, Immediate(num_slots * kWordSize));
  __ RestoreCodePointer();
  __ LeaveStubFrame();

  Label call_target_function;
  if (!FLAG_lazy_dispatchers) {
    __ mov(T0, T3);
    GenerateDispatcherCode(assembler, &call_target_function);
  } else {
    __ b(&call_target_function);
  }

  __ Bind(&found);
  __ mov(RA, T2);  // Restore return address if found.
  __ Comment("Update caller's counter");
  // T0: pointer to an IC data check group.
  const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
  const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
  __ lw(T3, Address(T0, target_offset));

  if (FLAG_optimization_counter_threshold >= 0) {
    // Update counter, ignore overflow.
    __ lw(T4, Address(T0, count_offset));
    __ AddImmediate(T4, T4, Smi::RawValue(1));
    __ sw(T4, Address(T0, count_offset));
  }

  __ Comment("Call target");
  __ Bind(&call_target_function);
  // T0 <- T3: Target function.
  __ mov(T0, T3);
  __ lw(T4, FieldAddress(T0, Function::entry_point_offset()));
  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ jr(T4);

  // Call single step callback in debugger.
  if (FLAG_support_debugger && !optimized) {
    __ Bind(&stepping);
    __ EnterStubFrame();
    __ addiu(SP, SP, Immediate(-2 * kWordSize));
    __ sw(S5, Address(SP, 1 * kWordSize));  // Preserve IC data.
    __ sw(RA, Address(SP, 0 * kWordSize));  // Return address.
    __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
    __ lw(RA, Address(SP, 0 * kWordSize));
    __ lw(S5, Address(SP, 1 * kWordSize));
    __ addiu(SP, SP, Immediate(2 * kWordSize));
    __ RestoreCodePointer();
    __ LeaveStubFrame();
    __ b(&done_stepping);
  }
}
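
// Illustrative C-style sketch of the unrolled scan generated above (not part
// of the stub; names and the entry-layout summary are descriptive
// assumptions based on the comments in this file):
//
//   // Each check group holds num_args class ids followed by bookkeeping
//   // slots (target function, count); a kIllegalCid key ends the array.
//   intptr_t ScanICData(const intptr_t* data, intptr_t entry_len,
//                       intptr_t cid0, intptr_t cid1, intptr_t num_args) {
//     for (intptr_t i = 0; data[i] != kIllegalCid; i += entry_len) {
//       if (data[i] == cid0 && (num_args == 1 || data[i + 1] == cid1)) {
//         return i;  // IC hit: the target is read at i + target offset.
//       }
//     }
//     return -1;  // IC miss: fall back to handle_ic_miss.
//   }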


// Use inline cache data array to invoke the target or continue in inline
// cache miss handler. Stub for 1-argument check (receiver class).
// RA: Return address.
// S5: Inline cache data object.
// Inline cache data object structure:
// 0: function-name
// 1: N, number of arguments checked.
// 2 .. (length - 1): group of checks, each check containing:
//   - N classes.
//   - 1 target function.
void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}


void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(assembler, 2,
                                    kInlineCacheMissHandlerTwoArgsRuntimeEntry,
                                    Token::kILLEGAL);
}


void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD);
}


void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB);
}


void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ);
}


void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
    Assembler* assembler) {
  GenerateOptimizedUsageCounterIncrement(assembler);
  GenerateNArgsCheckInlineCacheStub(assembler, 1,
                                    kInlineCacheMissHandlerOneArgRuntimeEntry,
                                    Token::kILLEGAL, true /* optimized */);
}


void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
    Assembler* assembler) {
  GenerateOptimizedUsageCounterIncrement(assembler);
  GenerateNArgsCheckInlineCacheStub(assembler, 2,
                                    kInlineCacheMissHandlerTwoArgsRuntimeEntry,
                                    Token::kILLEGAL, true /* optimized */);
}


// Intermediary stub between a static call and its target. ICData contains
// the target function and the call count.
// S5: ICData
void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  __ Comment("UnoptimizedStaticCallStub");
#if defined(DEBUG)
  {
    Label ok;
    // Check that the IC data array has NumArgsTested() == 0.
    // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
    __ lw(T0, FieldAddress(S5, ICData::state_bits_offset()));
    ASSERT(ICData::NumArgsTestedShift() == 0);  // No shift needed.
    __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask()));
    __ beq(T0, ZR, &ok);
    __ Stop("Incorrect IC data for unoptimized static call");
    __ Bind(&ok);
  }
#endif  // DEBUG

  // Check single stepping.
  Label stepping, done_stepping;
  if (FLAG_support_debugger) {
    __ LoadIsolate(T0);
    __ lbu(T0, Address(T0, Isolate::single_step_offset()));
    __ BranchNotEqual(T0, Immediate(0), &stepping);
    __ Bind(&done_stepping);
  }

  // S5: IC data object (preserved).
  __ lw(T0, FieldAddress(S5, ICData::ic_data_offset()));
  // T0: ic_data_array with entries: target functions and count.
  __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag);
  // T0: points directly to the first ic data array element.
  const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize;
  const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize;

  if (FLAG_optimization_counter_threshold >= 0) {
    // Increment count for this call, ignore overflow.
    __ lw(T4, Address(T0, count_offset));
    __ AddImmediate(T4, T4, Smi::RawValue(1));
    __ sw(T4, Address(T0, count_offset));
  }

  // Load arguments descriptor into S4.
  __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset()));

  // Get function and call it, if possible.
  __ lw(T0, Address(T0, target_offset));
  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ lw(T4, FieldAddress(T0, Function::entry_point_offset()));
  __ jr(T4);

  // Call single step callback in debugger.
  if (FLAG_support_debugger) {
    __ Bind(&stepping);
    __ EnterStubFrame();
    __ addiu(SP, SP, Immediate(-2 * kWordSize));
    __ sw(S5, Address(SP, 1 * kWordSize));  // Preserve IC data.
    __ sw(RA, Address(SP, 0 * kWordSize));  // Return address.
    __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
    __ lw(RA, Address(SP, 0 * kWordSize));
    __ lw(S5, Address(SP, 1 * kWordSize));
    __ addiu(SP, SP, Immediate(2 * kWordSize));
    __ RestoreCodePointer();
    __ LeaveStubFrame();
    __ b(&done_stepping);
  }
}


void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}


void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL);
}


// Stub for compiling a function and jumping to the compiled code.
// S5: IC-Data (for methods).
// S4: Arguments descriptor.
// T0: Function.
void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-3 * kWordSize));
  __ sw(S5, Address(SP, 2 * kWordSize));  // Preserve IC data object.
  __ sw(S4, Address(SP, 1 * kWordSize));  // Preserve args descriptor array.
  __ sw(T0, Address(SP, 0 * kWordSize));  // Pass function.
  __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
  __ lw(T0, Address(SP, 0 * kWordSize));  // Restore function.
  __ lw(S4, Address(SP, 1 * kWordSize));  // Restore args descriptor array.
  __ lw(S5, Address(SP, 2 * kWordSize));  // Restore IC data array.
  __ addiu(SP, SP, Immediate(3 * kWordSize));
  __ LeaveStubFrame();

  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ lw(T2, FieldAddress(T0, Function::entry_point_offset()));
  __ jr(T2);
}
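
// Conceptual sketch of the sequence above (illustrative pseudo-C, not
// generated code; field names are descriptive assumptions):
//
//   CompileFunction(function);   // kCompileFunctionRuntimeEntry
//   code = function->code_;      // reload: installed by the compiler
//   entry = function->entry_point_;
//   goto entry;                  // jr T2; S4/S5 are preserved for the callee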


// S5: Contains an ICData.
void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
  __ Comment("ICCallBreakpoint stub");
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-2 * kWordSize));
  __ sw(S5, Address(SP, 1 * kWordSize));
  __ sw(ZR, Address(SP, 0 * kWordSize));

  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);

  __ lw(S5, Address(SP, 1 * kWordSize));
  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
  __ addiu(SP, SP, Immediate(2 * kWordSize));
  __ LeaveStubFrame();
  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jr(T0);
}


void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
  __ Comment("RuntimeCallBreakpoint stub");
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-1 * kWordSize));
  __ sw(ZR, Address(SP, 0 * kWordSize));

  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);

  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
  __ addiu(SP, SP, Immediate(1 * kWordSize));  // Discard the reserved slot.
  __ LeaveStubFrame();
  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jr(T0);
}


// Called only from unoptimized code. All relevant registers have been saved.
// RA: return address.
void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
  // Check single stepping.
  Label stepping, done_stepping;
  __ LoadIsolate(T0);
  __ lbu(T0, Address(T0, Isolate::single_step_offset()));
  __ BranchNotEqual(T0, Immediate(0), &stepping);
  __ Bind(&done_stepping);

  __ Ret();

  // Call single step callback in debugger.
  __ Bind(&stepping);
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-1 * kWordSize));
  __ sw(RA, Address(SP, 0 * kWordSize));  // Return address.
  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
  __ lw(RA, Address(SP, 0 * kWordSize));
  __ addiu(SP, SP, Immediate(1 * kWordSize));
  __ LeaveStubFrame();
  __ b(&done_stepping);
}


// Used to check class and type arguments. Arguments passed in registers:
// RA: return address.
// A0: instance (must be preserved).
// A1: instantiator type arguments (only if n == 4, can be raw_null).
// A2: function type arguments (only if n == 4, can be raw_null).
// A3: SubtypeTestCache.
// Result in V0: null -> not found, otherwise result (true or false).
static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
  __ Comment("SubtypeNTestCacheStub");
  ASSERT((n == 1) || (n == 2) || (n == 4));
  if (n > 1) {
    __ LoadClass(T0, A0);
    // Compute instance type arguments into T1.
    Label has_no_type_arguments;
    __ LoadObject(T1, Object::null_object());
    __ lw(T2, FieldAddress(
                  T0, Class::type_arguments_field_offset_in_words_offset()));
    __ BranchEqual(T2, Immediate(Class::kNoTypeArguments),
                   &has_no_type_arguments);
    __ sll(T2, T2, 2);
    __ addu(T2, A0, T2);  // T2 <- A0 + T2 * 4.
    __ lw(T1, FieldAddress(T2, 0));
    __ Bind(&has_no_type_arguments);
  }
  __ LoadClassId(T0, A0);
  // A0: instance.
  // A1: instantiator type arguments (only if n == 4, can be raw_null).
  // A2: function type arguments (only if n == 4, can be raw_null).
  // A3: SubtypeTestCache.
  // T0: instance class id.
  // T1: instance type arguments (null if none), used only if n > 1.
  __ lw(T2, FieldAddress(A3, SubtypeTestCache::cache_offset()));
  __ AddImmediate(T2, Array::data_offset() - kHeapObjectTag);

  __ LoadObject(T7, Object::null_object());
  Label loop, found, not_found, next_iteration;
  // T0: instance class id.
  // T1: instance type arguments (still null if closure).
  // T2: entry start.
  // T7: null.
  __ SmiTag(T0);
  __ BranchNotEqual(T0, Immediate(Smi::RawValue(kClosureCid)), &loop);
  __ lw(T1, FieldAddress(A0, Closure::function_type_arguments_offset()));
  __ bne(T1, T7, &not_found);  // Cache cannot be used for generic closures.
  __ lw(T1, FieldAddress(A0, Closure::instantiator_type_arguments_offset()));
  __ lw(T0, FieldAddress(A0, Closure::function_offset()));
  // T0: instance class id as Smi or function.
  __ Bind(&loop);
  __ lw(T3,
        Address(T2, kWordSize * SubtypeTestCache::kInstanceClassIdOrFunction));
  __ beq(T3, T7, &not_found);
  if (n == 1) {
    __ beq(T3, T0, &found);
  } else {
    __ bne(T3, T0, &next_iteration);
    __ lw(T3,
          Address(T2, kWordSize * SubtypeTestCache::kInstanceTypeArguments));
    if (n == 2) {
      __ beq(T3, T1, &found);
    } else {
      __ bne(T3, T1, &next_iteration);
      __ lw(T3, Address(T2, kWordSize *
                                SubtypeTestCache::kInstantiatorTypeArguments));
      __ bne(T3, A1, &next_iteration);
      __ lw(T3,
            Address(T2, kWordSize * SubtypeTestCache::kFunctionTypeArguments));
      __ beq(T3, A2, &found);
    }
  }
  __ Bind(&next_iteration);
  __ b(&loop);
  __ delay_slot()->addiu(
      T2, T2, Immediate(kWordSize * SubtypeTestCache::kTestEntryLength));
  // Fall through to not found.
  __ Bind(&not_found);
  __ Ret();
  __ delay_slot()->mov(V0, T7);

  __ Bind(&found);
  __ Ret();
  __ delay_slot()->lw(V0,
                      Address(T2, kWordSize * SubtypeTestCache::kTestResult));
}
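
// Illustrative C-style sketch of the probe above (not generated code; the
// field names mirror the SubtypeTestCache slot indices used in this stub):
//
//   // Entry: [cid-or-function, instance TA, instantiator TA, function TA,
//   //         result]; a null key terminates the array.
//   RawObject* Probe(const Entry* e, RawObject* key, RawObject* ta,
//                    RawObject* inst_ta, RawObject* fn_ta, int n) {
//     for (; e->cid_or_function != null; e++) {
//       if (e->cid_or_function != key) continue;
//       if (n >= 2 && e->instance_type_args != ta) continue;
//       if (n == 4 && (e->instantiator_type_args != inst_ta ||
//                      e->function_type_args != fn_ta)) continue;
//       return e->result;  // Bool::True() or Bool::False().
//     }
//     return null;  // Not found; the caller falls back to the runtime.
//   }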


// Used to check class and type arguments. Arguments passed in registers:
// RA: return address.
// A0: instance (must be preserved).
// A1: unused.
// A2: unused.
// A3: SubtypeTestCache.
// Result in V0: null -> not found, otherwise result (true or false).
void StubCode::GenerateSubtype1TestCacheStub(Assembler* assembler) {
  GenerateSubtypeNTestCacheStub(assembler, 1);
}


// Used to check class and type arguments. Arguments passed in registers:
// RA: return address.
// A0: instance (must be preserved).
// A1: unused.
// A2: unused.
// A3: SubtypeTestCache.
// Result in V0: null -> not found, otherwise result (true or false).
void StubCode::GenerateSubtype2TestCacheStub(Assembler* assembler) {
  GenerateSubtypeNTestCacheStub(assembler, 2);
}


// Used to check class and type arguments. Arguments passed in registers:
// RA: return address.
// A0: instance (must be preserved).
// A1: instantiator type arguments (can be raw_null).
// A2: function type arguments (can be raw_null).
// A3: SubtypeTestCache.
// Result in V0: null -> not found, otherwise result (true or false).
void StubCode::GenerateSubtype4TestCacheStub(Assembler* assembler) {
  GenerateSubtypeNTestCacheStub(assembler, 4);
}


// Return the current stack pointer address, used for stack alignment
// checks.
void StubCode::GenerateGetStackPointerStub(Assembler* assembler) {
  __ Ret();
  __ delay_slot()->mov(V0, SP);
}
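
// Reading aid for the pattern above (and throughout this file): MIPS
// executes the instruction in a branch's delay slot before the transfer of
// control completes, so
//
//   __ Ret();                      // jr RA
//   __ delay_slot()->mov(V0, SP);  // still executes before the return
//
// returns with V0 already holding the stack pointer.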


// Jump to the exception or error handler.
// RA: return address.
// A0: program_counter.
// A1: stack_pointer.
// A2: frame_pointer.
// A3: thread.
// Does not return.
void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
  ASSERT(kExceptionObjectReg == V0);
  ASSERT(kStackTraceObjectReg == V1);
  __ mov(FP, A2);   // Frame pointer.
  __ mov(THR, A3);  // Thread.
  // Set tag.
  __ LoadImmediate(A2, VMTag::kDartTagId);
  __ sw(A2, Assembler::VMTagAddress());
  // Clear top exit frame.
  __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));
  // Restore pool pointer.
  __ RestoreCodePointer();
  __ LoadPoolPointer();
  __ jr(A0);  // Jump to the program counter.
  __ delay_slot()->mov(SP, A1);  // Stack pointer.
}


// Run an exception handler. Execution comes from the JumpToFrame
// stub or from the simulator.
//
// The arguments are stored in the Thread object.
// Does not return.
void StubCode::GenerateRunExceptionHandlerStub(Assembler* assembler) {
  __ lw(A0, Address(THR, Thread::resume_pc_offset()));
  __ LoadImmediate(A2, 0);

  // Load the exception from the current thread.
  Address exception_addr(THR, Thread::active_exception_offset());
  __ lw(V0, exception_addr);
  __ sw(A2, exception_addr);

  // Load the stacktrace from the current thread.
  Address stacktrace_addr(THR, Thread::active_stacktrace_offset());
  __ lw(V1, stacktrace_addr);

  __ jr(A0);  // Jump to continuation point.
  __ delay_slot()->sw(A2, stacktrace_addr);
}


// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCode::GenerateDeoptForRewindStub(Assembler* assembler) {
  // Push zap value instead of CODE_REG.
  __ LoadImmediate(TMP, kZapCodeReg);
  __ Push(TMP);

  // Load the deopt pc into RA.
  __ lw(RA, Address(THR, Thread::resume_pc_offset()));
  GenerateDeoptimizationSequence(assembler, kEagerDeopt);

  // After we have deoptimized, jump to the correct frame.
  __ EnterStubFrame();
  __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
  __ LeaveStubFrame();
  __ break_(0);
}


// Calls the runtime to optimize the given function.
// T0: function to be reoptimized.
// S4: arguments descriptor (preserved).
void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
  __ Comment("OptimizeFunctionStub");
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-3 * kWordSize));
  __ sw(S4, Address(SP, 2 * kWordSize));
  // Set up space on stack for the return value.
  __ sw(ZR, Address(SP, 1 * kWordSize));
  __ sw(T0, Address(SP, 0 * kWordSize));
  __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
  __ Comment("OptimizeFunctionStub return");
  __ lw(T0, Address(SP, 1 * kWordSize));  // Get the Function object.
  __ lw(S4, Address(SP, 2 * kWordSize));  // Restore arguments descriptor.
  __ addiu(SP, SP, Immediate(3 * kWordSize));  // Discard arguments.

  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ lw(T1, FieldAddress(T0, Function::entry_point_offset()));
  __ LeaveStubFrameAndReturn(T1);
  __ break_(0);
}


// Does an identical check (object references are equal or not equal) with
// special checks for boxed numbers.
// Returns: CMPRES1 is zero if equal, non-zero otherwise.
// Note: A Mint cannot contain a value that would fit in a Smi, and a Bigint
// cannot contain a value that fits in a Mint or a Smi.
static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
                                                 const Register left,
                                                 const Register right,
                                                 const Register temp1,
                                                 const Register temp2) {
  __ Comment("IdenticalWithNumberCheckStub");
  Label reference_compare, done, check_mint, check_bigint;
  // If either argument is a Smi, do a reference compare.
  __ andi(temp1, left, Immediate(kSmiTagMask));
  __ beq(temp1, ZR, &reference_compare);
  __ andi(temp1, right, Immediate(kSmiTagMask));
  __ beq(temp1, ZR, &reference_compare);

  // Value compare for two doubles.
  __ LoadImmediate(temp1, kDoubleCid);
  __ LoadClassId(temp2, left);
  __ bne(temp1, temp2, &check_mint);
  __ LoadClassId(temp2, right);
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);

  // Double values bitwise compare.
  __ lw(temp1, FieldAddress(left, Double::value_offset() + 0 * kWordSize));
  __ lw(temp2, FieldAddress(right, Double::value_offset() + 0 * kWordSize));
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);
  __ lw(temp1, FieldAddress(left, Double::value_offset() + 1 * kWordSize));
  __ lw(temp2, FieldAddress(right, Double::value_offset() + 1 * kWordSize));
  __ b(&done);
  __ delay_slot()->subu(CMPRES1, temp1, temp2);

  __ Bind(&check_mint);
  __ LoadImmediate(temp1, kMintCid);
  __ LoadClassId(temp2, left);
  __ bne(temp1, temp2, &check_bigint);
  __ LoadClassId(temp2, right);
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);

  __ lw(temp1, FieldAddress(left, Mint::value_offset() + 0 * kWordSize));
  __ lw(temp2, FieldAddress(right, Mint::value_offset() + 0 * kWordSize));
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);
  __ lw(temp1, FieldAddress(left, Mint::value_offset() + 1 * kWordSize));
  __ lw(temp2, FieldAddress(right, Mint::value_offset() + 1 * kWordSize));
  __ b(&done);
  __ delay_slot()->subu(CMPRES1, temp1, temp2);

  __ Bind(&check_bigint);
  __ LoadImmediate(temp1, kBigintCid);
  __ LoadClassId(temp2, left);
  __ bne(temp1, temp2, &reference_compare);
  __ LoadClassId(temp2, right);
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);

  __ EnterStubFrame();
  __ ReserveAlignedFrameSpace(2 * kWordSize);
  __ sw(left, Address(SP, 1 * kWordSize));
  __ sw(right, Address(SP, 0 * kWordSize));
  __ mov(A0, left);
  __ mov(A1, right);
  __ CallRuntime(kBigintCompareRuntimeEntry, 2);
  __ Comment("IdenticalWithNumberCheckStub return");
  // Result in V0, 0 means equal.
  __ LeaveStubFrame();
  __ b(&done);
  __ delay_slot()->mov(CMPRES1, V0);

  __ Bind(&reference_compare);
  __ subu(CMPRES1, left, right);
  __ Bind(&done);
  // A branch or test after this comparison will check CMPRES1 == ZR.
}
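
// Illustrative C-style summary of the rule implemented above (not generated
// code): identical() on boxed doubles and Mints compares the 64-bit payload
// bits, two 32-bit words on MIPS32, rather than the references:
//
//   bool PayloadBitsEqual(const uint32_t* a, const uint32_t* b) {
//     return a[0] == b[0] && a[1] == b[1];  // both words must match
//   }
//
// Bigints are compared via a runtime call, and everything else falls back
// to a raw reference compare.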


// Called only from unoptimized code. All relevant registers have been saved.
// RA: return address.
// SP + 4: left operand.
// SP + 0: right operand.
// Returns: CMPRES1 is zero if equal, non-zero otherwise.
void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
    Assembler* assembler) {
  // Check single stepping.
  Label stepping, done_stepping;
  if (FLAG_support_debugger) {
    __ LoadIsolate(T0);
    __ lbu(T0, Address(T0, Isolate::single_step_offset()));
    __ BranchNotEqual(T0, Immediate(0), &stepping);
    __ Bind(&done_stepping);
  }

  const Register temp1 = T2;
  const Register temp2 = T3;
  const Register left = T1;
  const Register right = T0;
  __ lw(left, Address(SP, 1 * kWordSize));
  __ lw(right, Address(SP, 0 * kWordSize));
  GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp1, temp2);
  __ Ret();

  // Call single step callback in debugger.
  if (FLAG_support_debugger) {
    __ Bind(&stepping);
    __ EnterStubFrame();
    __ addiu(SP, SP, Immediate(-1 * kWordSize));
    __ sw(RA, Address(SP, 0 * kWordSize));  // Return address.
    __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
    __ lw(RA, Address(SP, 0 * kWordSize));
    __ addiu(SP, SP, Immediate(1 * kWordSize));
    __ RestoreCodePointer();
    __ LeaveStubFrame();
    __ b(&done_stepping);
  }
}


// Called from optimized code only.
// SP + 4: left operand.
// SP + 0: right operand.
// Returns: CMPRES1 is zero if equal, non-zero otherwise.
void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
    Assembler* assembler) {
  const Register temp1 = T2;
  const Register temp2 = T3;
  const Register left = T1;
  const Register right = T0;
  __ lw(left, Address(SP, 1 * kWordSize));
  __ lw(right, Address(SP, 0 * kWordSize));
  GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp1, temp2);
  __ Ret();
}


// Called from megamorphic calls.
// T0: receiver
// S5: MegamorphicCache (preserved)
// Passed to target:
// CODE_REG: target Code object
// S4: arguments descriptor
void StubCode::GenerateMegamorphicCallStub(Assembler* assembler) {
  __ LoadTaggedClassIdMayBeSmi(T0, T0);
  // T0: class ID of the receiver (smi).
  __ lw(S4, FieldAddress(S5, MegamorphicCache::arguments_descriptor_offset()));
  __ lw(T2, FieldAddress(S5, MegamorphicCache::buckets_offset()));
  __ lw(T1, FieldAddress(S5, MegamorphicCache::mask_offset()));
  // T2: cache buckets array.
  // T1: mask.
  __ LoadImmediate(TMP, MegamorphicCache::kSpreadFactor);
  __ mult(TMP, T0);
  __ mflo(T3);
  // T3: probe.

  Label loop, update, call_target_function;
  __ b(&loop);

  __ Bind(&update);
  __ addiu(T3, T3, Immediate(Smi::RawValue(1)));
  __ Bind(&loop);
  __ and_(T3, T3, T1);
  const intptr_t base = Array::data_offset();
  // T3 is Smi-tagged, but table entries are two words, so shift left by 2.
  __ sll(TMP, T3, 2);
  __ addu(TMP, T2, TMP);
  __ lw(T4, FieldAddress(TMP, base));

  ASSERT(kIllegalCid == 0);
  __ beq(T4, ZR, &call_target_function);
  __ bne(T4, T0, &update);

  __ Bind(&call_target_function);
  // Call the target found in the cache. For a class id match, this is a
  // proper target for the given name and arguments descriptor. If the
  // illegal class id was found, the target is a cache miss handler that can
  // be invoked as a normal Dart function.
  __ sll(T1, T3, 2);
  __ addu(T1, T2, T1);
  __ lw(T0, FieldAddress(T1, base + kWordSize));

  __ lw(T1, FieldAddress(T0, Function::entry_point_offset()));
  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ jr(T1);
}
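
// Illustrative C-style sketch of the probe loop above (not generated code;
// it ignores the Smi tagging of the index that the stub maintains). The
// table stores [class id, target] pairs:
//
//   RawFunction* Probe(const intptr_t* table, intptr_t cid, intptr_t mask) {
//     intptr_t probe = (cid * kSpreadFactor) & mask;
//     // Linear probing; a kIllegalCid (0) key maps to the miss handler,
//     // which is itself callable as an ordinary Dart function.
//     while (table[2 * probe] != cid && table[2 * probe] != 0) {
//       probe = (probe + 1) & mask;
//     }
//     return (RawFunction*)table[2 * probe + 1];
//   }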


// Called from switchable IC calls.
// T0: receiver
// S5: ICData (preserved)
// Passed to target:
// CODE_REG: target Code object
// S4: arguments descriptor
void StubCode::GenerateICCallThroughFunctionStub(Assembler* assembler) {
  Label loop, found, miss;
  __ lw(T6, FieldAddress(S5, ICData::ic_data_offset()));
  __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset()));
  __ AddImmediate(T6, T6, Array::data_offset() - kHeapObjectTag);
  // T6: first IC entry.
  __ LoadTaggedClassIdMayBeSmi(T1, T0);
  // T1: receiver cid as Smi.

  __ Bind(&loop);
  __ lw(T2, Address(T6, 0));
  __ beq(T1, T2, &found);
  ASSERT(Smi::RawValue(kIllegalCid) == 0);
  __ beq(T2, ZR, &miss);

  const intptr_t entry_length = ICData::TestEntryLengthFor(1) * kWordSize;
  __ AddImmediate(T6, entry_length);  // Next entry.
  __ b(&loop);

  __ Bind(&found);
  const intptr_t target_offset = ICData::TargetIndexFor(1) * kWordSize;
  __ lw(T0, Address(T6, target_offset));
  __ lw(T1, FieldAddress(T0, Function::entry_point_offset()));
  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ jr(T1);

  __ Bind(&miss);
  __ LoadIsolate(T2);
  __ lw(CODE_REG, Address(T2, Isolate::ic_miss_code_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jr(T1);
}


void StubCode::GenerateICCallThroughCodeStub(Assembler* assembler) {
  Label loop, found, miss;
  __ lw(T6, FieldAddress(S5, ICData::ic_data_offset()));
  __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset()));
  __ AddImmediate(T6, T6, Array::data_offset() - kHeapObjectTag);
  // T6: first IC entry.
  __ LoadTaggedClassIdMayBeSmi(T1, T0);
  // T1: receiver cid as Smi.

  __ Bind(&loop);
  __ lw(T2, Address(T6, 0));
  __ beq(T1, T2, &found);
  ASSERT(Smi::RawValue(kIllegalCid) == 0);
  __ beq(T2, ZR, &miss);

  const intptr_t entry_length = ICData::TestEntryLengthFor(1) * kWordSize;
  __ AddImmediate(T6, entry_length);  // Next entry.
  __ b(&loop);

  __ Bind(&found);
  const intptr_t code_offset = ICData::CodeIndexFor(1) * kWordSize;
  const intptr_t entry_offset = ICData::EntryPointIndexFor(1) * kWordSize;
  __ lw(T1, Address(T6, entry_offset));
  __ lw(CODE_REG, Address(T6, code_offset));
  __ jr(T1);

  __ Bind(&miss);
  __ LoadIsolate(T2);
  __ lw(CODE_REG, Address(T2, Isolate::ic_miss_code_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jr(T1);
}


// Called from switchable IC calls.
// T0: receiver
// S5: UnlinkedCall
void StubCode::GenerateUnlinkedCallStub(Assembler* assembler) {
  __ EnterStubFrame();
  __ Push(T0);  // Preserve receiver.

  __ Push(ZR);  // Result slot.
  __ Push(T0);  // Arg0: Receiver.
  __ Push(S5);  // Arg1: UnlinkedCall.
  __ CallRuntime(kUnlinkedCallRuntimeEntry, 2);
  __ Drop(2);
  __ Pop(S5);  // result = IC

  __ Pop(T0);  // Restore receiver.
  __ LeaveStubFrame();

  __ lw(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
  __ jr(T1);
}


// Called from switchable IC calls.
// T0: receiver
// S5: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) {
  Label miss;
  __ LoadClassIdMayBeSmi(T1, T0);
  __ lhu(T2, FieldAddress(S5, SingleTargetCache::lower_limit_offset()));
  __ lhu(T3, FieldAddress(S5, SingleTargetCache::upper_limit_offset()));

  __ BranchUnsignedLess(T1, T2, &miss);
  __ BranchUnsignedGreater(T1, T3, &miss);

  __ lw(T1, FieldAddress(S5, SingleTargetCache::entry_point_offset()));
  __ lw(CODE_REG, FieldAddress(S5, SingleTargetCache::target_offset()));
  __ jr(T1);

  __ Bind(&miss);
  __ EnterStubFrame();
  __ Push(T0);  // Preserve receiver.

  __ Push(ZR);  // Result slot.
  __ Push(T0);  // Arg0: Receiver.
  __ CallRuntime(kSingleTargetMissRuntimeEntry, 1);
  __ Drop(1);
  __ Pop(S5);  // result = IC

  __ Pop(T0);  // Restore receiver.
  __ LeaveStubFrame();

  __ lw(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
  __ jr(T1);
}
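
// Illustrative summary of the fast path above (not generated code): a
// single-target call site is valid for one contiguous class-id range, so
// the check reduces to two unsigned comparisons:
//
//   bool HitsSingleTarget(uintptr_t cid, uintptr_t lower, uintptr_t upper) {
//     return lower <= cid && cid <= upper;  // otherwise: runtime miss path
//   }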


// Called from the monomorphic checked entry.
// T0: receiver
void StubCode::GenerateMonomorphicMissStub(Assembler* assembler) {
  __ lw(CODE_REG, Address(THR, Thread::monomorphic_miss_stub_offset()));
  __ EnterStubFrame();
  __ Push(T0);  // Preserve receiver.

  __ Push(ZR);  // Result slot.
  __ Push(T0);  // Arg0: Receiver.
  __ CallRuntime(kMonomorphicMissRuntimeEntry, 1);
  __ Drop(1);
  __ Pop(S5);  // result = IC

  __ Pop(T0);  // Restore receiver.
  __ LeaveStubFrame();

  __ lw(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
  __ jr(T1);
}


void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
  __ break_(0);
}


void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
  __ break_(0);
}

}  // namespace dart

#endif  // defined TARGET_ARCH_MIPS