// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_MIPS.
#if defined(TARGET_ARCH_MIPS)

#include "vm/flow_graph_compiler.h"

#include "vm/ast_printer.h"
#include "vm/compiler.h"
#include "vm/dart_entry.h"
#include "vm/deopt_instructions.h"
#include "vm/il_printer.h"
#include "vm/instructions.h"
#include "vm/locations.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"

namespace dart {

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");


FlowGraphCompiler::~FlowGraphCompiler() {
  // BlockInfos are zone-allocated, so their destructors are not called.
  // Verify the labels explicitly here.
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
}


bool FlowGraphCompiler::SupportsUnboxedDoubles() {
  return true;
}


bool FlowGraphCompiler::SupportsUnboxedMints() {
  return true;
}


bool FlowGraphCompiler::SupportsUnboxedSimd128() {
  return false;
}


bool FlowGraphCompiler::SupportsHardwareDivision() {
  return true;
}


bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() {
  // TODO(johnmccutchan): Investigate the possibility on MIPS now that
  // mints are implemented there (see SupportsUnboxedMints above).
  return false;
}


void FlowGraphCompiler::EnterIntrinsicMode() {
  ASSERT(!intrinsic_mode());
  intrinsic_mode_ = true;
  assembler()->set_constant_pool_allowed(false);
}


void FlowGraphCompiler::ExitIntrinsicMode() {
  ASSERT(intrinsic_mode());
  intrinsic_mode_ = false;
  assembler()->set_constant_pool_allowed(true);
}


RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                 DeoptInfoBuilder* builder,
                                                 const Array& deopt_table) {
  if (deopt_env_ == NULL) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  intptr_t stack_height = compiler->StackSize();
  AllocateIncomingParametersRecursive(deopt_env_, &stack_height);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  // Emit all kMaterializeObject instructions describing objects to be
  // materialized during deoptimization as a prefix to the deoptimization
  // info.
  EmitMaterializations(deopt_env_, builder);

  // The real frame starts here.
  builder->MarkFrameStart();

  Zone* zone = compiler->zone();

  builder->AddPp(current->function(), slot_ix++);
  builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);

  // Emit all values that are needed for materialization as a part of the
  // expression stack for the bottom-most frame. This guarantees that GC
  // will be able to find them during materialization.
  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  // For the innermost environment, set outgoing arguments and the locals.
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  Environment* previous = current;
  current = current->outer();
  while (current != NULL) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    // For any outer environment the deopt id is that of the call instruction
    // which is recorded in the outer environment.
    builder->AddReturnAddress(current->function(),
                              Thread::ToDeoptAfter(current->deopt_id()),
                              slot_ix++);

    // The values of outgoing arguments can be changed by the inlined call,
    // so we must read them from the previous environment.
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    // Set the locals; note that outgoing arguments are not in the
    // environment.
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    // Iterate on the outer environment.
    previous = current;
    current = current->outer();
  }
  // The previous pointer is now the outermost environment.
  ASSERT(previous != NULL);

  // Set slots for the outermost environment.
  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);

  // For the outermost environment, set the incoming arguments.
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
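
// For reference, the slot order assembled above in the common case of no
// inlining (a summary of the builder calls, not an extra data structure):
//
//   materializations                  (emitted before MarkFrameStart)
//   pp, pc marker, caller fp, return address at deopt_id()
//   materialization arguments
//   expression stack and locals       (top of stack first, down to the
//                                      fixed parameters)
//   caller pp, pc marker, caller fp, caller pc
//   incoming arguments                (last fixed parameter first)
//
// Each level of inlining contributes one more pp / pc marker / caller fp /
// return address group, followed by the outgoing arguments of the inlined
// call and the locals of that frame.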


void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  // Calls do not need stubs; they share a deoptimization trampoline.
  ASSERT(reason() != ICData::kDeoptAtCall);
  Assembler* assembler = compiler->assembler();
#define __ assembler->
  __ Comment("%s", Name());
  __ Bind(entry_label());
  if (FLAG_trap_on_deoptimization) {
    __ break_(0);
  }

  ASSERT(deopt_env() != NULL);
  __ Push(CODE_REG);
  __ BranchLink(*StubCode::Deoptimize_entry());
  set_pc_offset(assembler->CodeSize());
#undef __
}


#define __ assembler()->


// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           Label* is_true,
                                           Label* is_false) {
  __ Comment("BoolToJump");
  Label fall_through;
  __ BranchEqual(bool_register, Object::null_object(), &fall_through);
  __ BranchEqual(bool_register, Bool::True(), is_true);
  __ b(is_false);
  __ Bind(&fall_through);
}
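
// The convention behind GenerateBoolToJump, restated: the subtype-test cache
// stubs return null in the result register when the cache has no entry, so
// the three-way dispatch is
//   Bool::True()  -> is_true,
//   Bool::False() -> is_false,
//   null          -> fall through, letting the caller emit a slower check.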


// A0: instance (must be preserved).
// A1: instantiator type arguments (if used).
// A2: function type arguments (if used).
// Clobbers A3.
RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
    TypeTestStubKind test_kind,
    Register instance_reg,
    Register instantiator_type_arguments_reg,
    Register function_type_arguments_reg,
    Register temp_reg,
    Label* is_instance_lbl,
    Label* is_not_instance_lbl) {
  __ Comment("CallSubtypeTestStub");
  ASSERT(instance_reg == A0);
  ASSERT(temp_reg == kNoRegister);  // Unused on MIPS.
  const SubtypeTestCache& type_test_cache =
      SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New());
  __ LoadUniqueObject(A3, type_test_cache);
  if (test_kind == kTestTypeOneArg) {
    ASSERT(instantiator_type_arguments_reg == kNoRegister);
    ASSERT(function_type_arguments_reg == kNoRegister);
    __ BranchLink(*StubCode::Subtype1TestCache_entry());
  } else if (test_kind == kTestTypeTwoArgs) {
    ASSERT(instantiator_type_arguments_reg == kNoRegister);
    ASSERT(function_type_arguments_reg == kNoRegister);
    __ BranchLink(*StubCode::Subtype2TestCache_entry());
  } else if (test_kind == kTestTypeFourArgs) {
    ASSERT(instantiator_type_arguments_reg == A1);
    ASSERT(function_type_arguments_reg == A2);
    __ BranchLink(*StubCode::Subtype4TestCache_entry());
  } else {
    UNREACHABLE();
  }
  // Result is in V0: null -> not found, otherwise Bool::True or Bool::False.
  GenerateBoolToJump(V0, is_instance_lbl, is_not_instance_lbl);
  return type_test_cache.raw();
}


// Jumps to the label 'is_instance' or 'is_not_instance', respectively, if
// the type test is conclusive; otherwise falls through if the type test
// could not be completed.
// A0: instance being type checked (preserved).
// Clobbers T0.
RawSubtypeTestCache*
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
    TokenPosition token_pos,
    const AbstractType& type,
    Label* is_instance_lbl,
    Label* is_not_instance_lbl) {
  __ Comment("InstantiatedTypeWithArgumentsTest");
  ASSERT(type.IsInstantiated());
  const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
  ASSERT(type.IsFunctionType() || (type_class.NumTypeArguments() > 0));
  const Register kInstanceReg = A0;
  Error& bound_error = Error::Handle(zone());
  const Type& int_type = Type::Handle(zone(), Type::IntType());
  const bool smi_is_ok =
      int_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
  // A malformed type should have been handled at graph construction time.
  ASSERT(smi_is_ok || bound_error.IsNull());
  __ andi(CMPRES1, kInstanceReg, Immediate(kSmiTagMask));
  if (smi_is_ok) {
    __ beq(CMPRES1, ZR, is_instance_lbl);
  } else {
    __ beq(CMPRES1, ZR, is_not_instance_lbl);
  }
  // A function type test requires checking the function signature.
  if (!type.IsFunctionType()) {
    const intptr_t num_type_args = type_class.NumTypeArguments();
    const intptr_t num_type_params = type_class.NumTypeParameters();
    const intptr_t from_index = num_type_args - num_type_params;
    const TypeArguments& type_arguments =
        TypeArguments::ZoneHandle(zone(), type.arguments());
    const bool is_raw_type = type_arguments.IsNull() ||
                             type_arguments.IsRaw(from_index, num_type_params);
    if (is_raw_type) {
      const Register kClassIdReg = T0;
      // Dynamic type arguments: check only the classes.
      __ LoadClassId(kClassIdReg, kInstanceReg);
      __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl);
      // List is a very common case.
      if (IsListClass(type_class)) {
        GenerateListTypeCheck(kClassIdReg, is_instance_lbl);
      }
      return GenerateSubtype1TestCacheLookup(
          token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
    }
    // If there is only one type argument, check if it is Object or dynamic.
    if (type_arguments.Length() == 1) {
      const AbstractType& tp_argument =
          AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0));
      ASSERT(!tp_argument.IsMalformed());
      if (tp_argument.IsType()) {
        ASSERT(tp_argument.HasResolvedTypeClass());
        // Check if the type argument is dynamic or Object.
        const Type& object_type = Type::Handle(zone(), Type::ObjectType());
        if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) {
          // Only the instance class needs to be tested.
          return GenerateSubtype1TestCacheLookup(
              token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
        }
      }
    }
  }
  // Regular subtype test cache involving the instance's type arguments.
  const Register kInstantiatorTypeArgumentsReg = kNoRegister;
  const Register kFunctionTypeArgumentsReg = kNoRegister;
  const Register kTempReg = kNoRegister;
  // A0: instance (must be preserved).
  return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg,
                                     kInstantiatorTypeArgumentsReg,
                                     kFunctionTypeArgumentsReg, kTempReg,
                                     is_instance_lbl, is_not_instance_lbl);
}
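
// Worked example for the branches above (illustrative, using core types): a
// test against the raw type List<dynamic> only needs the instance's class
// id, so the code compares class ids directly (with a List fast path) and
// then falls back to the Subtype1 cache. A test against List<int> also
// depends on the instance's stored type arguments, so it takes the
// kTestTypeTwoArgs path through the Subtype2 cache stub instead.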


void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
                                      const GrowableArray<intptr_t>& class_ids,
                                      Label* is_equal_lbl,
                                      Label* is_not_equal_lbl) {
  __ Comment("CheckClassIds");
  for (intptr_t i = 0; i < class_ids.length(); i++) {
    __ BranchEqual(class_id_reg, Immediate(class_ids[i]), is_equal_lbl);
  }
  __ b(is_not_equal_lbl);
}


// Testing against an instantiated type with no arguments, without a
// SubtypeTestCache.
// A0: instance being type checked (preserved).
// Clobbers: T0, T1, T2.
// Returns true if there is a fallthrough.
bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
    TokenPosition token_pos,
    const AbstractType& type,
    Label* is_instance_lbl,
    Label* is_not_instance_lbl) {
  __ Comment("InstantiatedTypeNoArgumentsTest");
  ASSERT(type.IsInstantiated());
  if (type.IsFunctionType()) {
    // Fallthrough.
    return true;
  }
  const Class& type_class = Class::Handle(zone(), type.type_class());
  ASSERT(type_class.NumTypeArguments() == 0);

  const Register kInstanceReg = A0;
  __ andi(T0, A0, Immediate(kSmiTagMask));
  // If the instance is a Smi, check directly.
  const Class& smi_class = Class::Handle(zone(), Smi::Class());
  if (smi_class.IsSubtypeOf(Object::null_type_arguments(), type_class,
                            Object::null_type_arguments(), NULL, NULL,
                            Heap::kOld)) {
    __ beq(T0, ZR, is_instance_lbl);
  } else {
    __ beq(T0, ZR, is_not_instance_lbl);
  }
  const Register kClassIdReg = T0;
  __ LoadClassId(kClassIdReg, kInstanceReg);
  // See ClassFinalizer::ResolveSuperTypeAndInterfaces for the list of
  // restricted interfaces.
  // The Bool interface can be implemented only by the core class Bool.
  if (type.IsBoolType()) {
    __ BranchEqual(kClassIdReg, Immediate(kBoolCid), is_instance_lbl);
    __ b(is_not_instance_lbl);
    return false;
  }
  // Custom checking for numbers (Smi, Mint, Bigint and Double).
  // Note that the instance is not a Smi (checked above).
  if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
    GenerateNumberTypeCheck(kClassIdReg, type, is_instance_lbl,
                            is_not_instance_lbl);
    return false;
  }
  if (type.IsStringType()) {
    GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl);
    return false;
  }
  if (type.IsDartFunctionType()) {
    // Check if the instance is a closure.
    __ BranchEqual(kClassIdReg, Immediate(kClosureCid), is_instance_lbl);
    return true;  // Fall through.
  }
  // Compare if the classes are equal.
  if (!type_class.is_abstract()) {
    __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl);
  }
  // Otherwise fallthrough.
  return true;
}
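
// Illustrative summary of the fast paths above (a restatement, not added
// behavior): `x is bool` becomes a single class-id compare, `x is num`,
// `x is int` and `x is double` use the shared number check, `x is String`
// the string check, and `x is Function` a closure class-id compare that may
// fall through; anything left over falls through to the caller's slower
// cache-based lookup.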


// Uses a SubtypeTestCache to store the instance class and result.
// A0: instance to test.
// Clobbers A1-A3, T0-T3.
// Immediate class test already done.
// TODO(srdjan): Implement a quicker subtype check, as type test
// arrays can grow too large, but they may be useful when optimizing
// code (type feedback).
RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
    TokenPosition token_pos,
    const Class& type_class,
    Label* is_instance_lbl,
    Label* is_not_instance_lbl) {
  __ Comment("Subtype1TestCacheLookup");
  const Register kInstanceReg = A0;
  __ LoadClass(T0, kInstanceReg);
  // T0: instance class.
  // Check immediate superclass equality.
  __ lw(T0, FieldAddress(T0, Class::super_type_offset()));
  __ lw(T0, FieldAddress(T0, Type::type_class_id_offset()));
  __ BranchEqual(T0, Immediate(Smi::RawValue(type_class.id())),
                 is_instance_lbl);

  const Register kInstantiatorTypeArgumentsReg = kNoRegister;
  const Register kFunctionTypeArgumentsReg = kNoRegister;
  const Register kTempReg = kNoRegister;
  return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg,
                                     kInstantiatorTypeArgumentsReg,
                                     kFunctionTypeArgumentsReg, kTempReg,
                                     is_instance_lbl, is_not_instance_lbl);
}


// Generates an inlined check if 'type' is a type parameter or the type
// itself.
// A0: instance (preserved).
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
    TokenPosition token_pos,
    const AbstractType& type,
    Label* is_instance_lbl,
    Label* is_not_instance_lbl) {
  __ Comment("UninstantiatedTypeTest");
  ASSERT(!type.IsInstantiated());
  // Skip the check if the destination is a dynamic type.
  if (type.IsTypeParameter()) {
    const TypeParameter& type_param = TypeParameter::Cast(type);
    __ lw(A1, Address(SP, 1 * kWordSize));  // Get instantiator type args.
    __ lw(A2, Address(SP, 0 * kWordSize));  // Get function type args.
    // A1: instantiator type arguments.
    // A2: function type arguments.
    const Register kTypeArgumentsReg =
        type_param.IsClassTypeParameter() ? A1 : A2;
    // Check if the type arguments are null, i.e. equivalent to a vector of
    // dynamic.
    __ LoadObject(T7, Object::null_object());
    __ beq(kTypeArgumentsReg, T7, is_instance_lbl);
    __ lw(T2, FieldAddress(kTypeArgumentsReg,
                           TypeArguments::type_at_offset(type_param.index())));
    // T2: concrete type of the type parameter.
    // Check if the type argument is dynamic.
    __ BranchEqual(T2, Object::dynamic_type(), is_instance_lbl);
    __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::ObjectType()),
                   is_instance_lbl);
    // TODO(regis): Optimize void type as well once allowed as type argument.

    // For a Smi, check quickly against the int and num interfaces.
    Label not_smi;
    __ andi(CMPRES1, A0, Immediate(kSmiTagMask));
    __ bne(CMPRES1, ZR, &not_smi);  // Value is Smi?
    __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::IntType()),
                   is_instance_lbl);
    __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::Number()),
                   is_instance_lbl);
    // Other Smi cases must be handled in the runtime.
    Label fall_through;
    __ b(&fall_through);

    __ Bind(&not_smi);
    // A0: instance.
    // A1: instantiator type arguments.
    // A2: function type arguments.
    const Register kInstanceReg = A0;
    const Register kInstantiatorTypeArgumentsReg = A1;
    const Register kFunctionTypeArgumentsReg = A2;
    const Register kTempReg = kNoRegister;
    const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
        zone(), GenerateCallSubtypeTestStub(
                    kTestTypeFourArgs, kInstanceReg,
                    kInstantiatorTypeArgumentsReg, kFunctionTypeArgumentsReg,
                    kTempReg, is_instance_lbl, is_not_instance_lbl));
    __ Bind(&fall_through);
    return type_test_cache.raw();
  }
  if (type.IsType()) {
    const Register kInstanceReg = A0;
    const Register kInstantiatorTypeArgumentsReg = A1;
    const Register kFunctionTypeArgumentsReg = A2;
    __ andi(CMPRES1, kInstanceReg, Immediate(kSmiTagMask));
    __ beq(CMPRES1, ZR, is_not_instance_lbl);  // Is instance Smi?
    __ lw(kInstantiatorTypeArgumentsReg, Address(SP, 1 * kWordSize));
    __ lw(kFunctionTypeArgumentsReg, Address(SP, 0 * kWordSize));
    // The uninstantiated type class is known at compile time, but the type
    // arguments are determined at runtime by the instantiator.
    const Register kTempReg = kNoRegister;
    return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg,
                                       kInstantiatorTypeArgumentsReg,
                                       kFunctionTypeArgumentsReg, kTempReg,
                                       is_instance_lbl, is_not_instance_lbl);
  }
  return SubtypeTestCache::null();
}
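
// Example of the type-parameter path above (illustrative): for `x is T`
// inside a generic class such as `class Box<T>`, T is a class type
// parameter, so the concrete type is loaded from the instantiator type
// argument vector at index type_param.index(). A null vector stands for a
// vector of dynamic, in which case the test trivially succeeds.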


// Inputs:
// - A0: instance being type checked (preserved).
// - A1: optional instantiator type arguments (preserved).
// - A2: optional function type arguments (preserved).
// Returns:
// - preserved instance in A0, optional instantiator type arguments in A1, and
//   optional function type arguments in A2.
// Clobbers: T0, T1, T2.
// Note that this inlined code must be followed by the runtime_call code, as
// it may fall through to it. Otherwise, the inlined code will jump to the
// label is_instance or to the label is_not_instance.
RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
    TokenPosition token_pos,
    const AbstractType& type,
    Label* is_instance_lbl,
    Label* is_not_instance_lbl) {
  __ Comment("InlineInstanceof");
  if (type.IsInstantiated()) {
    const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
    // A class equality check is only applicable with a dst type (not a
    // function type) of a non-parameterized class or with a raw dst type of
    // a parameterized class.
    if (type.IsFunctionType() || (type_class.NumTypeArguments() > 0)) {
      return GenerateInstantiatedTypeWithArgumentsTest(
          token_pos, type, is_instance_lbl, is_not_instance_lbl);
      // Fall through to runtime call.
    }
    const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
        token_pos, type, is_instance_lbl, is_not_instance_lbl);
    if (has_fall_through) {
      // If the test is inconclusive so far, try the inlined type-test cache.
      // 'type' is known at compile time.
      return GenerateSubtype1TestCacheLookup(
          token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
    } else {
      return SubtypeTestCache::null();
    }
  }
  return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl,
                                        is_not_instance_lbl);
}
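
// Dispatch summary for the helper above (a restatement of its branches):
//   instantiated, function type or parameterized  -> WithArgumentsTest;
//   instantiated, no type arguments               -> NoArgumentsTest, then
//                                                    the Subtype1 cache if
//                                                    still inconclusive;
//   uninstantiated                                -> UninstantiatedTypeTest.
// A non-null returned cache tells the caller that the inlined code can fall
// through and must be followed by a runtime call.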


// If the instanceof type test cannot be resolved at compile time (and
// thereby eliminated), optimize it by adding inlined tests for:
// - NULL -> return type == Null (type is not Object or dynamic).
// - Smi -> compile time subtype check (only if dst class is not
//   parameterized).
// - Class equality (only if class is not parameterized).
// Inputs:
// - A0: object.
// - A1: instantiator type arguments or raw_null.
// - A2: function type arguments or raw_null.
// Returns:
// - true or false in V0.
void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
                                           intptr_t deopt_id,
                                           const AbstractType& type,
                                           LocationSummary* locs) {
  ASSERT(type.IsFinalized() && !type.IsMalformed() && !type.IsMalbounded());
  ASSERT(!type.IsObjectType() && !type.IsDynamicType() && !type.IsVoidType());

  // Preserve the instantiator type arguments (A1) and function type
  // arguments (A2).
  __ addiu(SP, SP, Immediate(-2 * kWordSize));
  __ sw(A1, Address(SP, 1 * kWordSize));
  __ sw(A2, Address(SP, 0 * kWordSize));

  Label is_instance, is_not_instance;
  // If the type is instantiated and non-parameterized, we can inline code
  // checking whether the tested instance is a Smi.
  if (type.IsInstantiated()) {
    // A null object is only an instance of Null, Object, and dynamic.
    // Object and dynamic have already been checked above (if the type is
    // instantiated). So we can return false here if the instance is null,
    // unless the type is Null (and if the type is instantiated).
    // We can only inline this null check if the type is instantiated at
    // compile time, since an uninstantiated type at compile time could be
    // Null, Object, or dynamic at run time.
    __ BranchEqual(A0, Object::null_object(),
                   type.IsNullType() ? &is_instance : &is_not_instance);
  }

  // Generate the inline instanceof test.
  SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
  test_cache =
      GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);

  // test_cache is null if there is no fall-through.
  Label done;
  if (!test_cache.IsNull()) {
    // Generate the runtime call.
    __ lw(A1, Address(SP, 1 * kWordSize));  // Get instantiator type args.
    __ lw(A2, Address(SP, 0 * kWordSize));  // Get function type args.
    __ addiu(SP, SP, Immediate(-6 * kWordSize));
    __ LoadObject(TMP, Object::null_object());
    __ sw(TMP, Address(SP, 5 * kWordSize));  // Make room for the result.
    __ sw(A0, Address(SP, 4 * kWordSize));   // Push the instance.
    __ LoadObject(TMP, type);
    __ sw(TMP, Address(SP, 3 * kWordSize));  // Push the type.
    __ sw(A1, Address(SP, 2 * kWordSize));   // Push instantiator type args.
    __ sw(A2, Address(SP, 1 * kWordSize));   // Push function type args.
    __ LoadUniqueObject(A0, test_cache);
    __ sw(A0, Address(SP, 0 * kWordSize));
    GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 5, locs);
    // Pop the parameters supplied to the runtime entry. The result of the
    // instanceof runtime call will be left as the result of the operation.
    __ lw(V0, Address(SP, 5 * kWordSize));
    __ b(&done);
    __ delay_slot()->addiu(SP, SP, Immediate(6 * kWordSize));
  }
  __ Bind(&is_not_instance);
  __ LoadObject(V0, Bool::Get(false));
  __ b(&done);

  __ Bind(&is_instance);
  __ LoadObject(V0, Bool::Get(true));
  __ Bind(&done);
  // Remove the instantiator type arguments and function type arguments.
  __ Drop(2);
}
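
// Layout of the six-slot frame built for the runtime fallback above, with
// slot 0 at the top of the stack: [5] result placeholder (null),
// [4] instance, [3] type, [2] instantiator type args, [1] function type
// args, [0] subtype test cache. kInstanceofRuntimeEntry receives the five
// arguments and stores the boolean answer in the placeholder, which is then
// loaded into V0.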


// Optimize the assignable type check by adding inlined tests for:
// - NULL -> return NULL.
// - Smi -> compile time subtype check (only if dst class is not
//   parameterized).
// - Class equality (only if class is not parameterized).
// Inputs:
// - A0: instance being type checked.
// - A1: instantiator type arguments or raw_null.
// - A2: function type arguments or raw_null.
// Returns:
// - object in A0 for a successful assignable check (or throws a TypeError).
// Clobbers: T0, T1, T2.
// Performance notes: positive checks must be quick; negative checks can be
// slow, as they throw an exception.
void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
                                                 intptr_t deopt_id,
                                                 const AbstractType& dst_type,
                                                 const String& dst_name,
                                                 LocationSummary* locs) {
  __ Comment("AssertAssignable");
  ASSERT(!token_pos.IsClassifying());
  ASSERT(!dst_type.IsNull());
  ASSERT(dst_type.IsFinalized());
  // The assignable check is skipped in the FlowGraphBuilder, not here.
  ASSERT(dst_type.IsMalformedOrMalbounded() ||
         (!dst_type.IsDynamicType() && !dst_type.IsObjectType() &&
          !dst_type.IsVoidType()));

  // Preserve the instantiator type arguments (A1) and function type
  // arguments (A2).
  __ addiu(SP, SP, Immediate(-2 * kWordSize));
  __ sw(A1, Address(SP, 1 * kWordSize));
  __ sw(A2, Address(SP, 0 * kWordSize));

  // A null object is always assignable and is returned as the result.
  Label is_assignable, runtime_call;

  __ BranchEqual(A0, Object::null_object(), &is_assignable);

  // Generate a throw of a new TypeError if the type is malformed or
  // malbounded.
  if (dst_type.IsMalformedOrMalbounded()) {
    __ addiu(SP, SP, Immediate(-4 * kWordSize));
    __ LoadObject(TMP, Object::null_object());
    __ sw(TMP, Address(SP, 3 * kWordSize));  // Make room for the result.
    __ sw(A0, Address(SP, 2 * kWordSize));   // Push the source object.
    __ LoadObject(TMP, dst_name);
    __ sw(TMP, Address(SP, 1 * kWordSize));  // Push the destination name.
    __ LoadObject(TMP, dst_type);
    __ sw(TMP, Address(SP, 0 * kWordSize));  // Push the destination type.

    GenerateRuntimeCall(token_pos, deopt_id, kBadTypeErrorRuntimeEntry, 3,
                        locs);
    // We should never return here.
    __ break_(0);

    __ Bind(&is_assignable);  // For a null object.
    __ lw(A1, Address(SP, 1 * kWordSize));  // Restore instantiator type args.
    __ lw(A2, Address(SP, 0 * kWordSize));  // Restore function type args.
    __ addiu(SP, SP, Immediate(2 * kWordSize));
    return;
  }

  // Generate the inline type check, linking to a runtime call if not
  // assignable.
  SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
  test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable,
                                        &runtime_call);

  __ Bind(&runtime_call);
  __ lw(A1, Address(SP, 1 * kWordSize));  // Load instantiator type args.
  __ lw(A2, Address(SP, 0 * kWordSize));  // Load function type args.

  __ addiu(SP, SP, Immediate(-7 * kWordSize));
  __ LoadObject(TMP, Object::null_object());
  __ sw(TMP, Address(SP, 6 * kWordSize));  // Make room for the result.
  __ sw(A0, Address(SP, 5 * kWordSize));   // Push the source object.
  __ LoadObject(TMP, dst_type);
  __ sw(TMP, Address(SP, 4 * kWordSize));  // Push the destination type.
  __ sw(A1, Address(SP, 3 * kWordSize));   // Push instantiator type args.
  __ sw(A2, Address(SP, 2 * kWordSize));   // Push function type args.
  __ LoadObject(TMP, dst_name);
  __ sw(TMP, Address(SP, 1 * kWordSize));  // Push the destination name.
  __ LoadUniqueObject(T0, test_cache);
  __ sw(T0, Address(SP, 0 * kWordSize));

  GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs);
  // Pop the parameters supplied to the runtime entry. The result of the
  // type check runtime call is the checked value.
  __ lw(A0, Address(SP, 6 * kWordSize));
  __ addiu(SP, SP, Immediate(7 * kWordSize));

  __ Bind(&is_assignable);
  __ lw(A1, Address(SP, 1 * kWordSize));  // Restore instantiator type args.
  __ lw(A2, Address(SP, 0 * kWordSize));  // Restore function type args.
  __ addiu(SP, SP, Immediate(2 * kWordSize));
}
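
// Dart-level example (illustrative): a checked-mode assignment such as
//   String s = x;
// reaches this helper with dst_type = String and dst_name = "s". A null x,
// or one that passes the inline instanceof test, is returned unchanged in
// A0; otherwise kTypeCheckRuntimeEntry either produces the checked value or
// throws a TypeError naming "s".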


void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
  if (is_optimizing()) return;
  Definition* defn = instr->AsDefinition();
  if ((defn != NULL) && defn->HasTemp()) {
    __ Push(defn->locs()->out(0).reg());
  }
}


// Input parameters:
// S4: arguments descriptor array.
void FlowGraphCompiler::CopyParameters() {
  __ Comment("Copy parameters");
  const Function& function = parsed_function().function();
  LocalScope* scope = parsed_function().node_sequence()->scope();
  const int num_fixed_params = function.num_fixed_parameters();
  const int num_opt_pos_params = function.NumOptionalPositionalParameters();
  const int num_opt_named_params = function.NumOptionalNamedParameters();
  const int num_params =
      num_fixed_params + num_opt_pos_params + num_opt_named_params;
  ASSERT(function.NumParameters() == num_params);
  ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp);

  // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args,
  // where num_pos_args is the number of positional arguments passed in.
  const int min_num_pos_args = num_fixed_params;
  const int max_num_pos_args = num_fixed_params + num_opt_pos_params;

  __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
  // Check that min_num_pos_args <= num_pos_args.
  Label wrong_num_arguments;
  __ BranchSignedLess(T2, Immediate(Smi::RawValue(min_num_pos_args)),
                      &wrong_num_arguments);

  // Check that num_pos_args <= max_num_pos_args.
  __ BranchSignedGreater(T2, Immediate(Smi::RawValue(max_num_pos_args)),
                         &wrong_num_arguments);

  // Copy positional arguments.
  // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied
  // to fp[kFirstLocalSlotFromFp - i].

  __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
  // Since T1 and T2 are Smi, use sll 1 instead of sll 2.
  // Let T1 point to the last passed positional argument, i.e. to
  // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)].
  __ subu(T1, T1, T2);
  __ sll(T1, T1, 1);
  __ addu(T1, FP, T1);
  __ AddImmediate(T1, (kParamEndSlotFromFp + 1) * kWordSize);

  // Let T0 point to the last copied positional argument, i.e. to
  // fp[kFirstLocalSlotFromFp - (num_pos_args - 1)].
  __ AddImmediate(T0, FP, (kFirstLocalSlotFromFp + 1) * kWordSize);
  __ sll(T2, T2, 1);  // T2 is a Smi.

  __ Comment("Argument Copy Loop");
  Label loop, loop_exit;
  __ blez(T2, &loop_exit);
  __ delay_slot()->subu(T0, T0, T2);
  __ Bind(&loop);
  __ addu(T4, T1, T2);
  __ lw(T3, Address(T4, -kWordSize));
  __ addiu(T2, T2, Immediate(-kWordSize));
  __ addu(T5, T0, T2);
  __ bgtz(T2, &loop);
  __ delay_slot()->sw(T3, Address(T5));
  __ Bind(&loop_exit);

  // Copy or initialize optional named arguments.
  Label all_arguments_processed;
#ifdef DEBUG
  const bool check_correct_named_args = true;
#else
  const bool check_correct_named_args = function.IsClosureFunction();
#endif
  if (num_opt_named_params > 0) {
    __ Comment("There are named parameters");
    // Start by alphabetically sorting the names of the optional parameters.
    LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
    int* opt_param_position = new int[num_opt_named_params];
    for (int pos = num_fixed_params; pos < num_params; pos++) {
      LocalVariable* parameter = scope->VariableAt(pos);
      const String& opt_param_name = parameter->name();
      int i = pos - num_fixed_params;
      while (--i >= 0) {
        LocalVariable* param_i = opt_param[i];
        const intptr_t result = opt_param_name.CompareTo(param_i->name());
        ASSERT(result != 0);
        if (result > 0) break;
        opt_param[i + 1] = opt_param[i];
        opt_param_position[i + 1] = opt_param_position[i];
      }
      opt_param[i + 1] = parameter;
      opt_param_position[i + 1] = pos;
    }
    // Generate code handling each optional parameter in alphabetical order.
    __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
    // Let T1 point to the first passed argument, i.e. to
    // fp[kParamEndSlotFromFp + num_args - 0]; num_args (T1) is Smi.
    __ sll(T3, T1, 1);
    __ addu(T1, FP, T3);
    __ AddImmediate(T1, kParamEndSlotFromFp * kWordSize);
    // Let T0 point to the entry of the first named argument.
    __ AddImmediate(T0, S4, ArgumentsDescriptor::first_named_entry_offset() -
                                kHeapObjectTag);
    for (int i = 0; i < num_opt_named_params; i++) {
      Label load_default_value, assign_optional_parameter;
      const int param_pos = opt_param_position[i];
      // Check if this named parameter was passed in.
      // Load T3 with the name of the argument.
      __ lw(T3, Address(T0, ArgumentsDescriptor::name_offset()));
      ASSERT(opt_param[i]->name().IsSymbol());
      __ BranchNotEqual(T3, opt_param[i]->name(), &load_default_value);

      // Load T3 with the passed-in argument at the provided arg_pos, i.e. at
      // fp[kParamEndSlotFromFp + num_args - arg_pos].
      __ lw(T3, Address(T0, ArgumentsDescriptor::position_offset()));
      // T3 is arg_pos as Smi.
      // Point to the next named entry.
      __ AddImmediate(T0, ArgumentsDescriptor::named_entry_size());
      __ subu(T3, ZR, T3);
      __ sll(T3, T3, 1);
      __ addu(T3, T1, T3);
      __ b(&assign_optional_parameter);
      __ delay_slot()->lw(T3, Address(T3));

      __ Bind(&load_default_value);
      // Load T3 with the default argument.
      const Instance& value = parsed_function().DefaultParameterValueAt(
          param_pos - num_fixed_params);
      __ LoadObject(T3, value);
      __ Bind(&assign_optional_parameter);
      // Assign T3 to fp[kFirstLocalSlotFromFp - param_pos].
      // We do not use the final allocation index of the variable here, i.e.
      // scope->VariableAt(i)->index(), because captured variables still need
      // to be copied to the context that is not yet allocated.
      const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
      __ sw(T3, Address(FP, computed_param_pos * kWordSize));
    }
    delete[] opt_param;
    delete[] opt_param_position;
    if (check_correct_named_args) {
      // Check that T0 now points to the null terminator in the arguments
      // descriptor.
      __ lw(T3, Address(T0));
      __ BranchEqual(T3, Object::null_object(), &all_arguments_processed);
    }
  } else {
    ASSERT(num_opt_pos_params > 0);
    __ Comment("There are optional positional parameters");
    __ lw(T2,
          FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
    __ SmiUntag(T2);
    for (int i = 0; i < num_opt_pos_params; i++) {
      Label next_parameter;
      // Handle this optional positional parameter only if k or fewer
      // positional arguments have been passed, where k is param_pos, the
      // position of this optional parameter in the formal parameter list.
      const int param_pos = num_fixed_params + i;
      __ BranchSignedGreater(T2, Immediate(param_pos), &next_parameter);
      // Load T3 with the default argument.
      const Object& value = parsed_function().DefaultParameterValueAt(i);
      __ LoadObject(T3, value);
      // Assign T3 to fp[kFirstLocalSlotFromFp - param_pos].
      // We do not use the final allocation index of the variable here, i.e.
      // scope->VariableAt(i)->index(), because captured variables still need
      // to be copied to the context that is not yet allocated.
      const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
      __ sw(T3, Address(FP, computed_param_pos * kWordSize));
      __ Bind(&next_parameter);
    }
    if (check_correct_named_args) {
      __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
      __ SmiUntag(T1);
      // Check that T2 equals T1, i.e. no named arguments were passed.
      __ beq(T2, T1, &all_arguments_processed);
    }
  }

  __ Bind(&wrong_num_arguments);
  if (function.IsClosureFunction()) {
    __ LeaveDartFrame(kKeepCalleePP);  // Arguments are still on the stack.
    __ Branch(*StubCode::CallClosureNoSuchMethod_entry());
    // The noSuchMethod call may return to the caller, but not here.
  } else if (check_correct_named_args) {
    __ Stop("Wrong arguments");
  }

  __ Bind(&all_arguments_processed);
  // Nullify the originally passed arguments only after they have been copied
  // and checked; otherwise noSuchMethod would not see their original values.
  // This step can be skipped if we decide that formal parameters are
  // implicitly final, since garbage collecting the unmodified value would no
  // longer be an issue.

  // S4: arguments descriptor array.
  __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
  __ sll(T2, T2, 1);  // T2 is a Smi.

  __ Comment("Null arguments loop");
  Label null_args_loop, null_args_loop_exit;
  __ blez(T2, &null_args_loop_exit);
  __ delay_slot()->addiu(T1, FP,
                         Immediate((kParamEndSlotFromFp + 1) * kWordSize));
  __ Bind(&null_args_loop);
  __ addiu(T2, T2, Immediate(-kWordSize));
  __ addu(T3, T1, T2);
  __ LoadObject(T5, Object::null_object());
  __ bgtz(T2, &null_args_loop);
  __ delay_slot()->sw(T5, Address(T3));
  __ Bind(&null_args_loop_exit);
}
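
// Worked example for the positional copy above (illustrative): for a call
// f(a, b) with num_args == num_pos_args == 2, argument 0 is passed at
// fp[kParamEndSlotFromFp + 2] and argument 1 at fp[kParamEndSlotFromFp + 1];
// they are copied to fp[kFirstLocalSlotFromFp] and
// fp[kFirstLocalSlotFromFp - 1]. The loop counter T2 starts as the Smi
// argument count scaled to bytes by the extra sll and is decremented by
// kWordSize per iteration, with the branch delay slots doing the
// base-address setup and the store.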


void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
  // RA: return address.
  // SP: receiver.
  // The sequence node has one return node; its input is a load-field node.
  __ Comment("Inlined Getter");
  __ lw(V0, Address(SP, 0 * kWordSize));
  __ LoadFieldFromOffset(V0, V0, offset);
  __ Ret();
}


void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
  // RA: return address.
  // SP+1: receiver.
  // SP+0: value.
  // The sequence node has one store node and one return-null node.
  __ Comment("Inlined Setter");
  __ lw(T0, Address(SP, 1 * kWordSize));  // Receiver.
  __ lw(T1, Address(SP, 0 * kWordSize));  // Value.
  __ StoreIntoObjectOffset(T0, offset, T1);
  __ LoadObject(V0, Object::null_object());
  __ Ret();
}


static const Register new_pp = T7;


void FlowGraphCompiler::EmitFrameEntry() {
  const Function& function = parsed_function().function();
  if (CanOptimizeFunction() && function.IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
    __ Comment("Invocation Count Check");
    const Register function_reg = T0;

    // Temporarily set up the pool pointer for this Dart function.
    __ LoadPoolPointer(new_pp);
    // Load the function object from the object pool.
    __ LoadFunctionFromCalleePool(function_reg, function, new_pp);

    __ lw(T1, FieldAddress(function_reg, Function::usage_counter_offset()));
    // Reoptimization of an optimized function is triggered by counting in
    // IC stubs, but not at the entry of the function.
    if (!is_optimizing()) {
      __ addiu(T1, T1, Immediate(1));
      __ sw(T1, FieldAddress(function_reg, Function::usage_counter_offset()));
    }

    // Skip the branch if T1 is less than the threshold.
    Label dont_branch;
    __ BranchSignedLess(T1, Immediate(GetOptimizationThreshold()),
                        &dont_branch);

    ASSERT(function_reg == T0);
    __ Branch(*StubCode::OptimizeFunction_entry(), new_pp);

    __ Bind(&dont_branch);
  }
  __ Comment("Enter frame");
  if (flow_graph().IsCompiledForOsr()) {
    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
                           flow_graph().num_copied_params();
    ASSERT(extra_slots >= 0);
    __ EnterOsrFrame(extra_slots * kWordSize);
  } else {
    ASSERT(StackSize() >= 0);
    __ EnterDartFrame(StackSize() * kWordSize);
  }
}
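
// Recap of the invocation-count check above (restating the code): the usage
// counter is loaded from the Function object, incremented only in
// unoptimized code, and compared against GetOptimizationThreshold(); once
// the threshold is reached, control branches to the OptimizeFunction stub
// instead of entering the frame.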


// Input parameters:
// RA: return address.
// SP: address of last argument.
// FP: caller's frame pointer.
// PP: caller's pool pointer.
// S5: ic-data.
// S4: arguments descriptor array.
void FlowGraphCompiler::CompileGraph() {
  InitCompiler();
  const Function& function = parsed_function().function();

#ifdef DART_PRECOMPILER
  if (function.IsDynamicFunction()) {
    __ MonomorphicCheckedEntry();
  }
#endif  // DART_PRECOMPILER

  if (TryIntrinsify()) {
    // Skip regular code generation.
    return;
  }

  EmitFrameEntry();
  ASSERT(assembler()->constant_pool_allowed());

  const int num_fixed_params = function.num_fixed_parameters();
  const int num_copied_params = parsed_function().num_copied_params();
  const int num_locals = parsed_function().num_stack_locals();

  // We check the number of passed arguments when we have to copy them due to
  // the presence of optional parameters.
  // No such checking code is generated if only fixed parameters are declared,
  // unless we are in debug mode or unless we are compiling a closure.
  if (num_copied_params == 0) {
    const bool check_arguments =
        function.IsClosureFunction() && !flow_graph().IsCompiledForOsr();
    if (check_arguments) {
      __ Comment("Check argument count");
      // Check that exactly num_fixed_params arguments are passed in.
      Label correct_num_arguments, wrong_num_arguments;
      __ lw(T0, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
      __ BranchNotEqual(T0, Immediate(Smi::RawValue(num_fixed_params)),
                        &wrong_num_arguments);

      __ lw(T1,
            FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
      __ beq(T0, T1, &correct_num_arguments);
      __ Bind(&wrong_num_arguments);
      __ LeaveDartFrame(kKeepCalleePP);  // Arguments are still on the stack.
      __ Branch(*StubCode::CallClosureNoSuchMethod_entry());
      // The noSuchMethod call may return to the caller, but not here.
      __ Bind(&correct_num_arguments);
    }
  } else if (!flow_graph().IsCompiledForOsr()) {
    CopyParameters();
  }

  if (function.IsClosureFunction() && !flow_graph().IsCompiledForOsr()) {
    // Load the context from the closure object (first argument).
    LocalScope* scope = parsed_function().node_sequence()->scope();
    LocalVariable* closure_parameter = scope->VariableAt(0);
    __ lw(CTX, Address(FP, closure_parameter->index() * kWordSize));
    __ lw(CTX, FieldAddress(CTX, Closure::context_offset()));
  }

  // In unoptimized code, initialize (non-argument) stack-allocated slots to
  // null.
  if (!is_optimizing()) {
    ASSERT(num_locals > 0);  // There is always at least context_var.
    __ Comment("Initialize spill slots");
    const intptr_t slot_base = parsed_function().first_stack_local_index();
    const intptr_t context_index =
        parsed_function().current_context_var()->index();
    if (num_locals > 1) {
      __ LoadObject(V0, Object::null_object());
    }
    for (intptr_t i = 0; i < num_locals; ++i) {
      // Subtract index i (locals lie at lower addresses than FP).
      if ((slot_base - i) == context_index) {
        if (function.IsClosureFunction()) {
          __ sw(CTX, Address(FP, (slot_base - i) * kWordSize));
        } else {
          __ LoadObject(V1, Object::empty_context());
          __ sw(V1, Address(FP, (slot_base - i) * kWordSize));
        }
      } else {
        ASSERT(num_locals > 1);
        __ sw(V0, Address(FP, (slot_base - i) * kWordSize));
      }
    }
  }

  // Check for a passed type argument vector if the function is generic.
  if (FLAG_reify_generic_functions && function.IsGeneric()) {
    __ Comment("Check passed-in type args");
    Label store_type_args, ok;
    __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::type_args_len_offset()));
    if (is_optimizing()) {
      // Initialize type_args to null if none were passed in.
      __ LoadObject(T0, Object::null_object());
      __ BranchEqual(T1, Immediate(0), &store_type_args);
    } else {
      __ BranchEqual(T1, Immediate(0), &ok);  // Already initialized to null.
    }
    // TODO(regis): Verify that type_args_len is correct.
    // Load the passed type args vector into T0 from
    // fp[kParamEndSlotFromFp + num_args + 1]; num_args (T1) is Smi.
    __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
    __ sll(T1, T1, 1);
    __ addu(T1, FP, T1);
    __ lw(T0, Address(T1, (kParamEndSlotFromFp + 1) * kWordSize));
    // Store T0 into the stack slot reserved for the function type arguments.
    // If the function type arguments variable is captured, a copy will happen
    // after the context is allocated.
    const intptr_t slot_base = parsed_function().first_stack_local_index();
    ASSERT(parsed_function().function_type_arguments()->is_captured() ||
           parsed_function().function_type_arguments()->index() == slot_base);
    __ Bind(&store_type_args);
    __ sw(T0, Address(FP, slot_base * kWordSize));
    __ Bind(&ok);
  }

  // TODO(regis): Verify that no vector is passed if not generic, unless
  // already checked during resolution.

  EndCodeSourceRange(TokenPosition::kDartCodePrologue);
  VisitBlocks();

  __ break_(0);
  GenerateDeferredCode();
}


void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
                                     const StubEntry& stub_entry,
                                     RawPcDescriptors::Kind kind,
                                     LocationSummary* locs) {
  __ BranchLink(stub_entry);
  EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
}


void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
                                              const StubEntry& stub_entry,
                                              RawPcDescriptors::Kind kind,
                                              LocationSummary* locs) {
  __ BranchLinkPatchable(stub_entry);
  EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
}


void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                         TokenPosition token_pos,
                                         const StubEntry& stub_entry,
                                         RawPcDescriptors::Kind kind,
                                         LocationSummary* locs) {
  __ BranchLinkPatchable(stub_entry);
  EmitCallsiteMetaData(token_pos, deopt_id, kind, locs);
  // Marks either the continuation point in unoptimized code or the
  // deoptimization point in optimized code, after the call.
  const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
  if (is_optimizing()) {
    AddDeoptIndexAtCall(deopt_id_after);
  } else {
    // Add a deoptimization continuation point after the call and before the
    // arguments are removed.
    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
  }
}
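
// Note on the deopt-id pair used above (a VM-wide convention, restated):
// each call site owns two ids, the plain deopt_id for eager deoptimization
// at the call and Thread::ToDeoptAfter(deopt_id) for lazy deoptimization at
// the return address. Optimized code records the "after" id in the deopt
// table via AddDeoptIndexAtCall; unoptimized code records it as a kDeopt
// descriptor so execution can resume after the call, before the arguments
// are dropped.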


void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
                                               TokenPosition token_pos,
                                               const StubEntry& stub_entry,
                                               RawPcDescriptors::Kind kind,
                                               LocationSummary* locs,
                                               const Function& target) {
  // Call sites to the same target can share object pool entries. These
  // call sites are never patched for breakpoints: the function is deoptimized
  // and the unoptimized code, which has IC calls for static calls, is patched
  // instead.
  ASSERT(is_optimizing());
  __ BranchLinkWithEquivalence(stub_entry, target);

  EmitCallsiteMetaData(token_pos, deopt_id, kind, locs);
  // Marks either the continuation point in unoptimized code or the
  // deoptimization point in optimized code, after the call.
  const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
  if (is_optimizing()) {
    AddDeoptIndexAtCall(deopt_id_after);
  } else {
    // Add a deoptimization continuation point after the call and before the
    // arguments are removed.
    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
  }
  AddStaticCallTarget(target);
}


void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
                                            intptr_t deopt_id,
                                            const RuntimeEntry& entry,
                                            intptr_t argument_count,
                                            LocationSummary* locs) {
  __ CallRuntime(entry, argument_count);
  EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
  if (deopt_id != Thread::kNoDeoptId) {
    // Marks either the continuation point in unoptimized code or the
    // deoptimization point in optimized code, after the call.
    const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
    if (is_optimizing()) {
      AddDeoptIndexAtCall(deopt_id_after);
    } else {
      // Add a deoptimization continuation point after the call and before
      // the arguments are removed.
      AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after,
                           token_pos);
    }
  }
}


void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
  // We do not check for overflow when incrementing the edge counter. The
  // function should normally be optimized long before the counter can
  // overflow; and though we do not reset the counters when we optimize or
  // deoptimize, there is a bound on the number of
  // optimization/deoptimization cycles we will attempt.
  ASSERT(!edge_counters_array_.IsNull());
  __ Comment("Edge counter");
  __ LoadObject(T0, edge_counters_array_);
  __ LoadFieldFromOffset(T1, T0, Array::element_offset(edge_id));
  __ AddImmediate(T1, T1, Smi::RawValue(1));
  __ StoreFieldToOffset(T1, T0, Array::element_offset(edge_id));
}
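
// The increment above stays in Smi representation: on 32-bit targets a Smi
// holds its value shifted left by one, so adding Smi::RawValue(1) (raw bits
// 2) bumps the counter by exactly one without untagging and retagging.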
1240 | |
1241 | |
1242 void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry, | |
1243 const ICData& ic_data, | |
1244 intptr_t argument_count, | |
1245 intptr_t deopt_id, | |
1246 TokenPosition token_pos, | |
1247 LocationSummary* locs) { | |
1248 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); | |
1249 // Each ICData propagated from unoptimized to optimized code contains the | |
1250 // function that corresponds to the Dart function of that IC call. Due | |
1251 // to inlining in optimized code, that function may not correspond to the | |
1252 // top-level function (parsed_function().function()) which could be | |
1253 // reoptimized and which counter needs to be incremented. | |
1254 // Pass the function explicitly, it is used in IC stub. | |
  __ Comment("OptimizedInstanceCall");
  __ LoadObject(T0, parsed_function().function());
  __ LoadUniqueObject(S5, ic_data);
  GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
                   locs);
  __ Drop(argument_count);
}


void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
                                         const ICData& ic_data,
                                         intptr_t argument_count,
                                         intptr_t deopt_id,
                                         TokenPosition token_pos,
                                         LocationSummary* locs) {
  ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
  __ Comment("InstanceCall");
  __ LoadUniqueObject(S5, ic_data);
  GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
                   locs);
  __ Comment("InstanceCall return");
  __ Drop(argument_count);
}


void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name,
    const Array& arguments_descriptor,
    intptr_t argument_count,
    intptr_t deopt_id,
    TokenPosition token_pos,
    LocationSummary* locs,
    intptr_t try_index,
    intptr_t slow_path_argument_count) {
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
      zone(),
      MegamorphicCacheTable::Lookup(isolate(), name, arguments_descriptor));

  __ Comment("MegamorphicCall");
  // Load receiver into T0.
  __ lw(T0, Address(SP, (argument_count - 1) * kWordSize));
  __ LoadObject(S5, cache);
  __ lw(T9, Address(THR, Thread::megamorphic_call_checked_entry_offset()));
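  // The checked entry expects the receiver in T0 and the cache in S5; it
  // probes the cache with the receiver's class id and calls the matching
  // target.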
  __ jalr(T9);

  RecordSafepoint(locs, slow_path_argument_count);
  const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
  if (FLAG_precompiled_mode) {
    // Megamorphic calls may occur in slow path stubs.
    // If the try_index argument is valid, use it.
    if (try_index == CatchClauseNode::kInvalidTryIndex) {
      try_index = CurrentTryIndex();
    }
    AddDescriptor(RawPcDescriptors::kOther, assembler()->CodeSize(),
                  Thread::kNoDeoptId, token_pos, try_index);
  } else if (is_optimizing()) {
    AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
                         token_pos);
    AddDeoptIndexAtCall(deopt_id_after);
  } else {
    AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
                         token_pos);
    // Add deoptimization continuation point after the call and before the
    // arguments are removed.
    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
  }
  EmitCatchEntryState(pending_deoptimization_env_, try_index);
  __ Drop(argument_count);
}


void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
                                                   intptr_t argument_count,
                                                   intptr_t deopt_id,
                                                   TokenPosition token_pos,
                                                   LocationSummary* locs) {
  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub =
      Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code());

  __ Comment("SwitchableCall");
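  // The call site starts out bound to the IC stub loaded below; the runtime
  // may later rebind it (e.g. to monomorphic or megamorphic dispatch) by
  // patching the pool entries created by LoadUniqueObject.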
  __ lw(T0, Address(SP, (argument_count - 1) * kWordSize));
  __ LoadUniqueObject(CODE_REG, initial_stub);
  __ lw(T9, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
  __ LoadUniqueObject(S5, ic_data);
  __ jalr(T9);

  EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, RawPcDescriptors::kOther,
                       locs);
  const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
  if (is_optimizing()) {
    AddDeoptIndexAtCall(deopt_id_after);
  } else {
    // Add deoptimization continuation point after the call and before the
    // arguments are removed.
    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
  }
  __ Drop(argument_count);
}


void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
                                                  intptr_t deopt_id,
                                                  TokenPosition token_pos,
                                                  LocationSummary* locs,
                                                  const ICData& ic_data) {
  const StubEntry* stub_entry =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(S5, ic_data);
  GenerateDartCall(deopt_id, token_pos, *stub_entry,
                   RawPcDescriptors::kUnoptStaticCall, locs);
  __ Drop(argument_count);
}


void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function,
    const Array& arguments_descriptor,
    intptr_t argument_count,
    intptr_t deopt_id,
    TokenPosition token_pos,
    LocationSummary* locs) {
  __ Comment("StaticCall");
  ASSERT(!function.IsClosureFunction());
  if (function.HasOptionalParameters() ||
      (FLAG_reify_generic_functions && function.IsGeneric())) {
    __ LoadObject(S4, arguments_descriptor);
  } else {
    // GC-safe Smi zero; the called stub does not need the arguments
    // descriptor.
    __ LoadImmediate(S4, 0);
  }
  // Do not use the code from the function, but let the code be patched so that
  // we can record the outgoing edges to other code.
  GenerateStaticDartCall(deopt_id, token_pos,
                         *StubCode::CallStaticFunction_entry(),
                         RawPcDescriptors::kOther, locs, function);
  __ Drop(argument_count);
}


Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg,
    const Object& obj,
    bool needs_number_check,
    TokenPosition token_pos,
    intptr_t deopt_id) {
  __ Comment("EqualityRegConstCompare");
  ASSERT(!needs_number_check ||
         (!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()));
  if (needs_number_check) {
    ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint());
    __ addiu(SP, SP, Immediate(-2 * kWordSize));
    __ sw(reg, Address(SP, 1 * kWordSize));
    __ LoadObject(TMP, obj);
    __ sw(TMP, Address(SP, 0 * kWordSize));
    if (is_optimizing()) {
      __ BranchLinkPatchable(
          *StubCode::OptimizedIdenticalWithNumberCheck_entry());
    } else {
      __ BranchLinkPatchable(
          *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
    }
    AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
    __ Comment("EqualityRegConstCompare return");
    // Stub returns result in CMPRES1 (if it is 0, then reg and obj are equal).
    __ lw(reg, Address(SP, 1 * kWordSize));  // Restore 'reg'.
    __ addiu(SP, SP, Immediate(2 * kWordSize));  // Pop 'reg' slot and constant.
    return Condition(CMPRES1, ZR, EQ);
  } else {
    int16_t imm = 0;
    const Register obj_reg = __ LoadConditionOperand(CMPRES1, obj, &imm);
    return Condition(reg, obj_reg, EQ, imm);
  }
}


Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
                                                       Register right,
                                                       bool needs_number_check,
                                                       TokenPosition token_pos,
                                                       intptr_t deopt_id) {
  __ Comment("EqualityRegRegCompare");
  if (needs_number_check) {
    __ addiu(SP, SP, Immediate(-2 * kWordSize));
    __ sw(left, Address(SP, 1 * kWordSize));
    __ sw(right, Address(SP, 0 * kWordSize));
    if (is_optimizing()) {
      __ BranchLinkPatchable(
          *StubCode::OptimizedIdenticalWithNumberCheck_entry());
    } else {
      __ BranchLinkPatchable(
          *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
    }
    if (token_pos.IsReal()) {
      AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId,
                           token_pos);
    }
    __ Comment("EqualityRegRegCompare return");
    // Stub returns result in CMPRES1 (if it is 0, then left and right are
    // equal).
    __ lw(right, Address(SP, 0 * kWordSize));
    __ lw(left, Address(SP, 1 * kWordSize));
    __ addiu(SP, SP, Immediate(2 * kWordSize));
    return Condition(CMPRES1, ZR, EQ);
  } else {
    return Condition(left, right, EQ);
  }
}


// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
// FlowGraphCompiler::SlowPathEnvironmentFor.
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
#if defined(DEBUG)
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);
#endif

  __ Comment("SaveLiveRegisters");
  // TODO(vegorov): consider saving only caller save (volatile) registers.
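  // Resulting frame layout (SP at the bottom): the CPU register block sits
  // below the FPU register block; within each block the lowest-numbered
  // register is at the lowest address.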
  const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
  if (fpu_regs_count > 0) {
    __ AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize));
    // Store fpu registers with the lowest register number at the lowest
    // address.
    intptr_t offset = 0;
    for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
      DRegister fpu_reg = static_cast<DRegister>(i);
      if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
        __ StoreDToOffset(fpu_reg, SP, offset);
        offset += kFpuRegisterSize;
      }
    }
    ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
  }

  // The order in which the registers are pushed must match the order
  // in which the registers are encoded in the safe point's stack map.
  const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
  ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
  const int register_count = Utils::CountOneBits(cpu_registers);
  if (register_count > 0) {
    __ addiu(SP, SP, Immediate(-register_count * kWordSize));
    intptr_t offset = register_count * kWordSize;
    for (int i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
      Register r = static_cast<Register>(i);
      if (locs->live_registers()->ContainsRegister(r)) {
        offset -= kWordSize;
        __ sw(r, Address(SP, offset));
      }
    }
    ASSERT(offset == 0);
  }
}


void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
  __ Comment("RestoreLiveRegisters");
  const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
  ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
  const int register_count = Utils::CountOneBits(cpu_registers);
  if (register_count > 0) {
    intptr_t offset = register_count * kWordSize;
    for (int i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
      Register r = static_cast<Register>(i);
      if (locs->live_registers()->ContainsRegister(r)) {
        offset -= kWordSize;
        __ lw(r, Address(SP, offset));
      }
    }
    ASSERT(offset == 0);
    __ addiu(SP, SP, Immediate(register_count * kWordSize));
  }

  const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
  if (fpu_regs_count > 0) {
    // Fpu registers have the lowest register number at the lowest address.
    intptr_t offset = 0;
    for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
      DRegister fpu_reg = static_cast<DRegister>(i);
      if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
        __ LoadDFromOffset(fpu_reg, SP, offset);
        offset += kFpuRegisterSize;
      }
    }
    ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
    __ AddImmediate(SP, offset);
  }
}


#if defined(DEBUG)
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    // TODO(zerny): clobber non-live temporary FPU registers.
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
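      // 0xf7 is an arbitrary garbage value; clobbering dead temporaries
      // makes any accidental reuse easier to spot in debug builds.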
      __ LoadImmediate(tmp.reg(), 0xf7);
    }
  }
}
#endif


void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t argument_count,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load receiver into T0.
  __ LoadFromOffset(T0, SP, (argument_count - 1) * kWordSize);
  __ LoadObject(S4, arguments_descriptor);
}


void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
  __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
  if (if_smi) {
    // Jump if receiver is Smi.
    __ beq(CMPRES1, ZR, label);
  } else {
    // Jump if receiver is not Smi.
    __ bne(CMPRES1, ZR, label);
  }
}


void FlowGraphCompiler::EmitTestAndCallLoadCid() {
  __ LoadClassId(T2, T0);
}


int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label,
                                               const CidRange& range,
                                               int bias) {
  intptr_t cid_start = range.cid_start;
  if (range.IsSingleCid()) {
    __ BranchNotEqual(T2, Immediate(cid_start - bias), next_label);
  } else {
    __ AddImmediate(T2, T2, bias - cid_start);
    bias = cid_start;
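    // T2 now holds (cid - cid_start), so the whole range can be covered by a
    // single unsigned comparison against range.Extent().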
    // TODO(erikcorry): We should use sltiu instead of the temporary TMP if
    // the range is small enough.
    __ LoadImmediate(TMP, range.Extent());
    // Reversed comparison: TMP is set to 1 if the biased cid is greater than
    // Extent(), i.e. the cid is out of range.
    __ sltu(TMP, TMP, T2);
    __ bne(TMP, ZR, next_label);
  }
  return bias;
}

#undef __
#define __ compiler_->assembler()->


void ParallelMoveResolver::EmitMove(int index) {
  MoveOperands* move = moves_[index];
  const Location source = move->src();
  const Location destination = move->dest();
  __ Comment("ParallelMoveResolver::EmitMove");

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ mov(destination.reg(), source.reg());
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      ScratchRegisterScope tmp(this, kNoRegister);
      __ LoadFromOffset(tmp.reg(), source.base_reg(), source_offset);
      __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset);
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      DRegister dst = destination.fpu_reg();
      DRegister src = source.fpu_reg();
      __ movd(dst, src);
    } else {
      ASSERT(destination.IsDoubleStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      DRegister src = source.fpu_reg();
      __ StoreDToOffset(src, destination.base_reg(), dest_offset);
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      DRegister dst = destination.fpu_reg();
      __ LoadDFromOffset(dst, source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsDoubleStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
      __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
    }
  } else {
    ASSERT(source.IsConstant());
    const Object& constant = source.constant();
    if (destination.IsRegister()) {
      if (constant.IsSmi() &&
          (source.constant_instruction()->representation() == kUnboxedInt32)) {
        __ LoadImmediate(destination.reg(), Smi::Cast(constant).Value());
      } else {
        __ LoadObject(destination.reg(), constant);
      }
    } else if (destination.IsFpuRegister()) {
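      // Unbox the Double constant: load the boxed object into TMP and read
      // its untagged value field directly into the FPU register.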
      __ LoadObject(TMP, constant);
      __ LoadDFromOffset(destination.fpu_reg(), TMP,
                         Double::value_offset() - kHeapObjectTag);
    } else if (destination.IsDoubleStackSlot()) {
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadObject(TMP, constant);
      __ LoadDFromOffset(DTMP, TMP, Double::value_offset() - kHeapObjectTag);
      __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      ScratchRegisterScope tmp(this, kNoRegister);
      if (constant.IsSmi() &&
          (source.constant_instruction()->representation() == kUnboxedInt32)) {
        __ LoadImmediate(tmp.reg(), Smi::Cast(constant).Value());
      } else {
        __ LoadObject(tmp.reg(), constant);
      }
      __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset);
    }
  }

  move->Eliminate();
}


void ParallelMoveResolver::EmitSwap(int index) {
  MoveOperands* move = moves_[index];
  const Location source = move->src();
  const Location destination = move->dest();

  if (source.IsRegister() && destination.IsRegister()) {
    ASSERT(source.reg() != TMP);
    ASSERT(destination.reg() != TMP);
    __ mov(TMP, source.reg());
    __ mov(source.reg(), destination.reg());
    __ mov(destination.reg(), TMP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.reg(), destination.base_reg(),
             destination.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.base_reg(), source.ToStackSlotOffset(),
             destination.base_reg(), destination.ToStackSlotOffset());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    DRegister dst = destination.fpu_reg();
    DRegister src = source.fpu_reg();
    __ movd(DTMP, src);
    __ movd(src, dst);
    __ movd(dst, DTMP);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot());
    DRegister reg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    Register base_reg =
        source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
    const intptr_t slot_offset = source.IsFpuRegister()
                                     ? destination.ToStackSlotOffset()
                                     : source.ToStackSlotOffset();
    __ LoadDFromOffset(DTMP, base_reg, slot_offset);
    __ StoreDToOffset(reg, base_reg, slot_offset);
    __ movd(reg, DTMP);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, DTMP);
    DRegister scratch = ensure_scratch.reg();
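    // DTMP holds one slot while 'scratch' holds the other, so the two stack
    // slots can be exchanged without clobbering any allocated register.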
    __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
    __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
    __ StoreDToOffset(scratch, source.base_reg(), source_offset);
  } else {
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  move->Eliminate();

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    const MoveOperands& other_move = *moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i]->set_src(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i]->set_src(source);
    }
  }
}


void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
                                              const Address& src) {
  __ Comment("ParallelMoveResolver::MoveMemoryToMemory");
  __ lw(TMP, src);
  __ sw(TMP, dst);
}


void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
  __ Comment("ParallelMoveResolver::StoreObject");
  __ LoadObject(TMP, obj);
  __ sw(TMP, dst);
}


// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
  UNREACHABLE();
}


// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
  UNREACHABLE();
}


void ParallelMoveResolver::Exchange(Register reg,
                                    Register base_reg,
                                    intptr_t stack_offset) {
  ScratchRegisterScope tmp(this, reg);
  __ mov(tmp.reg(), reg);
  __ LoadFromOffset(reg, base_reg, stack_offset);
  __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
}


void ParallelMoveResolver::Exchange(Register base_reg1,
                                    intptr_t stack_offset1,
                                    Register base_reg2,
                                    intptr_t stack_offset2) {
  ScratchRegisterScope tmp1(this, kNoRegister);
  ScratchRegisterScope tmp2(this, tmp1.reg());
  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
}


void ParallelMoveResolver::SpillScratch(Register reg) {
  __ Comment("ParallelMoveResolver::SpillScratch");
  __ Push(reg);
}


void ParallelMoveResolver::RestoreScratch(Register reg) {
  __ Comment("ParallelMoveResolver::RestoreScratch");
  __ Pop(reg);
}


void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
  __ Comment("ParallelMoveResolver::SpillFpuScratch");
  __ AddImmediate(SP, -kDoubleSize);
  __ StoreDToOffset(reg, SP, 0);
}


void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
  __ Comment("ParallelMoveResolver::RestoreFpuScratch");
  __ LoadDFromOffset(reg, SP, 0);
  __ AddImmediate(SP, kDoubleSize);
}


#undef __


}  // namespace dart

#endif  // defined TARGET_ARCH_MIPS