Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_compiler_mips.cc

Issue 2858623002: Remove MIPS support (Closed)
Patch Set: Rebase (created 3 years, 6 months ago)
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file.
4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS.
6 #if defined(TARGET_ARCH_MIPS)
7
8 #include "vm/flow_graph_compiler.h"
9
10 #include "vm/ast_printer.h"
11 #include "vm/compiler.h"
12 #include "vm/dart_entry.h"
13 #include "vm/deopt_instructions.h"
14 #include "vm/il_printer.h"
15 #include "vm/instructions.h"
16 #include "vm/locations.h"
17 #include "vm/object_store.h"
18 #include "vm/parser.h"
19 #include "vm/stack_frame.h"
20 #include "vm/stub_code.h"
21 #include "vm/symbols.h"
22
23 namespace dart {
24
25 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
26
27
28 FlowGraphCompiler::~FlowGraphCompiler() {
29 // BlockInfos are zone-allocated, so their destructors are not called.
30 // Verify the labels explicitly here.
31 for (int i = 0; i < block_info_.length(); ++i) {
32 ASSERT(!block_info_[i]->jump_label()->IsLinked());
33 }
34 }
35
36
37 bool FlowGraphCompiler::SupportsUnboxedDoubles() {
38 return true;
39 }
40
41
42 bool FlowGraphCompiler::SupportsUnboxedMints() {
43 return true;
44 }
45
46
47 bool FlowGraphCompiler::SupportsUnboxedSimd128() {
48 return false;
49 }
50
51
52 bool FlowGraphCompiler::SupportsHardwareDivision() {
53 return true;
54 }
55
56
57 bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() {
58 // TODO(johnmccutchan): Investigate possibility on MIPS once
59 // mints are implemented there.
60 return false;
61 }
62
63
64 void FlowGraphCompiler::EnterIntrinsicMode() {
65 ASSERT(!intrinsic_mode());
66 intrinsic_mode_ = true;
67 assembler()->set_constant_pool_allowed(false);
68 }
69
70
71 void FlowGraphCompiler::ExitIntrinsicMode() {
72 ASSERT(intrinsic_mode());
73 intrinsic_mode_ = false;
74 assembler()->set_constant_pool_allowed(true);
75 }
76
77
78 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
79 DeoptInfoBuilder* builder,
80 const Array& deopt_table) {
81 if (deopt_env_ == NULL) {
82 ++builder->current_info_number_;
83 return TypedData::null();
84 }
85
86 intptr_t stack_height = compiler->StackSize();
87 AllocateIncomingParametersRecursive(deopt_env_, &stack_height);
88
89 intptr_t slot_ix = 0;
90 Environment* current = deopt_env_;
91
92 // Emit all kMaterializeObject instructions describing objects to be
93 // materialized on the deoptimization as a prefix to the deoptimization info.
94 EmitMaterializations(deopt_env_, builder);
95
96 // The real frame starts here.
97 builder->MarkFrameStart();
98
99 Zone* zone = compiler->zone();
100
101 builder->AddPp(current->function(), slot_ix++);
102 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
103 builder->AddCallerFp(slot_ix++);
104 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
105
106
107 // Emit all values that are needed for materialization as a part of the
108 // expression stack for the bottom-most frame. This guarantees that GC
109 // will be able to find them during materialization.
110 slot_ix = builder->EmitMaterializationArguments(slot_ix);
111
112 // For the innermost environment, set outgoing arguments and the locals.
113 for (intptr_t i = current->Length() - 1;
114 i >= current->fixed_parameter_count(); i--) {
115 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
116 }
117
118 Environment* previous = current;
119 current = current->outer();
120 while (current != NULL) {
121 builder->AddPp(current->function(), slot_ix++);
122 builder->AddPcMarker(previous->function(), slot_ix++);
123 builder->AddCallerFp(slot_ix++);
124
125 // For any outer environment the deopt id is that of the call instruction
126 // which is recorded in the outer environment.
127 builder->AddReturnAddress(current->function(),
128 Thread::ToDeoptAfter(current->deopt_id()),
129 slot_ix++);
130
131 // The values of outgoing arguments can be changed by the inlined call, so
132 // we must read them from the previous environment.
133 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
134 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
135 slot_ix++);
136 }
137
138 // Set the locals, note that outgoing arguments are not in the environment.
139 for (intptr_t i = current->Length() - 1;
140 i >= current->fixed_parameter_count(); i--) {
141 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
142 }
143
144 // Iterate on the outer environment.
145 previous = current;
146 current = current->outer();
147 }
148 // The previous pointer is now the outermost environment.
149 ASSERT(previous != NULL);
150
151 // Set slots for the outermost environment.
152 builder->AddCallerPp(slot_ix++);
153 builder->AddPcMarker(previous->function(), slot_ix++);
154 builder->AddCallerFp(slot_ix++);
155 builder->AddCallerPc(slot_ix++);
156
157 // For the outermost environment, set the incoming arguments.
158 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
159 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
160 }
161
162 return builder->CreateDeoptInfo(deopt_table);
163 }
164
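As a reader's aid, here is a minimal standalone sketch (not part of this CL; the Environment struct below is a simplified stand-in for the VM class) of the slot order CreateDeoptInfo emits above: a four-slot frame header (PP, PC marker, caller FP, return address) plus locals for the innermost frame, the same header plus the inner call's outgoing arguments and locals for every outer frame, and finally the caller's PP/FP/PC and the incoming arguments for the outermost frame. Materialization arguments are ignored here.

// Illustrative only; mirrors the traversal order in CreateDeoptInfo above.
struct Environment {
  int length;                 // Fixed parameters plus locals.
  int fixed_parameter_count;  // Incoming/outgoing argument count.
  const Environment* outer;   // nullptr for the outermost environment.
};

int CountDeoptSlots(const Environment* innermost) {
  const Environment* current = innermost;
  int slots = 4;  // PP, PC marker, caller FP, return address.
  slots += current->length - current->fixed_parameter_count;
  const Environment* previous = current;
  for (current = current->outer; current != nullptr; current = current->outer) {
    slots += 4;                                // Frame header, as above.
    slots += previous->fixed_parameter_count;  // Outgoing args of inner call.
    slots += current->length - current->fixed_parameter_count;  // Locals.
    previous = current;
  }
  slots += 4;                                // Caller PP, PC marker, FP, PC.
  slots += previous->fixed_parameter_count;  // Incoming arguments.
  return slots;
}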
165
166 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
167 intptr_t stub_ix) {
168 // Calls do not need stubs, they share a deoptimization trampoline.
169 ASSERT(reason() != ICData::kDeoptAtCall);
170 Assembler* assembler = compiler->assembler();
171 #define __ assembler->
172 __ Comment("%s", Name());
173 __ Bind(entry_label());
174 if (FLAG_trap_on_deoptimization) {
175 __ break_(0);
176 }
177
178 ASSERT(deopt_env() != NULL);
179 __ Push(CODE_REG);
180 __ BranchLink(*StubCode::Deoptimize_entry());
181 set_pc_offset(assembler->CodeSize());
182 #undef __
183 }
184
185
186 #define __ assembler()->
187
188
189 // Fall through if bool_register contains null.
190 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
191 Label* is_true,
192 Label* is_false) {
193 __ Comment("BoolToJump");
194 Label fall_through;
195 __ BranchEqual(bool_register, Object::null_object(), &fall_through);
196 __ BranchEqual(bool_register, Bool::True(), is_true);
197 __ b(is_false);
198 __ Bind(&fall_through);
199 }
200
201
202 // A0: instance (must be preserved).
203 // A1: instantiator type arguments (if used).
204 // A2: function type arguments (if used).
205 // Clobbers A3.
206 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
207 TypeTestStubKind test_kind,
208 Register instance_reg,
209 Register instantiator_type_arguments_reg,
210 Register function_type_arguments_reg,
211 Register temp_reg,
212 Label* is_instance_lbl,
213 Label* is_not_instance_lbl) {
214 __ Comment("CallSubtypeTestStub");
215 ASSERT(instance_reg == A0);
216 ASSERT(temp_reg == kNoRegister); // Unused on MIPS.
217 const SubtypeTestCache& type_test_cache =
218 SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New());
219 __ LoadUniqueObject(A3, type_test_cache);
220 if (test_kind == kTestTypeOneArg) {
221 ASSERT(instantiator_type_arguments_reg == kNoRegister);
222 ASSERT(function_type_arguments_reg == kNoRegister);
223 __ BranchLink(*StubCode::Subtype1TestCache_entry());
224 } else if (test_kind == kTestTypeTwoArgs) {
225 ASSERT(instantiator_type_arguments_reg == kNoRegister);
226 ASSERT(function_type_arguments_reg == kNoRegister);
227 __ BranchLink(*StubCode::Subtype2TestCache_entry());
228 } else if (test_kind == kTestTypeFourArgs) {
229 ASSERT(instantiator_type_arguments_reg == A1);
230 ASSERT(function_type_arguments_reg == A2);
231 __ BranchLink(*StubCode::Subtype4TestCache_entry());
232 } else {
233 UNREACHABLE();
234 }
235 // Result is in V0: null -> not found, otherwise Bool::True or Bool::False.
236 GenerateBoolToJump(V0, is_instance_lbl, is_not_instance_lbl);
237 return type_test_cache.raw();
238 }
239
240
241 // Jumps to labels 'is_instance' or 'is_not_instance' respectively, if
242 // type test is conclusive, otherwise fallthrough if a type test could not
243 // be completed.
244 // A0: instance being type checked (preserved).
245 // Clobbers T0.
246 RawSubtypeTestCache*
247 FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
248 TokenPosition token_pos,
249 const AbstractType& type,
250 Label* is_instance_lbl,
251 Label* is_not_instance_lbl) {
252 __ Comment("InstantiatedTypeWithArgumentsTest");
253 ASSERT(type.IsInstantiated());
254 const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
255 ASSERT(type.IsFunctionType() || (type_class.NumTypeArguments() > 0));
256 const Register kInstanceReg = A0;
257 Error& bound_error = Error::Handle(zone());
258 const Type& int_type = Type::Handle(zone(), Type::IntType());
259 const bool smi_is_ok =
260 int_type.IsSubtypeOf(type, &bound_error, NULL, Heap::kOld);
261 // Malformed type should have been handled at graph construction time.
262 ASSERT(smi_is_ok || bound_error.IsNull());
263 __ andi(CMPRES1, kInstanceReg, Immediate(kSmiTagMask));
264 if (smi_is_ok) {
265 __ beq(CMPRES1, ZR, is_instance_lbl);
266 } else {
267 __ beq(CMPRES1, ZR, is_not_instance_lbl);
268 }
269 // A function type test requires checking the function signature.
270 if (!type.IsFunctionType()) {
271 const intptr_t num_type_args = type_class.NumTypeArguments();
272 const intptr_t num_type_params = type_class.NumTypeParameters();
273 const intptr_t from_index = num_type_args - num_type_params;
274 const TypeArguments& type_arguments =
275 TypeArguments::ZoneHandle(zone(), type.arguments());
276 const bool is_raw_type = type_arguments.IsNull() ||
277 type_arguments.IsRaw(from_index, num_type_params);
278 if (is_raw_type) {
279 const Register kClassIdReg = T0;
280 // dynamic type argument, check only classes.
281 __ LoadClassId(kClassIdReg, kInstanceReg);
282 __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl);
283 // List is a very common case.
284 if (IsListClass(type_class)) {
285 GenerateListTypeCheck(kClassIdReg, is_instance_lbl);
286 }
287 return GenerateSubtype1TestCacheLookup(
288 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
289 }
290 // If one type argument only, check if type argument is Object or dynamic.
291 if (type_arguments.Length() == 1) {
292 const AbstractType& tp_argument =
293 AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0));
294 ASSERT(!tp_argument.IsMalformed());
295 if (tp_argument.IsType()) {
296 ASSERT(tp_argument.HasResolvedTypeClass());
297 // Check if type argument is dynamic or Object.
298 const Type& object_type = Type::Handle(zone(), Type::ObjectType());
299 if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) {
300 // Only an instance class test is necessary.
301 return GenerateSubtype1TestCacheLookup(
302 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
303 }
304 }
305 }
306 }
307 // Regular subtype test cache involving instance's type arguments.
308 const Register kInstantiatorTypeArgumentsReg = kNoRegister;
309 const Register kFunctionTypeArgumentsReg = kNoRegister;
310 const Register kTempReg = kNoRegister;
311 // A0: instance (must be preserved).
312 return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg,
313 kInstantiatorTypeArgumentsReg,
314 kFunctionTypeArgumentsReg, kTempReg,
315 is_instance_lbl, is_not_instance_lbl);
316 }
317
318
319 void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
320 const GrowableArray<intptr_t>& class_ids,
321 Label* is_equal_lbl,
322 Label* is_not_equal_lbl) {
323 __ Comment("CheckClassIds");
324 for (intptr_t i = 0; i < class_ids.length(); i++) {
325 __ BranchEqual(class_id_reg, Immediate(class_ids[i]), is_equal_lbl);
326 }
327 __ b(is_not_equal_lbl);
328 }
329
330
331 // Testing against an instantiated type with no arguments, without
332 // SubtypeTestCache.
333 // A0: instance being type checked (preserved).
334 // Clobbers: T0, T1, T2
335 // Returns true if there is a fallthrough.
336 bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
337 TokenPosition token_pos,
338 const AbstractType& type,
339 Label* is_instance_lbl,
340 Label* is_not_instance_lbl) {
341 __ Comment("InstantiatedTypeNoArgumentsTest");
342 ASSERT(type.IsInstantiated());
343 if (type.IsFunctionType()) {
344 // Fallthrough.
345 return true;
346 }
347 const Class& type_class = Class::Handle(zone(), type.type_class());
348 ASSERT(type_class.NumTypeArguments() == 0);
349
350 const Register kInstanceReg = A0;
351 __ andi(T0, A0, Immediate(kSmiTagMask));
352 // If instance is Smi, check directly.
353 const Class& smi_class = Class::Handle(zone(), Smi::Class());
354 if (smi_class.IsSubtypeOf(Object::null_type_arguments(), type_class,
355 Object::null_type_arguments(), NULL, NULL,
356 Heap::kOld)) {
357 __ beq(T0, ZR, is_instance_lbl);
358 } else {
359 __ beq(T0, ZR, is_not_instance_lbl);
360 }
361 const Register kClassIdReg = T0;
362 __ LoadClassId(kClassIdReg, kInstanceReg);
363 // See ClassFinalizer::ResolveSuperTypeAndInterfaces for list of restricted
364 // interfaces.
365 // Bool interface can be implemented only by core class Bool.
366 if (type.IsBoolType()) {
367 __ BranchEqual(kClassIdReg, Immediate(kBoolCid), is_instance_lbl);
368 __ b(is_not_instance_lbl);
369 return false;
370 }
371 // Custom checking for numbers (Smi, Mint, Bigint and Double).
372 // Note that instance is not Smi (checked above).
373 if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
374 GenerateNumberTypeCheck(kClassIdReg, type, is_instance_lbl,
375 is_not_instance_lbl);
376 return false;
377 }
378 if (type.IsStringType()) {
379 GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl);
380 return false;
381 }
382 if (type.IsDartFunctionType()) {
383 // Check if instance is a closure.
384 __ BranchEqual(kClassIdReg, Immediate(kClosureCid), is_instance_lbl);
385 return true; // Fall through
386 }
387 // Compare if the classes are equal.
388 if (!type_class.is_abstract()) {
389 __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl);
390 }
391 // Otherwise fallthrough.
392 return true;
393 }
394
395
396 // Uses SubtypeTestCache to store instance class and result.
397 // A0: instance to test.
398 // Clobbers A1-A3, T0-T3.
399 // Immediate class test already done.
400 // TODO(srdjan): Implement a quicker subtype check, as type test
401 // arrays can grow too high, but they may be useful when optimizing
402 // code (type-feedback).
403 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
404 TokenPosition token_pos,
405 const Class& type_class,
406 Label* is_instance_lbl,
407 Label* is_not_instance_lbl) {
408 __ Comment("Subtype1TestCacheLookup");
409 const Register kInstanceReg = A0;
410 __ LoadClass(T0, kInstanceReg);
411 // T0: instance class.
412 // Check immediate superclass equality.
413 __ lw(T0, FieldAddress(T0, Class::super_type_offset()));
414 __ lw(T0, FieldAddress(T0, Type::type_class_id_offset()));
415 __ BranchEqual(T0, Immediate(Smi::RawValue(type_class.id())),
416 is_instance_lbl);
417
418 const Register kInstantiatorTypeArgumentsReg = kNoRegister;
419 const Register kFunctionTypeArgumentsReg = kNoRegister;
420 const Register kTempReg = kNoRegister;
421 return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg,
422 kInstantiatorTypeArgumentsReg,
423 kFunctionTypeArgumentsReg, kTempReg,
424 is_instance_lbl, is_not_instance_lbl);
425 }
426
427
428 // Generates inlined check if 'type' is a type parameter or type itself
429 // A0: instance (preserved).
430 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
431 TokenPosition token_pos,
432 const AbstractType& type,
433 Label* is_instance_lbl,
434 Label* is_not_instance_lbl) {
435 __ Comment("UninstantiatedTypeTest");
436 ASSERT(!type.IsInstantiated());
437 // Skip check if destination is a dynamic type.
438 if (type.IsTypeParameter()) {
439 const TypeParameter& type_param = TypeParameter::Cast(type);
440 __ lw(A1, Address(SP, 1 * kWordSize)); // Get instantiator type args.
441 __ lw(A2, Address(SP, 0 * kWordSize)); // Get function type args.
442 // A1: instantiator type arguments.
443 // A2: function type arguments.
444 const Register kTypeArgumentsReg =
445 type_param.IsClassTypeParameter() ? A1 : A2;
446 // Check if type arguments are null, i.e. equivalent to vector of dynamic.
447 __ LoadObject(T7, Object::null_object());
448 __ beq(kTypeArgumentsReg, T7, is_instance_lbl);
449 __ lw(T2, FieldAddress(kTypeArgumentsReg,
450 TypeArguments::type_at_offset(type_param.index())));
451 // T2: concrete type of the type parameter.
452 // Check if type argument is dynamic.
453 __ BranchEqual(T2, Object::dynamic_type(), is_instance_lbl);
454 __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::ObjectType()),
455 is_instance_lbl);
456 // TODO(regis): Optimize void type as well once allowed as type argument.
457
458 // For Smi check quickly against int and num interfaces.
459 Label not_smi;
460 __ andi(CMPRES1, A0, Immediate(kSmiTagMask));
461 __ bne(CMPRES1, ZR, &not_smi); // Value is Smi?
462 __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::IntType()),
463 is_instance_lbl);
464 __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::Number()),
465 is_instance_lbl);
466 // Smi must be handled in runtime.
467 Label fall_through;
468 __ b(&fall_through);
469
470 __ Bind(&not_smi);
471 // A0: instance.
472 // A1: instantiator type arguments.
473 // A2: function type arguments.
474 const Register kInstanceReg = A0;
475 const Register kInstantiatorTypeArgumentsReg = A1;
476 const Register kFunctionTypeArgumentsReg = A2;
477 const Register kTempReg = kNoRegister;
478 const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
479 zone(), GenerateCallSubtypeTestStub(
480 kTestTypeFourArgs, kInstanceReg,
481 kInstantiatorTypeArgumentsReg, kFunctionTypeArgumentsReg,
482 kTempReg, is_instance_lbl, is_not_instance_lbl));
483 __ Bind(&fall_through);
484 return type_test_cache.raw();
485 }
486 if (type.IsType()) {
487 const Register kInstanceReg = A0;
488 const Register kInstantiatorTypeArgumentsReg = A1;
489 const Register kFunctionTypeArgumentsReg = A2;
490 __ andi(CMPRES1, kInstanceReg, Immediate(kSmiTagMask));
491 __ beq(CMPRES1, ZR, is_not_instance_lbl); // Is instance Smi?
492 __ lw(kInstantiatorTypeArgumentsReg, Address(SP, 1 * kWordSize));
493 __ lw(kFunctionTypeArgumentsReg, Address(SP, 0 * kWordSize));
494 // Uninstantiated type class is known at compile time, but the type
495 // arguments are determined at runtime by the instantiator.
496 const Register kTempReg = kNoRegister;
497 return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg,
498 kInstantiatorTypeArgumentsReg,
499 kFunctionTypeArgumentsReg, kTempReg,
500 is_instance_lbl, is_not_instance_lbl);
501 }
502 return SubtypeTestCache::null();
503 }
504
505
506 // Inputs:
507 // - A0: instance being type checked (preserved).
508 // - A1: optional instantiator type arguments (preserved).
509 // - A2: optional function type arguments (preserved).
510 // Returns:
511 // - preserved instance in A0, optional instantiator type arguments in A1, and
512 // optional function type arguments in A2.
513 // Clobbers: T0, T1, T2
514 // Note that this inlined code must be followed by the runtime_call code, as it
515 // may fall through to it. Otherwise, this inline code will jump to the label
516 // is_instance or to the label is_not_instance.
517 RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
518 TokenPosition token_pos,
519 const AbstractType& type,
520 Label* is_instance_lbl,
521 Label* is_not_instance_lbl) {
522 __ Comment("InlineInstanceof");
523 if (type.IsInstantiated()) {
524 const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
525 // A class equality check is only applicable with a dst type (not a
526 // function type) of a non-parameterized class or with a raw dst type of
527 // a parameterized class.
528 if (type.IsFunctionType() || (type_class.NumTypeArguments() > 0)) {
529 return GenerateInstantiatedTypeWithArgumentsTest(
530 token_pos, type, is_instance_lbl, is_not_instance_lbl);
531 // Fall through to runtime call.
532 }
533 const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
534 token_pos, type, is_instance_lbl, is_not_instance_lbl);
535 if (has_fall_through) {
536 // If the test is non-conclusive so far, try the inlined type-test cache.
537 // 'type' is known at compile time.
538 return GenerateSubtype1TestCacheLookup(
539 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
540 } else {
541 return SubtypeTestCache::null();
542 }
543 }
544 return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl,
545 is_not_instance_lbl);
546 }
547
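For orientation, the three paths taken by GenerateInlineInstanceof above can be summarized by a small standalone sketch (the struct and enum below are hypothetical stand-ins, not VM types):

// Illustrative only; mirrors the dispatch in GenerateInlineInstanceof above.
struct TypeShape {
  bool is_instantiated;
  bool is_function_type;
  int num_type_arguments;  // Of the type's class.
};

enum class InstanceofPath {
  kInstantiatedWithArguments,  // GenerateInstantiatedTypeWithArgumentsTest.
  kInstantiatedNoArguments,    // GenerateInstantiatedTypeNoArgumentsTest,
                               // then the cache lookup on fall-through.
  kUninstantiated,             // GenerateUninstantiatedTypeTest.
};

InstanceofPath ChoosePath(const TypeShape& type) {
  if (!type.is_instantiated) return InstanceofPath::kUninstantiated;
  if (type.is_function_type || type.num_type_arguments > 0) {
    return InstanceofPath::kInstantiatedWithArguments;
  }
  return InstanceofPath::kInstantiatedNoArguments;
}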
548
549 // If instanceof type test cannot be performed successfully at compile time and
550 // therefore eliminated, optimize it by adding inlined tests for:
551 // - NULL -> return type == Null (type is not Object or dynamic).
552 // - Smi -> compile time subtype check (only if dst class is not parameterized).
553 // - Class equality (only if class is not parameterized).
554 // Inputs:
555 // - A0: object.
556 // - A1: instantiator type arguments or raw_null.
557 // - A2: function type arguments or raw_null.
558 // Returns:
559 // - true or false in V0.
560 void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
561 intptr_t deopt_id,
562 const AbstractType& type,
563 LocationSummary* locs) {
564 ASSERT(type.IsFinalized() && !type.IsMalformed() && !type.IsMalbounded());
565 ASSERT(!type.IsObjectType() && !type.IsDynamicType() && !type.IsVoidType());
566
567 // Preserve instantiator type arguments (A1) and function type arguments (A2).
568 __ addiu(SP, SP, Immediate(-2 * kWordSize));
569 __ sw(A1, Address(SP, 1 * kWordSize));
570 __ sw(A2, Address(SP, 0 * kWordSize));
571
572 Label is_instance, is_not_instance;
573 // If type is instantiated and non-parameterized, we can inline code
574 // checking whether the tested instance is a Smi.
575 if (type.IsInstantiated()) {
576 // A null object is only an instance of Null, Object, and dynamic.
577 // Object and dynamic have already been checked above (if the type is
578 // instantiated). So we can return false here if the instance is null,
579 // unless the type is Null (and if the type is instantiated).
580 // We can only inline this null check if the type is instantiated at compile
581 // time, since an uninstantiated type at compile time could be Null, Object,
582 // or dynamic at run time.
583 __ BranchEqual(A0, Object::null_object(),
584 type.IsNullType() ? &is_instance : &is_not_instance);
585 }
586
587 // Generate inline instanceof test.
588 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
589 test_cache =
590 GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);
591
592 // test_cache is null if there is no fall-through.
593 Label done;
594 if (!test_cache.IsNull()) {
595 // Generate runtime call.
596 __ lw(A1, Address(SP, 1 * kWordSize)); // Get instantiator type args.
597 __ lw(A2, Address(SP, 0 * kWordSize)); // Get function type args.
598 __ addiu(SP, SP, Immediate(-6 * kWordSize));
599 __ LoadObject(TMP, Object::null_object());
600 __ sw(TMP, Address(SP, 5 * kWordSize)); // Make room for the result.
601 __ sw(A0, Address(SP, 4 * kWordSize)); // Push the instance.
602 __ LoadObject(TMP, type);
603 __ sw(TMP, Address(SP, 3 * kWordSize)); // Push the type.
604 __ sw(A1, Address(SP, 2 * kWordSize)); // Push instantiator type args.
605 __ sw(A2, Address(SP, 1 * kWordSize)); // Push function type args.
606 __ LoadUniqueObject(A0, test_cache);
607 __ sw(A0, Address(SP, 0 * kWordSize));
608 GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 5, locs);
609 // Pop the parameters supplied to the runtime entry. The result of the
610 // instanceof runtime call will be left as the result of the operation.
611 __ lw(V0, Address(SP, 5 * kWordSize));
612 __ b(&done);
613 __ delay_slot()->addiu(SP, SP, Immediate(6 * kWordSize));
614 }
615 __ Bind(&is_not_instance);
616 __ LoadObject(V0, Bool::Get(false));
617 __ b(&done);
618
619 __ Bind(&is_instance);
620 __ LoadObject(V0, Bool::Get(true));
621 __ Bind(&done);
622 // Remove instantiator type arguments and function type arguments.
623 __ Drop(2);
624 }
625
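The runtime call emitted above passes five arguments plus a result slot on the stack; as a sketch, the slot indices (words relative to SP after the addiu, taken directly from the stores above) are:

// Illustrative only: SP-relative word slots built for the instanceof
// runtime call above.
enum InstanceofCallSlot {
  kCacheSlot = 0,                 // SubtypeTestCache (from LoadUniqueObject).
  kFunctionTypeArgsSlot = 1,      // A2.
  kInstantiatorTypeArgsSlot = 2,  // A1.
  kTypeSlot = 3,                  // The tested type.
  kInstanceSlot = 4,              // A0.
  kResultSlot = 5,                // null on entry; reloaded into V0 afterwards.
};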
626
627 // Optimize assignable type check by adding inlined tests for:
628 // - NULL -> return NULL.
629 // - Smi -> compile time subtype check (only if dst class is not parameterized).
630 // - Class equality (only if class is not parameterized).
631 // Inputs:
632 // - A0: instance being type checked.
633 // - A1: instantiator type arguments or raw_null.
634 // - A2: function type arguments or raw_null.
635 // Returns:
636 // - object in A0 for successful assignable check (or throws TypeError).
637 // Clobbers: T0, T1, T2
638 // Performance notes: positive checks must be quick, negative checks can be slow
639 // as they throw an exception.
640 void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
641 intptr_t deopt_id,
642 const AbstractType& dst_type,
643 const String& dst_name,
644 LocationSummary* locs) {
645 __ Comment("AssertAssignable");
646 ASSERT(!token_pos.IsClassifying());
647 ASSERT(!dst_type.IsNull());
648 ASSERT(dst_type.IsFinalized());
649 // Assignable check is skipped in FlowGraphBuilder, not here.
650 ASSERT(dst_type.IsMalformedOrMalbounded() ||
651 (!dst_type.IsDynamicType() && !dst_type.IsObjectType() &&
652 !dst_type.IsVoidType()));
653
654 // Preserve instantiator type arguments (A1) and function type arguments (A2).
655 __ addiu(SP, SP, Immediate(-2 * kWordSize));
656 __ sw(A1, Address(SP, 1 * kWordSize));
657 __ sw(A2, Address(SP, 0 * kWordSize));
658
659 // A null object is always assignable and is returned as result.
660 Label is_assignable, runtime_call;
661
662 __ BranchEqual(A0, Object::null_object(), &is_assignable);
663
664 // Generate throw new TypeError() if the type is malformed or malbounded.
665 if (dst_type.IsMalformedOrMalbounded()) {
666 __ addiu(SP, SP, Immediate(-4 * kWordSize));
667 __ LoadObject(TMP, Object::null_object());
668 __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result.
669 __ sw(A0, Address(SP, 2 * kWordSize)); // Push the source object.
670 __ LoadObject(TMP, dst_name);
671 __ sw(TMP, Address(SP, 1 * kWordSize)); // Push the destination name.
672 __ LoadObject(TMP, dst_type);
673 __ sw(TMP, Address(SP, 0 * kWordSize)); // Push the destination type.
674
675 GenerateRuntimeCall(token_pos, deopt_id, kBadTypeErrorRuntimeEntry, 3,
676 locs);
677 // We should never return here.
678 __ break_(0);
679
680 __ Bind(&is_assignable); // For a null object.
681 __ lw(A1, Address(SP, 1 * kWordSize)); // Restore instantiator type args.
682 __ lw(A2, Address(SP, 0 * kWordSize)); // Restore function type args.
683 __ addiu(SP, SP, Immediate(2 * kWordSize));
684 return;
685 }
686
687 // Generate inline type check, linking to runtime call if not assignable.
688 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
689 test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable,
690 &runtime_call);
691
692 __ Bind(&runtime_call);
693 __ lw(A1, Address(SP, 1 * kWordSize)); // Load instantiator type args.
694 __ lw(A2, Address(SP, 0 * kWordSize)); // Load function type args.
695
696 __ addiu(SP, SP, Immediate(-7 * kWordSize));
697 __ LoadObject(TMP, Object::null_object());
698 __ sw(TMP, Address(SP, 6 * kWordSize)); // Make room for the result.
699 __ sw(A0, Address(SP, 5 * kWordSize)); // Push the source object.
700 __ LoadObject(TMP, dst_type);
701 __ sw(TMP, Address(SP, 4 * kWordSize)); // Push the type of the destination.
702 __ sw(A1, Address(SP, 3 * kWordSize)); // Push instantiator type args.
703 __ sw(A2, Address(SP, 2 * kWordSize)); // Push function type args.
704 __ LoadObject(TMP, dst_name);
705 __ sw(TMP, Address(SP, 1 * kWordSize)); // Push the name of the destination.
706 __ LoadUniqueObject(T0, test_cache);
707 __ sw(T0, Address(SP, 0 * kWordSize));
708
709 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs);
710 // Pop the parameters supplied to the runtime entry. The result of the
711 // type check runtime call is the checked value.
712 __ lw(A0, Address(SP, 6 * kWordSize));
713 __ addiu(SP, SP, Immediate(7 * kWordSize));
714
715 __ Bind(&is_assignable);
716 __ lw(A1, Address(SP, 1 * kWordSize)); // Restore instantiator type args.
717 __ lw(A2, Address(SP, 0 * kWordSize)); // Restore function type args.
718 __ addiu(SP, SP, Immediate(2 * kWordSize));
719 }
720
721
722 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
723 if (is_optimizing()) return;
724 Definition* defn = instr->AsDefinition();
725 if ((defn != NULL) && defn->HasTemp()) {
726 __ Push(defn->locs()->out(0).reg());
727 }
728 }
729
730
731 // Input parameters:
732 // S4: arguments descriptor array.
733 void FlowGraphCompiler::CopyParameters() {
734 __ Comment("Copy parameters");
735 const Function& function = parsed_function().function();
736 LocalScope* scope = parsed_function().node_sequence()->scope();
737 const int num_fixed_params = function.num_fixed_parameters();
738 const int num_opt_pos_params = function.NumOptionalPositionalParameters();
739 const int num_opt_named_params = function.NumOptionalNamedParameters();
740 const int num_params =
741 num_fixed_params + num_opt_pos_params + num_opt_named_params;
742 ASSERT(function.NumParameters() == num_params);
743 ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp);
744
745 // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args,
746 // where num_pos_args is the number of positional arguments passed in.
747 const int min_num_pos_args = num_fixed_params;
748 const int max_num_pos_args = num_fixed_params + num_opt_pos_params;
749
750 __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
751 // Check that min_num_pos_args <= num_pos_args.
752 Label wrong_num_arguments;
753 __ BranchSignedLess(T2, Immediate(Smi::RawValue(min_num_pos_args)),
754 &wrong_num_arguments);
755
756 // Check that num_pos_args <= max_num_pos_args.
757 __ BranchSignedGreater(T2, Immediate(Smi::RawValue(max_num_pos_args)),
758 &wrong_num_arguments);
759
760 // Copy positional arguments.
761 // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied
762 // to fp[kFirstLocalSlotFromFp - i].
763
764 __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
765 // Since T1 and T2 are Smi, use sll 1 instead of sll 2.
766 // Let T1 point to the last passed positional argument, i.e. to
767 // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)].
768 __ subu(T1, T1, T2);
769 __ sll(T1, T1, 1);
770 __ addu(T1, FP, T1);
771 __ AddImmediate(T1, (kParamEndSlotFromFp + 1) * kWordSize);
772
773 // Let T0 point to the last copied positional argument, i.e. to
774 // fp[kFirstLocalSlotFromFp - (num_pos_args - 1)].
775 __ AddImmediate(T0, FP, (kFirstLocalSlotFromFp + 1) * kWordSize);
776 __ sll(T2, T2, 1); // T2 is a Smi.
777
778 __ Comment("Argument Copy Loop");
779 Label loop, loop_exit;
780 __ blez(T2, &loop_exit);
781 __ delay_slot()->subu(T0, T0, T2);
782 __ Bind(&loop);
783 __ addu(T4, T1, T2);
784 __ lw(T3, Address(T4, -kWordSize));
785 __ addiu(T2, T2, Immediate(-kWordSize));
786 __ addu(T5, T0, T2);
787 __ bgtz(T2, &loop);
788 __ delay_slot()->sw(T3, Address(T5));
789 __ Bind(&loop_exit);
790
791 // Copy or initialize optional named arguments.
792 Label all_arguments_processed;
793 #ifdef DEBUG
794 const bool check_correct_named_args = true;
795 #else
796 const bool check_correct_named_args = function.IsClosureFunction();
797 #endif
798 if (num_opt_named_params > 0) {
799 __ Comment("There are named parameters");
800 // Start by alphabetically sorting the names of the optional parameters.
801 LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
802 int* opt_param_position = new int[num_opt_named_params];
803 for (int pos = num_fixed_params; pos < num_params; pos++) {
804 LocalVariable* parameter = scope->VariableAt(pos);
805 const String& opt_param_name = parameter->name();
806 int i = pos - num_fixed_params;
807 while (--i >= 0) {
808 LocalVariable* param_i = opt_param[i];
809 const intptr_t result = opt_param_name.CompareTo(param_i->name());
810 ASSERT(result != 0);
811 if (result > 0) break;
812 opt_param[i + 1] = opt_param[i];
813 opt_param_position[i + 1] = opt_param_position[i];
814 }
815 opt_param[i + 1] = parameter;
816 opt_param_position[i + 1] = pos;
817 }
818 // Generate code handling each optional parameter in alphabetical order.
819 __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
820 // Let T1 point to the first passed argument, i.e. to
821 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (T1) is Smi.
822 __ sll(T3, T1, 1);
823 __ addu(T1, FP, T3);
824 __ AddImmediate(T1, kParamEndSlotFromFp * kWordSize);
825 // Let T0 point to the entry of the first named argument.
826 __ AddImmediate(T0, S4, ArgumentsDescriptor::first_named_entry_offset() -
827 kHeapObjectTag);
828 for (int i = 0; i < num_opt_named_params; i++) {
829 Label load_default_value, assign_optional_parameter;
830 const int param_pos = opt_param_position[i];
831 // Check if this named parameter was passed in.
832 // Load T3 with the name of the argument.
833 __ lw(T3, Address(T0, ArgumentsDescriptor::name_offset()));
834 ASSERT(opt_param[i]->name().IsSymbol());
835 __ BranchNotEqual(T3, opt_param[i]->name(), &load_default_value);
836
837 // Load T3 with passed-in argument at provided arg_pos, i.e. at
838 // fp[kParamEndSlotFromFp + num_args - arg_pos].
839 __ lw(T3, Address(T0, ArgumentsDescriptor::position_offset()));
840 // T3 is arg_pos as Smi.
841 // Point to next named entry.
842 __ AddImmediate(T0, ArgumentsDescriptor::named_entry_size());
843 __ subu(T3, ZR, T3);
844 __ sll(T3, T3, 1);
845 __ addu(T3, T1, T3);
846 __ b(&assign_optional_parameter);
847 __ delay_slot()->lw(T3, Address(T3));
848
849 __ Bind(&load_default_value);
850 // Load T3 with default argument.
851 const Instance& value = parsed_function().DefaultParameterValueAt(
852 param_pos - num_fixed_params);
853 __ LoadObject(T3, value);
854 __ Bind(&assign_optional_parameter);
855 // Assign T3 to fp[kFirstLocalSlotFromFp - param_pos].
856 // We do not use the final allocation index of the variable here, i.e.
857 // scope->VariableAt(i)->index(), because captured variables still need
858 // to be copied to the context that is not yet allocated.
859 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
860 __ sw(T3, Address(FP, computed_param_pos * kWordSize));
861 }
862 delete[] opt_param;
863 delete[] opt_param_position;
864 if (check_correct_named_args) {
865 // Check that T0 now points to the null terminator in the arguments
866 // descriptor.
867 __ lw(T3, Address(T0));
868 __ BranchEqual(T3, Object::null_object(), &all_arguments_processed);
869 }
870 } else {
871 ASSERT(num_opt_pos_params > 0);
872 __ Comment("There are optional positional parameters");
873 __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
874 __ SmiUntag(T2);
875 for (int i = 0; i < num_opt_pos_params; i++) {
876 Label next_parameter;
877 // Handle this optional positional parameter only if k or fewer positional
878 // arguments have been passed, where k is param_pos, the position of this
879 // optional parameter in the formal parameter list.
880 const int param_pos = num_fixed_params + i;
881 __ BranchSignedGreater(T2, Immediate(param_pos), &next_parameter);
882 // Load T3 with default argument.
883 const Object& value = parsed_function().DefaultParameterValueAt(i);
884 __ LoadObject(T3, value);
885 // Assign T3 to fp[kFirstLocalSlotFromFp - param_pos].
886 // We do not use the final allocation index of the variable here, i.e.
887 // scope->VariableAt(i)->index(), because captured variables still need
888 // to be copied to the context that is not yet allocated.
889 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
890 __ sw(T3, Address(FP, computed_param_pos * kWordSize));
891 __ Bind(&next_parameter);
892 }
893 if (check_correct_named_args) {
894 __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
895 __ SmiUntag(T1);
896 // Check that T2 equals T1, i.e. no named arguments passed.
897 __ beq(T2, T1, &all_arguments_processed);
898 }
899 }
900
901 __ Bind(&wrong_num_arguments);
902 if (function.IsClosureFunction()) {
903 __ LeaveDartFrame(kKeepCalleePP); // Arguments are still on the stack.
904 __ Branch(*StubCode::CallClosureNoSuchMethod_entry());
905 // The noSuchMethod call may return to the caller, but not here.
906 } else if (check_correct_named_args) {
907 __ Stop("Wrong arguments");
908 }
909
910 __ Bind(&all_arguments_processed);
911 // Nullify originally passed arguments only after they have been copied and
912 // checked, otherwise noSuchMethod would not see their original values.
913 // This step can be skipped in case we decide that formal parameters are
914 // implicitly final, since garbage collecting the unmodified value is not
915 // an issue anymore.
916
917 // S4 : arguments descriptor array.
918 __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
919 __ sll(T2, T2, 1); // T2 is a Smi.
920
921 __ Comment("Null arguments loop");
922 Label null_args_loop, null_args_loop_exit;
923 __ blez(T2, &null_args_loop_exit);
924 __ delay_slot()->addiu(T1, FP,
925 Immediate((kParamEndSlotFromFp + 1) * kWordSize));
926 __ Bind(&null_args_loop);
927 __ addiu(T2, T2, Immediate(-kWordSize));
928 __ addu(T3, T1, T2);
929 __ LoadObject(T5, Object::null_object());
930 __ bgtz(T2, &null_args_loop);
931 __ delay_slot()->sw(T5, Address(T3));
932 __ Bind(&null_args_loop_exit);
933 }
934
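The alphabetical ordering of the optional named parameters above is produced by a straightforward insertion sort over (name, formal position) pairs. A minimal standalone sketch of the same idea, simplified to std::string/int rather than the VM's LocalVariable handling:

// Illustrative only; behaves like the opt_param/opt_param_position sort above.
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

void SortNamedParameters(std::vector<std::pair<std::string, int> >* params) {
  for (std::size_t pos = 1; pos < params->size(); ++pos) {
    const std::pair<std::string, int> key = (*params)[pos];
    std::size_t i = pos;
    while (i > 0 && (*params)[i - 1].first > key.first) {
      (*params)[i] = (*params)[i - 1];  // Shift larger names one slot right.
      --i;
    }
    (*params)[i] = key;  // Insert at its alphabetical position.
  }
}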
935
936 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
937 // RA: return address.
938 // SP: receiver.
939 // The sequence node has one return node; its input is a load-field node.
940 __ Comment("Inlined Getter");
941 __ lw(V0, Address(SP, 0 * kWordSize));
942 __ LoadFieldFromOffset(V0, V0, offset);
943 __ Ret();
944 }
945
946
947 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
948 // RA: return address.
949 // SP+1: receiver.
950 // SP+0: value.
951 // Sequence node has one store node and one return NULL node.
952 __ Comment("Inlined Setter");
953 __ lw(T0, Address(SP, 1 * kWordSize)); // Receiver.
954 __ lw(T1, Address(SP, 0 * kWordSize)); // Value.
955 __ StoreIntoObjectOffset(T0, offset, T1);
956 __ LoadObject(V0, Object::null_object());
957 __ Ret();
958 }
959
960
961 static const Register new_pp = T7;
962
963
964 void FlowGraphCompiler::EmitFrameEntry() {
965 const Function& function = parsed_function().function();
966 if (CanOptimizeFunction() && function.IsOptimizable() &&
967 (!is_optimizing() || may_reoptimize())) {
968 __ Comment("Invocation Count Check");
969 const Register function_reg = T0;
970
971 // Temporarily set up the pool pointer for this Dart function.
972 __ LoadPoolPointer(new_pp);
973 // Load function object from object pool.
974 __ LoadFunctionFromCalleePool(function_reg, function, new_pp);
975
976 __ lw(T1, FieldAddress(function_reg, Function::usage_counter_offset()));
977 // Reoptimization of an optimized function is triggered by counting in
978 // IC stubs, but not at the entry of the function.
979 if (!is_optimizing()) {
980 __ addiu(T1, T1, Immediate(1));
981 __ sw(T1, FieldAddress(function_reg, Function::usage_counter_offset()));
982 }
983
984 // Skip Branch if T1 is less than the threshold.
985 Label dont_branch;
986 __ BranchSignedLess(T1, Immediate(GetOptimizationThreshold()),
987 &dont_branch);
988
989 ASSERT(function_reg == T0);
990 __ Branch(*StubCode::OptimizeFunction_entry(), new_pp);
991
992 __ Bind(&dont_branch);
993 }
994 __ Comment("Enter frame");
995 if (flow_graph().IsCompiledForOsr()) {
996 intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
997 flow_graph().num_copied_params();
998 ASSERT(extra_slots >= 0);
999 __ EnterOsrFrame(extra_slots * kWordSize);
1000 } else {
1001 ASSERT(StackSize() >= 0);
1002 __ EnterDartFrame(StackSize() * kWordSize);
1003 }
1004 }
1005
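The invocation-count check above boils down to the following decision, sketched here with plain ints standing in for the usage-counter field and GetOptimizationThreshold() (illustrative only, not the VM API):

// Illustrative only; models the check emitted by EmitFrameEntry above.
bool ShouldCallOptimizeStub(int* usage_counter, int threshold,
                            bool is_optimizing) {
  if (!is_optimizing) {
    ++*usage_counter;  // Reoptimization is counted in IC stubs, not here.
  }
  return *usage_counter >= threshold;  // Otherwise skip to dont_branch.
}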
1006
1007 // Input parameters:
1008 // RA: return address.
1009 // SP: address of last argument.
1010 // FP: caller's frame pointer.
1011 // PP: caller's pool pointer.
1012 // S5: ic-data.
1013 // S4: arguments descriptor array.
1014 void FlowGraphCompiler::CompileGraph() {
1015 InitCompiler();
1016 const Function& function = parsed_function().function();
1017
1018 #ifdef DART_PRECOMPILER
1019 if (function.IsDynamicFunction()) {
1020 __ MonomorphicCheckedEntry();
1021 }
1022 #endif // DART_PRECOMPILER
1023
1024 if (TryIntrinsify()) {
1025 // Skip regular code generation.
1026 return;
1027 }
1028
1029 EmitFrameEntry();
1030 ASSERT(assembler()->constant_pool_allowed());
1031
1032 const int num_fixed_params = function.num_fixed_parameters();
1033 const int num_copied_params = parsed_function().num_copied_params();
1034 const int num_locals = parsed_function().num_stack_locals();
1035
1036 // We check the number of passed arguments when we have to copy them due to
1037 // the presence of optional parameters.
1038 // No such checking code is generated if only fixed parameters are declared,
1039 // unless we are in debug mode or unless we are compiling a closure.
1040 if (num_copied_params == 0) {
1041 const bool check_arguments =
1042 function.IsClosureFunction() && !flow_graph().IsCompiledForOsr();
1043 if (check_arguments) {
1044 __ Comment("Check argument count");
1045 // Check that exactly num_fixed arguments are passed in.
1046 Label correct_num_arguments, wrong_num_arguments;
1047 __ lw(T0, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
1048 __ BranchNotEqual(T0, Immediate(Smi::RawValue(num_fixed_params)),
1049 &wrong_num_arguments);
1050
1051 __ lw(T1,
1052 FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
1053 __ beq(T0, T1, &correct_num_arguments);
1054 __ Bind(&wrong_num_arguments);
1055 __ LeaveDartFrame(kKeepCalleePP); // Arguments are still on the stack.
1056 __ Branch(*StubCode::CallClosureNoSuchMethod_entry());
1057 // The noSuchMethod call may return to the caller, but not here.
1058 __ Bind(&correct_num_arguments);
1059 }
1060 } else if (!flow_graph().IsCompiledForOsr()) {
1061 CopyParameters();
1062 }
1063
1064 if (function.IsClosureFunction() && !flow_graph().IsCompiledForOsr()) {
1065 // Load context from the closure object (first argument).
1066 LocalScope* scope = parsed_function().node_sequence()->scope();
1067 LocalVariable* closure_parameter = scope->VariableAt(0);
1068 __ lw(CTX, Address(FP, closure_parameter->index() * kWordSize));
1069 __ lw(CTX, FieldAddress(CTX, Closure::context_offset()));
1070 }
1071
1072 // In unoptimized code, initialize (non-argument) stack allocated slots to
1073 // null.
1074 if (!is_optimizing()) {
1075 ASSERT(num_locals > 0); // There is always at least context_var.
1076 __ Comment("Initialize spill slots");
1077 const intptr_t slot_base = parsed_function().first_stack_local_index();
1078 const intptr_t context_index =
1079 parsed_function().current_context_var()->index();
1080 if (num_locals > 1) {
1081 __ LoadObject(V0, Object::null_object());
1082 }
1083 for (intptr_t i = 0; i < num_locals; ++i) {
1084 // Subtract index i (locals lie at lower addresses than FP).
1085 if (((slot_base - i) == context_index)) {
1086 if (function.IsClosureFunction()) {
1087 __ sw(CTX, Address(FP, (slot_base - i) * kWordSize));
1088 } else {
1089 __ LoadObject(V1, Object::empty_context());
1090 __ sw(V1, Address(FP, (slot_base - i) * kWordSize));
1091 }
1092 } else {
1093 ASSERT(num_locals > 1);
1094 __ sw(V0, Address(FP, (slot_base - i) * kWordSize));
1095 }
1096 }
1097 }
1098
1099 EndCodeSourceRange(TokenPosition::kDartCodePrologue);
1100 VisitBlocks();
1101
1102 __ break_(0);
1103 GenerateDeferredCode();
1104 }
1105
1106
1107 void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
1108 const StubEntry& stub_entry,
1109 RawPcDescriptors::Kind kind,
1110 LocationSummary* locs) {
1111 __ BranchLink(stub_entry);
1112 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
1113 }
1114
1115
1116 void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
1117 const StubEntry& stub_entry,
1118 RawPcDescriptors::Kind kind,
1119 LocationSummary* locs) {
1120 __ BranchLinkPatchable(stub_entry);
1121 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
1122 }
1123
1124
1125 void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
1126 TokenPosition token_pos,
1127 const StubEntry& stub_entry,
1128 RawPcDescriptors::Kind kind,
1129 LocationSummary* locs) {
1130 __ BranchLinkPatchable(stub_entry);
1131 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs);
1132 // Marks either the continuation point in unoptimized code or the
1133 // deoptimization point in optimized code, after call.
1134 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1135 if (is_optimizing()) {
1136 AddDeoptIndexAtCall(deopt_id_after);
1137 } else {
1138 // Add deoptimization continuation point after the call and before the
1139 // arguments are removed.
1140 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1141 }
1142 }
1143
1144
1145 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
1146 TokenPosition token_pos,
1147 const StubEntry& stub_entry,
1148 RawPcDescriptors::Kind kind,
1149 LocationSummary* locs,
1150 const Function& target) {
1151 // Call sites to the same target can share object pool entries. These
1152 // call sites are never patched for breakpoints: the function is deoptimized
1153 // and the unoptimized code with IC calls for static calls is patched instead.
1154 ASSERT(is_optimizing());
1155 __ BranchLinkWithEquivalence(stub_entry, target);
1156
1157 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs);
1158 // Marks either the continuation point in unoptimized code or the
1159 // deoptimization point in optimized code, after call.
1160 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1161 if (is_optimizing()) {
1162 AddDeoptIndexAtCall(deopt_id_after);
1163 } else {
1164 // Add deoptimization continuation point after the call and before the
1165 // arguments are removed.
1166 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1167 }
1168 AddStaticCallTarget(target);
1169 }
1170
1171
1172 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
1173 intptr_t deopt_id,
1174 const RuntimeEntry& entry,
1175 intptr_t argument_count,
1176 LocationSummary* locs) {
1177 __ CallRuntime(entry, argument_count);
1178 EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
1179 if (deopt_id != Thread::kNoDeoptId) {
1180 // Marks either the continuation point in unoptimized code or the
1181 // deoptimization point in optimized code, after call.
1182 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1183 if (is_optimizing()) {
1184 AddDeoptIndexAtCall(deopt_id_after);
1185 } else {
1186 // Add deoptimization continuation point after the call and before the
1187 // arguments are removed.
1188 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1189 }
1190 }
1191 }
1192
1193
1194 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
1195 // We do not check for overflow when incrementing the edge counter. The
1196 // function should normally be optimized long before the counter can
1197 // overflow; and though we do not reset the counters when we optimize or
1198 // deoptimize, there is a bound on the number of
1199 // optimization/deoptimization cycles we will attempt.
1200 ASSERT(!edge_counters_array_.IsNull());
1201 __ Comment("Edge counter");
1202 __ LoadObject(T0, edge_counters_array_);
1203 __ LoadFieldFromOffset(T1, T0, Array::element_offset(edge_id));
1204 __ AddImmediate(T1, T1, Smi::RawValue(1));
1205 __ StoreFieldToOffset(T1, T0, Array::element_offset(edge_id));
1206 }
1207
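The increment above adds Smi::RawValue(1) directly to the tagged counter, which works because Smis keep their payload shifted left by one with a zero tag bit. A small sanity check of that arithmetic (illustrative only, not the VM's Smi class):

// Illustrative only: adding the tagged constant for 1 bumps the payload by 1.
#include <cstdint>

constexpr intptr_t SmiTag(intptr_t value) { return value << 1; }
constexpr intptr_t SmiUntag(intptr_t raw) { return raw >> 1; }

static_assert(SmiUntag(SmiTag(41) + SmiTag(1)) == 42,
              "tagged Smi addition needs no untagging");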
1208
1209 void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
1210 const ICData& ic_data,
1211 intptr_t argument_count,
1212 intptr_t deopt_id,
1213 TokenPosition token_pos,
1214 LocationSummary* locs) {
1215 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
1216 // Each ICData propagated from unoptimized to optimized code contains the
1217 // function that corresponds to the Dart function of that IC call. Due
1218 // to inlining in optimized code, that function may not correspond to the
1219 // top-level function (parsed_function().function()), which could be
1220 // reoptimized and whose counter needs to be incremented.
1221 // Pass the function explicitly; it is used by the IC stub.
1222 __ Comment("OptimizedInstanceCall");
1223 __ LoadObject(T0, parsed_function().function());
1224 __ LoadUniqueObject(S5, ic_data);
1225 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
1226 locs);
1227 __ Drop(argument_count);
1228 }
1229
1230
1231 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
1232 const ICData& ic_data,
1233 intptr_t argument_count,
1234 intptr_t deopt_id,
1235 TokenPosition token_pos,
1236 LocationSummary* locs) {
1237 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
1238 __ Comment("InstanceCall");
1239 __ LoadUniqueObject(S5, ic_data);
1240 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
1241 locs);
1242 __ Comment("InstanceCall return");
1243 __ Drop(argument_count);
1244 }
1245
1246
1247 void FlowGraphCompiler::EmitMegamorphicInstanceCall(
1248 const String& name,
1249 const Array& arguments_descriptor,
1250 intptr_t argument_count,
1251 intptr_t deopt_id,
1252 TokenPosition token_pos,
1253 LocationSummary* locs,
1254 intptr_t try_index,
1255 intptr_t slow_path_argument_count) {
1256 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
1257 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
1258 zone(),
1259 MegamorphicCacheTable::Lookup(isolate(), name, arguments_descriptor));
1260
1261 __ Comment("MegamorphicCall");
1262 // Load receiver into T0.
1263 __ lw(T0, Address(SP, (argument_count - 1) * kWordSize));
1264 __ LoadObject(S5, cache);
1265 __ lw(T9, Address(THR, Thread::megamorphic_call_checked_entry_offset()));
1266 __ jalr(T9);
1267
1268 RecordSafepoint(locs, slow_path_argument_count);
1269 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1270 if (FLAG_precompiled_mode) {
1271 // Megamorphic calls may occur in slow path stubs.
1272 // If valid, use the try_index argument.
1273 if (try_index == CatchClauseNode::kInvalidTryIndex) {
1274 try_index = CurrentTryIndex();
1275 }
1276 AddDescriptor(RawPcDescriptors::kOther, assembler()->CodeSize(),
1277 Thread::kNoDeoptId, token_pos, try_index);
1278 } else if (is_optimizing()) {
1279 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
1280 token_pos);
1281 AddDeoptIndexAtCall(deopt_id_after);
1282 } else {
1283 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
1284 token_pos);
1285 // Add deoptimization continuation point after the call and before the
1286 // arguments are removed.
1287 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1288 }
1289 EmitCatchEntryState(pending_deoptimization_env_, try_index);
1290 __ Drop(argument_count);
1291 }
1292
1293
1294 void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
1295 intptr_t argument_count,
1296 intptr_t deopt_id,
1297 TokenPosition token_pos,
1298 LocationSummary* locs) {
1299 ASSERT(ic_data.NumArgsTested() == 1);
1300 const Code& initial_stub =
1301 Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code());
1302
1303 __ Comment("SwitchableCall");
1304 __ lw(T0, Address(SP, (argument_count - 1) * kWordSize));
1305 __ LoadUniqueObject(CODE_REG, initial_stub);
1306 __ lw(T9, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
1307 __ LoadUniqueObject(S5, ic_data);
1308 __ jalr(T9);
1309
1310 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, RawPcDescriptors::kOther,
1311 locs);
1312 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1313 if (is_optimizing()) {
1314 AddDeoptIndexAtCall(deopt_id_after);
1315 } else {
1316 // Add deoptimization continuation point after the call and before the
1317 // arguments are removed.
1318 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1319 }
1320 __ Drop(argument_count);
1321 }
1322
1323
1324 void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
1325 intptr_t deopt_id,
1326 TokenPosition token_pos,
1327 LocationSummary* locs,
1328 const ICData& ic_data) {
1329 const StubEntry* stub_entry =
1330 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
1331 __ LoadObject(S5, ic_data);
1332 GenerateDartCall(deopt_id, token_pos, *stub_entry,
1333 RawPcDescriptors::kUnoptStaticCall, locs);
1334 __ Drop(argument_count);
1335 }
1336
1337
1338 void FlowGraphCompiler::EmitOptimizedStaticCall(
1339 const Function& function,
1340 const Array& arguments_descriptor,
1341 intptr_t argument_count,
1342 intptr_t deopt_id,
1343 TokenPosition token_pos,
1344 LocationSummary* locs) {
1345 __ Comment("StaticCall");
1346 ASSERT(!function.IsClosureFunction());
1347 if (function.HasOptionalParameters()) {
1348 __ LoadObject(S4, arguments_descriptor);
1349 } else {
1350 __ LoadImmediate(S4, 0); // GC safe smi zero because of stub.
1351 }
1352 // Do not use the code from the function, but let the code be patched so that
1353 // we can record the outgoing edges to other code.
1354 GenerateStaticDartCall(deopt_id, token_pos,
1355 *StubCode::CallStaticFunction_entry(),
1356 RawPcDescriptors::kOther, locs, function);
1357 __ Drop(argument_count);
1358 }
1359
1360
1361 Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
1362 Register reg,
1363 const Object& obj,
1364 bool needs_number_check,
1365 TokenPosition token_pos) {
1366 __ Comment("EqualityRegConstCompare");
1367 ASSERT(!needs_number_check ||
1368 (!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()));
1369 if (needs_number_check) {
1370 ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint());
1371 __ addiu(SP, SP, Immediate(-2 * kWordSize));
1372 __ sw(reg, Address(SP, 1 * kWordSize));
1373 __ LoadObject(TMP, obj);
1374 __ sw(TMP, Address(SP, 0 * kWordSize));
1375 if (is_optimizing()) {
1376 __ BranchLinkPatchable(
1377 *StubCode::OptimizedIdenticalWithNumberCheck_entry());
1378 } else {
1379 __ BranchLinkPatchable(
1380 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
1381 }
1382 if (token_pos.IsReal()) {
1383 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId,
1384 token_pos);
1385 }
1386 __ Comment("EqualityRegConstCompare return");
1387 // Stub returns result in CMPRES1 (if it is 0, then reg and obj are equal).
1388 __ lw(reg, Address(SP, 1 * kWordSize)); // Restore 'reg'.
1389 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Discard constant.
1390 return Condition(CMPRES1, ZR, EQ);
1391 } else {
1392 int16_t imm = 0;
1393 const Register obj_reg = __ LoadConditionOperand(CMPRES1, obj, &imm);
1394 return Condition(reg, obj_reg, EQ, imm);
1395 }
1396 }
1397
1398
1399 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
1400 Register left,
1401 Register right,
1402 bool needs_number_check,
1403 TokenPosition token_pos) {
1404 __ Comment("EqualityRegRegCompare");
1405 if (needs_number_check) {
1406 __ addiu(SP, SP, Immediate(-2 * kWordSize));
1407 __ sw(left, Address(SP, 1 * kWordSize));
1408 __ sw(right, Address(SP, 0 * kWordSize));
1409 if (is_optimizing()) {
1410 __ BranchLinkPatchable(
1411 *StubCode::OptimizedIdenticalWithNumberCheck_entry());
1412 } else {
1413 __ BranchLinkPatchable(
1414 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
1415 }
1416 if (token_pos.IsReal()) {
1417 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId,
1418 token_pos);
1419 }
1420 __ Comment("EqualityRegRegCompare return");
1421 // Stub returns result in CMPRES1 (if it is 0, then left and right are
1422 // equal).
1423 __ lw(right, Address(SP, 0 * kWordSize));
1424 __ lw(left, Address(SP, 1 * kWordSize));
1425 __ addiu(SP, SP, Immediate(2 * kWordSize));
1426 return Condition(CMPRES1, ZR, EQ);
1427 } else {
1428 return Condition(left, right, EQ);
1429 }
1430 }
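Both equality helpers spill their operands, call the identical-with-number-check stub, and then test CMPRES1 against ZR. A rough stand-in for what that stub decides, assuming the usual semantics (pointer identity, except that equal-valued boxed numbers also count as identical); the types and names below are illustrative only, not the VM's Object hierarchy:

    struct Boxed {
      enum Kind { kOther, kMint, kDouble } kind;
      unsigned long long bits;  // payload: integer value or double bit pattern
      const void* ptr;          // heap address of the box
    };

    bool IdenticalWithNumberCheck(const Boxed& left, const Boxed& right) {
      if (left.ptr == right.ptr) return true;                     // same object
      if (left.kind != right.kind || left.kind == Boxed::kOther) return false;
      return left.bits == right.bits;                             // equal boxed value
    }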
1431
1432
1433 // This function must be in sync with FlowGraphCompiler::RecordSafepoint and
1434 // FlowGraphCompiler::SlowPathEnvironmentFor.
1435 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
1436 #if defined(DEBUG)
1437 locs->CheckWritableInputs();
1438 ClobberDeadTempRegisters(locs);
1439 #endif
1440
1441 __ Comment("SaveLiveRegisters");
1442 // TODO(vegorov): consider saving only caller save (volatile) registers.
1443 const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
1444 if (fpu_regs_count > 0) {
1445 __ AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize));
1446 // Store fpu registers with the lowest register number at the lowest
1447 // address.
1448 intptr_t offset = 0;
1449 for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
1450 DRegister fpu_reg = static_cast<DRegister>(i);
1451 if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
1452 __ StoreDToOffset(fpu_reg, SP, offset);
1453 offset += kFpuRegisterSize;
1454 }
1455 }
1456 ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
1457 }
1458
1459 // The order in which the registers are pushed must match the order
1460 // in which the registers are encoded in the safe point's stack map.
1461 const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
1462 ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
1463 const int register_count = Utils::CountOneBits(cpu_registers);
1464 if (register_count > 0) {
1465 __ addiu(SP, SP, Immediate(-register_count * kWordSize));
1466 intptr_t offset = register_count * kWordSize;
1467 for (int i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1468 Register r = static_cast<Register>(i);
1469 if (locs->live_registers()->ContainsRegister(r)) {
1470 offset -= kWordSize;
1471 __ sw(r, Address(SP, offset));
1472 }
1473 }
1474 ASSERT(offset == 0);
1475 }
1476 }
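Because RecordSafepoint and SlowPathEnvironmentFor must find each saved value by position, the resulting layout matters: the CPU slots occupy the bottom of the block (starting at the final SP), the FPU doubles sit above them, and within each group the lowest-numbered live register has the lowest address. An illustrative helper, not VM code, that computes a saved CPU register's SP-relative offset under that layout:

    int SavedCpuSlotOffset(unsigned live_cpu_mask, int reg, int word_size) {
      int index = 0;
      for (int i = 0; i < reg; ++i) {
        if (live_cpu_mask & (1u << i)) ++index;  // live registers numbered below 'reg'
      }
      return index * word_size;                  // the FPU block starts above all CPU slots
    }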
1477
1478
1479 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
1480 __ Comment("RestoreLiveRegisters");
1481 const intptr_t cpu_registers = locs->live_registers()->cpu_registers();
1482 ASSERT((cpu_registers & ~kAllCpuRegistersList) == 0);
1483 const int register_count = Utils::CountOneBits(cpu_registers);
1484 if (register_count > 0) {
1485 intptr_t offset = register_count * kWordSize;
1486 for (int i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1487 Register r = static_cast<Register>(i);
1488 if (locs->live_registers()->ContainsRegister(r)) {
1489 offset -= kWordSize;
1490 __ lw(r, Address(SP, offset));
1491 }
1492 }
1493 ASSERT(offset == 0);
1494 __ addiu(SP, SP, Immediate(register_count * kWordSize));
1495 }
1496
1497 const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
1498 if (fpu_regs_count > 0) {
1499 // Fpu registers have the lowest register number at the lowest address.
1500 intptr_t offset = 0;
1501 for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
1502 DRegister fpu_reg = static_cast<DRegister>(i);
1503 if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
1504 __ LoadDFromOffset(fpu_reg, SP, offset);
1505 offset += kFpuRegisterSize;
1506 }
1507 }
1508 ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
1509 __ AddImmediate(SP, offset);
1510 }
1511 }
1512
1513
1514 #if defined(DEBUG)
1515 void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
1516 // Clobber temporaries that have not been manually preserved.
1517 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
1518 Location tmp = locs->temp(i);
1519 // TODO(zerny): clobber non-live temporary FPU registers.
1520 if (tmp.IsRegister() &&
1521 !locs->live_registers()->ContainsRegister(tmp.reg())) {
1522 __ LoadImmediate(tmp.reg(), 0xf7);
1523 }
1524 }
1525 }
1526 #endif
1527
1528
1529 void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
1530 intptr_t argument_count,
1531 const Array& arguments_descriptor) {
1532 __ Comment("EmitTestAndCall");
1533 // Load receiver into T0.
1534 __ LoadFromOffset(T0, SP, (argument_count - 1) * kWordSize);
1535 __ LoadObject(S4, arguments_descriptor);
1536 }
1537
1538
1539 void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
1540 __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
1541 if (if_smi) {
1542 // Jump if receiver is Smi.
1543 __ beq(CMPRES1, ZR, label);
1544 } else {
1545 // Jump if receiver is not Smi.
1546 __ bne(CMPRES1, ZR, label);
1547 }
1548 }
1549
1550
1551 void FlowGraphCompiler::EmitTestAndCallLoadCid() {
1552 __ LoadClassId(T2, T0);
1553 }
1554
1555
1556 int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label,
1557 const CidRange& range,
1558 int bias) {
1559 intptr_t cid_start = range.cid_start;
1560 if (range.IsSingleCid()) {
1561 __ BranchNotEqual(T2, Immediate(cid_start - bias), next_label);
1562 } else {
1563 __ AddImmediate(T2, T2, bias - cid_start);
1564 bias = cid_start;
1565 // TODO(erikcorry): We should use sltiu instead of the temporary TMP if
1566 // the range is small enough.
1567 __ LoadImmediate(TMP, range.Extent());
1568 // Reverse the comparison so TMP becomes 1 if the biased cid exceeds the
1569 // extent, i.e., the cid is out of range.
1570 __ sltu(TMP, TMP, T2);
1571 __ bne(TMP, ZR, next_label);
1572 }
1573 return bias;
1574 }
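The running bias means each successive range only has to adjust T2 by the delta from the previous cid_start, and the unsigned "reverse" comparison folds the lower and upper bound into a single branch. A sketch of the equivalent scalar check (illustrative only):

    bool CidInRange(long cid, long cid_start, long extent) {
      unsigned long biased = (unsigned long)(cid - cid_start);
      return biased <= (unsigned long)extent;  // the sltu/bne above branch away when biased > extent
    }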
1575
1576
1577 #undef __
1578 #define __ compiler_->assembler()->
1579
1580
1581 void ParallelMoveResolver::EmitMove(int index) {
1582 MoveOperands* move = moves_[index];
1583 const Location source = move->src();
1584 const Location destination = move->dest();
1585 __ Comment("ParallelMoveResolver::EmitMove");
1586
1587 if (source.IsRegister()) {
1588 if (destination.IsRegister()) {
1589 __ mov(destination.reg(), source.reg());
1590 } else {
1591 ASSERT(destination.IsStackSlot());
1592 const intptr_t dest_offset = destination.ToStackSlotOffset();
1593 __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
1594 }
1595 } else if (source.IsStackSlot()) {
1596 if (destination.IsRegister()) {
1597 const intptr_t source_offset = source.ToStackSlotOffset();
1598 __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
1599 } else {
1600 ASSERT(destination.IsStackSlot());
1601 const intptr_t source_offset = source.ToStackSlotOffset();
1602 const intptr_t dest_offset = destination.ToStackSlotOffset();
1603 ScratchRegisterScope tmp(this, kNoRegister);
1604 __ LoadFromOffset(tmp.reg(), source.base_reg(), source_offset);
1605 __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset);
1606 }
1607 } else if (source.IsFpuRegister()) {
1608 if (destination.IsFpuRegister()) {
1609 DRegister dst = destination.fpu_reg();
1610 DRegister src = source.fpu_reg();
1611 __ movd(dst, src);
1612 } else {
1613 ASSERT(destination.IsDoubleStackSlot());
1614 const intptr_t dest_offset = destination.ToStackSlotOffset();
1615 DRegister src = source.fpu_reg();
1616 __ StoreDToOffset(src, destination.base_reg(), dest_offset);
1617 }
1618 } else if (source.IsDoubleStackSlot()) {
1619 if (destination.IsFpuRegister()) {
1620 const intptr_t source_offset = source.ToStackSlotOffset();
1621 DRegister dst = destination.fpu_reg();
1622 __ LoadDFromOffset(dst, source.base_reg(), source_offset);
1623 } else {
1624 ASSERT(destination.IsDoubleStackSlot());
1625 const intptr_t source_offset = source.ToStackSlotOffset();
1626 const intptr_t dest_offset = destination.ToStackSlotOffset();
1627 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
1628 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
1629 }
1630 } else {
1631 ASSERT(source.IsConstant());
1632 const Object& constant = source.constant();
1633 if (destination.IsRegister()) {
1634 if (constant.IsSmi() &&
1635 (source.constant_instruction()->representation() == kUnboxedInt32)) {
1636 __ LoadImmediate(destination.reg(), Smi::Cast(constant).Value());
1637 } else {
1638 __ LoadObject(destination.reg(), constant);
1639 }
1640 } else if (destination.IsFpuRegister()) {
1641 __ LoadObject(TMP, constant);
1642 __ LoadDFromOffset(destination.fpu_reg(), TMP,
1643 Double::value_offset() - kHeapObjectTag);
1644 } else if (destination.IsDoubleStackSlot()) {
1645 const intptr_t dest_offset = destination.ToStackSlotOffset();
1646 __ LoadObject(TMP, constant);
1647 __ LoadDFromOffset(DTMP, TMP, Double::value_offset() - kHeapObjectTag);
1648 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
1649 } else {
1650 ASSERT(destination.IsStackSlot());
1651 const intptr_t dest_offset = destination.ToStackSlotOffset();
1652 ScratchRegisterScope tmp(this, kNoRegister);
1653 if (constant.IsSmi() &&
1654 (source.constant_instruction()->representation() == kUnboxedInt32)) {
1655 __ LoadImmediate(tmp.reg(), Smi::Cast(constant).Value());
1656 } else {
1657 __ LoadObject(tmp.reg(), constant);
1658 }
1659 __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset);
1660 }
1661 }
1662
1663 move->Eliminate();
1664 }
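The Double loads above subtract kHeapObjectTag because tagged heap pointers carry a low tag bit, so a field's untagged address is the tagged pointer plus the field offset minus the tag. A minimal sketch of that address arithmetic, assuming the usual tag value of 1:

    unsigned long FieldAddress(unsigned long tagged_ptr, long field_offset,
                               long heap_object_tag /* assumed 1 */) {
      return tagged_ptr + field_offset - heap_object_tag;  // untagged field address
    }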
1665
1666
1667 void ParallelMoveResolver::EmitSwap(int index) {
1668 MoveOperands* move = moves_[index];
1669 const Location source = move->src();
1670 const Location destination = move->dest();
1671
1672 if (source.IsRegister() && destination.IsRegister()) {
1673 ASSERT(source.reg() != TMP);
1674 ASSERT(destination.reg() != TMP);
1675 __ mov(TMP, source.reg());
1676 __ mov(source.reg(), destination.reg());
1677 __ mov(destination.reg(), TMP);
1678 } else if (source.IsRegister() && destination.IsStackSlot()) {
1679 Exchange(source.reg(), destination.base_reg(),
1680 destination.ToStackSlotOffset());
1681 } else if (source.IsStackSlot() && destination.IsRegister()) {
1682 Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
1683 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1684 Exchange(source.base_reg(), source.ToStackSlotOffset(),
1685 destination.base_reg(), destination.ToStackSlotOffset());
1686 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1687 DRegister dst = destination.fpu_reg();
1688 DRegister src = source.fpu_reg();
1689 __ movd(DTMP, src);
1690 __ movd(src, dst);
1691 __ movd(dst, DTMP);
1692 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1693 ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot());
1694 DRegister reg =
1695 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1696 Register base_reg =
1697 source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
1698 const intptr_t slot_offset = source.IsFpuRegister()
1699 ? destination.ToStackSlotOffset()
1700 : source.ToStackSlotOffset();
1701 __ LoadDFromOffset(DTMP, base_reg, slot_offset);
1702 __ StoreDToOffset(reg, base_reg, slot_offset);
1703 __ movd(reg, DTMP);
1704 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1705 const intptr_t source_offset = source.ToStackSlotOffset();
1706 const intptr_t dest_offset = destination.ToStackSlotOffset();
1707
1708 ScratchFpuRegisterScope ensure_scratch(this, DTMP);
1709 DRegister scratch = ensure_scratch.reg();
1710 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
1711 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
1712 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
1713 __ StoreDToOffset(scratch, source.base_reg(), source_offset);
1714 } else {
1715 UNREACHABLE();
1716 }
1717
1718 // The swap of source and destination has executed a move from source to
1719 // destination.
1720 move->Eliminate();
1721
1722 // Any unperformed (including pending) move with a source of either
1723 // this move's source or destination needs to have its source
1724 // changed to reflect the state of affairs after the swap.
1725 for (int i = 0; i < moves_.length(); ++i) {
1726 const MoveOperands& other_move = *moves_[i];
1727 if (other_move.Blocks(source)) {
1728 moves_[i]->set_src(destination);
1729 } else if (other_move.Blocks(destination)) {
1730 moves_[i]->set_src(source);
1731 }
1732 }
1733 }
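A short worked example of the source-patching loop above, using hypothetical registers:

    // Pending moves:  m0: T1 <- T2,   m1: T2 <- T1,   m2: A0 <- T1
    // EmitSwap(m0) exchanges T1 and T2, which also completes m1.  T1's original
    // value now lives in T2, so the loop rewrites m2's source:  m2: A0 <- T2.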
1734
1735
1736 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
1737 const Address& src) {
1738 __ Comment("ParallelMoveResolver::MoveMemoryToMemory");
1739 __ lw(TMP, src);
1740 __ sw(TMP, dst);
1741 }
1742
1743
1744 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
1745 __ Comment("ParallelMoveResolver::StoreObject");
1746 __ LoadObject(TMP, obj);
1747 __ sw(TMP, dst);
1748 }
1749
1750
1751 // Do not call or implement this function. Instead, use the form below that
1752 // uses an offset from the frame pointer instead of an Address.
1753 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
1754 UNREACHABLE();
1755 }
1756
1757
1758 // Do not call or implement this function. Instead, use the form below that
1759 // uses offsets from the frame pointer instead of Addresses.
1760 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
1761 UNREACHABLE();
1762 }
1763
1764
1765 void ParallelMoveResolver::Exchange(Register reg,
1766 Register base_reg,
1767 intptr_t stack_offset) {
1768 ScratchRegisterScope tmp(this, reg);
1769 __ mov(tmp.reg(), reg);
1770 __ LoadFromOffset(reg, base_reg, stack_offset);
1771 __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
1772 }
1773
1774
1775 void ParallelMoveResolver::Exchange(Register base_reg1,
1776 intptr_t stack_offset1,
1777 Register base_reg2,
1778 intptr_t stack_offset2) {
1779 ScratchRegisterScope tmp1(this, kNoRegister);
1780 ScratchRegisterScope tmp2(this, tmp1.reg());
1781 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
1782 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
1783 __ StoreToOffset(tmp1.reg(), base_reg1, stack_offset2);
1784 __ StoreToOffset(tmp2.reg(), base_reg2, stack_offset1);
1785 }
1786
1787
1788 void ParallelMoveResolver::SpillScratch(Register reg) {
1789 __ Comment("ParallelMoveResolver::SpillScratch");
1790 __ Push(reg);
1791 }
1792
1793
1794 void ParallelMoveResolver::RestoreScratch(Register reg) {
1795 __ Comment("ParallelMoveResolver::RestoreScratch");
1796 __ Pop(reg);
1797 }
1798
1799
1800 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
1801 __ Comment("ParallelMoveResolver::SpillFpuScratch");
1802 __ AddImmediate(SP, -kDoubleSize);
1803 __ StoreDToOffset(reg, SP, 0);
1804 }
1805
1806
1807 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
1808 __ Comment("ParallelMoveResolver::RestoreFpuScratch");
1809 __ LoadDFromOffset(reg, SP, 0);
1810 __ AddImmediate(SP, kDoubleSize);
1811 }
1812
1813
1814 #undef __
1815
1816
1817 } // namespace dart
1818
1819 #endif // defined TARGET_ARCH_MIPS