Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_compiler_mips.cc

Issue 2481873005: clang-format runtime/vm (Closed)
Patch Set: Merge (created 4 years, 1 month ago)
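The visible hunks are purely mechanical: clang-format re-wraps argument lists and long expressions without changing behavior. For illustration only (this is not the tooling used for this CL), a directory-wide reformat like this could be driven by a small script; the sketch below assumes clang-format is on PATH and that a .clang-format file at the repository root defines the style.

    # Illustration only; hypothetical driver script, not part of this CL.
    # Runs clang-format in place over every C++ source/header under a root.
    import subprocess
    from pathlib import Path

    def format_tree(root: str) -> None:
        for path in sorted(Path(root).rglob("*")):
            if path.suffix in {".cc", ".h"}:
                # -i edits the file in place; style comes from the nearest
                # .clang-format file (assumed to exist).
                subprocess.run(["clang-format", "-i", str(path)], check=True)

    if __name__ == "__main__":
        format_tree("runtime/vm")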
OLD | NEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_MIPS.
6 #if defined(TARGET_ARCH_MIPS) 6 #if defined(TARGET_ARCH_MIPS)
7 7
8 #include "vm/flow_graph_compiler.h" 8 #include "vm/flow_graph_compiler.h"
9 9
10 #include "vm/ast_printer.h" 10 #include "vm/ast_printer.h"
(...skipping 98 matching lines...)
109 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++); 109 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
110 110
111 111
112 // Emit all values that are needed for materialization as a part of the 112 // Emit all values that are needed for materialization as a part of the
113 // expression stack for the bottom-most frame. This guarantees that GC 113 // expression stack for the bottom-most frame. This guarantees that GC
114 // will be able to find them during materialization. 114 // will be able to find them during materialization.
115 slot_ix = builder->EmitMaterializationArguments(slot_ix); 115 slot_ix = builder->EmitMaterializationArguments(slot_ix);
116 116
117 // For the innermost environment, set outgoing arguments and the locals. 117 // For the innermost environment, set outgoing arguments and the locals.
118 for (intptr_t i = current->Length() - 1; 118 for (intptr_t i = current->Length() - 1;
119 i >= current->fixed_parameter_count(); 119 i >= current->fixed_parameter_count(); i--) {
120 i--) {
121 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++); 120 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
122 } 121 }
123 122
124 Environment* previous = current; 123 Environment* previous = current;
125 current = current->outer(); 124 current = current->outer();
126 while (current != NULL) { 125 while (current != NULL) {
127 builder->AddPp(current->function(), slot_ix++); 126 builder->AddPp(current->function(), slot_ix++);
128 builder->AddPcMarker(previous->function(), slot_ix++); 127 builder->AddPcMarker(previous->function(), slot_ix++);
129 builder->AddCallerFp(slot_ix++); 128 builder->AddCallerFp(slot_ix++);
130 129
131 // For any outer environment the deopt id is that of the call instruction 130 // For any outer environment the deopt id is that of the call instruction
132 // which is recorded in the outer environment. 131 // which is recorded in the outer environment.
133 builder->AddReturnAddress( 132 builder->AddReturnAddress(current->function(),
134 current->function(), 133 Thread::ToDeoptAfter(current->deopt_id()),
135 Thread::ToDeoptAfter(current->deopt_id()), 134 slot_ix++);
136 slot_ix++);
137 135
138 // The values of outgoing arguments can be changed from the inlined call so 136 // The values of outgoing arguments can be changed from the inlined call so
139 // we must read them from the previous environment. 137 // we must read them from the previous environment.
140 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { 138 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
141 builder->AddCopy(previous->ValueAt(i), 139 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
142 previous->LocationAt(i),
143 slot_ix++); 140 slot_ix++);
144 } 141 }
145 142
146 // Set the locals, note that outgoing arguments are not in the environment. 143 // Set the locals, note that outgoing arguments are not in the environment.
147 for (intptr_t i = current->Length() - 1; 144 for (intptr_t i = current->Length() - 1;
148 i >= current->fixed_parameter_count(); 145 i >= current->fixed_parameter_count(); i--) {
149 i--) { 146 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
150 builder->AddCopy(current->ValueAt(i),
151 current->LocationAt(i),
152 slot_ix++);
153 } 147 }
154 148
155 // Iterate on the outer environment. 149 // Iterate on the outer environment.
156 previous = current; 150 previous = current;
157 current = current->outer(); 151 current = current->outer();
158 } 152 }
159 // The previous pointer is now the outermost environment. 153 // The previous pointer is now the outermost environment.
160 ASSERT(previous != NULL); 154 ASSERT(previous != NULL);
161 155
162 // Set slots for the outermost environment. 156 // Set slots for the outermost environment.
(...skipping 111 matching lines...)
274 __ beq(CMPRES1, ZR, is_not_instance_lbl); 268 __ beq(CMPRES1, ZR, is_not_instance_lbl);
275 } 269 }
276 // A function type test requires checking the function signature. 270 // A function type test requires checking the function signature.
277 if (!type.IsFunctionType()) { 271 if (!type.IsFunctionType()) {
278 const intptr_t num_type_args = type_class.NumTypeArguments(); 272 const intptr_t num_type_args = type_class.NumTypeArguments();
279 const intptr_t num_type_params = type_class.NumTypeParameters(); 273 const intptr_t num_type_params = type_class.NumTypeParameters();
280 const intptr_t from_index = num_type_args - num_type_params; 274 const intptr_t from_index = num_type_args - num_type_params;
281 const TypeArguments& type_arguments = 275 const TypeArguments& type_arguments =
282 TypeArguments::ZoneHandle(zone(), type.arguments()); 276 TypeArguments::ZoneHandle(zone(), type.arguments());
283 const bool is_raw_type = type_arguments.IsNull() || 277 const bool is_raw_type = type_arguments.IsNull() ||
284 type_arguments.IsRaw(from_index, num_type_params); 278 type_arguments.IsRaw(from_index, num_type_params);
285 if (is_raw_type) { 279 if (is_raw_type) {
286 const Register kClassIdReg = T0; 280 const Register kClassIdReg = T0;
287 // dynamic type argument, check only classes. 281 // dynamic type argument, check only classes.
288 __ LoadClassId(kClassIdReg, kInstanceReg); 282 __ LoadClassId(kClassIdReg, kInstanceReg);
289 __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl); 283 __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl);
290 // List is a very common case. 284 // List is a very common case.
291 if (IsListClass(type_class)) { 285 if (IsListClass(type_class)) {
292 GenerateListTypeCheck(kClassIdReg, is_instance_lbl); 286 GenerateListTypeCheck(kClassIdReg, is_instance_lbl);
293 } 287 }
294 return GenerateSubtype1TestCacheLookup( 288 return GenerateSubtype1TestCacheLookup(
295 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); 289 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
296 } 290 }
297 // If one type argument only, check if type argument is Object or dynamic. 291 // If one type argument only, check if type argument is Object or dynamic.
298 if (type_arguments.Length() == 1) { 292 if (type_arguments.Length() == 1) {
299 const AbstractType& tp_argument = AbstractType::ZoneHandle(zone(), 293 const AbstractType& tp_argument =
300 type_arguments.TypeAt(0)); 294 AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0));
301 ASSERT(!tp_argument.IsMalformed()); 295 ASSERT(!tp_argument.IsMalformed());
302 if (tp_argument.IsType()) { 296 if (tp_argument.IsType()) {
303 ASSERT(tp_argument.HasResolvedTypeClass()); 297 ASSERT(tp_argument.HasResolvedTypeClass());
304 // Check if type argument is dynamic or Object. 298 // Check if type argument is dynamic or Object.
305 const Type& object_type = Type::Handle(zone(), Type::ObjectType()); 299 const Type& object_type = Type::Handle(zone(), Type::ObjectType());
306 if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) { 300 if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) {
307 // Instance class test only necessary. 301 // Instance class test only necessary.
308 return GenerateSubtype1TestCacheLookup( 302 return GenerateSubtype1TestCacheLookup(
309 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); 303 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
310 } 304 }
311 } 305 }
312 } 306 }
313 } 307 }
314 // Regular subtype test cache involving instance's type arguments. 308 // Regular subtype test cache involving instance's type arguments.
315 const Register kTypeArgumentsReg = kNoRegister; 309 const Register kTypeArgumentsReg = kNoRegister;
316 const Register kTempReg = kNoRegister; 310 const Register kTempReg = kNoRegister;
317 // A0: instance (must be preserved). 311 // A0: instance (must be preserved).
318 return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, 312 return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg,
319 kInstanceReg, 313 kTypeArgumentsReg, kTempReg,
320 kTypeArgumentsReg, 314 is_instance_lbl, is_not_instance_lbl);
321 kTempReg,
322 is_instance_lbl,
323 is_not_instance_lbl);
324 } 315 }
325 316
326 317
327 void FlowGraphCompiler::CheckClassIds(Register class_id_reg, 318 void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
328 const GrowableArray<intptr_t>& class_ids, 319 const GrowableArray<intptr_t>& class_ids,
329 Label* is_equal_lbl, 320 Label* is_equal_lbl,
330 Label* is_not_equal_lbl) { 321 Label* is_not_equal_lbl) {
331 __ Comment("CheckClassIds"); 322 __ Comment("CheckClassIds");
332 for (intptr_t i = 0; i < class_ids.length(); i++) { 323 for (intptr_t i = 0; i < class_ids.length(); i++) {
333 __ BranchEqual(class_id_reg, Immediate(class_ids[i]), is_equal_lbl); 324 __ BranchEqual(class_id_reg, Immediate(class_ids[i]), is_equal_lbl);
(...skipping 18 matching lines...)
352 // Fallthrough. 343 // Fallthrough.
353 return true; 344 return true;
354 } 345 }
355 const Class& type_class = Class::Handle(zone(), type.type_class()); 346 const Class& type_class = Class::Handle(zone(), type.type_class());
356 ASSERT(type_class.NumTypeArguments() == 0); 347 ASSERT(type_class.NumTypeArguments() == 0);
357 348
358 const Register kInstanceReg = A0; 349 const Register kInstanceReg = A0;
359 __ andi(T0, A0, Immediate(kSmiTagMask)); 350 __ andi(T0, A0, Immediate(kSmiTagMask));
360 // If instance is Smi, check directly. 351 // If instance is Smi, check directly.
361 const Class& smi_class = Class::Handle(zone(), Smi::Class()); 352 const Class& smi_class = Class::Handle(zone(), Smi::Class());
362 if (smi_class.IsSubtypeOf(TypeArguments::Handle(zone()), 353 if (smi_class.IsSubtypeOf(TypeArguments::Handle(zone()), type_class,
363 type_class, 354 TypeArguments::Handle(zone()), NULL, NULL,
364 TypeArguments::Handle(zone()),
365 NULL,
366 NULL,
367 Heap::kOld)) { 355 Heap::kOld)) {
368 __ beq(T0, ZR, is_instance_lbl); 356 __ beq(T0, ZR, is_instance_lbl);
369 } else { 357 } else {
370 __ beq(T0, ZR, is_not_instance_lbl); 358 __ beq(T0, ZR, is_not_instance_lbl);
371 } 359 }
372 const Register kClassIdReg = T0; 360 const Register kClassIdReg = T0;
373 __ LoadClassId(kClassIdReg, kInstanceReg); 361 __ LoadClassId(kClassIdReg, kInstanceReg);
374 // See ClassFinalizer::ResolveSuperTypeAndInterfaces for list of restricted 362 // See ClassFinalizer::ResolveSuperTypeAndInterfaces for list of restricted
375 // interfaces. 363 // interfaces.
376 // Bool interface can be implemented only by core class Bool. 364 // Bool interface can be implemented only by core class Bool.
377 if (type.IsBoolType()) { 365 if (type.IsBoolType()) {
378 __ BranchEqual(kClassIdReg, Immediate(kBoolCid), is_instance_lbl); 366 __ BranchEqual(kClassIdReg, Immediate(kBoolCid), is_instance_lbl);
379 __ b(is_not_instance_lbl); 367 __ b(is_not_instance_lbl);
380 return false; 368 return false;
381 } 369 }
382 // Custom checking for numbers (Smi, Mint, Bigint and Double). 370 // Custom checking for numbers (Smi, Mint, Bigint and Double).
383 // Note that instance is not Smi (checked above). 371 // Note that instance is not Smi (checked above).
384 if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) { 372 if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
385 GenerateNumberTypeCheck( 373 GenerateNumberTypeCheck(kClassIdReg, type, is_instance_lbl,
386 kClassIdReg, type, is_instance_lbl, is_not_instance_lbl); 374 is_not_instance_lbl);
387 return false; 375 return false;
388 } 376 }
389 if (type.IsStringType()) { 377 if (type.IsStringType()) {
390 GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl); 378 GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl);
391 return false; 379 return false;
392 } 380 }
393 if (type.IsDartFunctionType()) { 381 if (type.IsDartFunctionType()) {
394 // Check if instance is a closure. 382 // Check if instance is a closure.
395 __ BranchEqual(kClassIdReg, Immediate(kClosureCid), is_instance_lbl); 383 __ BranchEqual(kClassIdReg, Immediate(kClosureCid), is_instance_lbl);
396 return true; // Fall through 384 return true; // Fall through
397 } 385 }
398 // Compare if the classes are equal. 386 // Compare if the classes are equal.
399 if (!type_class.is_abstract()) { 387 if (!type_class.is_abstract()) {
400 __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl); 388 __ BranchEqual(kClassIdReg, Immediate(type_class.id()), is_instance_lbl);
401 } 389 }
402 // Otherwise fallthrough. 390 // Otherwise fallthrough.
403 return true; 391 return true;
404 } 392 }
405 393
406 394
407 // Uses SubtypeTestCache to store instance class and result. 395 // Uses SubtypeTestCache to store instance class and result.
408 // A0: instance to test. 396 // A0: instance to test.
409 // Clobbers A1, A2, T0-T3. 397 // Clobbers A1, A2, T0-T3.
410 // Immediate class test already done. 398 // Immediate class test already done.
(...skipping 10 matching lines...)
421 __ LoadClass(T0, kInstanceReg); 409 __ LoadClass(T0, kInstanceReg);
422 // T0: instance class. 410 // T0: instance class.
423 // Check immediate superclass equality. 411 // Check immediate superclass equality.
424 __ lw(T0, FieldAddress(T0, Class::super_type_offset())); 412 __ lw(T0, FieldAddress(T0, Class::super_type_offset()));
425 __ lw(T0, FieldAddress(T0, Type::type_class_id_offset())); 413 __ lw(T0, FieldAddress(T0, Type::type_class_id_offset()));
426 __ BranchEqual(T0, Immediate(Smi::RawValue(type_class.id())), 414 __ BranchEqual(T0, Immediate(Smi::RawValue(type_class.id())),
427 is_instance_lbl); 415 is_instance_lbl);
428 416
429 const Register kTypeArgumentsReg = kNoRegister; 417 const Register kTypeArgumentsReg = kNoRegister;
430 const Register kTempReg = kNoRegister; 418 const Register kTempReg = kNoRegister;
431 return GenerateCallSubtypeTestStub(kTestTypeOneArg, 419 return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg,
432 kInstanceReg, 420 kTypeArgumentsReg, kTempReg,
433 kTypeArgumentsReg, 421 is_instance_lbl, is_not_instance_lbl);
434 kTempReg,
435 is_instance_lbl,
436 is_not_instance_lbl);
437 } 422 }
438 423
439 424
440 // Generates inlined check if 'type' is a type parameter or type itself 425 // Generates inlined check if 'type' is a type parameter or type itself
441 // A0: instance (preserved). 426 // A0: instance (preserved).
442 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest( 427 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
443 TokenPosition token_pos, 428 TokenPosition token_pos,
444 const AbstractType& type, 429 const AbstractType& type,
445 Label* is_instance_lbl, 430 Label* is_instance_lbl,
446 Label* is_not_instance_lbl) { 431 Label* is_not_instance_lbl) {
447 __ Comment("UninstantiatedTypeTest"); 432 __ Comment("UninstantiatedTypeTest");
448 ASSERT(!type.IsInstantiated()); 433 ASSERT(!type.IsInstantiated());
449 // Skip check if destination is a dynamic type. 434 // Skip check if destination is a dynamic type.
450 if (type.IsTypeParameter()) { 435 if (type.IsTypeParameter()) {
451 const TypeParameter& type_param = TypeParameter::Cast(type); 436 const TypeParameter& type_param = TypeParameter::Cast(type);
452 // Load instantiator type arguments on stack. 437 // Load instantiator type arguments on stack.
453 __ lw(A1, Address(SP, 0)); // Get instantiator type arguments. 438 __ lw(A1, Address(SP, 0)); // Get instantiator type arguments.
454 // A1: instantiator type arguments. 439 // A1: instantiator type arguments.
455 // Check if type arguments are null, i.e. equivalent to vector of dynamic. 440 // Check if type arguments are null, i.e. equivalent to vector of dynamic.
456 __ LoadObject(T7, Object::null_object()); 441 __ LoadObject(T7, Object::null_object());
457 __ beq(A1, T7, is_instance_lbl); 442 __ beq(A1, T7, is_instance_lbl);
458 __ lw(T2, 443 __ lw(T2,
459 FieldAddress(A1, TypeArguments::type_at_offset(type_param.index()))); 444 FieldAddress(A1, TypeArguments::type_at_offset(type_param.index())));
460 // R2: concrete type of type. 445 // R2: concrete type of type.
461 // Check if type argument is dynamic. 446 // Check if type argument is dynamic.
462 __ BranchEqual(T2, 447 __ BranchEqual(T2, Object::dynamic_type(), is_instance_lbl);
463 Object::dynamic_type(), is_instance_lbl); 448 __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::ObjectType()),
464 __ BranchEqual(T2, 449 is_instance_lbl);
465 Type::ZoneHandle(zone(), Type::ObjectType()), is_instance_lbl);
466 450
467 // For Smi check quickly against int and num interfaces. 451 // For Smi check quickly against int and num interfaces.
468 Label not_smi; 452 Label not_smi;
469 __ andi(CMPRES1, A0, Immediate(kSmiTagMask)); 453 __ andi(CMPRES1, A0, Immediate(kSmiTagMask));
470 __ bne(CMPRES1, ZR, &not_smi); // Value is Smi? 454 __ bne(CMPRES1, ZR, &not_smi); // Value is Smi?
471 __ BranchEqual(T2, 455 __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::IntType()),
472 Type::ZoneHandle(zone(), Type::IntType()), is_instance_lbl); 456 is_instance_lbl);
473 __ BranchEqual(T2, 457 __ BranchEqual(T2, Type::ZoneHandle(zone(), Type::Number()),
474 Type::ZoneHandle(zone(), Type::Number()), is_instance_lbl); 458 is_instance_lbl);
475 // Smi must be handled in runtime. 459 // Smi must be handled in runtime.
476 Label fall_through; 460 Label fall_through;
477 __ b(&fall_through); 461 __ b(&fall_through);
478 462
479 __ Bind(&not_smi); 463 __ Bind(&not_smi);
480 // T1: instantiator type arguments. 464 // T1: instantiator type arguments.
481 // A0: instance. 465 // A0: instance.
482 const Register kInstanceReg = A0; 466 const Register kInstanceReg = A0;
483 const Register kTypeArgumentsReg = A1; 467 const Register kTypeArgumentsReg = A1;
484 const Register kTempReg = kNoRegister; 468 const Register kTempReg = kNoRegister;
485 const SubtypeTestCache& type_test_cache = 469 const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
486 SubtypeTestCache::ZoneHandle(zone(), 470 zone(), GenerateCallSubtypeTestStub(
487 GenerateCallSubtypeTestStub(kTestTypeThreeArgs, 471 kTestTypeThreeArgs, kInstanceReg, kTypeArgumentsReg,
488 kInstanceReg, 472 kTempReg, is_instance_lbl, is_not_instance_lbl));
489 kTypeArgumentsReg,
490 kTempReg,
491 is_instance_lbl,
492 is_not_instance_lbl));
493 __ Bind(&fall_through); 473 __ Bind(&fall_through);
494 return type_test_cache.raw(); 474 return type_test_cache.raw();
495 } 475 }
496 if (type.IsType()) { 476 if (type.IsType()) {
497 const Register kInstanceReg = A0; 477 const Register kInstanceReg = A0;
498 const Register kTypeArgumentsReg = A1; 478 const Register kTypeArgumentsReg = A1;
499 __ andi(CMPRES1, kInstanceReg, Immediate(kSmiTagMask)); 479 __ andi(CMPRES1, kInstanceReg, Immediate(kSmiTagMask));
500 __ beq(CMPRES1, ZR, is_not_instance_lbl); // Is instance Smi? 480 __ beq(CMPRES1, ZR, is_not_instance_lbl); // Is instance Smi?
501 __ lw(kTypeArgumentsReg, Address(SP, 0)); // Instantiator type args. 481 __ lw(kTypeArgumentsReg, Address(SP, 0)); // Instantiator type args.
502 // Uninstantiated type class is known at compile time, but the type 482 // Uninstantiated type class is known at compile time, but the type
503 // arguments are determined at runtime by the instantiator. 483 // arguments are determined at runtime by the instantiator.
504 const Register kTempReg = kNoRegister; 484 const Register kTempReg = kNoRegister;
505 return GenerateCallSubtypeTestStub(kTestTypeThreeArgs, 485 return GenerateCallSubtypeTestStub(kTestTypeThreeArgs, kInstanceReg,
506 kInstanceReg, 486 kTypeArgumentsReg, kTempReg,
507 kTypeArgumentsReg, 487 is_instance_lbl, is_not_instance_lbl);
508 kTempReg,
509 is_instance_lbl,
510 is_not_instance_lbl);
511 } 488 }
512 return SubtypeTestCache::null(); 489 return SubtypeTestCache::null();
513 } 490 }
514 491
515 492
516 // Inputs: 493 // Inputs:
517 // - A0: instance being type checked (preserved). 494 // - A0: instance being type checked (preserved).
518 // - A1: optional instantiator type arguments (preserved). 495 // - A1: optional instantiator type arguments (preserved).
519 // Returns: 496 // Returns:
520 // - preserved instance in A0 and optional instantiator type arguments in A1. 497 // - preserved instance in A0 and optional instantiator type arguments in A1.
(...skipping 11 matching lines...)
532 // A non-null value is returned from a void function, which will result in a 509 // A non-null value is returned from a void function, which will result in a
533 // type error. A null value is handled prior to executing this inline code. 510 // type error. A null value is handled prior to executing this inline code.
534 return SubtypeTestCache::null(); 511 return SubtypeTestCache::null();
535 } 512 }
536 if (type.IsInstantiated()) { 513 if (type.IsInstantiated()) {
537 const Class& type_class = Class::ZoneHandle(zone(), type.type_class()); 514 const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
538 // A class equality check is only applicable with a dst type (not a 515 // A class equality check is only applicable with a dst type (not a
539 // function type) of a non-parameterized class or with a raw dst type of 516 // function type) of a non-parameterized class or with a raw dst type of
540 // a parameterized class. 517 // a parameterized class.
541 if (type.IsFunctionType() || (type_class.NumTypeArguments() > 0)) { 518 if (type.IsFunctionType() || (type_class.NumTypeArguments() > 0)) {
542 return GenerateInstantiatedTypeWithArgumentsTest(token_pos, 519 return GenerateInstantiatedTypeWithArgumentsTest(
543 type, 520 token_pos, type, is_instance_lbl, is_not_instance_lbl);
544 is_instance_lbl,
545 is_not_instance_lbl);
546 // Fall through to runtime call. 521 // Fall through to runtime call.
547 } 522 }
548 const bool has_fall_through = 523 const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
549 GenerateInstantiatedTypeNoArgumentsTest(token_pos, 524 token_pos, type, is_instance_lbl, is_not_instance_lbl);
550 type,
551 is_instance_lbl,
552 is_not_instance_lbl);
553 if (has_fall_through) { 525 if (has_fall_through) {
554 // If test non-conclusive so far, try the inlined type-test cache. 526 // If test non-conclusive so far, try the inlined type-test cache.
555 // 'type' is known at compile time. 527 // 'type' is known at compile time.
556 return GenerateSubtype1TestCacheLookup( 528 return GenerateSubtype1TestCacheLookup(
557 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); 529 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
558 } else { 530 } else {
559 return SubtypeTestCache::null(); 531 return SubtypeTestCache::null();
560 } 532 }
561 } 533 }
562 return GenerateUninstantiatedTypeTest(token_pos, 534 return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl,
563 type,
564 is_instance_lbl,
565 is_not_instance_lbl); 535 is_not_instance_lbl);
566 } 536 }
567 537
568 538
569 // If instanceof type test cannot be performed successfully at compile time and 539 // If instanceof type test cannot be performed successfully at compile time and
570 // therefore eliminated, optimize it by adding inlined tests for: 540 // therefore eliminated, optimize it by adding inlined tests for:
571 // - NULL -> return false. 541 // - NULL -> return false.
572 // - Smi -> compile time subtype check (only if dst class is not parameterized). 542 // - Smi -> compile time subtype check (only if dst class is not parameterized).
573 // - Class equality (only if class is not parameterized). 543 // - Class equality (only if class is not parameterized).
574 // Inputs: 544 // Inputs:
(...skipping 17 matching lines...)
592 // checking whether the tested instance is a Smi. 562 // checking whether the tested instance is a Smi.
593 if (type.IsInstantiated()) { 563 if (type.IsInstantiated()) {
594 // A null object is only an instance of Object and dynamic, which has 564 // A null object is only an instance of Object and dynamic, which has
595 // already been checked above (if the type is instantiated). So we can 565 // already been checked above (if the type is instantiated). So we can
596 // return false here if the instance is null (and if the type is 566 // return false here if the instance is null (and if the type is
597 // instantiated). 567 // instantiated).
598 // We can only inline this null check if the type is instantiated at compile 568 // We can only inline this null check if the type is instantiated at compile
599 // time, since an uninstantiated type at compile time could be Object or 569 // time, since an uninstantiated type at compile time could be Object or
600 // dynamic at run time. 570 // dynamic at run time.
601 __ BranchEqual(A0, Object::null_object(), 571 __ BranchEqual(A0, Object::null_object(),
602 type.IsNullType() ? &is_instance : &is_not_instance); 572 type.IsNullType() ? &is_instance : &is_not_instance);
603 } 573 }
604 574
605 // Generate inline instanceof test. 575 // Generate inline instanceof test.
606 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); 576 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
607 test_cache = GenerateInlineInstanceof(token_pos, type, 577 test_cache =
608 &is_instance, &is_not_instance); 578 GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);
609 579
610 // test_cache is null if there is no fall-through. 580 // test_cache is null if there is no fall-through.
611 Label done; 581 Label done;
612 if (!test_cache.IsNull()) { 582 if (!test_cache.IsNull()) {
613 // Generate runtime call. 583 // Generate runtime call.
614 // Load instantiator type arguments (A1). 584 // Load instantiator type arguments (A1).
615 __ lw(A1, Address(SP, 0 * kWordSize)); 585 __ lw(A1, Address(SP, 0 * kWordSize));
616 586
617 __ addiu(SP, SP, Immediate(-5 * kWordSize)); 587 __ addiu(SP, SP, Immediate(-5 * kWordSize));
618 __ LoadObject(TMP, Object::null_object()); 588 __ LoadObject(TMP, Object::null_object());
619 __ sw(TMP, Address(SP, 4 * kWordSize)); // Make room for the result. 589 __ sw(TMP, Address(SP, 4 * kWordSize)); // Make room for the result.
620 __ sw(A0, Address(SP, 3 * kWordSize)); // Push the instance. 590 __ sw(A0, Address(SP, 3 * kWordSize)); // Push the instance.
621 __ LoadObject(TMP, type); 591 __ LoadObject(TMP, type);
622 __ sw(TMP, Address(SP, 2 * kWordSize)); // Push the type. 592 __ sw(TMP, Address(SP, 2 * kWordSize)); // Push the type.
623 __ sw(A1, Address(SP, 1 * kWordSize)); // Push type arguments. 593 __ sw(A1, Address(SP, 1 * kWordSize)); // Push type arguments.
624 __ LoadUniqueObject(A0, test_cache); 594 __ LoadUniqueObject(A0, test_cache);
625 __ sw(A0, Address(SP, 0 * kWordSize)); 595 __ sw(A0, Address(SP, 0 * kWordSize));
626 GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 4, locs); 596 GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 4, locs);
627 // Pop the parameters supplied to the runtime entry. The result of the 597 // Pop the parameters supplied to the runtime entry. The result of the
628 // instanceof runtime call will be left as the result of the operation. 598 // instanceof runtime call will be left as the result of the operation.
629 __ lw(T0, Address(SP, 4 * kWordSize)); 599 __ lw(T0, Address(SP, 4 * kWordSize));
630 __ addiu(SP, SP, Immediate(5 * kWordSize)); 600 __ addiu(SP, SP, Immediate(5 * kWordSize));
631 if (negate_result) { 601 if (negate_result) {
632 __ LoadObject(V0, Bool::True()); 602 __ LoadObject(V0, Bool::True());
633 __ bne(T0, V0, &done); 603 __ bne(T0, V0, &done);
(...skipping 47 matching lines...)
681 Label is_assignable, runtime_call; 651 Label is_assignable, runtime_call;
682 652
683 __ BranchEqual(A0, Object::null_object(), &is_assignable); 653 __ BranchEqual(A0, Object::null_object(), &is_assignable);
684 __ delay_slot()->sw(A1, Address(SP, 0 * kWordSize)); 654 __ delay_slot()->sw(A1, Address(SP, 0 * kWordSize));
685 655
686 // Generate throw new TypeError() if the type is malformed or malbounded. 656 // Generate throw new TypeError() if the type is malformed or malbounded.
687 if (dst_type.IsMalformedOrMalbounded()) { 657 if (dst_type.IsMalformedOrMalbounded()) {
688 __ addiu(SP, SP, Immediate(-4 * kWordSize)); 658 __ addiu(SP, SP, Immediate(-4 * kWordSize));
689 __ LoadObject(TMP, Object::null_object()); 659 __ LoadObject(TMP, Object::null_object());
690 __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result. 660 __ sw(TMP, Address(SP, 3 * kWordSize)); // Make room for the result.
691 __ sw(A0, Address(SP, 2 * kWordSize)); // Push the source object. 661 __ sw(A0, Address(SP, 2 * kWordSize)); // Push the source object.
692 __ LoadObject(TMP, dst_name); 662 __ LoadObject(TMP, dst_name);
693 __ sw(TMP, Address(SP, 1 * kWordSize)); // Push the destination name. 663 __ sw(TMP, Address(SP, 1 * kWordSize)); // Push the destination name.
694 __ LoadObject(TMP, dst_type); 664 __ LoadObject(TMP, dst_type);
695 __ sw(TMP, Address(SP, 0 * kWordSize)); // Push the destination type. 665 __ sw(TMP, Address(SP, 0 * kWordSize)); // Push the destination type.
696 666
697 GenerateRuntimeCall(token_pos, 667 GenerateRuntimeCall(token_pos, deopt_id, kBadTypeErrorRuntimeEntry, 3,
698 deopt_id,
699 kBadTypeErrorRuntimeEntry,
700 3,
701 locs); 668 locs);
702 // We should never return here. 669 // We should never return here.
703 __ break_(0); 670 __ break_(0);
704 671
705 __ Bind(&is_assignable); // For a null object. 672 __ Bind(&is_assignable); // For a null object.
706 // Restore instantiator type arguments. 673 // Restore instantiator type arguments.
707 __ lw(A1, Address(SP, 0 * kWordSize)); 674 __ lw(A1, Address(SP, 0 * kWordSize));
708 __ addiu(SP, SP, Immediate(1 * kWordSize)); 675 __ addiu(SP, SP, Immediate(1 * kWordSize));
709 return; 676 return;
710 } 677 }
711 678
712 // Generate inline type check, linking to runtime call if not assignable. 679 // Generate inline type check, linking to runtime call if not assignable.
713 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); 680 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
714 test_cache = GenerateInlineInstanceof(token_pos, dst_type, 681 test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable,
715 &is_assignable, &runtime_call); 682 &runtime_call);
716 683
717 __ Bind(&runtime_call); 684 __ Bind(&runtime_call);
718 // Load instantiator type arguments (A1). 685 // Load instantiator type arguments (A1).
719 __ lw(A1, Address(SP, 0 * kWordSize)); 686 __ lw(A1, Address(SP, 0 * kWordSize));
720 687
721 __ addiu(SP, SP, Immediate(-6 * kWordSize)); 688 __ addiu(SP, SP, Immediate(-6 * kWordSize));
722 __ LoadObject(TMP, Object::null_object()); 689 __ LoadObject(TMP, Object::null_object());
723 __ sw(TMP, Address(SP, 5 * kWordSize)); // Make room for the result. 690 __ sw(TMP, Address(SP, 5 * kWordSize)); // Make room for the result.
724 __ sw(A0, Address(SP, 4 * kWordSize)); // Push the source object. 691 __ sw(A0, Address(SP, 4 * kWordSize)); // Push the source object.
725 __ LoadObject(TMP, dst_type); 692 __ LoadObject(TMP, dst_type);
726 __ sw(TMP, Address(SP, 3 * kWordSize)); // Push the type of the destination. 693 __ sw(TMP, Address(SP, 3 * kWordSize)); // Push the type of the destination.
727 __ sw(A1, Address(SP, 2 * kWordSize)); // Push type arguments. 694 __ sw(A1, Address(SP, 2 * kWordSize)); // Push type arguments.
728 __ LoadObject(TMP, dst_name); 695 __ LoadObject(TMP, dst_name);
729 __ sw(TMP, Address(SP, 1 * kWordSize)); // Push the name of the destination. 696 __ sw(TMP, Address(SP, 1 * kWordSize)); // Push the name of the destination.
730 __ LoadUniqueObject(T0, test_cache); 697 __ LoadUniqueObject(T0, test_cache);
731 __ sw(T0, Address(SP, 0 * kWordSize)); 698 __ sw(T0, Address(SP, 0 * kWordSize));
732 699
733 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 5, locs); 700 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 5, locs);
734 // Pop the parameters supplied to the runtime entry. The result of the 701 // Pop the parameters supplied to the runtime entry. The result of the
735 // type check runtime call is the checked value. 702 // type check runtime call is the checked value.
736 __ lw(A0, Address(SP, 5 * kWordSize)); 703 __ lw(A0, Address(SP, 5 * kWordSize));
737 __ addiu(SP, SP, Immediate(6 * kWordSize)); 704 __ addiu(SP, SP, Immediate(6 * kWordSize));
(...skipping 70 matching lines...)
808 __ lw(T3, Address(T4, -kWordSize)); 775 __ lw(T3, Address(T4, -kWordSize));
809 __ addiu(T2, T2, Immediate(-kWordSize)); 776 __ addiu(T2, T2, Immediate(-kWordSize));
810 __ addu(T5, T0, T2); 777 __ addu(T5, T0, T2);
811 __ bgtz(T2, &loop); 778 __ bgtz(T2, &loop);
812 __ delay_slot()->sw(T3, Address(T5)); 779 __ delay_slot()->sw(T3, Address(T5));
813 __ Bind(&loop_exit); 780 __ Bind(&loop_exit);
814 781
815 // Copy or initialize optional named arguments. 782 // Copy or initialize optional named arguments.
816 Label all_arguments_processed; 783 Label all_arguments_processed;
817 #ifdef DEBUG 784 #ifdef DEBUG
818 const bool check_correct_named_args = true; 785 const bool check_correct_named_args = true;
819 #else 786 #else
820 const bool check_correct_named_args = function.IsClosureFunction(); 787 const bool check_correct_named_args = function.IsClosureFunction();
821 #endif 788 #endif
822 if (num_opt_named_params > 0) { 789 if (num_opt_named_params > 0) {
823 __ Comment("There are named parameters"); 790 __ Comment("There are named parameters");
824 // Start by alphabetically sorting the names of the optional parameters. 791 // Start by alphabetically sorting the names of the optional parameters.
825 LocalVariable** opt_param = new LocalVariable*[num_opt_named_params]; 792 LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
826 int* opt_param_position = new int[num_opt_named_params]; 793 int* opt_param_position = new int[num_opt_named_params];
827 for (int pos = num_fixed_params; pos < num_params; pos++) { 794 for (int pos = num_fixed_params; pos < num_params; pos++) {
828 LocalVariable* parameter = scope->VariableAt(pos); 795 LocalVariable* parameter = scope->VariableAt(pos);
829 const String& opt_param_name = parameter->name(); 796 const String& opt_param_name = parameter->name();
830 int i = pos - num_fixed_params; 797 int i = pos - num_fixed_params;
(...skipping 11 matching lines...)
842 // Generate code handling each optional parameter in alphabetical order. 809 // Generate code handling each optional parameter in alphabetical order.
843 __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); 810 __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
844 __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::positional_count_offset())); 811 __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
845 __ SmiUntag(T2); 812 __ SmiUntag(T2);
846 // Let T1 point to the first passed argument, i.e. to 813 // Let T1 point to the first passed argument, i.e. to
847 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (T1) is Smi. 814 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (T1) is Smi.
848 __ sll(T3, T1, 1); 815 __ sll(T3, T1, 1);
849 __ addu(T1, FP, T3); 816 __ addu(T1, FP, T3);
850 __ AddImmediate(T1, kParamEndSlotFromFp * kWordSize); 817 __ AddImmediate(T1, kParamEndSlotFromFp * kWordSize);
851 // Let T0 point to the entry of the first named argument. 818 // Let T0 point to the entry of the first named argument.
852 __ AddImmediate(T0, S4, 819 __ AddImmediate(T0, S4, ArgumentsDescriptor::first_named_entry_offset() -
853 ArgumentsDescriptor::first_named_entry_offset() - kHeapObjectTag); 820 kHeapObjectTag);
854 for (int i = 0; i < num_opt_named_params; i++) { 821 for (int i = 0; i < num_opt_named_params; i++) {
855 Label load_default_value, assign_optional_parameter; 822 Label load_default_value, assign_optional_parameter;
856 const int param_pos = opt_param_position[i]; 823 const int param_pos = opt_param_position[i];
857 // Check if this named parameter was passed in. 824 // Check if this named parameter was passed in.
858 // Load T3 with the name of the argument. 825 // Load T3 with the name of the argument.
859 __ lw(T3, Address(T0, ArgumentsDescriptor::name_offset())); 826 __ lw(T3, Address(T0, ArgumentsDescriptor::name_offset()));
860 ASSERT(opt_param[i]->name().IsSymbol()); 827 ASSERT(opt_param[i]->name().IsSymbol());
861 __ BranchNotEqual(T3, opt_param[i]->name(), &load_default_value); 828 __ BranchNotEqual(T3, opt_param[i]->name(), &load_default_value);
862 829
863 // Load T3 with passed-in argument at provided arg_pos, i.e. at 830 // Load T3 with passed-in argument at provided arg_pos, i.e. at
(...skipping 25 matching lines...)
889 delete[] opt_param_position; 856 delete[] opt_param_position;
890 if (check_correct_named_args) { 857 if (check_correct_named_args) {
891 // Check that T0 now points to the null terminator in the arguments 858 // Check that T0 now points to the null terminator in the arguments
892 // descriptor. 859 // descriptor.
893 __ lw(T3, Address(T0)); 860 __ lw(T3, Address(T0));
894 __ BranchEqual(T3, Object::null_object(), &all_arguments_processed); 861 __ BranchEqual(T3, Object::null_object(), &all_arguments_processed);
895 } 862 }
896 } else { 863 } else {
897 ASSERT(num_opt_pos_params > 0); 864 ASSERT(num_opt_pos_params > 0);
898 __ Comment("There are optional positional parameters"); 865 __ Comment("There are optional positional parameters");
899 __ lw(T2, 866 __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
900 FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
901 __ SmiUntag(T2); 867 __ SmiUntag(T2);
902 for (int i = 0; i < num_opt_pos_params; i++) { 868 for (int i = 0; i < num_opt_pos_params; i++) {
903 Label next_parameter; 869 Label next_parameter;
904 // Handle this optional positional parameter only if k or fewer positional 870 // Handle this optional positional parameter only if k or fewer positional
905 // arguments have been passed, where k is param_pos, the position of this 871 // arguments have been passed, where k is param_pos, the position of this
906 // optional parameter in the formal parameter list. 872 // optional parameter in the formal parameter list.
907 const int param_pos = num_fixed_params + i; 873 const int param_pos = num_fixed_params + i;
908 __ BranchSignedGreater(T2, Immediate(param_pos), &next_parameter); 874 __ BranchSignedGreater(T2, Immediate(param_pos), &next_parameter);
909 // Load T3 with default argument. 875 // Load T3 with default argument.
910 const Object& value = parsed_function().DefaultParameterValueAt(i); 876 const Object& value = parsed_function().DefaultParameterValueAt(i);
(...skipping 72 matching lines...)
983 __ LoadObject(V0, Object::null_object()); 949 __ LoadObject(V0, Object::null_object());
984 __ Ret(); 950 __ Ret();
985 } 951 }
986 952
987 953
988 static const Register new_pp = T7; 954 static const Register new_pp = T7;
989 955
990 956
991 void FlowGraphCompiler::EmitFrameEntry() { 957 void FlowGraphCompiler::EmitFrameEntry() {
992 const Function& function = parsed_function().function(); 958 const Function& function = parsed_function().function();
993 if (CanOptimizeFunction() && 959 if (CanOptimizeFunction() && function.IsOptimizable() &&
994 function.IsOptimizable() &&
995 (!is_optimizing() || may_reoptimize())) { 960 (!is_optimizing() || may_reoptimize())) {
996 __ Comment("Invocation Count Check"); 961 __ Comment("Invocation Count Check");
997 const Register function_reg = T0; 962 const Register function_reg = T0;
998 963
999 // Temporarily setup pool pointer for this dart function. 964 // Temporarily setup pool pointer for this dart function.
1000 __ LoadPoolPointer(new_pp); 965 __ LoadPoolPointer(new_pp);
1001 // Load function object from object pool. 966 // Load function object from object pool.
1002 __ LoadFunctionFromCalleePool(function_reg, function, new_pp); 967 __ LoadFunctionFromCalleePool(function_reg, function, new_pp);
1003 968
1004 __ lw(T1, FieldAddress(function_reg, Function::usage_counter_offset())); 969 __ lw(T1, FieldAddress(function_reg, Function::usage_counter_offset()));
1005 // Reoptimization of an optimized function is triggered by counting in 970 // Reoptimization of an optimized function is triggered by counting in
1006 // IC stubs, but not at the entry of the function. 971 // IC stubs, but not at the entry of the function.
1007 if (!is_optimizing()) { 972 if (!is_optimizing()) {
1008 __ addiu(T1, T1, Immediate(1)); 973 __ addiu(T1, T1, Immediate(1));
1009 __ sw(T1, FieldAddress(function_reg, Function::usage_counter_offset())); 974 __ sw(T1, FieldAddress(function_reg, Function::usage_counter_offset()));
1010 } 975 }
1011 976
1012 // Skip Branch if T1 is less than the threshold. 977 // Skip Branch if T1 is less than the threshold.
1013 Label dont_branch; 978 Label dont_branch;
1014 __ BranchSignedLess( 979 __ BranchSignedLess(T1, Immediate(GetOptimizationThreshold()),
1015 T1, Immediate(GetOptimizationThreshold()), &dont_branch); 980 &dont_branch);
1016 981
1017 ASSERT(function_reg == T0); 982 ASSERT(function_reg == T0);
1018 __ Branch(*StubCode::OptimizeFunction_entry(), new_pp); 983 __ Branch(*StubCode::OptimizeFunction_entry(), new_pp);
1019 984
1020 __ Bind(&dont_branch); 985 __ Bind(&dont_branch);
1021 } 986 }
1022 __ Comment("Enter frame"); 987 __ Comment("Enter frame");
1023 if (flow_graph().IsCompiledForOsr()) { 988 if (flow_graph().IsCompiledForOsr()) {
1024 intptr_t extra_slots = StackSize() 989 intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
1025 - flow_graph().num_stack_locals() 990 flow_graph().num_copied_params();
1026 - flow_graph().num_copied_params();
1027 ASSERT(extra_slots >= 0); 991 ASSERT(extra_slots >= 0);
1028 __ EnterOsrFrame(extra_slots * kWordSize); 992 __ EnterOsrFrame(extra_slots * kWordSize);
1029 } else { 993 } else {
1030 ASSERT(StackSize() >= 0); 994 ASSERT(StackSize() >= 0);
1031 __ EnterDartFrame(StackSize() * kWordSize); 995 __ EnterDartFrame(StackSize() * kWordSize);
1032 } 996 }
1033 } 997 }
1034 998
1035 999
1036 // Input parameters: 1000 // Input parameters:
(...skipping 33 matching lines...)
1070 const bool check_arguments = 1034 const bool check_arguments =
1071 function.IsClosureFunction() && !flow_graph().IsCompiledForOsr(); 1035 function.IsClosureFunction() && !flow_graph().IsCompiledForOsr();
1072 if (check_arguments) { 1036 if (check_arguments) {
1073 __ Comment("Check argument count"); 1037 __ Comment("Check argument count");
1074 // Check that exactly num_fixed arguments are passed in. 1038 // Check that exactly num_fixed arguments are passed in.
1075 Label correct_num_arguments, wrong_num_arguments; 1039 Label correct_num_arguments, wrong_num_arguments;
1076 __ lw(T0, FieldAddress(S4, ArgumentsDescriptor::count_offset())); 1040 __ lw(T0, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
1077 __ BranchNotEqual(T0, Immediate(Smi::RawValue(num_fixed_params)), 1041 __ BranchNotEqual(T0, Immediate(Smi::RawValue(num_fixed_params)),
1078 &wrong_num_arguments); 1042 &wrong_num_arguments);
1079 1043
1080 __ lw(T1, FieldAddress(S4, 1044 __ lw(T1,
1081 ArgumentsDescriptor::positional_count_offset())); 1045 FieldAddress(S4, ArgumentsDescriptor::positional_count_offset()));
1082 __ beq(T0, T1, &correct_num_arguments); 1046 __ beq(T0, T1, &correct_num_arguments);
1083 __ Bind(&wrong_num_arguments); 1047 __ Bind(&wrong_num_arguments);
1084 __ LeaveDartFrame(kKeepCalleePP); // Arguments are still on the stack. 1048 __ LeaveDartFrame(kKeepCalleePP); // Arguments are still on the stack.
1085 __ Branch(*StubCode::CallClosureNoSuchMethod_entry()); 1049 __ Branch(*StubCode::CallClosureNoSuchMethod_entry());
1086 // The noSuchMethod call may return to the caller, but not here. 1050 // The noSuchMethod call may return to the caller, but not here.
1087 __ Bind(&correct_num_arguments); 1051 __ Bind(&correct_num_arguments);
1088 } 1052 }
1089 } else if (!flow_graph().IsCompiledForOsr()) { 1053 } else if (!flow_graph().IsCompiledForOsr()) {
1090 CopyParameters(); 1054 CopyParameters();
1091 } 1055 }
(...skipping 62 matching lines...)
1154 AddCurrentDescriptor(kind, deopt_id, token_pos); 1118 AddCurrentDescriptor(kind, deopt_id, token_pos);
1155 RecordSafepoint(locs); 1119 RecordSafepoint(locs);
1156 // Marks either the continuation point in unoptimized code or the 1120 // Marks either the continuation point in unoptimized code or the
1157 // deoptimization point in optimized code, after call. 1121 // deoptimization point in optimized code, after call.
1158 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1122 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1159 if (is_optimizing()) { 1123 if (is_optimizing()) {
1160 AddDeoptIndexAtCall(deopt_id_after); 1124 AddDeoptIndexAtCall(deopt_id_after);
1161 } else { 1125 } else {
1162 // Add deoptimization continuation point after the call and before the 1126 // Add deoptimization continuation point after the call and before the
1163 // arguments are removed. 1127 // arguments are removed.
1164 AddCurrentDescriptor(RawPcDescriptors::kDeopt, 1128 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1165 deopt_id_after,
1166 token_pos);
1167 } 1129 }
1168 } 1130 }
1169 1131
1170 1132
1171 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id, 1133 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
1172 TokenPosition token_pos, 1134 TokenPosition token_pos,
1173 const StubEntry& stub_entry, 1135 const StubEntry& stub_entry,
1174 RawPcDescriptors::Kind kind, 1136 RawPcDescriptors::Kind kind,
1175 LocationSummary* locs, 1137 LocationSummary* locs,
1176 const Function& target) { 1138 const Function& target) {
1177 // Call sites to the same target can share object pool entries. These 1139 // Call sites to the same target can share object pool entries. These
1178 // call sites are never patched for breakpoints: the function is deoptimized 1140 // call sites are never patched for breakpoints: the function is deoptimized
1179 // and the unoptimized code with IC calls for static calls is patched instead. 1141 // and the unoptimized code with IC calls for static calls is patched instead.
1180 ASSERT(is_optimizing()); 1142 ASSERT(is_optimizing());
1181 __ BranchLinkWithEquivalence(stub_entry, target); 1143 __ BranchLinkWithEquivalence(stub_entry, target);
1182 1144
1183 AddCurrentDescriptor(kind, deopt_id, token_pos); 1145 AddCurrentDescriptor(kind, deopt_id, token_pos);
1184 RecordSafepoint(locs); 1146 RecordSafepoint(locs);
1185 // Marks either the continuation point in unoptimized code or the 1147 // Marks either the continuation point in unoptimized code or the
1186 // deoptimization point in optimized code, after call. 1148 // deoptimization point in optimized code, after call.
1187 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1149 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1188 if (is_optimizing()) { 1150 if (is_optimizing()) {
1189 AddDeoptIndexAtCall(deopt_id_after); 1151 AddDeoptIndexAtCall(deopt_id_after);
1190 } else { 1152 } else {
1191 // Add deoptimization continuation point after the call and before the 1153 // Add deoptimization continuation point after the call and before the
1192 // arguments are removed. 1154 // arguments are removed.
1193 AddCurrentDescriptor(RawPcDescriptors::kDeopt, 1155 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1194 deopt_id_after,
1195 token_pos);
1196 } 1156 }
1197 AddStaticCallTarget(target); 1157 AddStaticCallTarget(target);
1198 } 1158 }
1199 1159
1200 1160
1201 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos, 1161 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
1202 intptr_t deopt_id, 1162 intptr_t deopt_id,
1203 const RuntimeEntry& entry, 1163 const RuntimeEntry& entry,
1204 intptr_t argument_count, 1164 intptr_t argument_count,
1205 LocationSummary* locs) { 1165 LocationSummary* locs) {
1206 __ CallRuntime(entry, argument_count); 1166 __ CallRuntime(entry, argument_count);
1207 AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id, token_pos); 1167 AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id, token_pos);
1208 RecordSafepoint(locs); 1168 RecordSafepoint(locs);
1209 if (deopt_id != Thread::kNoDeoptId) { 1169 if (deopt_id != Thread::kNoDeoptId) {
1210 // Marks either the continuation point in unoptimized code or the 1170 // Marks either the continuation point in unoptimized code or the
1211 // deoptimization point in optimized code, after call. 1171 // deoptimization point in optimized code, after call.
1212 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1172 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1213 if (is_optimizing()) { 1173 if (is_optimizing()) {
1214 AddDeoptIndexAtCall(deopt_id_after); 1174 AddDeoptIndexAtCall(deopt_id_after);
1215 } else { 1175 } else {
1216 // Add deoptimization continuation point after the call and before the 1176 // Add deoptimization continuation point after the call and before the
1217 // arguments are removed. 1177 // arguments are removed.
1218 AddCurrentDescriptor(RawPcDescriptors::kDeopt, 1178 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1219 deopt_id_after,
1220 token_pos);
1221 } 1179 }
1222 } 1180 }
1223 } 1181 }
1224 1182
1225 1183
1226 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) { 1184 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
1227 // We do not check for overflow when incrementing the edge counter. The 1185 // We do not check for overflow when incrementing the edge counter. The
1228 // function should normally be optimized long before the counter can 1186 // function should normally be optimized long before the counter can
1229 // overflow; and though we do not reset the counters when we optimize or 1187 // overflow; and though we do not reset the counters when we optimize or
1230 // deoptimize, there is a bound on the number of 1188 // deoptimize, there is a bound on the number of
1231 // optimization/deoptimization cycles we will attempt. 1189 // optimization/deoptimization cycles we will attempt.
1232 ASSERT(!edge_counters_array_.IsNull()); 1190 ASSERT(!edge_counters_array_.IsNull());
1233 __ Comment("Edge counter"); 1191 __ Comment("Edge counter");
1234 __ LoadObject(T0, edge_counters_array_); 1192 __ LoadObject(T0, edge_counters_array_);
1235 __ LoadFieldFromOffset(T1, T0, Array::element_offset(edge_id)); 1193 __ LoadFieldFromOffset(T1, T0, Array::element_offset(edge_id));
1236 __ AddImmediate(T1, T1, Smi::RawValue(1)); 1194 __ AddImmediate(T1, T1, Smi::RawValue(1));
1237 __ StoreFieldToOffset(T1, T0, Array::element_offset(edge_id)); 1195 __ StoreFieldToOffset(T1, T0, Array::element_offset(edge_id));
1238 } 1196 }
1239 1197
1240 1198
1241 void FlowGraphCompiler::EmitOptimizedInstanceCall( 1199 void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
1242 const StubEntry& stub_entry, 1200 const ICData& ic_data,
1243 const ICData& ic_data, 1201 intptr_t argument_count,
1244 intptr_t argument_count, 1202 intptr_t deopt_id,
1245 intptr_t deopt_id, 1203 TokenPosition token_pos,
1246 TokenPosition token_pos, 1204 LocationSummary* locs) {
1247 LocationSummary* locs) {
1248 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); 1205 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
1249 // Each ICData propagated from unoptimized to optimized code contains the 1206 // Each ICData propagated from unoptimized to optimized code contains the
1250 // function that corresponds to the Dart function of that IC call. Due 1207 // function that corresponds to the Dart function of that IC call. Due
1251 // to inlining in optimized code, that function may not correspond to the 1208 // to inlining in optimized code, that function may not correspond to the
1252 // top-level function (parsed_function().function()) which could be 1209 // top-level function (parsed_function().function()) which could be
1253 // reoptimized and which counter needs to be incremented. 1210 // reoptimized and which counter needs to be incremented.
1254 // Pass the function explicitly, it is used in IC stub. 1211 // Pass the function explicitly, it is used in IC stub.
1255 __ Comment("OptimizedInstanceCall"); 1212 __ Comment("OptimizedInstanceCall");
1256 __ LoadObject(T0, parsed_function().function()); 1213 __ LoadObject(T0, parsed_function().function());
1257 __ LoadUniqueObject(S5, ic_data); 1214 __ LoadUniqueObject(S5, ic_data);
1258 GenerateDartCall(deopt_id, 1215 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
1259 token_pos,
1260 stub_entry,
1261 RawPcDescriptors::kIcCall,
1262 locs); 1216 locs);
1263 __ Drop(argument_count); 1217 __ Drop(argument_count);
1264 } 1218 }
1265 1219
1266 1220
1267 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry, 1221 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
1268 const ICData& ic_data, 1222 const ICData& ic_data,
1269 intptr_t argument_count, 1223 intptr_t argument_count,
1270 intptr_t deopt_id, 1224 intptr_t deopt_id,
1271 TokenPosition token_pos, 1225 TokenPosition token_pos,
1272 LocationSummary* locs) { 1226 LocationSummary* locs) {
1273 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); 1227 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
1274 __ Comment("InstanceCall"); 1228 __ Comment("InstanceCall");
1275 __ LoadUniqueObject(S5, ic_data); 1229 __ LoadUniqueObject(S5, ic_data);
1276 GenerateDartCall(deopt_id, 1230 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
1277 token_pos,
1278 stub_entry,
1279 RawPcDescriptors::kIcCall,
1280 locs); 1231 locs);
1281 __ Comment("InstanceCall return"); 1232 __ Comment("InstanceCall return");
1282 __ Drop(argument_count); 1233 __ Drop(argument_count);
1283 } 1234 }
1284 1235
1285 1236
1286 void FlowGraphCompiler::EmitMegamorphicInstanceCall( 1237 void FlowGraphCompiler::EmitMegamorphicInstanceCall(
1287 const ICData& ic_data, 1238 const ICData& ic_data,
1288 intptr_t argument_count, 1239 intptr_t argument_count,
1289 intptr_t deopt_id, 1240 intptr_t deopt_id,
1290 TokenPosition token_pos, 1241 TokenPosition token_pos,
1291 LocationSummary* locs, 1242 LocationSummary* locs,
1292 intptr_t try_index, 1243 intptr_t try_index,
1293 intptr_t slow_path_argument_count) { 1244 intptr_t slow_path_argument_count) {
1294 const String& name = String::Handle(zone(), ic_data.target_name()); 1245 const String& name = String::Handle(zone(), ic_data.target_name());
1295 const Array& arguments_descriptor = 1246 const Array& arguments_descriptor =
1296 Array::ZoneHandle(zone(), ic_data.arguments_descriptor()); 1247 Array::ZoneHandle(zone(), ic_data.arguments_descriptor());
1297 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); 1248 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
1298 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(zone(), 1249 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
1250 zone(),
1299 MegamorphicCacheTable::Lookup(isolate(), name, arguments_descriptor)); 1251 MegamorphicCacheTable::Lookup(isolate(), name, arguments_descriptor));
1300 1252
1301 __ Comment("MegamorphicCall"); 1253 __ Comment("MegamorphicCall");
1302 // Load receiver into T0, 1254 // Load receiver into T0,
1303 __ lw(T0, Address(SP, (argument_count - 1) * kWordSize)); 1255 __ lw(T0, Address(SP, (argument_count - 1) * kWordSize));
1304 Label done; 1256 Label done;
1305 if (ShouldInlineSmiStringHashCode(ic_data)) { 1257 if (ShouldInlineSmiStringHashCode(ic_data)) {
1306 Label megamorphic_call; 1258 Label megamorphic_call;
1307 __ Comment("Inlined get:hashCode for Smi and OneByteString"); 1259 __ Comment("Inlined get:hashCode for Smi and OneByteString");
1308 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); 1260 __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
1309 __ beq(CMPRES1, ZR, &done); // Is Smi. 1261 __ beq(CMPRES1, ZR, &done); // Is Smi.
1310 __ delay_slot()->mov(V0, T0); // Move Smi hashcode to V0. 1262 __ delay_slot()->mov(V0, T0); // Move Smi hashcode to V0.
1311 1263
1312 __ LoadClassId(CMPRES1, T0); // Class ID check. 1264 __ LoadClassId(CMPRES1, T0); // Class ID check.
1313 __ BranchNotEqual( 1265 __ BranchNotEqual(CMPRES1, Immediate(kOneByteStringCid), &megamorphic_call);
1314 CMPRES1, Immediate(kOneByteStringCid), &megamorphic_call);
1315 1266
1316 __ lw(V0, FieldAddress(T0, String::hash_offset())); 1267 __ lw(V0, FieldAddress(T0, String::hash_offset()));
1317 __ bne(V0, ZR, &done); 1268 __ bne(V0, ZR, &done);
1318 1269
1319 __ Bind(&megamorphic_call); 1270 __ Bind(&megamorphic_call);
1320 __ Comment("Slow case: megamorphic call"); 1271 __ Comment("Slow case: megamorphic call");
1321 } 1272 }
1322 __ LoadObject(S5, cache); 1273 __ LoadObject(S5, cache);
1323 __ lw(T9, Address(THR, Thread::megamorphic_call_checked_entry_offset())); 1274 __ lw(T9, Address(THR, Thread::megamorphic_call_checked_entry_offset()));
1324 __ jalr(T9); 1275 __ jalr(T9);
1325 1276
1326 __ Bind(&done); 1277 __ Bind(&done);
1327 RecordSafepoint(locs, slow_path_argument_count); 1278 RecordSafepoint(locs, slow_path_argument_count);
1328 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1279 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1329 if (FLAG_precompiled_mode) { 1280 if (FLAG_precompiled_mode) {
1330 // Megamorphic calls may occur in slow path stubs. 1281 // Megamorphic calls may occur in slow path stubs.
1331 // If valid use try_index argument. 1282 // If valid use try_index argument.
1332 if (try_index == CatchClauseNode::kInvalidTryIndex) { 1283 if (try_index == CatchClauseNode::kInvalidTryIndex) {
1333 try_index = CurrentTryIndex(); 1284 try_index = CurrentTryIndex();
1334 } 1285 }
1335 pc_descriptors_list()->AddDescriptor(RawPcDescriptors::kOther, 1286 pc_descriptors_list()->AddDescriptor(
1336 assembler()->CodeSize(), 1287 RawPcDescriptors::kOther, assembler()->CodeSize(), Thread::kNoDeoptId,
1337 Thread::kNoDeoptId, 1288 token_pos, try_index);
1338 token_pos,
1339 try_index);
1340 } else if (is_optimizing()) { 1289 } else if (is_optimizing()) {
1341 AddCurrentDescriptor(RawPcDescriptors::kOther, 1290 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
1342 Thread::kNoDeoptId, token_pos); 1291 token_pos);
1343 AddDeoptIndexAtCall(deopt_id_after); 1292 AddDeoptIndexAtCall(deopt_id_after);
1344 } else { 1293 } else {
1345 AddCurrentDescriptor(RawPcDescriptors::kOther, 1294 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
1346 Thread::kNoDeoptId, token_pos); 1295 token_pos);
1347 // Add deoptimization continuation point after the call and before the 1296 // Add deoptimization continuation point after the call and before the
1348 // arguments are removed. 1297 // arguments are removed.
1349 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); 1298 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1350 } 1299 }
1351 __ Drop(argument_count); 1300 __ Drop(argument_count);
1352 } 1301 }
1353 1302
1354 1303
1355 void FlowGraphCompiler::EmitSwitchableInstanceCall( 1304 void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
1356 const ICData& ic_data, 1305 intptr_t argument_count,
1357 intptr_t argument_count, 1306 intptr_t deopt_id,
1358 intptr_t deopt_id, 1307 TokenPosition token_pos,
1359 TokenPosition token_pos, 1308 LocationSummary* locs) {
1360 LocationSummary* locs) {
1361 ASSERT(ic_data.NumArgsTested() == 1); 1309 ASSERT(ic_data.NumArgsTested() == 1);
1362 const Code& initial_stub = Code::ZoneHandle( 1310 const Code& initial_stub =
1363 StubCode::ICCallThroughFunction_entry()->code()); 1311 Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code());
1364 1312
1365 __ Comment("SwitchableCall"); 1313 __ Comment("SwitchableCall");
1366 __ lw(T0, Address(SP, (argument_count - 1) * kWordSize)); 1314 __ lw(T0, Address(SP, (argument_count - 1) * kWordSize));
1367 __ LoadUniqueObject(CODE_REG, initial_stub); 1315 __ LoadUniqueObject(CODE_REG, initial_stub);
1368 __ lw(T9, FieldAddress(CODE_REG, Code::checked_entry_point_offset())); 1316 __ lw(T9, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
1369 __ LoadUniqueObject(S5, ic_data); 1317 __ LoadUniqueObject(S5, ic_data);
1370 __ jalr(T9); 1318 __ jalr(T9);
1371 1319
1372 AddCurrentDescriptor(RawPcDescriptors::kOther, 1320 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId, token_pos);
1373 Thread::kNoDeoptId, token_pos);
1374 RecordSafepoint(locs); 1321 RecordSafepoint(locs);
1375 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1322 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1376 if (is_optimizing()) { 1323 if (is_optimizing()) {
1377 AddDeoptIndexAtCall(deopt_id_after); 1324 AddDeoptIndexAtCall(deopt_id_after);
1378 } else { 1325 } else {
1379 // Add deoptimization continuation point after the call and before the 1326 // Add deoptimization continuation point after the call and before the
1380 // arguments are removed. 1327 // arguments are removed.
1381 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); 1328 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1382 } 1329 }
1383 __ Drop(argument_count); 1330 __ Drop(argument_count);
1384 } 1331 }
1385 1332
1386 1333
1387 void FlowGraphCompiler::EmitUnoptimizedStaticCall( 1334 void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
1388 intptr_t argument_count, 1335 intptr_t deopt_id,
1389 intptr_t deopt_id, 1336 TokenPosition token_pos,
1390 TokenPosition token_pos, 1337 LocationSummary* locs,
1391 LocationSummary* locs, 1338 const ICData& ic_data) {
1392 const ICData& ic_data) {
1393 const StubEntry* stub_entry = 1339 const StubEntry* stub_entry =
1394 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested()); 1340 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
1395 __ LoadObject(S5, ic_data); 1341 __ LoadObject(S5, ic_data);
1396 GenerateDartCall(deopt_id, 1342 GenerateDartCall(deopt_id, token_pos, *stub_entry,
1397 token_pos, 1343 RawPcDescriptors::kUnoptStaticCall, locs);
1398 *stub_entry,
1399 RawPcDescriptors::kUnoptStaticCall,
1400 locs);
1401 __ Drop(argument_count); 1344 __ Drop(argument_count);
1402 } 1345 }
1403 1346
1404 1347
1405 void FlowGraphCompiler::EmitOptimizedStaticCall( 1348 void FlowGraphCompiler::EmitOptimizedStaticCall(
1406 const Function& function, 1349 const Function& function,
1407 const Array& arguments_descriptor, 1350 const Array& arguments_descriptor,
1408 intptr_t argument_count, 1351 intptr_t argument_count,
1409 intptr_t deopt_id, 1352 intptr_t deopt_id,
1410 TokenPosition token_pos, 1353 TokenPosition token_pos,
1411 LocationSummary* locs) { 1354 LocationSummary* locs) {
1412 __ Comment("StaticCall"); 1355 __ Comment("StaticCall");
1413 ASSERT(!function.IsClosureFunction()); 1356 ASSERT(!function.IsClosureFunction());
1414 if (function.HasOptionalParameters()) { 1357 if (function.HasOptionalParameters()) {
1415 __ LoadObject(S4, arguments_descriptor); 1358 __ LoadObject(S4, arguments_descriptor);
1416 } else { 1359 } else {
1417 __ LoadImmediate(S4, 0); // GC safe smi zero because of stub. 1360 __ LoadImmediate(S4, 0); // GC safe smi zero because of stub.
1418 } 1361 }
1419 // Do not use the code from the function, but let the code be patched so that 1362 // Do not use the code from the function, but let the code be patched so that
1420 // we can record the outgoing edges to other code. 1363 // we can record the outgoing edges to other code.
1421 GenerateStaticDartCall(deopt_id, 1364 GenerateStaticDartCall(deopt_id, token_pos,
1422 token_pos,
1423 *StubCode::CallStaticFunction_entry(), 1365 *StubCode::CallStaticFunction_entry(),
1424 RawPcDescriptors::kOther, 1366 RawPcDescriptors::kOther, locs, function);
1425 locs,
1426 function);
1427 __ Drop(argument_count); 1367 __ Drop(argument_count);
1428 } 1368 }
1429 1369
1430 1370
1431 Condition FlowGraphCompiler::EmitEqualityRegConstCompare( 1371 Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
1432 Register reg, 1372 Register reg,
1433 const Object& obj, 1373 const Object& obj,
1434 bool needs_number_check, 1374 bool needs_number_check,
1435 TokenPosition token_pos) { 1375 TokenPosition token_pos) {
1436 __ Comment("EqualityRegConstCompare"); 1376 __ Comment("EqualityRegConstCompare");
1437 ASSERT(!needs_number_check || 1377 ASSERT(!needs_number_check ||
1438 (!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint())); 1378 (!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()));
1439 if (needs_number_check) { 1379 if (needs_number_check) {
1440 ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()); 1380 ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint());
1441 __ addiu(SP, SP, Immediate(-2 * kWordSize)); 1381 __ addiu(SP, SP, Immediate(-2 * kWordSize));
1442 __ sw(reg, Address(SP, 1 * kWordSize)); 1382 __ sw(reg, Address(SP, 1 * kWordSize));
1443 __ LoadObject(TMP, obj); 1383 __ LoadObject(TMP, obj);
1444 __ sw(TMP, Address(SP, 0 * kWordSize)); 1384 __ sw(TMP, Address(SP, 0 * kWordSize));
1445 if (is_optimizing()) { 1385 if (is_optimizing()) {
1446 __ BranchLinkPatchable( 1386 __ BranchLinkPatchable(
1447 *StubCode::OptimizedIdenticalWithNumberCheck_entry()); 1387 *StubCode::OptimizedIdenticalWithNumberCheck_entry());
1448 } else { 1388 } else {
1449 __ BranchLinkPatchable( 1389 __ BranchLinkPatchable(
1450 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); 1390 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
1451 } 1391 }
1452 if (token_pos.IsReal()) { 1392 if (token_pos.IsReal()) {
1453 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, 1393 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId,
1454 Thread::kNoDeoptId,
1455 token_pos); 1394 token_pos);
1456 } 1395 }
1457 __ Comment("EqualityRegConstCompare return"); 1396 __ Comment("EqualityRegConstCompare return");
1458 // Stub returns result in CMPRES1 (if it is 0, then reg and obj are equal). 1397 // Stub returns result in CMPRES1 (if it is 0, then reg and obj are equal).
1459 __ lw(reg, Address(SP, 1 * kWordSize)); // Restore 'reg'. 1398 __ lw(reg, Address(SP, 1 * kWordSize)); // Restore 'reg'.
1460 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Discard constant. 1399 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Discard constant.
1461 return Condition(CMPRES1, ZR, EQ); 1400 return Condition(CMPRES1, ZR, EQ);
1462 } else { 1401 } else {
1463 int16_t imm = 0; 1402 int16_t imm = 0;
1464 const Register obj_reg = __ LoadConditionOperand(CMPRES1, obj, &imm); 1403 const Register obj_reg = __ LoadConditionOperand(CMPRES1, obj, &imm);
1465 return Condition(reg, obj_reg, EQ, imm); 1404 return Condition(reg, obj_reg, EQ, imm);
1466 } 1405 }
1467 } 1406 }
1468 1407
1469 1408
1470 Condition FlowGraphCompiler::EmitEqualityRegRegCompare( 1409 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
1471 Register left, 1410 Register left,
1472 Register right, 1411 Register right,
1473 bool needs_number_check, 1412 bool needs_number_check,
1474 TokenPosition token_pos) { 1413 TokenPosition token_pos) {
1475 __ Comment("EqualityRegRegCompare"); 1414 __ Comment("EqualityRegRegCompare");
1476 if (needs_number_check) { 1415 if (needs_number_check) {
1477 __ addiu(SP, SP, Immediate(-2 * kWordSize)); 1416 __ addiu(SP, SP, Immediate(-2 * kWordSize));
1478 __ sw(left, Address(SP, 1 * kWordSize)); 1417 __ sw(left, Address(SP, 1 * kWordSize));
1479 __ sw(right, Address(SP, 0 * kWordSize)); 1418 __ sw(right, Address(SP, 0 * kWordSize));
1480 if (is_optimizing()) { 1419 if (is_optimizing()) {
1481 __ BranchLinkPatchable( 1420 __ BranchLinkPatchable(
1482 *StubCode::OptimizedIdenticalWithNumberCheck_entry()); 1421 *StubCode::OptimizedIdenticalWithNumberCheck_entry());
1483 } else { 1422 } else {
1484 __ BranchLinkPatchable( 1423 __ BranchLinkPatchable(
1485 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); 1424 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
1486 } 1425 }
1487 if (token_pos.IsReal()) { 1426 if (token_pos.IsReal()) {
1488 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, 1427 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId,
1489 Thread::kNoDeoptId,
1490 token_pos); 1428 token_pos);
1491 } 1429 }
1492 __ Comment("EqualityRegRegCompare return"); 1430 __ Comment("EqualityRegRegCompare return");
1493 // Stub returns result in CMPRES1 (if it is 0, then left and right are 1431 // Stub returns result in CMPRES1 (if it is 0, then left and right are
1494 // equal). 1432 // equal).
1495 __ lw(right, Address(SP, 0 * kWordSize)); 1433 __ lw(right, Address(SP, 0 * kWordSize));
1496 __ lw(left, Address(SP, 1 * kWordSize)); 1434 __ lw(left, Address(SP, 1 * kWordSize));
1497 __ addiu(SP, SP, Immediate(2 * kWordSize)); 1435 __ addiu(SP, SP, Immediate(2 * kWordSize));
1498 return Condition(CMPRES1, ZR, EQ); 1436 return Condition(CMPRES1, ZR, EQ);
1499 } else { 1437 } else {
(...skipping 102 matching lines...)
1602 intptr_t argument_count, 1540 intptr_t argument_count,
1603 const Array& argument_names, 1541 const Array& argument_names,
1604 Label* failed, 1542 Label* failed,
1605 Label* match_found, 1543 Label* match_found,
1606 intptr_t deopt_id, 1544 intptr_t deopt_id,
1607 TokenPosition token_index, 1545 TokenPosition token_index,
1608 LocationSummary* locs, 1546 LocationSummary* locs,
1609 bool complete) { 1547 bool complete) {
1610 ASSERT(is_optimizing()); 1548 ASSERT(is_optimizing());
1611 __ Comment("EmitTestAndCall"); 1549 __ Comment("EmitTestAndCall");
1612 const Array& arguments_descriptor = 1550 const Array& arguments_descriptor = Array::ZoneHandle(
1613 Array::ZoneHandle(zone(), ArgumentsDescriptor::New(argument_count, 1551 zone(), ArgumentsDescriptor::New(argument_count, argument_names));
1614 argument_names));
1615 1552
1616 // Load receiver into T0. 1553 // Load receiver into T0.
1617 __ LoadFromOffset(T0, SP, (argument_count - 1) * kWordSize); 1554 __ LoadFromOffset(T0, SP, (argument_count - 1) * kWordSize);
1618 __ LoadObject(S4, arguments_descriptor); 1555 __ LoadObject(S4, arguments_descriptor);
1619 1556
1620 const bool kFirstCheckIsSmi = ic_data.GetReceiverClassIdAt(0) == kSmiCid; 1557 const bool kFirstCheckIsSmi = ic_data.GetReceiverClassIdAt(0) == kSmiCid;
1621 const intptr_t kNumChecks = ic_data.NumberOfChecks(); 1558 const intptr_t kNumChecks = ic_data.NumberOfChecks();
1622 1559
1623 ASSERT(!ic_data.IsNull() && (kNumChecks > 0)); 1560 ASSERT(!ic_data.IsNull() && (kNumChecks > 0));
1624 1561
1625 Label after_smi_test; 1562 Label after_smi_test;
1626 if (kFirstCheckIsSmi) { 1563 if (kFirstCheckIsSmi) {
1627 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); 1564 __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
1628 // Jump if receiver is not Smi. 1565 // Jump if receiver is not Smi.
1629 if (kNumChecks == 1) { 1566 if (kNumChecks == 1) {
1630 __ bne(CMPRES1, ZR, failed); 1567 __ bne(CMPRES1, ZR, failed);
1631 } else { 1568 } else {
1632 __ bne(CMPRES1, ZR, &after_smi_test); 1569 __ bne(CMPRES1, ZR, &after_smi_test);
1633 } 1570 }
1634 // Do not use the code from the function, but let the code be patched so 1571 // Do not use the code from the function, but let the code be patched so
1635 // that we can record the outgoing edges to other code. 1572 // that we can record the outgoing edges to other code.
1636 const Function& function = Function::ZoneHandle( 1573 const Function& function =
1637 zone(), ic_data.GetTargetAt(0)); 1574 Function::ZoneHandle(zone(), ic_data.GetTargetAt(0));
1638 GenerateStaticDartCall(deopt_id, 1575 GenerateStaticDartCall(deopt_id, token_index,
1639 token_index,
1640 *StubCode::CallStaticFunction_entry(), 1576 *StubCode::CallStaticFunction_entry(),
1641 RawPcDescriptors::kOther, 1577 RawPcDescriptors::kOther, locs, function);
1642 locs,
1643 function);
1644 __ Drop(argument_count); 1578 __ Drop(argument_count);
1645 if (kNumChecks > 1) { 1579 if (kNumChecks > 1) {
1646 __ b(match_found); 1580 __ b(match_found);
1647 } 1581 }
1648 } else { 1582 } else {
1649 // Receiver is Smi, but Smi is not a valid class therefore fail. 1583 // Receiver is Smi, but Smi is not a valid class therefore fail.
1650 // (Smi class must be first in the list). 1584 // (Smi class must be first in the list).
1651 if (!complete) { 1585 if (!complete) {
1652 __ andi(CMPRES1, T0, Immediate(kSmiTagMask)); 1586 __ andi(CMPRES1, T0, Immediate(kSmiTagMask));
1653 __ beq(CMPRES1, ZR, failed); 1587 __ beq(CMPRES1, ZR, failed);
(...skipping 23 matching lines...)
1677 __ BranchNotEqual(T2, Immediate(sorted[i].cid), &next_test); 1611 __ BranchNotEqual(T2, Immediate(sorted[i].cid), &next_test);
1678 } 1612 }
1679 } else { 1613 } else {
1680 if (!kIsLastCheck) { 1614 if (!kIsLastCheck) {
1681 __ BranchNotEqual(T2, Immediate(sorted[i].cid), &next_test); 1615 __ BranchNotEqual(T2, Immediate(sorted[i].cid), &next_test);
1682 } 1616 }
1683 } 1617 }
1684 // Do not use the code from the function, but let the code be patched so 1618 // Do not use the code from the function, but let the code be patched so
1685 // that we can record the outgoing edges to other code. 1619 // that we can record the outgoing edges to other code.
1686 const Function& function = *sorted[i].target; 1620 const Function& function = *sorted[i].target;
1687 GenerateStaticDartCall(deopt_id, 1621 GenerateStaticDartCall(deopt_id, token_index,
1688 token_index,
1689 *StubCode::CallStaticFunction_entry(), 1622 *StubCode::CallStaticFunction_entry(),
1690 RawPcDescriptors::kOther, 1623 RawPcDescriptors::kOther, locs, function);
1691 locs,
1692 function);
1693 __ Drop(argument_count); 1624 __ Drop(argument_count);
1694 if (!kIsLastCheck) { 1625 if (!kIsLastCheck) {
1695 __ b(match_found); 1626 __ b(match_found);
1696 } 1627 }
1697 __ Bind(&next_test); 1628 __ Bind(&next_test);
1698 } 1629 }
1699 } 1630 }
1700 1631
1701 1632
1702 #undef __ 1633 #undef __
(...skipping 55 matching lines...)
1758 if (destination.IsRegister()) { 1689 if (destination.IsRegister()) {
1759 if (constant.IsSmi() && 1690 if (constant.IsSmi() &&
1760 (source.constant_instruction()->representation() == kUnboxedInt32)) { 1691 (source.constant_instruction()->representation() == kUnboxedInt32)) {
1761 __ LoadImmediate(destination.reg(), Smi::Cast(constant).Value()); 1692 __ LoadImmediate(destination.reg(), Smi::Cast(constant).Value());
1762 } else { 1693 } else {
1763 __ LoadObject(destination.reg(), constant); 1694 __ LoadObject(destination.reg(), constant);
1764 } 1695 }
1765 } else if (destination.IsFpuRegister()) { 1696 } else if (destination.IsFpuRegister()) {
1766 __ LoadObject(TMP, constant); 1697 __ LoadObject(TMP, constant);
1767 __ LoadDFromOffset(destination.fpu_reg(), TMP, 1698 __ LoadDFromOffset(destination.fpu_reg(), TMP,
1768 Double::value_offset() - kHeapObjectTag); 1699 Double::value_offset() - kHeapObjectTag);
1769 } else if (destination.IsDoubleStackSlot()) { 1700 } else if (destination.IsDoubleStackSlot()) {
1770 const intptr_t dest_offset = destination.ToStackSlotOffset(); 1701 const intptr_t dest_offset = destination.ToStackSlotOffset();
1771 __ LoadObject(TMP, constant); 1702 __ LoadObject(TMP, constant);
1772 __ LoadDFromOffset(DTMP, TMP, Double::value_offset() - kHeapObjectTag); 1703 __ LoadDFromOffset(DTMP, TMP, Double::value_offset() - kHeapObjectTag);
1773 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset); 1704 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
1774 } else { 1705 } else {
1775 ASSERT(destination.IsStackSlot()); 1706 ASSERT(destination.IsStackSlot());
1776 const intptr_t dest_offset = destination.ToStackSlotOffset(); 1707 const intptr_t dest_offset = destination.ToStackSlotOffset();
1777 ScratchRegisterScope tmp(this, kNoRegister); 1708 ScratchRegisterScope tmp(this, kNoRegister);
1778 if (constant.IsSmi() && 1709 if (constant.IsSmi() &&
(...skipping 15 matching lines...)
1794 const Location source = move->src(); 1725 const Location source = move->src();
1795 const Location destination = move->dest(); 1726 const Location destination = move->dest();
1796 1727
1797 if (source.IsRegister() && destination.IsRegister()) { 1728 if (source.IsRegister() && destination.IsRegister()) {
1798 ASSERT(source.reg() != TMP); 1729 ASSERT(source.reg() != TMP);
1799 ASSERT(destination.reg() != TMP); 1730 ASSERT(destination.reg() != TMP);
1800 __ mov(TMP, source.reg()); 1731 __ mov(TMP, source.reg());
1801 __ mov(source.reg(), destination.reg()); 1732 __ mov(source.reg(), destination.reg());
1802 __ mov(destination.reg(), TMP); 1733 __ mov(destination.reg(), TMP);
1803 } else if (source.IsRegister() && destination.IsStackSlot()) { 1734 } else if (source.IsRegister() && destination.IsStackSlot()) {
1804 Exchange(source.reg(), 1735 Exchange(source.reg(), destination.base_reg(),
1805 destination.base_reg(), destination.ToStackSlotOffset()); 1736 destination.ToStackSlotOffset());
1806 } else if (source.IsStackSlot() && destination.IsRegister()) { 1737 } else if (source.IsStackSlot() && destination.IsRegister()) {
1807 Exchange(destination.reg(), 1738 Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
1808 source.base_reg(), source.ToStackSlotOffset());
1809 } else if (source.IsStackSlot() && destination.IsStackSlot()) { 1739 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1810 Exchange(source.base_reg(), source.ToStackSlotOffset(), 1740 Exchange(source.base_reg(), source.ToStackSlotOffset(),
1811 destination.base_reg(), destination.ToStackSlotOffset()); 1741 destination.base_reg(), destination.ToStackSlotOffset());
1812 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { 1742 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1813 DRegister dst = destination.fpu_reg(); 1743 DRegister dst = destination.fpu_reg();
1814 DRegister src = source.fpu_reg(); 1744 DRegister src = source.fpu_reg();
1815 __ movd(DTMP, src); 1745 __ movd(DTMP, src);
1816 __ movd(src, dst); 1746 __ movd(src, dst);
1817 __ movd(dst, DTMP); 1747 __ movd(dst, DTMP);
1818 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { 1748 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1819 ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot()); 1749 ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot());
1820 DRegister reg = source.IsFpuRegister() ? source.fpu_reg() 1750 DRegister reg =
1821 : destination.fpu_reg(); 1751 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1822 Register base_reg = source.IsFpuRegister() 1752 Register base_reg =
1823 ? destination.base_reg() 1753 source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
1824 : source.base_reg();
1825 const intptr_t slot_offset = source.IsFpuRegister() 1754 const intptr_t slot_offset = source.IsFpuRegister()
1826 ? destination.ToStackSlotOffset() 1755 ? destination.ToStackSlotOffset()
1827 : source.ToStackSlotOffset(); 1756 : source.ToStackSlotOffset();
1828 __ LoadDFromOffset(DTMP, base_reg, slot_offset); 1757 __ LoadDFromOffset(DTMP, base_reg, slot_offset);
1829 __ StoreDToOffset(reg, base_reg, slot_offset); 1758 __ StoreDToOffset(reg, base_reg, slot_offset);
1830 __ movd(reg, DTMP); 1759 __ movd(reg, DTMP);
1831 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { 1760 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1832 const intptr_t source_offset = source.ToStackSlotOffset(); 1761 const intptr_t source_offset = source.ToStackSlotOffset();
1833 const intptr_t dest_offset = destination.ToStackSlotOffset(); 1762 const intptr_t dest_offset = destination.ToStackSlotOffset();
1834 1763
1835 ScratchFpuRegisterScope ensure_scratch(this, DTMP); 1764 ScratchFpuRegisterScope ensure_scratch(this, DTMP);
1836 DRegister scratch = ensure_scratch.reg(); 1765 DRegister scratch = ensure_scratch.reg();
1837 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset); 1766 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
(...skipping 99 matching lines...)
1937 __ AddImmediate(SP, kDoubleSize); 1866 __ AddImmediate(SP, kDoubleSize);
1938 } 1867 }
1939 1868
1940 1869
1941 #undef __ 1870 #undef __
1942 1871
1943 1872
1944 } // namespace dart 1873 } // namespace dart
1945 1874
1946 #endif // defined TARGET_ARCH_MIPS 1875 #endif // defined TARGET_ARCH_MIPS