Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_compiler_arm.cc

Issue 2481873005: clang-format runtime/vm (Closed)
Patch Set: Merge (created 4 years, 1 month ago)
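This patch set applies clang-format mechanically across runtime/vm; no behavior change is intended. For orientation, the most common kind of hunk in the diff below repacks one-argument-per-line call sites into filled lines. The before/after pair here is reproduced from the GenerateCallSubtypeTestStub call in this file (old lines 326-331, new lines 320-322); indentation is approximate:

    // Before clang-format: one argument per line.
    return GenerateCallSubtypeTestStub(kTestTypeTwoArgs,
                                       kInstanceReg,
                                       kTypeArgumentsReg,
                                       kTempReg,
                                       is_instance_lbl,
                                       is_not_instance_lbl);

    // After clang-format: arguments packed up to the column limit.
    return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg,
                                       kTypeArgumentsReg, kTempReg,
                                       is_instance_lbl, is_not_instance_lbl);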
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/flow_graph_compiler.h" 8 #include "vm/flow_graph_compiler.h"
9 9
10 #include "vm/ast_printer.h" 10 #include "vm/ast_printer.h"
(...skipping 100 matching lines...)
111 builder->AddCallerFp(slot_ix++); 111 builder->AddCallerFp(slot_ix++);
112 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++); 112 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
113 113
114 // Emit all values that are needed for materialization as a part of the 114 // Emit all values that are needed for materialization as a part of the
115 // expression stack for the bottom-most frame. This guarantees that GC 115 // expression stack for the bottom-most frame. This guarantees that GC
116 // will be able to find them during materialization. 116 // will be able to find them during materialization.
117 slot_ix = builder->EmitMaterializationArguments(slot_ix); 117 slot_ix = builder->EmitMaterializationArguments(slot_ix);
118 118
119 // For the innermost environment, set outgoing arguments and the locals. 119 // For the innermost environment, set outgoing arguments and the locals.
120 for (intptr_t i = current->Length() - 1; 120 for (intptr_t i = current->Length() - 1;
OLD 121-122:
         i >= current->fixed_parameter_count();
         i--) {
NEW 121:
         i >= current->fixed_parameter_count(); i--) {
123 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++); 122 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
124 } 123 }
125 124
126 Environment* previous = current; 125 Environment* previous = current;
127 current = current->outer(); 126 current = current->outer();
128 while (current != NULL) { 127 while (current != NULL) {
129 builder->AddPp(current->function(), slot_ix++); 128 builder->AddPp(current->function(), slot_ix++);
130 builder->AddPcMarker(previous->function(), slot_ix++); 129 builder->AddPcMarker(previous->function(), slot_ix++);
131 builder->AddCallerFp(slot_ix++); 130 builder->AddCallerFp(slot_ix++);
132 131
133 // For any outer environment the deopt id is that of the call instruction 132 // For any outer environment the deopt id is that of the call instruction
134 // which is recorded in the outer environment. 133 // which is recorded in the outer environment.
OLD 135-138:
      builder->AddReturnAddress(
          current->function(),
          Thread::ToDeoptAfter(current->deopt_id()),
          slot_ix++);
NEW 134-136:
      builder->AddReturnAddress(current->function(),
                                Thread::ToDeoptAfter(current->deopt_id()),
                                slot_ix++);
139 137
140 // The values of outgoing arguments can be changed from the inlined call so 138 // The values of outgoing arguments can be changed from the inlined call so
141 // we must read them from the previous environment. 139 // we must read them from the previous environment.
142 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { 140 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
OLD 143-145:
        builder->AddCopy(previous->ValueAt(i),
                         previous->LocationAt(i),
                         slot_ix++);
NEW 141-142:
        builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                         slot_ix++);
146 } 143 }
147 144
148 // Set the locals, note that outgoing arguments are not in the environment. 145 // Set the locals, note that outgoing arguments are not in the environment.
149 for (intptr_t i = current->Length() - 1; 146 for (intptr_t i = current->Length() - 1;
OLD 150-154:
         i >= current->fixed_parameter_count();
         i--) {
      builder->AddCopy(current->ValueAt(i),
                       current->LocationAt(i),
                       slot_ix++);
NEW 147-148:
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
155 } 149 }
156 150
157 // Iterate on the outer environment. 151 // Iterate on the outer environment.
158 previous = current; 152 previous = current;
159 current = current->outer(); 153 current = current->outer();
160 } 154 }
161 // The previous pointer is now the outermost environment. 155 // The previous pointer is now the outermost environment.
162 ASSERT(previous != NULL); 156 ASSERT(previous != NULL);
163 157
164 // Set slots for the outermost environment. 158 // Set slots for the outermost environment.
(...skipping 116 matching lines...)
281 __ b(is_not_instance_lbl, EQ); 275 __ b(is_not_instance_lbl, EQ);
282 } 276 }
283 // A function type test requires checking the function signature. 277 // A function type test requires checking the function signature.
284 if (!type.IsFunctionType()) { 278 if (!type.IsFunctionType()) {
285 const intptr_t num_type_args = type_class.NumTypeArguments(); 279 const intptr_t num_type_args = type_class.NumTypeArguments();
286 const intptr_t num_type_params = type_class.NumTypeParameters(); 280 const intptr_t num_type_params = type_class.NumTypeParameters();
287 const intptr_t from_index = num_type_args - num_type_params; 281 const intptr_t from_index = num_type_args - num_type_params;
288 const TypeArguments& type_arguments = 282 const TypeArguments& type_arguments =
289 TypeArguments::ZoneHandle(zone(), type.arguments()); 283 TypeArguments::ZoneHandle(zone(), type.arguments());
290 const bool is_raw_type = type_arguments.IsNull() || 284 const bool is_raw_type = type_arguments.IsNull() ||
291 type_arguments.IsRaw(from_index, num_type_params); 285 type_arguments.IsRaw(from_index, num_type_params);
292 if (is_raw_type) { 286 if (is_raw_type) {
293 const Register kClassIdReg = R2; 287 const Register kClassIdReg = R2;
294 // dynamic type argument, check only classes. 288 // dynamic type argument, check only classes.
295 __ LoadClassId(kClassIdReg, kInstanceReg); 289 __ LoadClassId(kClassIdReg, kInstanceReg);
296 __ CompareImmediate(kClassIdReg, type_class.id()); 290 __ CompareImmediate(kClassIdReg, type_class.id());
297 __ b(is_instance_lbl, EQ); 291 __ b(is_instance_lbl, EQ);
298 // List is a very common case. 292 // List is a very common case.
299 if (IsListClass(type_class)) { 293 if (IsListClass(type_class)) {
300 GenerateListTypeCheck(kClassIdReg, is_instance_lbl); 294 GenerateListTypeCheck(kClassIdReg, is_instance_lbl);
301 } 295 }
302 return GenerateSubtype1TestCacheLookup( 296 return GenerateSubtype1TestCacheLookup(
303 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); 297 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
304 } 298 }
305 // If one type argument only, check if type argument is Object or dynamic. 299 // If one type argument only, check if type argument is Object or dynamic.
306 if (type_arguments.Length() == 1) { 300 if (type_arguments.Length() == 1) {
OLD 307-308:
        const AbstractType& tp_argument = AbstractType::ZoneHandle(
            zone(), type_arguments.TypeAt(0));
NEW 301-302:
        const AbstractType& tp_argument =
            AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0));
309 ASSERT(!tp_argument.IsMalformed()); 303 ASSERT(!tp_argument.IsMalformed());
310 if (tp_argument.IsType()) { 304 if (tp_argument.IsType()) {
311 ASSERT(tp_argument.HasResolvedTypeClass()); 305 ASSERT(tp_argument.HasResolvedTypeClass());
312 // Check if type argument is dynamic or Object. 306 // Check if type argument is dynamic or Object.
313 const Type& object_type = Type::Handle(zone(), Type::ObjectType()); 307 const Type& object_type = Type::Handle(zone(), Type::ObjectType());
314 if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) { 308 if (object_type.IsSubtypeOf(tp_argument, NULL, NULL, Heap::kOld)) {
315 // Instance class test only necessary. 309 // Instance class test only necessary.
316 return GenerateSubtype1TestCacheLookup( 310 return GenerateSubtype1TestCacheLookup(
317 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); 311 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
318 } 312 }
319 } 313 }
320 } 314 }
321 } 315 }
322 // Regular subtype test cache involving instance's type arguments. 316 // Regular subtype test cache involving instance's type arguments.
323 const Register kTypeArgumentsReg = kNoRegister; 317 const Register kTypeArgumentsReg = kNoRegister;
324 const Register kTempReg = kNoRegister; 318 const Register kTempReg = kNoRegister;
325 // R0: instance (must be preserved). 319 // R0: instance (must be preserved).
OLD 326-331:
    return GenerateCallSubtypeTestStub(kTestTypeTwoArgs,
                                       kInstanceReg,
                                       kTypeArgumentsReg,
                                       kTempReg,
                                       is_instance_lbl,
                                       is_not_instance_lbl);
NEW 320-322:
    return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg,
                                       kTypeArgumentsReg, kTempReg,
                                       is_instance_lbl, is_not_instance_lbl);
332 } 323 }
333 324
334 325
335 void FlowGraphCompiler::CheckClassIds(Register class_id_reg, 326 void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
336 const GrowableArray<intptr_t>& class_ids, 327 const GrowableArray<intptr_t>& class_ids,
337 Label* is_equal_lbl, 328 Label* is_equal_lbl,
338 Label* is_not_equal_lbl) { 329 Label* is_not_equal_lbl) {
339 for (intptr_t i = 0; i < class_ids.length(); i++) { 330 for (intptr_t i = 0; i < class_ids.length(); i++) {
340 __ CompareImmediate(class_id_reg, class_ids[i]); 331 __ CompareImmediate(class_id_reg, class_ids[i]);
341 __ b(is_equal_lbl, EQ); 332 __ b(is_equal_lbl, EQ);
(...skipping 18 matching lines...)
360 // Fallthrough. 351 // Fallthrough.
361 return true; 352 return true;
362 } 353 }
363 const Class& type_class = Class::Handle(zone(), type.type_class()); 354 const Class& type_class = Class::Handle(zone(), type.type_class());
364 ASSERT(type_class.NumTypeArguments() == 0); 355 ASSERT(type_class.NumTypeArguments() == 0);
365 356
366 const Register kInstanceReg = R0; 357 const Register kInstanceReg = R0;
367 __ tst(kInstanceReg, Operand(kSmiTagMask)); 358 __ tst(kInstanceReg, Operand(kSmiTagMask));
368 // If instance is Smi, check directly. 359 // If instance is Smi, check directly.
369 const Class& smi_class = Class::Handle(zone(), Smi::Class()); 360 const Class& smi_class = Class::Handle(zone(), Smi::Class());
OLD 370-375:
    if (smi_class.IsSubtypeOf(TypeArguments::Handle(zone()),
                              type_class,
                              TypeArguments::Handle(zone()),
                              NULL,
                              NULL,
                              Heap::kOld)) {
NEW 361-363:
    if (smi_class.IsSubtypeOf(TypeArguments::Handle(zone()), type_class,
                              TypeArguments::Handle(zone()), NULL, NULL,
                              Heap::kOld)) {
376 __ b(is_instance_lbl, EQ); 364 __ b(is_instance_lbl, EQ);
377 } else { 365 } else {
378 __ b(is_not_instance_lbl, EQ); 366 __ b(is_not_instance_lbl, EQ);
379 } 367 }
380 const Register kClassIdReg = R2; 368 const Register kClassIdReg = R2;
381 __ LoadClassId(kClassIdReg, kInstanceReg); 369 __ LoadClassId(kClassIdReg, kInstanceReg);
382 // See ClassFinalizer::ResolveSuperTypeAndInterfaces for list of restricted 370 // See ClassFinalizer::ResolveSuperTypeAndInterfaces for list of restricted
383 // interfaces. 371 // interfaces.
384 // Bool interface can be implemented only by core class Bool. 372 // Bool interface can be implemented only by core class Bool.
385 if (type.IsBoolType()) { 373 if (type.IsBoolType()) {
386 __ CompareImmediate(kClassIdReg, kBoolCid); 374 __ CompareImmediate(kClassIdReg, kBoolCid);
387 __ b(is_instance_lbl, EQ); 375 __ b(is_instance_lbl, EQ);
388 __ b(is_not_instance_lbl); 376 __ b(is_not_instance_lbl);
389 return false; 377 return false;
390 } 378 }
391 // Custom checking for numbers (Smi, Mint, Bigint and Double). 379 // Custom checking for numbers (Smi, Mint, Bigint and Double).
392 // Note that instance is not Smi (checked above). 380 // Note that instance is not Smi (checked above).
393 if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) { 381 if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
OLD 394-395:
      GenerateNumberTypeCheck(
          kClassIdReg, type, is_instance_lbl, is_not_instance_lbl);
NEW 382-383:
      GenerateNumberTypeCheck(kClassIdReg, type, is_instance_lbl,
                              is_not_instance_lbl);
396 return false; 384 return false;
397 } 385 }
398 if (type.IsStringType()) { 386 if (type.IsStringType()) {
399 GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl); 387 GenerateStringTypeCheck(kClassIdReg, is_instance_lbl, is_not_instance_lbl);
400 return false; 388 return false;
401 } 389 }
402 if (type.IsDartFunctionType()) { 390 if (type.IsDartFunctionType()) {
403 // Check if instance is a closure. 391 // Check if instance is a closure.
404 __ CompareImmediate(kClassIdReg, kClosureCid); 392 __ CompareImmediate(kClassIdReg, kClosureCid);
405 __ b(is_instance_lbl, EQ); 393 __ b(is_instance_lbl, EQ);
(...skipping 26 matching lines...)
432 __ LoadClass(R1, kInstanceReg, R2); 420 __ LoadClass(R1, kInstanceReg, R2);
433 // R1: instance class. 421 // R1: instance class.
434 // Check immediate superclass equality. 422 // Check immediate superclass equality.
435 __ ldr(R2, FieldAddress(R1, Class::super_type_offset())); 423 __ ldr(R2, FieldAddress(R1, Class::super_type_offset()));
436 __ ldr(R2, FieldAddress(R2, Type::type_class_id_offset())); 424 __ ldr(R2, FieldAddress(R2, Type::type_class_id_offset()));
437 __ CompareImmediate(R2, Smi::RawValue(type_class.id())); 425 __ CompareImmediate(R2, Smi::RawValue(type_class.id()));
438 __ b(is_instance_lbl, EQ); 426 __ b(is_instance_lbl, EQ);
439 427
440 const Register kTypeArgumentsReg = kNoRegister; 428 const Register kTypeArgumentsReg = kNoRegister;
441 const Register kTempReg = kNoRegister; 429 const Register kTempReg = kNoRegister;
OLD 442-447:
    return GenerateCallSubtypeTestStub(kTestTypeOneArg,
                                       kInstanceReg,
                                       kTypeArgumentsReg,
                                       kTempReg,
                                       is_instance_lbl,
                                       is_not_instance_lbl);
NEW 430-432:
    return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg,
                                       kTypeArgumentsReg, kTempReg,
                                       is_instance_lbl, is_not_instance_lbl);
448 } 433 }
449 434
450 435
451 // Generates inlined check if 'type' is a type parameter or type itself 436 // Generates inlined check if 'type' is a type parameter or type itself
452 // R0: instance (preserved). 437 // R0: instance (preserved).
453 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest( 438 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
454 TokenPosition token_pos, 439 TokenPosition token_pos,
455 const AbstractType& type, 440 const AbstractType& type,
456 Label* is_instance_lbl, 441 Label* is_instance_lbl,
457 Label* is_not_instance_lbl) { 442 Label* is_not_instance_lbl) {
458 __ Comment("UninstantiatedTypeTest"); 443 __ Comment("UninstantiatedTypeTest");
459 ASSERT(!type.IsInstantiated()); 444 ASSERT(!type.IsInstantiated());
460 // Skip check if destination is a dynamic type. 445 // Skip check if destination is a dynamic type.
461 if (type.IsTypeParameter()) { 446 if (type.IsTypeParameter()) {
462 const TypeParameter& type_param = TypeParameter::Cast(type); 447 const TypeParameter& type_param = TypeParameter::Cast(type);
463 // Load instantiator type arguments on stack. 448 // Load instantiator type arguments on stack.
464 __ ldr(R1, Address(SP, 0)); // Get instantiator type arguments. 449 __ ldr(R1, Address(SP, 0)); // Get instantiator type arguments.
465 // R1: instantiator type arguments. 450 // R1: instantiator type arguments.
466 // Check if type arguments are null, i.e. equivalent to vector of dynamic. 451 // Check if type arguments are null, i.e. equivalent to vector of dynamic.
467 __ CompareObject(R1, Object::null_object()); 452 __ CompareObject(R1, Object::null_object());
468 __ b(is_instance_lbl, EQ); 453 __ b(is_instance_lbl, EQ);
469 __ ldr(R2, 454 __ ldr(R2,
470 FieldAddress(R1, TypeArguments::type_at_offset(type_param.index()))); 455 FieldAddress(R1, TypeArguments::type_at_offset(type_param.index())));
471 // R2: concrete type of type. 456 // R2: concrete type of type.
472 // Check if type argument is dynamic. 457 // Check if type argument is dynamic.
473 __ CompareObject(R2, Object::dynamic_type()); 458 __ CompareObject(R2, Object::dynamic_type());
474 __ b(is_instance_lbl, EQ); 459 __ b(is_instance_lbl, EQ);
475 __ CompareObject(R2, Type::ZoneHandle(zone(), Type::ObjectType())); 460 __ CompareObject(R2, Type::ZoneHandle(zone(), Type::ObjectType()));
476 __ b(is_instance_lbl, EQ); 461 __ b(is_instance_lbl, EQ);
477 462
478 // For Smi check quickly against int and num interfaces. 463 // For Smi check quickly against int and num interfaces.
479 Label not_smi; 464 Label not_smi;
480 __ tst(R0, Operand(kSmiTagMask)); // Value is Smi? 465 __ tst(R0, Operand(kSmiTagMask)); // Value is Smi?
481 __ b(&not_smi, NE); 466 __ b(&not_smi, NE);
482 __ CompareObject(R2, Type::ZoneHandle(zone(), Type::IntType())); 467 __ CompareObject(R2, Type::ZoneHandle(zone(), Type::IntType()));
483 __ b(is_instance_lbl, EQ); 468 __ b(is_instance_lbl, EQ);
484 __ CompareObject(R2, Type::ZoneHandle(zone(), Type::Number())); 469 __ CompareObject(R2, Type::ZoneHandle(zone(), Type::Number()));
485 __ b(is_instance_lbl, EQ); 470 __ b(is_instance_lbl, EQ);
486 // Smi must be handled in runtime. 471 // Smi must be handled in runtime.
487 Label fall_through; 472 Label fall_through;
488 __ b(&fall_through); 473 __ b(&fall_through);
489 474
490 __ Bind(&not_smi); 475 __ Bind(&not_smi);
491 // R1: instantiator type arguments. 476 // R1: instantiator type arguments.
492 // R0: instance. 477 // R0: instance.
493 const Register kInstanceReg = R0; 478 const Register kInstanceReg = R0;
494 const Register kTypeArgumentsReg = R1; 479 const Register kTypeArgumentsReg = R1;
495 const Register kTempReg = kNoRegister; 480 const Register kTempReg = kNoRegister;
OLD 496-503:
      const SubtypeTestCache& type_test_cache =
          SubtypeTestCache::ZoneHandle(zone(),
              GenerateCallSubtypeTestStub(kTestTypeThreeArgs,
                                          kInstanceReg,
                                          kTypeArgumentsReg,
                                          kTempReg,
                                          is_instance_lbl,
                                          is_not_instance_lbl));
NEW 481-484:
      const SubtypeTestCache& type_test_cache = SubtypeTestCache::ZoneHandle(
          zone(), GenerateCallSubtypeTestStub(
                      kTestTypeThreeArgs, kInstanceReg, kTypeArgumentsReg,
                      kTempReg, is_instance_lbl, is_not_instance_lbl));
504 __ Bind(&fall_through); 485 __ Bind(&fall_through);
505 return type_test_cache.raw(); 486 return type_test_cache.raw();
506 } 487 }
507 if (type.IsType()) { 488 if (type.IsType()) {
508 const Register kInstanceReg = R0; 489 const Register kInstanceReg = R0;
509 const Register kTypeArgumentsReg = R1; 490 const Register kTypeArgumentsReg = R1;
510 __ tst(kInstanceReg, Operand(kSmiTagMask)); // Is instance Smi? 491 __ tst(kInstanceReg, Operand(kSmiTagMask)); // Is instance Smi?
511 __ b(is_not_instance_lbl, EQ); 492 __ b(is_not_instance_lbl, EQ);
512 __ ldr(kTypeArgumentsReg, Address(SP, 0)); // Instantiator type args. 493 __ ldr(kTypeArgumentsReg, Address(SP, 0)); // Instantiator type args.
513 // Uninstantiated type class is known at compile time, but the type 494 // Uninstantiated type class is known at compile time, but the type
514 // arguments are determined at runtime by the instantiator. 495 // arguments are determined at runtime by the instantiator.
515 const Register kTempReg = kNoRegister; 496 const Register kTempReg = kNoRegister;
OLD 516-521:
    return GenerateCallSubtypeTestStub(kTestTypeThreeArgs,
                                       kInstanceReg,
                                       kTypeArgumentsReg,
                                       kTempReg,
                                       is_instance_lbl,
                                       is_not_instance_lbl);
NEW 497-499:
    return GenerateCallSubtypeTestStub(kTestTypeThreeArgs, kInstanceReg,
                                       kTypeArgumentsReg, kTempReg,
                                       is_instance_lbl, is_not_instance_lbl);
522 } 500 }
523 return SubtypeTestCache::null(); 501 return SubtypeTestCache::null();
524 } 502 }
525 503
526 504
527 // Inputs: 505 // Inputs:
528 // - R0: instance being type checked (preserved). 506 // - R0: instance being type checked (preserved).
529 // - R1: optional instantiator type arguments (preserved). 507 // - R1: optional instantiator type arguments (preserved).
530 // Clobbers R2, R3. 508 // Clobbers R2, R3.
531 // Returns: 509 // Returns:
(...skipping 11 matching lines...)
543 // A non-null value is returned from a void function, which will result in a 521 // A non-null value is returned from a void function, which will result in a
544 // type error. A null value is handled prior to executing this inline code. 522 // type error. A null value is handled prior to executing this inline code.
545 return SubtypeTestCache::null(); 523 return SubtypeTestCache::null();
546 } 524 }
547 if (type.IsInstantiated()) { 525 if (type.IsInstantiated()) {
548 const Class& type_class = Class::ZoneHandle(zone(), type.type_class()); 526 const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
549 // A class equality check is only applicable with a dst type (not a 527 // A class equality check is only applicable with a dst type (not a
550 // function type) of a non-parameterized class or with a raw dst type of 528 // function type) of a non-parameterized class or with a raw dst type of
551 // a parameterized class. 529 // a parameterized class.
552 if (type.IsFunctionType() || (type_class.NumTypeArguments() > 0)) { 530 if (type.IsFunctionType() || (type_class.NumTypeArguments() > 0)) {
OLD 553-556:
      return GenerateInstantiatedTypeWithArgumentsTest(token_pos,
                                                       type,
                                                       is_instance_lbl,
                                                       is_not_instance_lbl);
NEW 531-532:
      return GenerateInstantiatedTypeWithArgumentsTest(
          token_pos, type, is_instance_lbl, is_not_instance_lbl);
557 // Fall through to runtime call. 533 // Fall through to runtime call.
558 } 534 }
OLD 559-563:
    const bool has_fall_through =
        GenerateInstantiatedTypeNoArgumentsTest(token_pos,
                                                type,
                                                is_instance_lbl,
                                                is_not_instance_lbl);
NEW 535-536:
    const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
        token_pos, type, is_instance_lbl, is_not_instance_lbl);
564 if (has_fall_through) { 537 if (has_fall_through) {
565 // If test non-conclusive so far, try the inlined type-test cache. 538 // If test non-conclusive so far, try the inlined type-test cache.
566 // 'type' is known at compile time. 539 // 'type' is known at compile time.
567 return GenerateSubtype1TestCacheLookup( 540 return GenerateSubtype1TestCacheLookup(
568 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); 541 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
569 } else { 542 } else {
570 return SubtypeTestCache::null(); 543 return SubtypeTestCache::null();
571 } 544 }
572 } 545 }
OLD 573-576:
  return GenerateUninstantiatedTypeTest(token_pos,
                                        type,
                                        is_instance_lbl,
                                        is_not_instance_lbl);
NEW 546-547:
  return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl,
                                        is_not_instance_lbl);
577 } 548 }
578 549
579 550
580 // If instanceof type test cannot be performed successfully at compile time and 551 // If instanceof type test cannot be performed successfully at compile time and
581 // therefore eliminated, optimize it by adding inlined tests for: 552 // therefore eliminated, optimize it by adding inlined tests for:
582 // - NULL -> return false. 553 // - NULL -> return false.
583 // - Smi -> compile time subtype check (only if dst class is not parameterized). 554 // - Smi -> compile time subtype check (only if dst class is not parameterized).
584 // - Class equality (only if class is not parameterized). 555 // - Class equality (only if class is not parameterized).
585 // Inputs: 556 // Inputs:
(...skipping 21 matching lines...)
607 // instantiated). 578 // instantiated).
608 // We can only inline this null check if the type is instantiated at compile 579 // We can only inline this null check if the type is instantiated at compile
609 // time, since an uninstantiated type at compile time could be Object or 580 // time, since an uninstantiated type at compile time could be Object or
610 // dynamic at run time. 581 // dynamic at run time.
611 __ CompareObject(R0, Object::null_object()); 582 __ CompareObject(R0, Object::null_object());
612 __ b(type.IsNullType() ? &is_instance : &is_not_instance, EQ); 583 __ b(type.IsNullType() ? &is_instance : &is_not_instance, EQ);
613 } 584 }
614 585
615 // Generate inline instanceof test. 586 // Generate inline instanceof test.
616 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); 587 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
OLD 617-618:
  test_cache = GenerateInlineInstanceof(token_pos, type,
                                        &is_instance, &is_not_instance);
NEW 588-589:
  test_cache =
      GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);
619 590
620 // test_cache is null if there is no fall-through. 591 // test_cache is null if there is no fall-through.
621 Label done; 592 Label done;
622 if (!test_cache.IsNull()) { 593 if (!test_cache.IsNull()) {
623 // Generate runtime call. 594 // Generate runtime call.
624 // Load instantiator type arguments (R1). 595 // Load instantiator type arguments (R1).
625 __ ldr(R1, Address(SP, 0 * kWordSize)); 596 __ ldr(R1, Address(SP, 0 * kWordSize));
626 __ PushObject(Object::null_object()); // Make room for the result. 597 __ PushObject(Object::null_object()); // Make room for the result.
627 __ Push(R0); // Push the instance. 598 __ Push(R0); // Push the instance.
628 __ PushObject(type); // Push the type. 599 __ PushObject(type); // Push the type.
629 __ Push(R1); // Push instantiator type arguments (R1). 600 __ Push(R1); // Push instantiator type arguments (R1).
630 __ LoadUniqueObject(R0, test_cache); 601 __ LoadUniqueObject(R0, test_cache);
631 __ Push(R0); 602 __ Push(R0);
632 GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 4, locs); 603 GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 4, locs);
633 // Pop the parameters supplied to the runtime entry. The result of the 604 // Pop the parameters supplied to the runtime entry. The result of the
634 // instanceof runtime call will be left as the result of the operation. 605 // instanceof runtime call will be left as the result of the operation.
635 __ Drop(4); 606 __ Drop(4);
636 if (negate_result) { 607 if (negate_result) {
637 __ Pop(R1); 608 __ Pop(R1);
638 __ LoadObject(R0, Bool::True()); 609 __ LoadObject(R0, Bool::True());
(...skipping 42 matching lines...)
681 // Preserve instantiator type arguments (R1). 652 // Preserve instantiator type arguments (R1).
682 __ Push(R1); 653 __ Push(R1);
683 // A null object is always assignable and is returned as result. 654 // A null object is always assignable and is returned as result.
684 Label is_assignable, runtime_call; 655 Label is_assignable, runtime_call;
685 __ CompareObject(R0, Object::null_object()); 656 __ CompareObject(R0, Object::null_object());
686 __ b(&is_assignable, EQ); 657 __ b(&is_assignable, EQ);
687 658
688 // Generate throw new TypeError() if the type is malformed or malbounded. 659 // Generate throw new TypeError() if the type is malformed or malbounded.
689 if (dst_type.IsMalformedOrMalbounded()) { 660 if (dst_type.IsMalformedOrMalbounded()) {
690 __ PushObject(Object::null_object()); // Make room for the result. 661 __ PushObject(Object::null_object()); // Make room for the result.
691 __ Push(R0); // Push the source object. 662 __ Push(R0); // Push the source object.
692 __ PushObject(dst_name); // Push the name of the destination. 663 __ PushObject(dst_name); // Push the name of the destination.
693 __ PushObject(dst_type); // Push the type of the destination. 664 __ PushObject(dst_type); // Push the type of the destination.
OLD 694-698:
    GenerateRuntimeCall(token_pos,
                        deopt_id,
                        kBadTypeErrorRuntimeEntry,
                        3,
                        locs);
NEW 665-666:
    GenerateRuntimeCall(token_pos, deopt_id, kBadTypeErrorRuntimeEntry, 3,
                        locs);
699 // We should never return here. 667 // We should never return here.
700 __ bkpt(0); 668 __ bkpt(0);
701 669
702 __ Bind(&is_assignable); // For a null object. 670 __ Bind(&is_assignable); // For a null object.
703 // Restore instantiator type arguments (R1). 671 // Restore instantiator type arguments (R1).
704 __ Pop(R1); 672 __ Pop(R1);
705 return; 673 return;
706 } 674 }
707 675
708 // Generate inline type check, linking to runtime call if not assignable. 676 // Generate inline type check, linking to runtime call if not assignable.
709 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); 677 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
OLD 710-711:
  test_cache = GenerateInlineInstanceof(token_pos, dst_type,
                                        &is_assignable, &runtime_call);
NEW 678-679:
  test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable,
                                        &runtime_call);
712 680
713 __ Bind(&runtime_call); 681 __ Bind(&runtime_call);
714 // Load instantiator type arguments (R1). 682 // Load instantiator type arguments (R1).
715 __ ldr(R1, Address(SP, 0 * kWordSize)); 683 __ ldr(R1, Address(SP, 0 * kWordSize));
716 __ PushObject(Object::null_object()); // Make room for the result. 684 __ PushObject(Object::null_object()); // Make room for the result.
717 __ Push(R0); // Push the source object. 685 __ Push(R0); // Push the source object.
718 __ PushObject(dst_type); // Push the type of the destination. 686 __ PushObject(dst_type); // Push the type of the destination.
719 __ Push(R1); // Push instantiator type arguments (R1). 687 __ Push(R1); // Push instantiator type arguments (R1).
720 __ PushObject(dst_name); // Push the name of the destination. 688 __ PushObject(dst_name); // Push the name of the destination.
721 __ LoadUniqueObject(R0, test_cache); 689 __ LoadUniqueObject(R0, test_cache);
722 __ Push(R0); 690 __ Push(R0);
723 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 5, locs); 691 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 5, locs);
724 // Pop the parameters supplied to the runtime entry. The result of the 692 // Pop the parameters supplied to the runtime entry. The result of the
725 // type check runtime call is the checked value. 693 // type check runtime call is the checked value.
726 __ Drop(5); 694 __ Drop(5);
727 __ Pop(R0); 695 __ Pop(R0);
728 696
729 __ Bind(&is_assignable); 697 __ Bind(&is_assignable);
(...skipping 68 matching lines...)
798 __ Bind(&loop); 766 __ Bind(&loop);
799 __ ldr(IP, argument_addr); 767 __ ldr(IP, argument_addr);
800 __ str(IP, copy_addr); 768 __ str(IP, copy_addr);
801 __ Bind(&loop_condition); 769 __ Bind(&loop_condition);
802 __ subs(R6, R6, Operand(1)); 770 __ subs(R6, R6, Operand(1));
803 __ b(&loop, PL); 771 __ b(&loop, PL);
804 772
805 // Copy or initialize optional named arguments. 773 // Copy or initialize optional named arguments.
806 Label all_arguments_processed; 774 Label all_arguments_processed;
807 #ifdef DEBUG 775 #ifdef DEBUG
808 const bool check_correct_named_args = true; 776 const bool check_correct_named_args = true;
809 #else 777 #else
810 const bool check_correct_named_args = function.IsClosureFunction(); 778 const bool check_correct_named_args = function.IsClosureFunction();
811 #endif 779 #endif
812 if (num_opt_named_params > 0) { 780 if (num_opt_named_params > 0) {
813 // Start by alphabetically sorting the names of the optional parameters. 781 // Start by alphabetically sorting the names of the optional parameters.
814 LocalVariable** opt_param = new LocalVariable*[num_opt_named_params]; 782 LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
815 int* opt_param_position = new int[num_opt_named_params]; 783 int* opt_param_position = new int[num_opt_named_params];
816 for (int pos = num_fixed_params; pos < num_params; pos++) { 784 for (int pos = num_fixed_params; pos < num_params; pos++) {
817 LocalVariable* parameter = scope->VariableAt(pos); 785 LocalVariable* parameter = scope->VariableAt(pos);
818 const String& opt_param_name = parameter->name(); 786 const String& opt_param_name = parameter->name();
819 int i = pos - num_fixed_params; 787 int i = pos - num_fixed_params;
820 while (--i >= 0) { 788 while (--i >= 0) {
(...skipping 10 matching lines...)
831 // Generate code handling each optional parameter in alphabetical order. 799 // Generate code handling each optional parameter in alphabetical order.
832 __ ldr(NOTFP, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 800 __ ldr(NOTFP, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
833 __ ldr(R6, 801 __ ldr(R6,
834 FieldAddress(R4, ArgumentsDescriptor::positional_count_offset())); 802 FieldAddress(R4, ArgumentsDescriptor::positional_count_offset()));
835 __ SmiUntag(R6); 803 __ SmiUntag(R6);
836 // Let NOTFP point to the first passed argument, i.e. to 804 // Let NOTFP point to the first passed argument, i.e. to
837 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (NOTFP) is Smi. 805 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (NOTFP) is Smi.
838 __ add(NOTFP, FP, Operand(NOTFP, LSL, 1)); 806 __ add(NOTFP, FP, Operand(NOTFP, LSL, 1));
839 __ AddImmediate(NOTFP, NOTFP, kParamEndSlotFromFp * kWordSize); 807 __ AddImmediate(NOTFP, NOTFP, kParamEndSlotFromFp * kWordSize);
840 // Let R8 point to the entry of the first named argument. 808 // Let R8 point to the entry of the first named argument.
OLD 841-842:
    __ add(R8, R4, Operand(
        ArgumentsDescriptor::first_named_entry_offset() - kHeapObjectTag));
NEW 809-810:
    __ add(R8, R4, Operand(ArgumentsDescriptor::first_named_entry_offset() -
                           kHeapObjectTag));
843 for (int i = 0; i < num_opt_named_params; i++) { 811 for (int i = 0; i < num_opt_named_params; i++) {
844 Label load_default_value, assign_optional_parameter; 812 Label load_default_value, assign_optional_parameter;
845 const int param_pos = opt_param_position[i]; 813 const int param_pos = opt_param_position[i];
846 // Check if this named parameter was passed in. 814 // Check if this named parameter was passed in.
847 // Load R9 with the name of the argument. 815 // Load R9 with the name of the argument.
848 __ ldr(R9, Address(R8, ArgumentsDescriptor::name_offset())); 816 __ ldr(R9, Address(R8, ArgumentsDescriptor::name_offset()));
849 ASSERT(opt_param[i]->name().IsSymbol()); 817 ASSERT(opt_param[i]->name().IsSymbol());
850 __ CompareObject(R9, opt_param[i]->name()); 818 __ CompareObject(R9, opt_param[i]->name());
851 __ b(&load_default_value, NE); 819 __ b(&load_default_value, NE);
852 // Load R9 with passed-in argument at provided arg_pos, i.e. at 820 // Load R9 with passed-in argument at provided arg_pos, i.e. at
(...skipping 118 matching lines...)
971 __ LoadObject(R0, Object::null_object()); 939 __ LoadObject(R0, Object::null_object());
972 __ Ret(); 940 __ Ret();
973 } 941 }
974 942
975 943
976 static const Register new_pp = NOTFP; 944 static const Register new_pp = NOTFP;
977 945
978 946
979 void FlowGraphCompiler::EmitFrameEntry() { 947 void FlowGraphCompiler::EmitFrameEntry() {
980 const Function& function = parsed_function().function(); 948 const Function& function = parsed_function().function();
OLD 981-983:
  if (CanOptimizeFunction() &&
      function.IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
NEW 949-950:
  if (CanOptimizeFunction() && function.IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
984 __ Comment("Invocation Count Check"); 951 __ Comment("Invocation Count Check");
985 const Register function_reg = R8; 952 const Register function_reg = R8;
986 // The pool pointer is not setup before entering the Dart frame. 953 // The pool pointer is not setup before entering the Dart frame.
987 // Temporarily setup pool pointer for this dart function. 954 // Temporarily setup pool pointer for this dart function.
988 __ LoadPoolPointer(new_pp); 955 __ LoadPoolPointer(new_pp);
989 // Load function object from object pool. 956 // Load function object from object pool.
990 __ LoadFunctionFromCalleePool(function_reg, function, new_pp); 957 __ LoadFunctionFromCalleePool(function_reg, function, new_pp);
991 958
OLD 992-993:
    __ ldr(R3, FieldAddress(function_reg,
                            Function::usage_counter_offset()));
NEW 959:
    __ ldr(R3, FieldAddress(function_reg, Function::usage_counter_offset()));
994 // Reoptimization of an optimized function is triggered by counting in 960 // Reoptimization of an optimized function is triggered by counting in
995 // IC stubs, but not at the entry of the function. 961 // IC stubs, but not at the entry of the function.
996 if (!is_optimizing()) { 962 if (!is_optimizing()) {
997 __ add(R3, R3, Operand(1)); 963 __ add(R3, R3, Operand(1));
OLD 998-999:
      __ str(R3, FieldAddress(function_reg,
                              Function::usage_counter_offset()));
NEW 964:
      __ str(R3, FieldAddress(function_reg, Function::usage_counter_offset()));
1000 } 965 }
1001 __ CompareImmediate(R3, GetOptimizationThreshold()); 966 __ CompareImmediate(R3, GetOptimizationThreshold());
1002 ASSERT(function_reg == R8); 967 ASSERT(function_reg == R8);
1003 __ Branch(*StubCode::OptimizeFunction_entry(), kNotPatchable, new_pp, GE); 968 __ Branch(*StubCode::OptimizeFunction_entry(), kNotPatchable, new_pp, GE);
1004 } 969 }
1005 __ Comment("Enter frame"); 970 __ Comment("Enter frame");
1006 if (flow_graph().IsCompiledForOsr()) { 971 if (flow_graph().IsCompiledForOsr()) {
OLD 1007-1009:
    intptr_t extra_slots = StackSize()
                           - flow_graph().num_stack_locals()
                           - flow_graph().num_copied_params();
NEW 972-973:
    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
                           flow_graph().num_copied_params();
1010 ASSERT(extra_slots >= 0); 974 ASSERT(extra_slots >= 0);
1011 __ EnterOsrFrame(extra_slots * kWordSize); 975 __ EnterOsrFrame(extra_slots * kWordSize);
1012 } else { 976 } else {
1013 ASSERT(StackSize() >= 0); 977 ASSERT(StackSize() >= 0);
1014 __ EnterDartFrame(StackSize() * kWordSize); 978 __ EnterDartFrame(StackSize() * kWordSize);
1015 } 979 }
1016 } 980 }
1017 981
1018 982
1019 // Input parameters: 983 // Input parameters:
(...skipping 32 matching lines...)
1052 if (num_copied_params == 0) { 1016 if (num_copied_params == 0) {
1053 const bool check_arguments = 1017 const bool check_arguments =
1054 function.IsClosureFunction() && !flow_graph().IsCompiledForOsr(); 1018 function.IsClosureFunction() && !flow_graph().IsCompiledForOsr();
1055 if (check_arguments) { 1019 if (check_arguments) {
1056 __ Comment("Check argument count"); 1020 __ Comment("Check argument count");
1057 // Check that exactly num_fixed arguments are passed in. 1021 // Check that exactly num_fixed arguments are passed in.
1058 Label correct_num_arguments, wrong_num_arguments; 1022 Label correct_num_arguments, wrong_num_arguments;
1059 __ ldr(R0, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 1023 __ ldr(R0, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
1060 __ CompareImmediate(R0, Smi::RawValue(num_fixed_params)); 1024 __ CompareImmediate(R0, Smi::RawValue(num_fixed_params));
1061 __ b(&wrong_num_arguments, NE); 1025 __ b(&wrong_num_arguments, NE);
OLD 1062-1063:
      __ ldr(R1, FieldAddress(R4,
                              ArgumentsDescriptor::positional_count_offset()));
NEW 1026-1027:
      __ ldr(R1,
             FieldAddress(R4, ArgumentsDescriptor::positional_count_offset()));
1064 __ cmp(R0, Operand(R1)); 1028 __ cmp(R0, Operand(R1));
1065 __ b(&correct_num_arguments, EQ); 1029 __ b(&correct_num_arguments, EQ);
1066 __ Bind(&wrong_num_arguments); 1030 __ Bind(&wrong_num_arguments);
1067 ASSERT(assembler()->constant_pool_allowed()); 1031 ASSERT(assembler()->constant_pool_allowed());
1068 __ LeaveDartFrame(kKeepCalleePP); // Arguments are still on the stack. 1032 __ LeaveDartFrame(kKeepCalleePP); // Arguments are still on the stack.
1069 __ Branch(*StubCode::CallClosureNoSuchMethod_entry()); 1033 __ Branch(*StubCode::CallClosureNoSuchMethod_entry());
1070 // The noSuchMethod call may return to the caller, but not here. 1034 // The noSuchMethod call may return to the caller, but not here.
1071 __ Bind(&correct_num_arguments); 1035 __ Bind(&correct_num_arguments);
1072 } 1036 }
1073 } else if (!flow_graph().IsCompiledForOsr()) { 1037 } else if (!flow_graph().IsCompiledForOsr()) {
(...skipping 65 matching lines...)
1139 AddCurrentDescriptor(kind, deopt_id, token_pos); 1103 AddCurrentDescriptor(kind, deopt_id, token_pos);
1140 RecordSafepoint(locs); 1104 RecordSafepoint(locs);
1141 // Marks either the continuation point in unoptimized code or the 1105 // Marks either the continuation point in unoptimized code or the
1142 // deoptimization point in optimized code, after call. 1106 // deoptimization point in optimized code, after call.
1143 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1107 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1144 if (is_optimizing()) { 1108 if (is_optimizing()) {
1145 AddDeoptIndexAtCall(deopt_id_after); 1109 AddDeoptIndexAtCall(deopt_id_after);
1146 } else { 1110 } else {
1147 // Add deoptimization continuation point after the call and before the 1111 // Add deoptimization continuation point after the call and before the
1148 // arguments are removed. 1112 // arguments are removed.
OLD 1149-1150:
    AddCurrentDescriptor(RawPcDescriptors::kDeopt,
                         deopt_id_after, token_pos);
NEW 1113:
    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1151 } 1114 }
1152 } 1115 }
1153 1116
1154 1117
1155 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id, 1118 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
1156 TokenPosition token_pos, 1119 TokenPosition token_pos,
1157 const StubEntry& stub_entry, 1120 const StubEntry& stub_entry,
1158 RawPcDescriptors::Kind kind, 1121 RawPcDescriptors::Kind kind,
1159 LocationSummary* locs, 1122 LocationSummary* locs,
1160 const Function& target) { 1123 const Function& target) {
1161 // Call sites to the same target can share object pool entries. These 1124 // Call sites to the same target can share object pool entries. These
1162 // call sites are never patched for breakpoints: the function is deoptimized 1125 // call sites are never patched for breakpoints: the function is deoptimized
1163 // and the unoptimized code with IC calls for static calls is patched instead. 1126 // and the unoptimized code with IC calls for static calls is patched instead.
1164 ASSERT(is_optimizing()); 1127 ASSERT(is_optimizing());
1165 __ BranchLinkWithEquivalence(stub_entry, target); 1128 __ BranchLinkWithEquivalence(stub_entry, target);
1166 1129
1167 AddCurrentDescriptor(kind, deopt_id, token_pos); 1130 AddCurrentDescriptor(kind, deopt_id, token_pos);
1168 RecordSafepoint(locs); 1131 RecordSafepoint(locs);
1169 // Marks either the continuation point in unoptimized code or the 1132 // Marks either the continuation point in unoptimized code or the
1170 // deoptimization point in optimized code, after call. 1133 // deoptimization point in optimized code, after call.
1171 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1134 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1172 if (is_optimizing()) { 1135 if (is_optimizing()) {
1173 AddDeoptIndexAtCall(deopt_id_after); 1136 AddDeoptIndexAtCall(deopt_id_after);
1174 } else { 1137 } else {
1175 // Add deoptimization continuation point after the call and before the 1138 // Add deoptimization continuation point after the call and before the
1176 // arguments are removed. 1139 // arguments are removed.
OLD 1177-1178:
    AddCurrentDescriptor(RawPcDescriptors::kDeopt,
                         deopt_id_after, token_pos);
NEW 1140:
    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1179 } 1141 }
1180 AddStaticCallTarget(target); 1142 AddStaticCallTarget(target);
1181 } 1143 }
1182 1144
1183 1145
1184 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos, 1146 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
1185 intptr_t deopt_id, 1147 intptr_t deopt_id,
1186 const RuntimeEntry& entry, 1148 const RuntimeEntry& entry,
1187 intptr_t argument_count, 1149 intptr_t argument_count,
1188 LocationSummary* locs) { 1150 LocationSummary* locs) {
1189 __ CallRuntime(entry, argument_count); 1151 __ CallRuntime(entry, argument_count);
1190 AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id, token_pos); 1152 AddCurrentDescriptor(RawPcDescriptors::kOther, deopt_id, token_pos);
1191 RecordSafepoint(locs); 1153 RecordSafepoint(locs);
1192 if (deopt_id != Thread::kNoDeoptId) { 1154 if (deopt_id != Thread::kNoDeoptId) {
1193 // Marks either the continuation point in unoptimized code or the 1155 // Marks either the continuation point in unoptimized code or the
1194 // deoptimization point in optimized code, after call. 1156 // deoptimization point in optimized code, after call.
1195 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1157 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1196 if (is_optimizing()) { 1158 if (is_optimizing()) {
1197 AddDeoptIndexAtCall(deopt_id_after); 1159 AddDeoptIndexAtCall(deopt_id_after);
1198 } else { 1160 } else {
1199 // Add deoptimization continuation point after the call and before the 1161 // Add deoptimization continuation point after the call and before the
1200 // arguments are removed. 1162 // arguments are removed.
OLD 1201-1203:
      AddCurrentDescriptor(RawPcDescriptors::kDeopt,
                           deopt_id_after,
                           token_pos);
NEW 1163:
      AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1204 } 1164 }
1205 } 1165 }
1206 } 1166 }
1207 1167
1208 1168
1209 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) { 1169 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
1210 // We do not check for overflow when incrementing the edge counter. The 1170 // We do not check for overflow when incrementing the edge counter. The
1211 // function should normally be optimized long before the counter can 1171 // function should normally be optimized long before the counter can
1212 // overflow; and though we do not reset the counters when we optimize or 1172 // overflow; and though we do not reset the counters when we optimize or
1213 // deoptimize, there is a bound on the number of 1173 // deoptimize, there is a bound on the number of
1214 // optimization/deoptimization cycles we will attempt. 1174 // optimization/deoptimization cycles we will attempt.
1215 ASSERT(!edge_counters_array_.IsNull()); 1175 ASSERT(!edge_counters_array_.IsNull());
1216 ASSERT(assembler_->constant_pool_allowed()); 1176 ASSERT(assembler_->constant_pool_allowed());
1217 __ Comment("Edge counter"); 1177 __ Comment("Edge counter");
1218 __ LoadObject(R0, edge_counters_array_); 1178 __ LoadObject(R0, edge_counters_array_);
1219 #if defined(DEBUG) 1179 #if defined(DEBUG)
1220 bool old_use_far_branches = assembler_->use_far_branches(); 1180 bool old_use_far_branches = assembler_->use_far_branches();
1221 assembler_->set_use_far_branches(true); 1181 assembler_->set_use_far_branches(true);
1222 #endif // DEBUG 1182 #endif // DEBUG
1223 __ LoadFieldFromOffset(kWord, R1, R0, Array::element_offset(edge_id)); 1183 __ LoadFieldFromOffset(kWord, R1, R0, Array::element_offset(edge_id));
1224 __ add(R1, R1, Operand(Smi::RawValue(1))); 1184 __ add(R1, R1, Operand(Smi::RawValue(1)));
1225 __ StoreIntoObjectNoBarrierOffset(R0, Array::element_offset(edge_id), R1); 1185 __ StoreIntoObjectNoBarrierOffset(R0, Array::element_offset(edge_id), R1);
1226 #if defined(DEBUG) 1186 #if defined(DEBUG)
1227 assembler_->set_use_far_branches(old_use_far_branches); 1187 assembler_->set_use_far_branches(old_use_far_branches);
1228 #endif // DEBUG 1188 #endif // DEBUG
1229 } 1189 }
1230 1190
1231 1191
OLD 1232-1238:
void FlowGraphCompiler::EmitOptimizedInstanceCall(
    const StubEntry& stub_entry,
    const ICData& ic_data,
    intptr_t argument_count,
    intptr_t deopt_id,
    TokenPosition token_pos,
    LocationSummary* locs) {
NEW 1192-1197:
void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
                                                  const ICData& ic_data,
                                                  intptr_t argument_count,
                                                  intptr_t deopt_id,
                                                  TokenPosition token_pos,
                                                  LocationSummary* locs) {
1239 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); 1198 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
1240 // Each ICData propagated from unoptimized to optimized code contains the 1199 // Each ICData propagated from unoptimized to optimized code contains the
1241 // function that corresponds to the Dart function of that IC call. Due 1200 // function that corresponds to the Dart function of that IC call. Due
1242 // to inlining in optimized code, that function may not correspond to the 1201 // to inlining in optimized code, that function may not correspond to the
1243 // top-level function (parsed_function().function()) which could be 1202 // top-level function (parsed_function().function()) which could be
1244 // reoptimized and which counter needs to be incremented. 1203 // reoptimized and which counter needs to be incremented.
1245 // Pass the function explicitly, it is used in IC stub. 1204 // Pass the function explicitly, it is used in IC stub.
1246 1205
1247 __ LoadObject(R8, parsed_function().function()); 1206 __ LoadObject(R8, parsed_function().function());
1248 __ LoadUniqueObject(R9, ic_data); 1207 __ LoadUniqueObject(R9, ic_data);
OLD 1249-1253:
  GenerateDartCall(deopt_id,
                   token_pos,
                   stub_entry,
                   RawPcDescriptors::kIcCall,
                   locs);
NEW 1208-1209:
  GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
                   locs);
1254 __ Drop(argument_count); 1210 __ Drop(argument_count);
1255 } 1211 }
1256 1212
1257 1213
1258 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry, 1214 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
1259 const ICData& ic_data, 1215 const ICData& ic_data,
1260 intptr_t argument_count, 1216 intptr_t argument_count,
1261 intptr_t deopt_id, 1217 intptr_t deopt_id,
1262 TokenPosition token_pos, 1218 TokenPosition token_pos,
1263 LocationSummary* locs) { 1219 LocationSummary* locs) {
1264 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); 1220 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
1265 __ LoadUniqueObject(R9, ic_data); 1221 __ LoadUniqueObject(R9, ic_data);
OLD 1266-1270:
  GenerateDartCall(deopt_id,
                   token_pos,
                   stub_entry,
                   RawPcDescriptors::kIcCall,
                   locs);
NEW 1222-1223:
  GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
                   locs);
1271 __ Drop(argument_count); 1224 __ Drop(argument_count);
1272 } 1225 }
1273 1226
1274 1227
1275 void FlowGraphCompiler::EmitMegamorphicInstanceCall( 1228 void FlowGraphCompiler::EmitMegamorphicInstanceCall(
1276 const ICData& ic_data, 1229 const ICData& ic_data,
1277 intptr_t argument_count, 1230 intptr_t argument_count,
1278 intptr_t deopt_id, 1231 intptr_t deopt_id,
1279 TokenPosition token_pos, 1232 TokenPosition token_pos,
1280 LocationSummary* locs, 1233 LocationSummary* locs,
1281 intptr_t try_index, 1234 intptr_t try_index,
1282 intptr_t slow_path_argument_count) { 1235 intptr_t slow_path_argument_count) {
1283 const String& name = String::Handle(zone(), ic_data.target_name()); 1236 const String& name = String::Handle(zone(), ic_data.target_name());
1284 const Array& arguments_descriptor = 1237 const Array& arguments_descriptor =
1285 Array::ZoneHandle(zone(), ic_data.arguments_descriptor()); 1238 Array::ZoneHandle(zone(), ic_data.arguments_descriptor());
1286 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); 1239 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
OLD 1287-1288:
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(zone(),
      MegamorphicCacheTable::Lookup(isolate(), name, arguments_descriptor));
NEW 1240-1242:
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
      zone(),
      MegamorphicCacheTable::Lookup(isolate(), name, arguments_descriptor));
1289 1243
1290 __ Comment("MegamorphicCall"); 1244 __ Comment("MegamorphicCall");
1291 // Load receiver into R0. 1245 // Load receiver into R0.
1292 __ LoadFromOffset(kWord, R0, SP, (argument_count - 1) * kWordSize); 1246 __ LoadFromOffset(kWord, R0, SP, (argument_count - 1) * kWordSize);
1293 Label done; 1247 Label done;
1294 if (ShouldInlineSmiStringHashCode(ic_data)) { 1248 if (ShouldInlineSmiStringHashCode(ic_data)) {
1295 Label megamorphic_call; 1249 Label megamorphic_call;
1296 __ Comment("Inlined get:hashCode for Smi and OneByteString"); 1250 __ Comment("Inlined get:hashCode for Smi and OneByteString");
1297 __ tst(R0, Operand(kSmiTagMask)); 1251 __ tst(R0, Operand(kSmiTagMask));
1298 __ b(&done, EQ); // Is Smi (result is receiver). 1252 __ b(&done, EQ); // Is Smi (result is receiver).
1299 1253
1300 // Use R9 (cache for megamorphic call) as scratch. 1254 // Use R9 (cache for megamorphic call) as scratch.
1301 __ CompareClassId(R0, kOneByteStringCid, R9); 1255 __ CompareClassId(R0, kOneByteStringCid, R9);
1302 __ b(&megamorphic_call, NE); 1256 __ b(&megamorphic_call, NE);
1303 1257
1304 __ mov(R9, Operand(R0)); // Preserve receiver in R9. 1258 __ mov(R9, Operand(R0)); // Preserve receiver in R9.
1305 __ ldr(R0, FieldAddress(R0, String::hash_offset())); 1259 __ ldr(R0, FieldAddress(R0, String::hash_offset()));
1306 ASSERT(Smi::New(0) == 0); 1260 ASSERT(Smi::New(0) == 0);
1307 __ cmp(R0, Operand(0)); 1261 __ cmp(R0, Operand(0));
1308 1262
1309 __ b(&done, NE); // Return if already computed. 1263 __ b(&done, NE); // Return if already computed.
1310 __ mov(R0, Operand(R9)); // Restore receiver in R0. 1264 __ mov(R0, Operand(R9)); // Restore receiver in R0.
1311 1265
1312 __ Bind(&megamorphic_call); 1266 __ Bind(&megamorphic_call);
1313 __ Comment("Slow case: megamorphic call"); 1267 __ Comment("Slow case: megamorphic call");
1314 } 1268 }
1315 __ LoadObject(R9, cache); 1269 __ LoadObject(R9, cache);
1316 __ ldr(LR, Address(THR, Thread::megamorphic_call_checked_entry_offset())); 1270 __ ldr(LR, Address(THR, Thread::megamorphic_call_checked_entry_offset()));
1317 __ blx(LR); 1271 __ blx(LR);
1318 1272
1319 __ Bind(&done); 1273 __ Bind(&done);
1320 RecordSafepoint(locs, slow_path_argument_count); 1274 RecordSafepoint(locs, slow_path_argument_count);
1321 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1275 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1322 if (FLAG_precompiled_mode) { 1276 if (FLAG_precompiled_mode) {
1323 // Megamorphic calls may occur in slow path stubs. 1277 // Megamorphic calls may occur in slow path stubs.
1324 // If valid use try_index argument. 1278 // If valid use try_index argument.
1325 if (try_index == CatchClauseNode::kInvalidTryIndex) { 1279 if (try_index == CatchClauseNode::kInvalidTryIndex) {
1326 try_index = CurrentTryIndex(); 1280 try_index = CurrentTryIndex();
1327 } 1281 }
OLD 1328-1332:
    pc_descriptors_list()->AddDescriptor(RawPcDescriptors::kOther,
                                         assembler()->CodeSize(),
                                         Thread::kNoDeoptId,
                                         token_pos,
                                         try_index);
NEW 1282-1284:
    pc_descriptors_list()->AddDescriptor(
        RawPcDescriptors::kOther, assembler()->CodeSize(), Thread::kNoDeoptId,
        token_pos, try_index);
1333 } else if (is_optimizing()) { 1285 } else if (is_optimizing()) {
OLD 1334-1335:
    AddCurrentDescriptor(RawPcDescriptors::kOther,
                         Thread::kNoDeoptId, token_pos);
NEW 1286-1287:
    AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
                         token_pos);
1336 AddDeoptIndexAtCall(deopt_id_after); 1288 AddDeoptIndexAtCall(deopt_id_after);
1337 } else { 1289 } else {
OLD 1338-1339:
    AddCurrentDescriptor(RawPcDescriptors::kOther,
                         Thread::kNoDeoptId, token_pos);
NEW 1290-1291:
    AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
                         token_pos);
1340 // Add deoptimization continuation point after the call and before the 1292 // Add deoptimization continuation point after the call and before the
1341 // arguments are removed. 1293 // arguments are removed.
OLD 1342-1343:
    AddCurrentDescriptor(RawPcDescriptors::kDeopt,
                         deopt_id_after, token_pos);
NEW 1294:
    AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1344 } 1295 }
1345 __ Drop(argument_count); 1296 __ Drop(argument_count);
1346 } 1297 }
1347 1298
1348 1299
1349 void FlowGraphCompiler::EmitSwitchableInstanceCall( 1300 void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
1350 const ICData& ic_data, 1301 intptr_t argument_count,
1351 intptr_t argument_count, 1302 intptr_t deopt_id,
1352 intptr_t deopt_id, 1303 TokenPosition token_pos,
1353 TokenPosition token_pos, 1304 LocationSummary* locs) {
1354 LocationSummary* locs) {
1355 ASSERT(ic_data.NumArgsTested() == 1); 1305 ASSERT(ic_data.NumArgsTested() == 1);
1356 const Code& initial_stub = Code::ZoneHandle( 1306 const Code& initial_stub =
1357 StubCode::ICCallThroughFunction_entry()->code()); 1307 Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code());
1358 1308
1359 __ Comment("SwitchableCall"); 1309 __ Comment("SwitchableCall");
1360 1310
1361 __ LoadFromOffset(kWord, R0, SP, (argument_count - 1) * kWordSize); 1311 __ LoadFromOffset(kWord, R0, SP, (argument_count - 1) * kWordSize);
1362 __ LoadUniqueObject(CODE_REG, initial_stub); 1312 __ LoadUniqueObject(CODE_REG, initial_stub);
1363 __ ldr(LR, FieldAddress(CODE_REG, Code::checked_entry_point_offset())); 1313 __ ldr(LR, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
1364 __ LoadUniqueObject(R9, ic_data); 1314 __ LoadUniqueObject(R9, ic_data);
1365 __ blx(LR); 1315 __ blx(LR);
1366 1316
1367 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId, token_pos); 1317 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId, token_pos);
1368 RecordSafepoint(locs); 1318 RecordSafepoint(locs);
1369 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1319 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1370 if (is_optimizing()) { 1320 if (is_optimizing()) {
1371 AddDeoptIndexAtCall(deopt_id_after); 1321 AddDeoptIndexAtCall(deopt_id_after);
1372 } else { 1322 } else {
1373 // Add deoptimization continuation point after the call and before the 1323 // Add deoptimization continuation point after the call and before the
1374 // arguments are removed. 1324 // arguments are removed.
1375 AddCurrentDescriptor(RawPcDescriptors::kDeopt, 1325 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1376 deopt_id_after, token_pos);
1377 } 1326 }
1378 __ Drop(argument_count); 1327 __ Drop(argument_count);
1379 } 1328 }
1380 1329
1381 1330
1382 void FlowGraphCompiler::EmitUnoptimizedStaticCall( 1331 void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
1383 intptr_t argument_count, 1332 intptr_t deopt_id,
1384 intptr_t deopt_id, 1333 TokenPosition token_pos,
1385 TokenPosition token_pos, 1334 LocationSummary* locs,
1386 LocationSummary* locs, 1335 const ICData& ic_data) {
1387 const ICData& ic_data) {
1388 const StubEntry* stub_entry = 1336 const StubEntry* stub_entry =
1389 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested()); 1337 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
1390 __ LoadObject(R9, ic_data); 1338 __ LoadObject(R9, ic_data);
1391 GenerateDartCall(deopt_id, 1339 GenerateDartCall(deopt_id, token_pos, *stub_entry,
1392 token_pos, 1340 RawPcDescriptors::kUnoptStaticCall, locs);
1393 *stub_entry,
1394 RawPcDescriptors::kUnoptStaticCall,
1395 locs);
1396 __ Drop(argument_count); 1341 __ Drop(argument_count);
1397 } 1342 }
1398 1343
1399 1344
1400 void FlowGraphCompiler::EmitOptimizedStaticCall( 1345 void FlowGraphCompiler::EmitOptimizedStaticCall(
1401 const Function& function, 1346 const Function& function,
1402 const Array& arguments_descriptor, 1347 const Array& arguments_descriptor,
1403 intptr_t argument_count, 1348 intptr_t argument_count,
1404 intptr_t deopt_id, 1349 intptr_t deopt_id,
1405 TokenPosition token_pos, 1350 TokenPosition token_pos,
1406 LocationSummary* locs) { 1351 LocationSummary* locs) {
1407 ASSERT(!function.IsClosureFunction()); 1352 ASSERT(!function.IsClosureFunction());
1408 if (function.HasOptionalParameters()) { 1353 if (function.HasOptionalParameters()) {
1409 __ LoadObject(R4, arguments_descriptor); 1354 __ LoadObject(R4, arguments_descriptor);
1410 } else { 1355 } else {
1411 __ LoadImmediate(R4, 0); // GC safe smi zero because of stub. 1356 __ LoadImmediate(R4, 0); // GC safe smi zero because of stub.
1412 } 1357 }
1413 // Do not use the code from the function, but let the code be patched so that 1358 // Do not use the code from the function, but let the code be patched so that
1414 // we can record the outgoing edges to other code. 1359 // we can record the outgoing edges to other code.
1415 GenerateStaticDartCall(deopt_id, 1360 GenerateStaticDartCall(deopt_id, token_pos,
1416 token_pos,
1417 *StubCode::CallStaticFunction_entry(), 1361 *StubCode::CallStaticFunction_entry(),
1418 RawPcDescriptors::kOther, 1362 RawPcDescriptors::kOther, locs, function);
1419 locs,
1420 function);
1421 __ Drop(argument_count); 1363 __ Drop(argument_count);
1422 } 1364 }
1423 1365
1424 1366
1425 Condition FlowGraphCompiler::EmitEqualityRegConstCompare( 1367 Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
1426 Register reg, 1368 Register reg,
1427 const Object& obj, 1369 const Object& obj,
1428 bool needs_number_check, 1370 bool needs_number_check,
1429 TokenPosition token_pos) { 1371 TokenPosition token_pos) {
1430 if (needs_number_check) { 1372 if (needs_number_check) {
1431 ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()); 1373 ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint());
1432 __ Push(reg); 1374 __ Push(reg);
1433 __ PushObject(obj); 1375 __ PushObject(obj);
1434 if (is_optimizing()) { 1376 if (is_optimizing()) {
1435 __ BranchLinkPatchable( 1377 __ BranchLinkPatchable(
1436 *StubCode::OptimizedIdenticalWithNumberCheck_entry()); 1378 *StubCode::OptimizedIdenticalWithNumberCheck_entry());
1437 } else { 1379 } else {
1438 __ BranchLinkPatchable( 1380 __ BranchLinkPatchable(
1439 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); 1381 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
1440 } 1382 }
1441 if (token_pos.IsReal()) { 1383 if (token_pos.IsReal()) {
1442 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, 1384 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId,
1443 Thread::kNoDeoptId,
1444 token_pos); 1385 token_pos);
1445 } 1386 }
1446 // Stub returns result in flags (result of a cmp, we need Z computed). 1387 // Stub returns result in flags (result of a cmp, we need Z computed).
1447 __ Drop(1); // Discard constant. 1388 __ Drop(1); // Discard constant.
1448 __ Pop(reg); // Restore 'reg'. 1389 __ Pop(reg); // Restore 'reg'.
1449 } else { 1390 } else {
1450 __ CompareObject(reg, obj); 1391 __ CompareObject(reg, obj);
1451 } 1392 }
1452 return EQ; 1393 return EQ;
1453 } 1394 }
1454 1395
1455 1396
1456 Condition FlowGraphCompiler::EmitEqualityRegRegCompare( 1397 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
1457 Register left, 1398 Register left,
1458 Register right, 1399 Register right,
1459 bool needs_number_check, 1400 bool needs_number_check,
1460 TokenPosition token_pos) { 1401 TokenPosition token_pos) {
1461 if (needs_number_check) { 1402 if (needs_number_check) {
1462 __ Push(left); 1403 __ Push(left);
1463 __ Push(right); 1404 __ Push(right);
1464 if (is_optimizing()) { 1405 if (is_optimizing()) {
1465 __ BranchLinkPatchable( 1406 __ BranchLinkPatchable(
1466 *StubCode::OptimizedIdenticalWithNumberCheck_entry()); 1407 *StubCode::OptimizedIdenticalWithNumberCheck_entry());
1467 } else { 1408 } else {
1468 __ BranchLinkPatchable( 1409 __ BranchLinkPatchable(
1469 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); 1410 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
1470 } 1411 }
1471 if (token_pos.IsReal()) { 1412 if (token_pos.IsReal()) {
1472 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, 1413 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, Thread::kNoDeoptId,
1473 Thread::kNoDeoptId,
1474 token_pos); 1414 token_pos);
1475 } 1415 }
1476 // Stub returns result in flags (result of a cmp, we need Z computed). 1416 // Stub returns result in flags (result of a cmp, we need Z computed).
1477 __ Pop(right); 1417 __ Pop(right);
1478 __ Pop(left); 1418 __ Pop(left);
1479 } else { 1419 } else {
1480 __ cmp(left, Operand(right)); 1420 __ cmp(left, Operand(right));
1481 } 1421 }
1482 return EQ; 1422 return EQ;
1483 } 1423 }
(...skipping 96 matching lines...)
1580 intptr_t argument_count, 1520 intptr_t argument_count,
1581 const Array& argument_names, 1521 const Array& argument_names,
1582 Label* failed, 1522 Label* failed,
1583 Label* match_found, 1523 Label* match_found,
1584 intptr_t deopt_id, 1524 intptr_t deopt_id,
1585 TokenPosition token_index, 1525 TokenPosition token_index,
1586 LocationSummary* locs, 1526 LocationSummary* locs,
1587 bool complete) { 1527 bool complete) {
1588 ASSERT(is_optimizing()); 1528 ASSERT(is_optimizing());
1589 __ Comment("EmitTestAndCall"); 1529 __ Comment("EmitTestAndCall");
1590 const Array& arguments_descriptor = 1530 const Array& arguments_descriptor = Array::ZoneHandle(
1591 Array::ZoneHandle(zone(), ArgumentsDescriptor::New(argument_count, 1531 zone(), ArgumentsDescriptor::New(argument_count, argument_names));
1592 argument_names));
1593 1532
1594 // Load receiver into R0. 1533 // Load receiver into R0.
1595 __ LoadFromOffset(kWord, R0, SP, (argument_count - 1) * kWordSize); 1534 __ LoadFromOffset(kWord, R0, SP, (argument_count - 1) * kWordSize);
1596 __ LoadObject(R4, arguments_descriptor); 1535 __ LoadObject(R4, arguments_descriptor);
1597 1536
1598 const bool kFirstCheckIsSmi = ic_data.GetReceiverClassIdAt(0) == kSmiCid; 1537 const bool kFirstCheckIsSmi = ic_data.GetReceiverClassIdAt(0) == kSmiCid;
1599 const intptr_t kNumChecks = ic_data.NumberOfChecks(); 1538 const intptr_t kNumChecks = ic_data.NumberOfChecks();
1600 1539
1601 ASSERT(!ic_data.IsNull() && (kNumChecks > 0)); 1540 ASSERT(!ic_data.IsNull() && (kNumChecks > 0));
1602 1541
1603 Label after_smi_test; 1542 Label after_smi_test;
1604 if (kFirstCheckIsSmi) { 1543 if (kFirstCheckIsSmi) {
1605 __ tst(R0, Operand(kSmiTagMask)); 1544 __ tst(R0, Operand(kSmiTagMask));
1606 // Jump if receiver is not Smi. 1545 // Jump if receiver is not Smi.
1607 if (kNumChecks == 1) { 1546 if (kNumChecks == 1) {
1608 __ b(failed, NE); 1547 __ b(failed, NE);
1609 } else { 1548 } else {
1610 __ b(&after_smi_test, NE); 1549 __ b(&after_smi_test, NE);
1611 } 1550 }
1612 // Do not use the code from the function, but let the code be patched so 1551 // Do not use the code from the function, but let the code be patched so
1613 // that we can record the outgoing edges to other code. 1552 // that we can record the outgoing edges to other code.
1614 const Function& function = Function::ZoneHandle( 1553 const Function& function =
1615 zone(), ic_data.GetTargetAt(0)); 1554 Function::ZoneHandle(zone(), ic_data.GetTargetAt(0));
1616 GenerateStaticDartCall(deopt_id, 1555 GenerateStaticDartCall(deopt_id, token_index,
1617 token_index,
1618 *StubCode::CallStaticFunction_entry(), 1556 *StubCode::CallStaticFunction_entry(),
1619 RawPcDescriptors::kOther, 1557 RawPcDescriptors::kOther, locs, function);
1620 locs,
1621 function);
1622 __ Drop(argument_count); 1558 __ Drop(argument_count);
1623 if (kNumChecks > 1) { 1559 if (kNumChecks > 1) {
1624 __ b(match_found); 1560 __ b(match_found);
1625 } 1561 }
1626 } else { 1562 } else {
1627 // Receiver is Smi, but Smi is not a valid class therefore fail. 1563 // Receiver is Smi, but Smi is not a valid class therefore fail.
1628 // (Smi class must be first in the list). 1564 // (Smi class must be first in the list).
1629 if (!complete) { 1565 if (!complete) {
1630 __ tst(R0, Operand(kSmiTagMask)); 1566 __ tst(R0, Operand(kSmiTagMask));
1631 __ b(failed, EQ); 1567 __ b(failed, EQ);
(...skipping 25 matching lines...)
1657 } 1593 }
1658 } else { 1594 } else {
1659 if (!kIsLastCheck) { 1595 if (!kIsLastCheck) {
1660 __ CompareImmediate(R2, sorted[i].cid); 1596 __ CompareImmediate(R2, sorted[i].cid);
1661 __ b(&next_test, NE); 1597 __ b(&next_test, NE);
1662 } 1598 }
1663 } 1599 }
1664 // Do not use the code from the function, but let the code be patched so 1600 // Do not use the code from the function, but let the code be patched so
1665 // that we can record the outgoing edges to other code. 1601 // that we can record the outgoing edges to other code.
1666 const Function& function = *sorted[i].target; 1602 const Function& function = *sorted[i].target;
1667 GenerateStaticDartCall(deopt_id, 1603 GenerateStaticDartCall(deopt_id, token_index,
1668 token_index,
1669 *StubCode::CallStaticFunction_entry(), 1604 *StubCode::CallStaticFunction_entry(),
1670 RawPcDescriptors::kOther, 1605 RawPcDescriptors::kOther, locs, function);
1671 locs,
1672 function);
1673 __ Drop(argument_count); 1606 __ Drop(argument_count);
1674 if (!kIsLastCheck) { 1607 if (!kIsLastCheck) {
1675 __ b(match_found); 1608 __ b(match_found);
1676 } 1609 }
1677 __ Bind(&next_test); 1610 __ Bind(&next_test);
1678 } 1611 }
1679 } 1612 }
1680 1613
1681 1614
1682 #undef __ 1615 #undef __
1683 #define __ compiler_->assembler()-> 1616 #define __ compiler_->assembler()->
1684 1617
1685 1618
1686 void ParallelMoveResolver::EmitMove(int index) { 1619 void ParallelMoveResolver::EmitMove(int index) {
1687 MoveOperands* move = moves_[index]; 1620 MoveOperands* move = moves_[index];
1688 const Location source = move->src(); 1621 const Location source = move->src();
1689 const Location destination = move->dest(); 1622 const Location destination = move->dest();
1690 1623
1691 if (source.IsRegister()) { 1624 if (source.IsRegister()) {
1692 if (destination.IsRegister()) { 1625 if (destination.IsRegister()) {
1693 __ mov(destination.reg(), Operand(source.reg())); 1626 __ mov(destination.reg(), Operand(source.reg()));
1694 } else { 1627 } else {
1695 ASSERT(destination.IsStackSlot()); 1628 ASSERT(destination.IsStackSlot());
1696 const intptr_t dest_offset = destination.ToStackSlotOffset(); 1629 const intptr_t dest_offset = destination.ToStackSlotOffset();
1697 __ StoreToOffset( 1630 __ StoreToOffset(kWord, source.reg(), destination.base_reg(),
1698 kWord, source.reg(), destination.base_reg(), dest_offset); 1631 dest_offset);
1699 } 1632 }
1700 } else if (source.IsStackSlot()) { 1633 } else if (source.IsStackSlot()) {
1701 if (destination.IsRegister()) { 1634 if (destination.IsRegister()) {
1702 const intptr_t source_offset = source.ToStackSlotOffset(); 1635 const intptr_t source_offset = source.ToStackSlotOffset();
1703 __ LoadFromOffset( 1636 __ LoadFromOffset(kWord, destination.reg(), source.base_reg(),
1704 kWord, destination.reg(), source.base_reg(), source_offset); 1637 source_offset);
1705 } else { 1638 } else {
1706 ASSERT(destination.IsStackSlot()); 1639 ASSERT(destination.IsStackSlot());
1707 const intptr_t source_offset = source.ToStackSlotOffset(); 1640 const intptr_t source_offset = source.ToStackSlotOffset();
1708 const intptr_t dest_offset = destination.ToStackSlotOffset(); 1641 const intptr_t dest_offset = destination.ToStackSlotOffset();
1709 __ LoadFromOffset(kWord, TMP, source.base_reg(), source_offset); 1642 __ LoadFromOffset(kWord, TMP, source.base_reg(), source_offset);
1710 __ StoreToOffset(kWord, TMP, destination.base_reg(), dest_offset); 1643 __ StoreToOffset(kWord, TMP, destination.base_reg(), dest_offset);
1711 } 1644 }
1712 } else if (source.IsFpuRegister()) { 1645 } else if (source.IsFpuRegister()) {
1713 if (destination.IsFpuRegister()) { 1646 if (destination.IsFpuRegister()) {
1714 if (TargetCPUFeatures::neon_supported()) { 1647 if (TargetCPUFeatures::neon_supported()) {
1715 __ vmovq(destination.fpu_reg(), source.fpu_reg()); 1648 __ vmovq(destination.fpu_reg(), source.fpu_reg());
1716 } else { 1649 } else {
1717 // If we're not inlining simd values, then only the even numbered D 1650 // If we're not inlining simd values, then only the even numbered D
1718 // register will have anything in them. 1651 // register will have anything in them.
1719 __ vmovd(EvenDRegisterOf(destination.fpu_reg()), 1652 __ vmovd(EvenDRegisterOf(destination.fpu_reg()),
1720 EvenDRegisterOf(source.fpu_reg())); 1653 EvenDRegisterOf(source.fpu_reg()));
1721 } 1654 }
1722 } else { 1655 } else {
1723 if (destination.IsDoubleStackSlot()) { 1656 if (destination.IsDoubleStackSlot()) {
1724 const intptr_t dest_offset = destination.ToStackSlotOffset(); 1657 const intptr_t dest_offset = destination.ToStackSlotOffset();
1725 DRegister src = EvenDRegisterOf(source.fpu_reg()); 1658 DRegister src = EvenDRegisterOf(source.fpu_reg());
1726 __ StoreDToOffset(src, destination.base_reg(), dest_offset); 1659 __ StoreDToOffset(src, destination.base_reg(), dest_offset);
1727 } else { 1660 } else {
1728 ASSERT(destination.IsQuadStackSlot()); 1661 ASSERT(destination.IsQuadStackSlot());
1729 const intptr_t dest_offset = destination.ToStackSlotOffset(); 1662 const intptr_t dest_offset = destination.ToStackSlotOffset();
1730 const DRegister dsrc0 = EvenDRegisterOf(source.fpu_reg()); 1663 const DRegister dsrc0 = EvenDRegisterOf(source.fpu_reg());
1731 __ StoreMultipleDToOffset( 1664 __ StoreMultipleDToOffset(dsrc0, 2, destination.base_reg(),
1732 dsrc0, 2, destination.base_reg(), dest_offset); 1665 dest_offset);
1733 } 1666 }
1734 } 1667 }
1735 } else if (source.IsDoubleStackSlot()) { 1668 } else if (source.IsDoubleStackSlot()) {
1736 if (destination.IsFpuRegister()) { 1669 if (destination.IsFpuRegister()) {
1737 const intptr_t source_offset = source.ToStackSlotOffset(); 1670 const intptr_t source_offset = source.ToStackSlotOffset();
1738 const DRegister dst = EvenDRegisterOf(destination.fpu_reg()); 1671 const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
1739 __ LoadDFromOffset(dst, source.base_reg(), source_offset); 1672 __ LoadDFromOffset(dst, source.base_reg(), source_offset);
1740 } else { 1673 } else {
1741 ASSERT(destination.IsDoubleStackSlot()); 1674 ASSERT(destination.IsDoubleStackSlot());
1742 const intptr_t source_offset = source.ToStackSlotOffset(); 1675 const intptr_t source_offset = source.ToStackSlotOffset();
(...skipping 66 matching lines...)
1809 const Location source = move->src(); 1742 const Location source = move->src();
1810 const Location destination = move->dest(); 1743 const Location destination = move->dest();
1811 1744
1812 if (source.IsRegister() && destination.IsRegister()) { 1745 if (source.IsRegister() && destination.IsRegister()) {
1813 ASSERT(source.reg() != IP); 1746 ASSERT(source.reg() != IP);
1814 ASSERT(destination.reg() != IP); 1747 ASSERT(destination.reg() != IP);
1815 __ mov(IP, Operand(source.reg())); 1748 __ mov(IP, Operand(source.reg()));
1816 __ mov(source.reg(), Operand(destination.reg())); 1749 __ mov(source.reg(), Operand(destination.reg()));
1817 __ mov(destination.reg(), Operand(IP)); 1750 __ mov(destination.reg(), Operand(IP));
1818 } else if (source.IsRegister() && destination.IsStackSlot()) { 1751 } else if (source.IsRegister() && destination.IsStackSlot()) {
1819 Exchange(source.reg(), 1752 Exchange(source.reg(), destination.base_reg(),
1820 destination.base_reg(), destination.ToStackSlotOffset()); 1753 destination.ToStackSlotOffset());
1821 } else if (source.IsStackSlot() && destination.IsRegister()) { 1754 } else if (source.IsStackSlot() && destination.IsRegister()) {
1822 Exchange(destination.reg(), 1755 Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
1823 source.base_reg(), source.ToStackSlotOffset());
1824 } else if (source.IsStackSlot() && destination.IsStackSlot()) { 1756 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1825 Exchange(source.base_reg(), source.ToStackSlotOffset(), 1757 Exchange(source.base_reg(), source.ToStackSlotOffset(),
1826 destination.base_reg(), destination.ToStackSlotOffset()); 1758 destination.base_reg(), destination.ToStackSlotOffset());
1827 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { 1759 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1828 if (TargetCPUFeatures::neon_supported()) { 1760 if (TargetCPUFeatures::neon_supported()) {
1829 const QRegister dst = destination.fpu_reg(); 1761 const QRegister dst = destination.fpu_reg();
1830 const QRegister src = source.fpu_reg(); 1762 const QRegister src = source.fpu_reg();
1831 __ vmovq(QTMP, src); 1763 __ vmovq(QTMP, src);
1832 __ vmovq(src, dst); 1764 __ vmovq(src, dst);
1833 __ vmovq(dst, QTMP); 1765 __ vmovq(dst, QTMP);
1834 } else { 1766 } else {
1835 const DRegister dst = EvenDRegisterOf(destination.fpu_reg()); 1767 const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
1836 const DRegister src = EvenDRegisterOf(source.fpu_reg()); 1768 const DRegister src = EvenDRegisterOf(source.fpu_reg());
1837 __ vmovd(DTMP, src); 1769 __ vmovd(DTMP, src);
1838 __ vmovd(src, dst); 1770 __ vmovd(src, dst);
1839 __ vmovd(dst, DTMP); 1771 __ vmovd(dst, DTMP);
1840 } 1772 }
1841 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) { 1773 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1842 ASSERT(destination.IsDoubleStackSlot() || 1774 ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
1843 destination.IsQuadStackSlot() || 1775 source.IsDoubleStackSlot() || source.IsQuadStackSlot());
1844 source.IsDoubleStackSlot() || 1776 bool double_width =
1845 source.IsQuadStackSlot()); 1777 destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
1846 bool double_width = destination.IsDoubleStackSlot() || 1778 QRegister qreg =
1847 source.IsDoubleStackSlot(); 1779 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1848 QRegister qreg = source.IsFpuRegister() ? source.fpu_reg()
1849 : destination.fpu_reg();
1850 DRegister reg = EvenDRegisterOf(qreg); 1780 DRegister reg = EvenDRegisterOf(qreg);
1851 Register base_reg = source.IsFpuRegister() 1781 Register base_reg =
1852 ? destination.base_reg() 1782 source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
1853 : source.base_reg();
1854 const intptr_t slot_offset = source.IsFpuRegister() 1783 const intptr_t slot_offset = source.IsFpuRegister()
1855 ? destination.ToStackSlotOffset() 1784 ? destination.ToStackSlotOffset()
1856 : source.ToStackSlotOffset(); 1785 : source.ToStackSlotOffset();
1857 1786
1858 if (double_width) { 1787 if (double_width) {
1859 __ LoadDFromOffset(DTMP, base_reg, slot_offset); 1788 __ LoadDFromOffset(DTMP, base_reg, slot_offset);
1860 __ StoreDToOffset(reg, base_reg, slot_offset); 1789 __ StoreDToOffset(reg, base_reg, slot_offset);
1861 __ vmovd(reg, DTMP); 1790 __ vmovd(reg, DTMP);
1862 } else { 1791 } else {
1863 __ LoadMultipleDFromOffset(DTMP, 2, base_reg, slot_offset); 1792 __ LoadMultipleDFromOffset(DTMP, 2, base_reg, slot_offset);
1864 __ StoreMultipleDToOffset(reg, 2, base_reg, slot_offset); 1793 __ StoreMultipleDToOffset(reg, 2, base_reg, slot_offset);
1865 __ vmovq(qreg, QTMP); 1794 __ vmovq(qreg, QTMP);
1866 } 1795 }
1867 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) { 1796 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1868 const intptr_t source_offset = source.ToStackSlotOffset(); 1797 const intptr_t source_offset = source.ToStackSlotOffset();
1869 const intptr_t dest_offset = destination.ToStackSlotOffset(); 1798 const intptr_t dest_offset = destination.ToStackSlotOffset();
1870 1799
1871 ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister); 1800 ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
1872 DRegister scratch = EvenDRegisterOf(ensure_scratch.reg()); 1801 DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
1873 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset); 1802 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
1874 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset); 1803 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
1875 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset); 1804 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
1876 __ StoreDToOffset(scratch, destination.base_reg(), source_offset); 1805 __ StoreDToOffset(scratch, destination.base_reg(), source_offset);
1877 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) { 1806 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1878 const intptr_t source_offset = source.ToStackSlotOffset(); 1807 const intptr_t source_offset = source.ToStackSlotOffset();
1879 const intptr_t dest_offset = destination.ToStackSlotOffset(); 1808 const intptr_t dest_offset = destination.ToStackSlotOffset();
1880 1809
1881 ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister); 1810 ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
1882 DRegister scratch = EvenDRegisterOf(ensure_scratch.reg()); 1811 DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
1883 __ LoadMultipleDFromOffset(DTMP, 2, source.base_reg(), source_offset); 1812 __ LoadMultipleDFromOffset(DTMP, 2, source.base_reg(), source_offset);
1884 __ LoadMultipleDFromOffset(scratch, 2, destination.base_reg(), dest_offset); 1813 __ LoadMultipleDFromOffset(scratch, 2, destination.base_reg(), dest_offset);
1885 __ StoreMultipleDToOffset(DTMP, 2, destination.base_reg(), dest_offset); 1814 __ StoreMultipleDToOffset(DTMP, 2, destination.base_reg(), dest_offset);
1886 __ StoreMultipleDToOffset( 1815 __ StoreMultipleDToOffset(scratch, 2, destination.base_reg(),
1887 scratch, 2, destination.base_reg(), source_offset); 1816 source_offset);
1888 } else { 1817 } else {
1889 UNREACHABLE(); 1818 UNREACHABLE();
1890 } 1819 }
1891 1820
1892 // The swap of source and destination has executed a move from source to 1821 // The swap of source and destination has executed a move from source to
1893 // destination. 1822 // destination.
1894 move->Eliminate(); 1823 move->Eliminate();
1895 1824
1896 // Any unperformed (including pending) move with a source of either 1825 // Any unperformed (including pending) move with a source of either
1897 // this move's source or destination needs to have their source 1826 // this move's source or destination needs to have their source
(...skipping 77 matching lines...)
1975 DRegister dreg = EvenDRegisterOf(reg); 1904 DRegister dreg = EvenDRegisterOf(reg);
1976 __ vldrd(dreg, Address(SP, kDoubleSize, Address::PostIndex)); 1905 __ vldrd(dreg, Address(SP, kDoubleSize, Address::PostIndex));
1977 } 1906 }
1978 1907
1979 1908
1980 #undef __ 1909 #undef __
1981 1910
1982 } // namespace dart 1911 } // namespace dart
1983 1912
1984 #endif // defined TARGET_ARCH_ARM 1913 #endif // defined TARGET_ARCH_ARM