Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_compiler_arm.cc

Issue 2974233002: VM: Re-format to use at most one newline between functions (Closed)
Patch Set: Rebase and merge (created 3 years, 5 months ago)
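The change is mechanical: wherever two consecutive blank lines separated top-level definitions, one is dropped, and a handful of multi-line call sites are rewrapped as a side effect (see the __ ldm and __ add sites further down). A minimal before/after illustration, excerpted from this file; the resulting one-newline style is what a formatter configured with MaxEmptyLinesToKeep: 1 would produce, though the exact tooling used for this CL is not shown on this page.

Before (old lines 41-48, two blank lines between functions):

bool FlowGraphCompiler::SupportsUnboxedDoubles() {
  return TargetCPUFeatures::vfp_supported() && FLAG_unbox_doubles;
}


bool FlowGraphCompiler::SupportsUnboxedMints() {
  return FLAG_unbox_mints;
}

After (new lines 39-45, at most one blank line):

bool FlowGraphCompiler::SupportsUnboxedDoubles() {
  return TargetCPUFeatures::vfp_supported() && FLAG_unbox_doubles;
}

bool FlowGraphCompiler::SupportsUnboxedMints() {
  return FLAG_unbox_mints;
}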
OLD | NEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/flow_graph_compiler.h" 8 #include "vm/flow_graph_compiler.h"
9 9
10 #include "vm/ast_printer.h" 10 #include "vm/ast_printer.h"
(...skipping 10 matching lines...)
21 #include "vm/stub_code.h" 21 #include "vm/stub_code.h"
22 #include "vm/symbols.h" 22 #include "vm/symbols.h"
23 23
24 namespace dart { 24 namespace dart {
25 25
26 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization."); 26 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
27 DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic."); 27 DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
28 DEFINE_FLAG(bool, unbox_doubles, true, "Optimize double arithmetic."); 28 DEFINE_FLAG(bool, unbox_doubles, true, "Optimize double arithmetic.");
29 DECLARE_FLAG(bool, enable_simd_inline); 29 DECLARE_FLAG(bool, enable_simd_inline);
30 30
31
32 FlowGraphCompiler::~FlowGraphCompiler() { 31 FlowGraphCompiler::~FlowGraphCompiler() {
33 // BlockInfos are zone-allocated, so their destructors are not called. 32 // BlockInfos are zone-allocated, so their destructors are not called.
34 // Verify the labels explicitly here. 33 // Verify the labels explicitly here.
35 for (int i = 0; i < block_info_.length(); ++i) { 34 for (int i = 0; i < block_info_.length(); ++i) {
36 ASSERT(!block_info_[i]->jump_label()->IsLinked()); 35 ASSERT(!block_info_[i]->jump_label()->IsLinked());
37 } 36 }
38 } 37 }
39 38
40
41 bool FlowGraphCompiler::SupportsUnboxedDoubles() { 39 bool FlowGraphCompiler::SupportsUnboxedDoubles() {
42 return TargetCPUFeatures::vfp_supported() && FLAG_unbox_doubles; 40 return TargetCPUFeatures::vfp_supported() && FLAG_unbox_doubles;
43 } 41 }
44 42
45
46 bool FlowGraphCompiler::SupportsUnboxedMints() { 43 bool FlowGraphCompiler::SupportsUnboxedMints() {
47 return FLAG_unbox_mints; 44 return FLAG_unbox_mints;
48 } 45 }
49 46
50
51 bool FlowGraphCompiler::SupportsUnboxedSimd128() { 47 bool FlowGraphCompiler::SupportsUnboxedSimd128() {
52 return TargetCPUFeatures::neon_supported() && FLAG_enable_simd_inline; 48 return TargetCPUFeatures::neon_supported() && FLAG_enable_simd_inline;
53 } 49 }
54 50
55
56 bool FlowGraphCompiler::SupportsHardwareDivision() { 51 bool FlowGraphCompiler::SupportsHardwareDivision() {
57 return TargetCPUFeatures::can_divide(); 52 return TargetCPUFeatures::can_divide();
58 } 53 }
59 54
60
61 bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() { 55 bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() {
62 // ARM does not have a short instruction sequence for converting int64 to 56 // ARM does not have a short instruction sequence for converting int64 to
63 // double. 57 // double.
64 return false; 58 return false;
65 } 59 }
66 60
67
68 void FlowGraphCompiler::EnterIntrinsicMode() { 61 void FlowGraphCompiler::EnterIntrinsicMode() {
69 ASSERT(!intrinsic_mode()); 62 ASSERT(!intrinsic_mode());
70 intrinsic_mode_ = true; 63 intrinsic_mode_ = true;
71 ASSERT(!assembler()->constant_pool_allowed()); 64 ASSERT(!assembler()->constant_pool_allowed());
72 } 65 }
73 66
74
75 void FlowGraphCompiler::ExitIntrinsicMode() { 67 void FlowGraphCompiler::ExitIntrinsicMode() {
76 ASSERT(intrinsic_mode()); 68 ASSERT(intrinsic_mode());
77 intrinsic_mode_ = false; 69 intrinsic_mode_ = false;
78 } 70 }
79 71
80
81 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, 72 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
82 DeoptInfoBuilder* builder, 73 DeoptInfoBuilder* builder,
83 const Array& deopt_table) { 74 const Array& deopt_table) {
84 if (deopt_env_ == NULL) { 75 if (deopt_env_ == NULL) {
85 ++builder->current_info_number_; 76 ++builder->current_info_number_;
86 return TypedData::null(); 77 return TypedData::null();
87 } 78 }
88 79
89 intptr_t stack_height = compiler->StackSize(); 80 intptr_t stack_height = compiler->StackSize();
90 AllocateIncomingParametersRecursive(deopt_env_, &stack_height); 81 AllocateIncomingParametersRecursive(deopt_env_, &stack_height);
(...skipping 66 matching lines...)
157 builder->AddCallerPc(slot_ix++); 148 builder->AddCallerPc(slot_ix++);
158 149
159 // For the outermost environment, set the incoming arguments. 150 // For the outermost environment, set the incoming arguments.
160 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { 151 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
161 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++); 152 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
162 } 153 }
163 154
164 return builder->CreateDeoptInfo(deopt_table); 155 return builder->CreateDeoptInfo(deopt_table);
165 } 156 }
166 157
167
168 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, 158 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
169 intptr_t stub_ix) { 159 intptr_t stub_ix) {
170 // Calls do not need stubs, they share a deoptimization trampoline. 160 // Calls do not need stubs, they share a deoptimization trampoline.
171 ASSERT(reason() != ICData::kDeoptAtCall); 161 ASSERT(reason() != ICData::kDeoptAtCall);
172 Assembler* assembler = compiler->assembler(); 162 Assembler* assembler = compiler->assembler();
173 #define __ assembler-> 163 #define __ assembler->
174 __ Comment("%s", Name()); 164 __ Comment("%s", Name());
175 __ Bind(entry_label()); 165 __ Bind(entry_label());
176 if (FLAG_trap_on_deoptimization) { 166 if (FLAG_trap_on_deoptimization) {
177 __ bkpt(0); 167 __ bkpt(0);
178 } 168 }
179 169
180 ASSERT(deopt_env() != NULL); 170 ASSERT(deopt_env() != NULL);
181 171
182 // LR may be live. It will be clobbered by BranchLink, so cache it in IP. 172 // LR may be live. It will be clobbered by BranchLink, so cache it in IP.
183 // It will be restored at the top of the deoptimization stub, specifically in 173 // It will be restored at the top of the deoptimization stub, specifically in
184 // GenerateDeoptimizationSequence in stub_code_arm.cc. 174 // GenerateDeoptimizationSequence in stub_code_arm.cc.
185 __ Push(CODE_REG); 175 __ Push(CODE_REG);
186 __ mov(IP, Operand(LR)); 176 __ mov(IP, Operand(LR));
187 __ BranchLink(*StubCode::Deoptimize_entry()); 177 __ BranchLink(*StubCode::Deoptimize_entry());
188 set_pc_offset(assembler->CodeSize()); 178 set_pc_offset(assembler->CodeSize());
189 #undef __ 179 #undef __
190 } 180 }
191 181
192
193 #define __ assembler()-> 182 #define __ assembler()->
194 183
195
196 // Fall through if bool_register contains null. 184 // Fall through if bool_register contains null.
197 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, 185 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
198 Label* is_true, 186 Label* is_true,
199 Label* is_false) { 187 Label* is_false) {
200 Label fall_through; 188 Label fall_through;
201 __ CompareObject(bool_register, Object::null_object()); 189 __ CompareObject(bool_register, Object::null_object());
202 __ b(&fall_through, EQ); 190 __ b(&fall_through, EQ);
203 __ CompareObject(bool_register, Bool::True()); 191 __ CompareObject(bool_register, Bool::True());
204 __ b(is_true, EQ); 192 __ b(is_true, EQ);
205 __ b(is_false); 193 __ b(is_false);
206 __ Bind(&fall_through); 194 __ Bind(&fall_through);
207 } 195 }
208 196
209
210 // R0: instance (must be preserved). 197 // R0: instance (must be preserved).
211 // R2: instantiator type arguments (if used). 198 // R2: instantiator type arguments (if used).
212 // R1: function type arguments (if used). 199 // R1: function type arguments (if used).
213 // R3: type test cache. 200 // R3: type test cache.
214 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub( 201 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
215 TypeTestStubKind test_kind, 202 TypeTestStubKind test_kind,
216 Register instance_reg, 203 Register instance_reg,
217 Register instantiator_type_arguments_reg, 204 Register instantiator_type_arguments_reg,
218 Register function_type_arguments_reg, 205 Register function_type_arguments_reg,
219 Register temp_reg, 206 Register temp_reg,
(...skipping 17 matching lines...)
237 ASSERT(function_type_arguments_reg == R1); 224 ASSERT(function_type_arguments_reg == R1);
238 __ BranchLink(*StubCode::Subtype4TestCache_entry()); 225 __ BranchLink(*StubCode::Subtype4TestCache_entry());
239 } else { 226 } else {
240 UNREACHABLE(); 227 UNREACHABLE();
241 } 228 }
242 // Result is in R1: null -> not found, otherwise Bool::True or Bool::False. 229 // Result is in R1: null -> not found, otherwise Bool::True or Bool::False.
243 GenerateBoolToJump(R1, is_instance_lbl, is_not_instance_lbl); 230 GenerateBoolToJump(R1, is_instance_lbl, is_not_instance_lbl);
244 return type_test_cache.raw(); 231 return type_test_cache.raw();
245 } 232 }
246 233
247
248 // Jumps to labels 'is_instance' or 'is_not_instance' respectively, if 234 // Jumps to labels 'is_instance' or 'is_not_instance' respectively, if
249 // type test is conclusive, otherwise fallthrough if a type test could not 235 // type test is conclusive, otherwise fallthrough if a type test could not
250 // be completed. 236 // be completed.
251 // R0: instance being type checked (preserved). 237 // R0: instance being type checked (preserved).
252 // Clobbers R1, R2. 238 // Clobbers R1, R2.
253 RawSubtypeTestCache* 239 RawSubtypeTestCache*
254 FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest( 240 FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
255 TokenPosition token_pos, 241 TokenPosition token_pos,
256 const AbstractType& type, 242 const AbstractType& type,
257 Label* is_instance_lbl, 243 Label* is_instance_lbl,
(...skipping 58 matching lines...)
316 const Register kInstantiatorTypeArgumentsReg = kNoRegister; 302 const Register kInstantiatorTypeArgumentsReg = kNoRegister;
317 const Register kFunctionTypeArgumentsReg = kNoRegister; 303 const Register kFunctionTypeArgumentsReg = kNoRegister;
318 const Register kTempReg = kNoRegister; 304 const Register kTempReg = kNoRegister;
319 // R0: instance (must be preserved). 305 // R0: instance (must be preserved).
320 return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg, 306 return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg,
321 kInstantiatorTypeArgumentsReg, 307 kInstantiatorTypeArgumentsReg,
322 kFunctionTypeArgumentsReg, kTempReg, 308 kFunctionTypeArgumentsReg, kTempReg,
323 is_instance_lbl, is_not_instance_lbl); 309 is_instance_lbl, is_not_instance_lbl);
324 } 310 }
325 311
326
327 void FlowGraphCompiler::CheckClassIds(Register class_id_reg, 312 void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
328 const GrowableArray<intptr_t>& class_ids, 313 const GrowableArray<intptr_t>& class_ids,
329 Label* is_equal_lbl, 314 Label* is_equal_lbl,
330 Label* is_not_equal_lbl) { 315 Label* is_not_equal_lbl) {
331 for (intptr_t i = 0; i < class_ids.length(); i++) { 316 for (intptr_t i = 0; i < class_ids.length(); i++) {
332 __ CompareImmediate(class_id_reg, class_ids[i]); 317 __ CompareImmediate(class_id_reg, class_ids[i]);
333 __ b(is_equal_lbl, EQ); 318 __ b(is_equal_lbl, EQ);
334 } 319 }
335 __ b(is_not_equal_lbl); 320 __ b(is_not_equal_lbl);
336 } 321 }
337 322
338
339 // Testing against an instantiated type with no arguments, without 323 // Testing against an instantiated type with no arguments, without
340 // SubtypeTestCache. 324 // SubtypeTestCache.
341 // R0: instance being type checked (preserved). 325 // R0: instance being type checked (preserved).
342 // Clobbers R2, R3. 326 // Clobbers R2, R3.
343 // Returns true if there is a fallthrough. 327 // Returns true if there is a fallthrough.
344 bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest( 328 bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
345 TokenPosition token_pos, 329 TokenPosition token_pos,
346 const AbstractType& type, 330 const AbstractType& type,
347 Label* is_instance_lbl, 331 Label* is_instance_lbl,
348 Label* is_not_instance_lbl) { 332 Label* is_not_instance_lbl) {
(...skipping 47 matching lines...)
396 } 380 }
397 // Compare if the classes are equal. 381 // Compare if the classes are equal.
398 if (!type_class.is_abstract()) { 382 if (!type_class.is_abstract()) {
399 __ CompareImmediate(kClassIdReg, type_class.id()); 383 __ CompareImmediate(kClassIdReg, type_class.id());
400 __ b(is_instance_lbl, EQ); 384 __ b(is_instance_lbl, EQ);
401 } 385 }
402 // Otherwise fallthrough. 386 // Otherwise fallthrough.
403 return true; 387 return true;
404 } 388 }
405 389
406
407 // Uses SubtypeTestCache to store instance class and result. 390 // Uses SubtypeTestCache to store instance class and result.
408 // R0: instance to test. 391 // R0: instance to test.
409 // Clobbers R1-R4, R8, R9. 392 // Clobbers R1-R4, R8, R9.
410 // Immediate class test already done. 393 // Immediate class test already done.
411 // TODO(srdjan): Implement a quicker subtype check, as type test 394 // TODO(srdjan): Implement a quicker subtype check, as type test
412 // arrays can grow too high, but they may be useful when optimizing 395 // arrays can grow too high, but they may be useful when optimizing
413 // code (type-feedback). 396 // code (type-feedback).
414 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup( 397 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
415 TokenPosition token_pos, 398 TokenPosition token_pos,
416 const Class& type_class, 399 const Class& type_class,
(...skipping 11 matching lines...)
428 411
429 const Register kInstantiatorTypeArgumentsReg = kNoRegister; 412 const Register kInstantiatorTypeArgumentsReg = kNoRegister;
430 const Register kFunctionTypeArgumentsReg = kNoRegister; 413 const Register kFunctionTypeArgumentsReg = kNoRegister;
431 const Register kTempReg = kNoRegister; 414 const Register kTempReg = kNoRegister;
432 return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg, 415 return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg,
433 kInstantiatorTypeArgumentsReg, 416 kInstantiatorTypeArgumentsReg,
434 kFunctionTypeArgumentsReg, kTempReg, 417 kFunctionTypeArgumentsReg, kTempReg,
435 is_instance_lbl, is_not_instance_lbl); 418 is_instance_lbl, is_not_instance_lbl);
436 } 419 }
437 420
438
439 // Generates inlined check if 'type' is a type parameter or type itself 421 // Generates inlined check if 'type' is a type parameter or type itself
440 // R0: instance (preserved). 422 // R0: instance (preserved).
441 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest( 423 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
442 TokenPosition token_pos, 424 TokenPosition token_pos,
443 const AbstractType& type, 425 const AbstractType& type,
444 Label* is_instance_lbl, 426 Label* is_instance_lbl,
445 Label* is_not_instance_lbl) { 427 Label* is_not_instance_lbl) {
446 __ Comment("UninstantiatedTypeTest"); 428 __ Comment("UninstantiatedTypeTest");
447 ASSERT(!type.IsInstantiated()); 429 ASSERT(!type.IsInstantiated());
448 // Skip check if destination is a dynamic type. 430 // Skip check if destination is a dynamic type.
449 if (type.IsTypeParameter()) { 431 if (type.IsTypeParameter()) {
450 const TypeParameter& type_param = TypeParameter::Cast(type); 432 const TypeParameter& type_param = TypeParameter::Cast(type);
451 const Register kInstantiatorTypeArgumentsReg = R2; 433 const Register kInstantiatorTypeArgumentsReg = R2;
452 const Register kFunctionTypeArgumentsReg = R1; 434 const Register kFunctionTypeArgumentsReg = R1;
OLD:
453     __ ldm(IA, SP, (1 << kFunctionTypeArgumentsReg) |
454                        (1 << kInstantiatorTypeArgumentsReg));
NEW:
435     __ ldm(IA, SP,
436            (1 << kFunctionTypeArgumentsReg) |
437                (1 << kInstantiatorTypeArgumentsReg));
455 // R2: instantiator type arguments. 438 // R2: instantiator type arguments.
456 // R1: function type arguments. 439 // R1: function type arguments.
457 const Register kTypeArgumentsReg = 440 const Register kTypeArgumentsReg =
458 type_param.IsClassTypeParameter() ? R2 : R1; 441 type_param.IsClassTypeParameter() ? R2 : R1;
459 // Check if type arguments are null, i.e. equivalent to vector of dynamic. 442 // Check if type arguments are null, i.e. equivalent to vector of dynamic.
460 __ CompareObject(kTypeArgumentsReg, Object::null_object()); 443 __ CompareObject(kTypeArgumentsReg, Object::null_object());
461 __ b(is_instance_lbl, EQ); 444 __ b(is_instance_lbl, EQ);
462 __ ldr(R3, FieldAddress(kTypeArgumentsReg, 445 __ ldr(R3, FieldAddress(kTypeArgumentsReg,
463 TypeArguments::type_at_offset(type_param.index()))); 446 TypeArguments::type_at_offset(type_param.index())));
464 // R3: concrete type of type. 447 // R3: concrete type of type.
(...skipping 29 matching lines...)
494 kTempReg, is_instance_lbl, is_not_instance_lbl)); 477 kTempReg, is_instance_lbl, is_not_instance_lbl));
495 __ Bind(&fall_through); 478 __ Bind(&fall_through);
496 return type_test_cache.raw(); 479 return type_test_cache.raw();
497 } 480 }
498 if (type.IsType()) { 481 if (type.IsType()) {
499 const Register kInstanceReg = R0; 482 const Register kInstanceReg = R0;
500 const Register kInstantiatorTypeArgumentsReg = R2; 483 const Register kInstantiatorTypeArgumentsReg = R2;
501 const Register kFunctionTypeArgumentsReg = R1; 484 const Register kFunctionTypeArgumentsReg = R1;
502 __ tst(kInstanceReg, Operand(kSmiTagMask)); // Is instance Smi? 485 __ tst(kInstanceReg, Operand(kSmiTagMask)); // Is instance Smi?
503 __ b(is_not_instance_lbl, EQ); 486 __ b(is_not_instance_lbl, EQ);
OLD:
504     __ ldm(IA, SP, (1 << kFunctionTypeArgumentsReg) |
505                        (1 << kInstantiatorTypeArgumentsReg));
NEW:
487     __ ldm(IA, SP,
488            (1 << kFunctionTypeArgumentsReg) |
489                (1 << kInstantiatorTypeArgumentsReg));
506 // Uninstantiated type class is known at compile time, but the type 490 // Uninstantiated type class is known at compile time, but the type
507 // arguments are determined at runtime by the instantiator(s). 491 // arguments are determined at runtime by the instantiator(s).
508 const Register kTempReg = kNoRegister; 492 const Register kTempReg = kNoRegister;
509 return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg, 493 return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg,
510 kInstantiatorTypeArgumentsReg, 494 kInstantiatorTypeArgumentsReg,
511 kFunctionTypeArgumentsReg, kTempReg, 495 kFunctionTypeArgumentsReg, kTempReg,
512 is_instance_lbl, is_not_instance_lbl); 496 is_instance_lbl, is_not_instance_lbl);
513 } 497 }
514 return SubtypeTestCache::null(); 498 return SubtypeTestCache::null();
515 } 499 }
516 500
517
518 // Inputs: 501 // Inputs:
519 // - R0: instance being type checked (preserved). 502 // - R0: instance being type checked (preserved).
520 // - R2: optional instantiator type arguments (preserved). 503 // - R2: optional instantiator type arguments (preserved).
521 // - R1: optional function type arguments (preserved). 504 // - R1: optional function type arguments (preserved).
522 // Clobbers R3, R4, R8, R9. 505 // Clobbers R3, R4, R8, R9.
523 // Returns: 506 // Returns:
524 // - preserved instance in R0, optional instantiator type arguments in R2, and 507 // - preserved instance in R0, optional instantiator type arguments in R2, and
525 // optional function type arguments in R1. 508 // optional function type arguments in R1.
526 // Note that this inlined code must be followed by the runtime_call code, as it 509 // Note that this inlined code must be followed by the runtime_call code, as it
527 // may fall through to it. Otherwise, this inline code will jump to the label 510 // may fall through to it. Otherwise, this inline code will jump to the label
(...skipping 22 matching lines...)
550 return GenerateSubtype1TestCacheLookup( 533 return GenerateSubtype1TestCacheLookup(
551 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); 534 token_pos, type_class, is_instance_lbl, is_not_instance_lbl);
552 } else { 535 } else {
553 return SubtypeTestCache::null(); 536 return SubtypeTestCache::null();
554 } 537 }
555 } 538 }
556 return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl, 539 return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl,
557 is_not_instance_lbl); 540 is_not_instance_lbl);
558 } 541 }
559 542
560
561 // If instanceof type test cannot be performed successfully at compile time and 543 // If instanceof type test cannot be performed successfully at compile time and
562 // therefore eliminated, optimize it by adding inlined tests for: 544 // therefore eliminated, optimize it by adding inlined tests for:
563 // - NULL -> return type == Null (type is not Object or dynamic). 545 // - NULL -> return type == Null (type is not Object or dynamic).
564 // - Smi -> compile time subtype check (only if dst class is not parameterized). 546 // - Smi -> compile time subtype check (only if dst class is not parameterized).
565 // - Class equality (only if class is not parameterized). 547 // - Class equality (only if class is not parameterized).
566 // Inputs: 548 // Inputs:
567 // - R0: object. 549 // - R0: object.
568 // - R2: instantiator type arguments or raw_null. 550 // - R2: instantiator type arguments or raw_null.
569 // - R1: function type arguments or raw_null. 551 // - R1: function type arguments or raw_null.
570 // Returns: 552 // Returns:
(...skipping 25 matching lines...)
596 578
597 // Generate inline instanceof test. 579 // Generate inline instanceof test.
598 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); 580 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
599 test_cache = 581 test_cache =
600 GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance); 582 GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);
601 583
602 // test_cache is null if there is no fall-through. 584 // test_cache is null if there is no fall-through.
603 Label done; 585 Label done;
604 if (!test_cache.IsNull()) { 586 if (!test_cache.IsNull()) {
605 // Generate runtime call. 587 // Generate runtime call.
OLD:
606     __ ldm(IA, SP, (1 << kFunctionTypeArgumentsReg) |
607                        (1 << kInstantiatorTypeArgumentsReg));
NEW:
588     __ ldm(IA, SP,
589            (1 << kFunctionTypeArgumentsReg) |
590                (1 << kInstantiatorTypeArgumentsReg));
608 __ PushObject(Object::null_object()); // Make room for the result. 591 __ PushObject(Object::null_object()); // Make room for the result.
609 __ Push(R0); // Push the instance. 592 __ Push(R0); // Push the instance.
610 __ PushObject(type); // Push the type. 593 __ PushObject(type); // Push the type.
611 __ PushList((1 << kInstantiatorTypeArgumentsReg) | 594 __ PushList((1 << kInstantiatorTypeArgumentsReg) |
612 (1 << kFunctionTypeArgumentsReg)); 595 (1 << kFunctionTypeArgumentsReg));
613 __ LoadUniqueObject(R0, test_cache); 596 __ LoadUniqueObject(R0, test_cache);
614 __ Push(R0); 597 __ Push(R0);
615 GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 5, locs); 598 GenerateRuntimeCall(token_pos, deopt_id, kInstanceofRuntimeEntry, 5, locs);
616 // Pop the parameters supplied to the runtime entry. The result of the 599 // Pop the parameters supplied to the runtime entry. The result of the
617 // instanceof runtime call will be left as the result of the operation. 600 // instanceof runtime call will be left as the result of the operation.
618 __ Drop(5); 601 __ Drop(5);
619 __ Pop(R0); 602 __ Pop(R0);
620 __ b(&done); 603 __ b(&done);
621 } 604 }
622 __ Bind(&is_not_instance); 605 __ Bind(&is_not_instance);
623 __ LoadObject(R0, Bool::Get(false)); 606 __ LoadObject(R0, Bool::Get(false));
624 __ b(&done); 607 __ b(&done);
625 608
626 __ Bind(&is_instance); 609 __ Bind(&is_instance);
627 __ LoadObject(R0, Bool::Get(true)); 610 __ LoadObject(R0, Bool::Get(true));
628 __ Bind(&done); 611 __ Bind(&done);
629 // Remove instantiator type arguments and function type arguments. 612 // Remove instantiator type arguments and function type arguments.
630 __ Drop(2); 613 __ Drop(2);
631 } 614 }
632 615
633
634 // Optimize assignable type check by adding inlined tests for: 616 // Optimize assignable type check by adding inlined tests for:
635 // - NULL -> return NULL. 617 // - NULL -> return NULL.
636 // - Smi -> compile time subtype check (only if dst class is not parameterized). 618 // - Smi -> compile time subtype check (only if dst class is not parameterized).
637 // - Class equality (only if class is not parameterized). 619 // - Class equality (only if class is not parameterized).
638 // Inputs: 620 // Inputs:
639 // - R0: instance being type checked. 621 // - R0: instance being type checked.
640 // - R2: instantiator type arguments or raw_null. 622 // - R2: instantiator type arguments or raw_null.
641 // - R1: function type arguments or raw_null. 623 // - R1: function type arguments or raw_null.
642 // Returns: 624 // Returns:
643 // - object in R0 for successful assignable check (or throws TypeError). 625 // - object in R0 for successful assignable check (or throws TypeError).
(...skipping 36 matching lines...)
680 (1 << kInstantiatorTypeArgumentsReg)); 662 (1 << kInstantiatorTypeArgumentsReg));
681 return; 663 return;
682 } 664 }
683 665
684 // Generate inline type check, linking to runtime call if not assignable. 666 // Generate inline type check, linking to runtime call if not assignable.
685 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone()); 667 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
686 test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable, 668 test_cache = GenerateInlineInstanceof(token_pos, dst_type, &is_assignable,
687 &runtime_call); 669 &runtime_call);
688 670
689 __ Bind(&runtime_call); 671 __ Bind(&runtime_call);
OLD:
690   __ ldm(IA, SP, (1 << kFunctionTypeArgumentsReg) |
691                      (1 << kInstantiatorTypeArgumentsReg));
NEW:
672   __ ldm(
673       IA, SP,
674       (1 << kFunctionTypeArgumentsReg) | (1 << kInstantiatorTypeArgumentsReg));
692 __ PushObject(Object::null_object()); // Make room for the result. 675 __ PushObject(Object::null_object()); // Make room for the result.
693 __ Push(R0); // Push the source object. 676 __ Push(R0); // Push the source object.
694 __ PushObject(dst_type); // Push the type of the destination. 677 __ PushObject(dst_type); // Push the type of the destination.
695 __ PushList((1 << kInstantiatorTypeArgumentsReg) | 678 __ PushList((1 << kInstantiatorTypeArgumentsReg) |
696 (1 << kFunctionTypeArgumentsReg)); 679 (1 << kFunctionTypeArgumentsReg));
697 __ PushObject(dst_name); // Push the name of the destination. 680 __ PushObject(dst_name); // Push the name of the destination.
698 __ LoadUniqueObject(R0, test_cache); 681 __ LoadUniqueObject(R0, test_cache);
699 __ Push(R0); 682 __ Push(R0);
700 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs); 683 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs);
701 // Pop the parameters supplied to the runtime entry. The result of the 684 // Pop the parameters supplied to the runtime entry. The result of the
702 // type check runtime call is the checked value. 685 // type check runtime call is the checked value.
703 __ Drop(6); 686 __ Drop(6);
704 __ Pop(R0); 687 __ Pop(R0);
705 688
706 __ Bind(&is_assignable); 689 __ Bind(&is_assignable);
707 __ PopList((1 << kFunctionTypeArgumentsReg) | 690 __ PopList((1 << kFunctionTypeArgumentsReg) |
708 (1 << kInstantiatorTypeArgumentsReg)); 691 (1 << kInstantiatorTypeArgumentsReg));
709 } 692 }
710 693
711
712 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) { 694 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
713 if (is_optimizing()) { 695 if (is_optimizing()) {
714 return; 696 return;
715 } 697 }
716 Definition* defn = instr->AsDefinition(); 698 Definition* defn = instr->AsDefinition();
717 if ((defn != NULL) && defn->HasTemp()) { 699 if ((defn != NULL) && defn->HasTemp()) {
718 __ Push(defn->locs()->out(0).reg()); 700 __ Push(defn->locs()->out(0).reg());
719 } 701 }
720 } 702 }
721 703
722
723 // Input parameters: 704 // Input parameters:
724 // R4: arguments descriptor array. 705 // R4: arguments descriptor array.
725 void FlowGraphCompiler::CopyParameters() { 706 void FlowGraphCompiler::CopyParameters() {
726 __ Comment("Copy parameters"); 707 __ Comment("Copy parameters");
727 const Function& function = parsed_function().function(); 708 const Function& function = parsed_function().function();
728 LocalScope* scope = parsed_function().node_sequence()->scope(); 709 LocalScope* scope = parsed_function().node_sequence()->scope();
729 const int num_fixed_params = function.num_fixed_parameters(); 710 const int num_fixed_params = function.num_fixed_parameters();
730 const int num_opt_pos_params = function.NumOptionalPositionalParameters(); 711 const int num_opt_pos_params = function.NumOptionalPositionalParameters();
731 const int num_opt_named_params = function.NumOptionalNamedParameters(); 712 const int num_opt_named_params = function.NumOptionalNamedParameters();
732 const int num_params = 713 const int num_params =
(...skipping 72 matching lines...)
805 opt_param[i + 1] = parameter; 786 opt_param[i + 1] = parameter;
806 opt_param_position[i + 1] = pos; 787 opt_param_position[i + 1] = pos;
807 } 788 }
808 // Generate code handling each optional parameter in alphabetical order. 789 // Generate code handling each optional parameter in alphabetical order.
809 __ ldr(NOTFP, FieldAddress(R4, ArgumentsDescriptor::count_offset())); 790 __ ldr(NOTFP, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
810 // Let NOTFP point to the first passed argument, i.e. to 791 // Let NOTFP point to the first passed argument, i.e. to
811 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (NOTFP) is Smi. 792 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (NOTFP) is Smi.
812 __ add(NOTFP, FP, Operand(NOTFP, LSL, 1)); 793 __ add(NOTFP, FP, Operand(NOTFP, LSL, 1));
813 __ AddImmediate(NOTFP, NOTFP, kParamEndSlotFromFp * kWordSize); 794 __ AddImmediate(NOTFP, NOTFP, kParamEndSlotFromFp * kWordSize);
814 // Let R8 point to the entry of the first named argument. 795 // Let R8 point to the entry of the first named argument.
OLD:
815   __ add(R8, R4, Operand(ArgumentsDescriptor::first_named_entry_offset() -
816                              kHeapObjectTag));
NEW:
796   __ add(R8, R4,
797          Operand(ArgumentsDescriptor::first_named_entry_offset() -
798                      kHeapObjectTag));
817 for (int i = 0; i < num_opt_named_params; i++) { 799 for (int i = 0; i < num_opt_named_params; i++) {
818 Label load_default_value, assign_optional_parameter; 800 Label load_default_value, assign_optional_parameter;
819 const int param_pos = opt_param_position[i]; 801 const int param_pos = opt_param_position[i];
820 // Check if this named parameter was passed in. 802 // Check if this named parameter was passed in.
821 // Load R9 with the name of the argument. 803 // Load R9 with the name of the argument.
822 __ ldr(R9, Address(R8, ArgumentsDescriptor::name_offset())); 804 __ ldr(R9, Address(R8, ArgumentsDescriptor::name_offset()));
823 ASSERT(opt_param[i]->name().IsSymbol()); 805 ASSERT(opt_param[i]->name().IsSymbol());
824 __ CompareObject(R9, opt_param[i]->name()); 806 __ CompareObject(R9, opt_param[i]->name());
825 __ b(&load_default_value, NE); 807 __ b(&load_default_value, NE);
826 // Load R9 with passed-in argument at provided arg_pos, i.e. at 808 // Load R9 with passed-in argument at provided arg_pos, i.e. at
(...skipping 87 matching lines...)
914 __ LoadObject(IP, Object::null_object()); 896 __ LoadObject(IP, Object::null_object());
915 Label null_args_loop, null_args_loop_condition; 897 Label null_args_loop, null_args_loop_condition;
916 __ b(&null_args_loop_condition); 898 __ b(&null_args_loop_condition);
917 __ Bind(&null_args_loop); 899 __ Bind(&null_args_loop);
918 __ str(IP, original_argument_addr); 900 __ str(IP, original_argument_addr);
919 __ Bind(&null_args_loop_condition); 901 __ Bind(&null_args_loop_condition);
920 __ subs(R6, R6, Operand(1)); 902 __ subs(R6, R6, Operand(1));
921 __ b(&null_args_loop, PL); 903 __ b(&null_args_loop, PL);
922 } 904 }
923 905
924
925 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { 906 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
926 // LR: return address. 907 // LR: return address.
927 // SP: receiver. 908 // SP: receiver.
928 // Sequence node has one return node, its input is load field node. 909 // Sequence node has one return node, its input is load field node.
929 __ Comment("Inlined Getter"); 910 __ Comment("Inlined Getter");
930 __ ldr(R0, Address(SP, 0 * kWordSize)); 911 __ ldr(R0, Address(SP, 0 * kWordSize));
931 __ LoadFieldFromOffset(kWord, R0, R0, offset); 912 __ LoadFieldFromOffset(kWord, R0, R0, offset);
932 __ Ret(); 913 __ Ret();
933 } 914 }
934 915
935
936 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { 916 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
937 // LR: return address. 917 // LR: return address.
938 // SP+1: receiver. 918 // SP+1: receiver.
939 // SP+0: value. 919 // SP+0: value.
940 // Sequence node has one store node and one return NULL node. 920 // Sequence node has one store node and one return NULL node.
941 __ Comment("Inlined Setter"); 921 __ Comment("Inlined Setter");
942 __ ldr(R0, Address(SP, 1 * kWordSize)); // Receiver. 922 __ ldr(R0, Address(SP, 1 * kWordSize)); // Receiver.
943 __ ldr(R1, Address(SP, 0 * kWordSize)); // Value. 923 __ ldr(R1, Address(SP, 0 * kWordSize)); // Value.
944 __ StoreIntoObjectOffset(R0, offset, R1); 924 __ StoreIntoObjectOffset(R0, offset, R1);
945 __ LoadObject(R0, Object::null_object()); 925 __ LoadObject(R0, Object::null_object());
946 __ Ret(); 926 __ Ret();
947 } 927 }
948 928
949
950 static const Register new_pp = NOTFP; 929 static const Register new_pp = NOTFP;
951 930
952
953 void FlowGraphCompiler::EmitFrameEntry() { 931 void FlowGraphCompiler::EmitFrameEntry() {
954 const Function& function = parsed_function().function(); 932 const Function& function = parsed_function().function();
955 if (CanOptimizeFunction() && function.IsOptimizable() && 933 if (CanOptimizeFunction() && function.IsOptimizable() &&
956 (!is_optimizing() || may_reoptimize())) { 934 (!is_optimizing() || may_reoptimize())) {
957 __ Comment("Invocation Count Check"); 935 __ Comment("Invocation Count Check");
958 const Register function_reg = R8; 936 const Register function_reg = R8;
959 // The pool pointer is not setup before entering the Dart frame. 937 // The pool pointer is not setup before entering the Dart frame.
960 // Temporarily setup pool pointer for this dart function. 938 // Temporarily setup pool pointer for this dart function.
961 __ LoadPoolPointer(new_pp); 939 __ LoadPoolPointer(new_pp);
962 // Load function object from object pool. 940 // Load function object from object pool.
(...skipping 15 matching lines...)
978 intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() - 956 intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
979 flow_graph().num_copied_params(); 957 flow_graph().num_copied_params();
980 ASSERT(extra_slots >= 0); 958 ASSERT(extra_slots >= 0);
981 __ EnterOsrFrame(extra_slots * kWordSize); 959 __ EnterOsrFrame(extra_slots * kWordSize);
982 } else { 960 } else {
983 ASSERT(StackSize() >= 0); 961 ASSERT(StackSize() >= 0);
984 __ EnterDartFrame(StackSize() * kWordSize); 962 __ EnterDartFrame(StackSize() * kWordSize);
985 } 963 }
986 } 964 }
987 965
988
989 // Input parameters: 966 // Input parameters:
990 // LR: return address. 967 // LR: return address.
991 // SP: address of last argument. 968 // SP: address of last argument.
992 // FP: caller's frame pointer. 969 // FP: caller's frame pointer.
993 // PP: caller's pool pointer. 970 // PP: caller's pool pointer.
994 // R9: ic-data. 971 // R9: ic-data.
995 // R4: arguments descriptor array. 972 // R4: arguments descriptor array.
996 void FlowGraphCompiler::CompileGraph() { 973 void FlowGraphCompiler::CompileGraph() {
997 InitCompiler(); 974 InitCompiler();
998 const Function& function = parsed_function().function(); 975 const Function& function = parsed_function().function();
(...skipping 114 matching lines...)
1113 // checked during resolution. 1090 // checked during resolution.
1114 1091
1115 EndCodeSourceRange(TokenPosition::kDartCodePrologue); 1092 EndCodeSourceRange(TokenPosition::kDartCodePrologue);
1116 VisitBlocks(); 1093 VisitBlocks();
1117 1094
1118 __ bkpt(0); 1095 __ bkpt(0);
1119 ASSERT(assembler()->constant_pool_allowed()); 1096 ASSERT(assembler()->constant_pool_allowed());
1120 GenerateDeferredCode(); 1097 GenerateDeferredCode();
1121 } 1098 }
1122 1099
1123
1124 void FlowGraphCompiler::GenerateCall(TokenPosition token_pos, 1100 void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
1125 const StubEntry& stub_entry, 1101 const StubEntry& stub_entry,
1126 RawPcDescriptors::Kind kind, 1102 RawPcDescriptors::Kind kind,
1127 LocationSummary* locs) { 1103 LocationSummary* locs) {
1128 __ BranchLink(stub_entry); 1104 __ BranchLink(stub_entry);
1129 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs); 1105 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
1130 } 1106 }
1131 1107
1132
1133 void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos, 1108 void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
1134 const StubEntry& stub_entry, 1109 const StubEntry& stub_entry,
1135 RawPcDescriptors::Kind kind, 1110 RawPcDescriptors::Kind kind,
1136 LocationSummary* locs) { 1111 LocationSummary* locs) {
1137 __ BranchLinkPatchable(stub_entry); 1112 __ BranchLinkPatchable(stub_entry);
1138 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs); 1113 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs);
1139 } 1114 }
1140 1115
1141
1142 void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id, 1116 void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
1143 TokenPosition token_pos, 1117 TokenPosition token_pos,
1144 const StubEntry& stub_entry, 1118 const StubEntry& stub_entry,
1145 RawPcDescriptors::Kind kind, 1119 RawPcDescriptors::Kind kind,
1146 LocationSummary* locs) { 1120 LocationSummary* locs) {
1147 __ BranchLinkPatchable(stub_entry); 1121 __ BranchLinkPatchable(stub_entry);
1148 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs); 1122 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs);
1149 // Marks either the continuation point in unoptimized code or the 1123 // Marks either the continuation point in unoptimized code or the
1150 // deoptimization point in optimized code, after call. 1124 // deoptimization point in optimized code, after call.
1151 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1125 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1152 if (is_optimizing()) { 1126 if (is_optimizing()) {
1153 AddDeoptIndexAtCall(deopt_id_after); 1127 AddDeoptIndexAtCall(deopt_id_after);
1154 } else { 1128 } else {
1155 // Add deoptimization continuation point after the call and before the 1129 // Add deoptimization continuation point after the call and before the
1156 // arguments are removed. 1130 // arguments are removed.
1157 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); 1131 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1158 } 1132 }
1159 } 1133 }
1160 1134
1161
1162 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id, 1135 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
1163 TokenPosition token_pos, 1136 TokenPosition token_pos,
1164 const StubEntry& stub_entry, 1137 const StubEntry& stub_entry,
1165 RawPcDescriptors::Kind kind, 1138 RawPcDescriptors::Kind kind,
1166 LocationSummary* locs, 1139 LocationSummary* locs,
1167 const Function& target) { 1140 const Function& target) {
1168 // Call sites to the same target can share object pool entries. These 1141 // Call sites to the same target can share object pool entries. These
1169 // call sites are never patched for breakpoints: the function is deoptimized 1142 // call sites are never patched for breakpoints: the function is deoptimized
1170 // and the unoptimized code with IC calls for static calls is patched instead. 1143 // and the unoptimized code with IC calls for static calls is patched instead.
1171 ASSERT(is_optimizing()); 1144 ASSERT(is_optimizing());
1172 __ BranchLinkWithEquivalence(stub_entry, target); 1145 __ BranchLinkWithEquivalence(stub_entry, target);
1173 1146
1174 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs); 1147 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs);
1175 // Marks either the continuation point in unoptimized code or the 1148 // Marks either the continuation point in unoptimized code or the
1176 // deoptimization point in optimized code, after call. 1149 // deoptimization point in optimized code, after call.
1177 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1150 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1178 if (is_optimizing()) { 1151 if (is_optimizing()) {
1179 AddDeoptIndexAtCall(deopt_id_after); 1152 AddDeoptIndexAtCall(deopt_id_after);
1180 } else { 1153 } else {
1181 // Add deoptimization continuation point after the call and before the 1154 // Add deoptimization continuation point after the call and before the
1182 // arguments are removed. 1155 // arguments are removed.
1183 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); 1156 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1184 } 1157 }
1185 AddStaticCallTarget(target); 1158 AddStaticCallTarget(target);
1186 } 1159 }
1187 1160
1188
1189 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos, 1161 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
1190 intptr_t deopt_id, 1162 intptr_t deopt_id,
1191 const RuntimeEntry& entry, 1163 const RuntimeEntry& entry,
1192 intptr_t argument_count, 1164 intptr_t argument_count,
1193 LocationSummary* locs) { 1165 LocationSummary* locs) {
1194 __ CallRuntime(entry, argument_count); 1166 __ CallRuntime(entry, argument_count);
1195 EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs); 1167 EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
1196 if (deopt_id != Thread::kNoDeoptId) { 1168 if (deopt_id != Thread::kNoDeoptId) {
1197 // Marks either the continuation point in unoptimized code or the 1169 // Marks either the continuation point in unoptimized code or the
1198 // deoptimization point in optimized code, after call. 1170 // deoptimization point in optimized code, after call.
1199 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); 1171 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id);
1200 if (is_optimizing()) { 1172 if (is_optimizing()) {
1201 AddDeoptIndexAtCall(deopt_id_after); 1173 AddDeoptIndexAtCall(deopt_id_after);
1202 } else { 1174 } else {
1203 // Add deoptimization continuation point after the call and before the 1175 // Add deoptimization continuation point after the call and before the
1204 // arguments are removed. 1176 // arguments are removed.
1205 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); 1177 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1206 } 1178 }
1207 } 1179 }
1208 } 1180 }
1209 1181
1210
1211 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) { 1182 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
1212 // We do not check for overflow when incrementing the edge counter. The 1183 // We do not check for overflow when incrementing the edge counter. The
1213 // function should normally be optimized long before the counter can 1184 // function should normally be optimized long before the counter can
1214 // overflow; and though we do not reset the counters when we optimize or 1185 // overflow; and though we do not reset the counters when we optimize or
1215 // deoptimize, there is a bound on the number of 1186 // deoptimize, there is a bound on the number of
1216 // optimization/deoptimization cycles we will attempt. 1187 // optimization/deoptimization cycles we will attempt.
1217 ASSERT(!edge_counters_array_.IsNull()); 1188 ASSERT(!edge_counters_array_.IsNull());
1218 ASSERT(assembler_->constant_pool_allowed()); 1189 ASSERT(assembler_->constant_pool_allowed());
1219 __ Comment("Edge counter"); 1190 __ Comment("Edge counter");
1220 __ LoadObject(R0, edge_counters_array_); 1191 __ LoadObject(R0, edge_counters_array_);
1221 #if defined(DEBUG) 1192 #if defined(DEBUG)
1222 bool old_use_far_branches = assembler_->use_far_branches(); 1193 bool old_use_far_branches = assembler_->use_far_branches();
1223 assembler_->set_use_far_branches(true); 1194 assembler_->set_use_far_branches(true);
1224 #endif // DEBUG 1195 #endif // DEBUG
1225 __ LoadFieldFromOffset(kWord, R1, R0, Array::element_offset(edge_id)); 1196 __ LoadFieldFromOffset(kWord, R1, R0, Array::element_offset(edge_id));
1226 __ add(R1, R1, Operand(Smi::RawValue(1))); 1197 __ add(R1, R1, Operand(Smi::RawValue(1)));
1227 __ StoreIntoObjectNoBarrierOffset(R0, Array::element_offset(edge_id), R1); 1198 __ StoreIntoObjectNoBarrierOffset(R0, Array::element_offset(edge_id), R1);
1228 #if defined(DEBUG) 1199 #if defined(DEBUG)
1229 assembler_->set_use_far_branches(old_use_far_branches); 1200 assembler_->set_use_far_branches(old_use_far_branches);
1230 #endif // DEBUG 1201 #endif // DEBUG
1231 } 1202 }
1232 1203
1233
1234 void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry, 1204 void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry,
1235 const ICData& ic_data, 1205 const ICData& ic_data,
1236 intptr_t argument_count, 1206 intptr_t argument_count,
1237 intptr_t deopt_id, 1207 intptr_t deopt_id,
1238 TokenPosition token_pos, 1208 TokenPosition token_pos,
1239 LocationSummary* locs) { 1209 LocationSummary* locs) {
1240 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); 1210 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
1241 // Each ICData propagated from unoptimized to optimized code contains the 1211 // Each ICData propagated from unoptimized to optimized code contains the
1242 // function that corresponds to the Dart function of that IC call. Due 1212 // function that corresponds to the Dart function of that IC call. Due
1243 // to inlining in optimized code, that function may not correspond to the 1213 // to inlining in optimized code, that function may not correspond to the
1244 // top-level function (parsed_function().function()) which could be 1214 // top-level function (parsed_function().function()) which could be
1245 // reoptimized and which counter needs to be incremented. 1215 // reoptimized and which counter needs to be incremented.
1246 // Pass the function explicitly, it is used in IC stub. 1216 // Pass the function explicitly, it is used in IC stub.
1247 1217
1248 __ LoadObject(R8, parsed_function().function()); 1218 __ LoadObject(R8, parsed_function().function());
1249 __ LoadUniqueObject(R9, ic_data); 1219 __ LoadUniqueObject(R9, ic_data);
1250 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall, 1220 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
1251 locs); 1221 locs);
1252 __ Drop(argument_count); 1222 __ Drop(argument_count);
1253 } 1223 }
1254 1224
1255
1256 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry, 1225 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry,
1257 const ICData& ic_data, 1226 const ICData& ic_data,
1258 intptr_t argument_count, 1227 intptr_t argument_count,
1259 intptr_t deopt_id, 1228 intptr_t deopt_id,
1260 TokenPosition token_pos, 1229 TokenPosition token_pos,
1261 LocationSummary* locs) { 1230 LocationSummary* locs) {
1262 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); 1231 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
1263 __ LoadUniqueObject(R9, ic_data); 1232 __ LoadUniqueObject(R9, ic_data);
1264 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall, 1233 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall,
1265 locs); 1234 locs);
1266 __ Drop(argument_count); 1235 __ Drop(argument_count);
1267 } 1236 }
1268 1237
1269
1270 void FlowGraphCompiler::EmitMegamorphicInstanceCall( 1238 void FlowGraphCompiler::EmitMegamorphicInstanceCall(
1271 const String& name, 1239 const String& name,
1272 const Array& arguments_descriptor, 1240 const Array& arguments_descriptor,
1273 intptr_t argument_count, 1241 intptr_t argument_count,
1274 intptr_t deopt_id, 1242 intptr_t deopt_id,
1275 TokenPosition token_pos, 1243 TokenPosition token_pos,
1276 LocationSummary* locs, 1244 LocationSummary* locs,
1277 intptr_t try_index, 1245 intptr_t try_index,
1278 intptr_t slow_path_argument_count) { 1246 intptr_t slow_path_argument_count) {
1279 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); 1247 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
(...skipping 26 matching lines...)
1306 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId, 1274 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId,
1307 token_pos); 1275 token_pos);
1308 // Add deoptimization continuation point after the call and before the 1276 // Add deoptimization continuation point after the call and before the
1309 // arguments are removed. 1277 // arguments are removed.
1310 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); 1278 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1311 } 1279 }
1312 EmitCatchEntryState(pending_deoptimization_env_, try_index); 1280 EmitCatchEntryState(pending_deoptimization_env_, try_index);
1313 __ Drop(argument_count); 1281 __ Drop(argument_count);
1314 } 1282 }
1315 1283
1316
1317 void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data, 1284 void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
1318 intptr_t argument_count, 1285 intptr_t argument_count,
1319 intptr_t deopt_id, 1286 intptr_t deopt_id,
1320 TokenPosition token_pos, 1287 TokenPosition token_pos,
1321 LocationSummary* locs) { 1288 LocationSummary* locs) {
1322 ASSERT(ic_data.NumArgsTested() == 1); 1289 ASSERT(ic_data.NumArgsTested() == 1);
1323 const Code& initial_stub = 1290 const Code& initial_stub =
1324 Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code()); 1291 Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code());
1325 1292
1326 __ Comment("SwitchableCall"); 1293 __ Comment("SwitchableCall");
(...skipping 10 matching lines...)
1337 if (is_optimizing()) { 1304 if (is_optimizing()) {
1338 AddDeoptIndexAtCall(deopt_id_after); 1305 AddDeoptIndexAtCall(deopt_id_after);
1339 } else { 1306 } else {
1340 // Add deoptimization continuation point after the call and before the 1307 // Add deoptimization continuation point after the call and before the
1341 // arguments are removed. 1308 // arguments are removed.
1342 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); 1309 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1343 } 1310 }
1344 __ Drop(argument_count); 1311 __ Drop(argument_count);
1345 } 1312 }
1346 1313
1347
1348 void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count, 1314 void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count,
1349 intptr_t deopt_id, 1315 intptr_t deopt_id,
1350 TokenPosition token_pos, 1316 TokenPosition token_pos,
1351 LocationSummary* locs, 1317 LocationSummary* locs,
1352 const ICData& ic_data) { 1318 const ICData& ic_data) {
1353 const StubEntry* stub_entry = 1319 const StubEntry* stub_entry =
1354 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested()); 1320 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
1355 __ LoadObject(R9, ic_data); 1321 __ LoadObject(R9, ic_data);
1356 GenerateDartCall(deopt_id, token_pos, *stub_entry, 1322 GenerateDartCall(deopt_id, token_pos, *stub_entry,
1357 RawPcDescriptors::kUnoptStaticCall, locs); 1323 RawPcDescriptors::kUnoptStaticCall, locs);
1358 __ Drop(argument_count); 1324 __ Drop(argument_count);
1359 } 1325 }
1360 1326
1361
1362 void FlowGraphCompiler::EmitOptimizedStaticCall( 1327 void FlowGraphCompiler::EmitOptimizedStaticCall(
1363 const Function& function, 1328 const Function& function,
1364 const Array& arguments_descriptor, 1329 const Array& arguments_descriptor,
1365 intptr_t argument_count, 1330 intptr_t argument_count,
1366 intptr_t deopt_id, 1331 intptr_t deopt_id,
1367 TokenPosition token_pos, 1332 TokenPosition token_pos,
1368 LocationSummary* locs) { 1333 LocationSummary* locs) {
1369 ASSERT(!function.IsClosureFunction()); 1334 ASSERT(!function.IsClosureFunction());
1370 if (function.HasOptionalParameters() || 1335 if (function.HasOptionalParameters() ||
1371 (FLAG_reify_generic_functions && function.IsGeneric())) { 1336 (FLAG_reify_generic_functions && function.IsGeneric())) {
1372 __ LoadObject(R4, arguments_descriptor); 1337 __ LoadObject(R4, arguments_descriptor);
1373 } else { 1338 } else {
1374 __ LoadImmediate(R4, 0); // GC safe smi zero because of stub. 1339 __ LoadImmediate(R4, 0); // GC safe smi zero because of stub.
1375 } 1340 }
1376 // Do not use the code from the function, but let the code be patched so that 1341 // Do not use the code from the function, but let the code be patched so that
1377 // we can record the outgoing edges to other code. 1342 // we can record the outgoing edges to other code.
1378 GenerateStaticDartCall(deopt_id, token_pos, 1343 GenerateStaticDartCall(deopt_id, token_pos,
1379 *StubCode::CallStaticFunction_entry(), 1344 *StubCode::CallStaticFunction_entry(),
1380 RawPcDescriptors::kOther, locs, function); 1345 RawPcDescriptors::kOther, locs, function);
1381 __ Drop(argument_count); 1346 __ Drop(argument_count);
1382 } 1347 }
1383 1348

Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg,
    const Object& obj,
    bool needs_number_check,
    TokenPosition token_pos,
    intptr_t deopt_id) {
  if (needs_number_check) {
    ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint());
    __ Push(reg);
    __ PushObject(obj);
    if (is_optimizing()) {
      __ BranchLinkPatchable(
          *StubCode::OptimizedIdenticalWithNumberCheck_entry());
    } else {
      __ BranchLinkPatchable(
          *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
    }
    AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
    // Stub returns result in flags (result of a cmp, we need Z computed).
    __ Drop(1);  // Discard constant.
    __ Pop(reg);  // Restore 'reg'.
  } else {
    __ CompareObject(reg, obj);
  }
  return EQ;
}
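
// Note: both IdenticalWithNumberCheck stubs leave their answer in the
// condition flags, like a cmp, so no result register needs to be read;
// the pushed constant is dropped and 'reg' restored before returning EQ.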

Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
                                                       Register right,
                                                       bool needs_number_check,
                                                       TokenPosition token_pos,
                                                       intptr_t deopt_id) {
  if (needs_number_check) {
    __ Push(left);
    __ Push(right);
    if (is_optimizing()) {
      __ BranchLinkPatchable(
          *StubCode::OptimizedIdenticalWithNumberCheck_entry());
    } else {
      __ BranchLinkPatchable(
          *StubCode::UnoptimizedIdenticalWithNumberCheck_entry());
    }
    AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
    // Stub returns result in flags (result of a cmp, we need Z computed).
    __ Pop(right);
    __ Pop(left);
  } else {
    __ cmp(left, Operand(right));
  }
  return EQ;
}
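
// Note: the number check is needed because identical() treats boxed numbers
// (e.g. two Mints or Doubles with the same value) as identical even when
// they are distinct objects, which a plain pointer cmp cannot detect.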

// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
// FlowGraphCompiler::SlowPathEnvironmentFor.
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
#if defined(DEBUG)
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);
#endif

  // TODO(vegorov): consider saving only caller save (volatile) registers.
  const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
(...skipping 23 matching lines...)
    Register reg = static_cast<Register>(i);
    if (locs->live_registers()->ContainsRegister(reg)) {
      reg_list |= (1 << reg);
    }
  }
  if (reg_list != 0) {
    __ PushList(reg_list);
  }
}
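
// Note: collecting the live CPU registers into a RegList bitmask lets
// PushList emit a single store-multiple instruction; ARM stores the
// lowest-numbered register at the lowest address, which is the stack layout
// the sync comment above obliges RecordSafepoint to assume as well.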

void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
  RegList reg_list = 0;
  for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
    Register reg = static_cast<Register>(i);
    if (locs->live_registers()->ContainsRegister(reg)) {
      reg_list |= (1 << reg);
    }
  }
  if (reg_list != 0) {
    __ PopList(reg_list);
  }

  const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount();
  if (fpu_regs_count > 0) {
    // Fpu registers have the lowest register number at the lowest address.
    intptr_t offset = 0;
    for (intptr_t i = 0; i < kNumberOfFpuRegisters; ++i) {
      QRegister fpu_reg = static_cast<QRegister>(i);
      if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) {
        DRegister d = EvenDRegisterOf(fpu_reg);
        ASSERT(d + 1 == OddDRegisterOf(fpu_reg));
        __ vldmd(IA_W, SP, d, 2);
        offset += kFpuRegisterSize;
      }
    }
    ASSERT(offset == (fpu_regs_count * kFpuRegisterSize));
  }
}
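
// Note: the restore order mirrors the save order: CPU registers were pushed
// last, so PopList runs first, then the FPU registers are reloaded with
// vldmd in ascending register order, matching the layout noted in the
// comment above.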

#if defined(DEBUG)
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    // TODO(zerny): clobber non-live temporary FPU registers.
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ mov(tmp.reg(), Operand(0xf7));
    }
  }
}
#endif
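
// Note: writing a distinctive junk value (0xf7) into dead temporaries in
// DEBUG builds makes any code that wrongly reads a temp after its last use
// fail loudly in tests rather than silently seeing a stale value.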

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t argument_count,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load receiver into R0.
  __ LoadFromOffset(kWord, R0, SP, (argument_count - 1) * kWordSize);
  __ LoadObject(R4, arguments_descriptor);
}
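
// Note: the receiver is the first argument pushed, hence the deepest slot:
// with argument_count arguments on the stack it sits at
// SP + (argument_count - 1) * kWordSize.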

void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
  __ tst(R0, Operand(kSmiTagMask));
  // Jump if the receiver is a Smi (if_smi) or is not a Smi (!if_smi).
  __ b(label, if_smi ? EQ : NE);
}
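
// Note: Smis are tagged with a clear low bit, so the tst against
// kSmiTagMask sets Z exactly when R0 holds a Smi; EQ therefore branches for
// Smi receivers and NE for non-Smi receivers.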

void FlowGraphCompiler::EmitTestAndCallLoadCid() {
  __ LoadClassId(R2, R0);
}

int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label,
                                               const CidRange& range,
                                               int bias) {
  intptr_t cid_start = range.cid_start;
  if (range.IsSingleCid()) {
    __ CompareImmediate(R2, cid_start - bias);
    __ b(next_label, NE);
  } else {
    __ AddImmediate(R2, R2, bias - cid_start);
    bias = cid_start;
    __ CompareImmediate(R2, range.Extent());
    __ b(next_label, HI);  // Unsigned higher.
  }
  return bias;
}
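
// Note: for a multi-cid range the cid in R2 is biased down by cid_start so
// that a single unsigned compare against Extent() covers the whole range.
// For example, with bias 0 and the range [5, 8], R2 becomes cid - 5 and HI
// rejects anything outside 0..3. The updated bias is returned so the next
// range check can reuse the already-adjusted R2 instead of re-biasing.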

#undef __
#define __ compiler_->assembler()->

void ParallelMoveResolver::EmitMove(int index) {
  MoveOperands* move = moves_[index];
  const Location source = move->src();
  const Location destination = move->dest();

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ mov(destination.reg(), Operand(source.reg()));
    } else {
      ASSERT(destination.IsStackSlot());
(...skipping 100 matching lines...)
    } else {
      __ LoadObject(TMP, constant);
    }
    __ StoreToOffset(kWord, TMP, destination.base_reg(), dest_offset);
  }

  move->Eliminate();
}
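
// Note: the constant-to-stack tail shown above stages the value through TMP
// (IP on ARM) because a store needs a register source; the assertions in
// EmitSwap below likewise rely on IP never holding a live value.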

void ParallelMoveResolver::EmitSwap(int index) {
  MoveOperands* move = moves_[index];
  const Location source = move->src();
  const Location destination = move->dest();

  if (source.IsRegister() && destination.IsRegister()) {
    ASSERT(source.reg() != IP);
    ASSERT(destination.reg() != IP);
    __ mov(IP, Operand(source.reg()));
    __ mov(source.reg(), Operand(destination.reg()));
(...skipping 78 matching lines...)
  for (int i = 0; i < moves_.length(); ++i) {
    const MoveOperands& other_move = *moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i]->set_src(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i]->set_src(source);
    }
  }
}
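
// Note: once the swap has happened, any pending move that named either
// swapped location as its source would read the wrong value, so the loop
// above redirects such sources to the location now holding their data.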

void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
                                              const Address& src) {
  UNREACHABLE();
}

void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
  UNREACHABLE();
}

// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
  UNREACHABLE();
}

// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
  UNREACHABLE();
}

void ParallelMoveResolver::Exchange(Register reg,
                                    Register base_reg,
                                    intptr_t stack_offset) {
  ScratchRegisterScope tmp(this, reg);
  __ mov(tmp.reg(), Operand(reg));
  __ LoadFromOffset(kWord, reg, base_reg, stack_offset);
  __ StoreToOffset(kWord, tmp.reg(), base_reg, stack_offset);
}
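
// Note: ScratchRegisterScope hands out a scratch distinct from 'reg',
// presumably spilling one via SpillScratch below when none is free, so the
// mov/load/store sequence above performs a true register-to-slot swap.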

void ParallelMoveResolver::Exchange(Register base_reg1,
                                    intptr_t stack_offset1,
                                    Register base_reg2,
                                    intptr_t stack_offset2) {
  ScratchRegisterScope tmp1(this, kNoRegister);
  ScratchRegisterScope tmp2(this, tmp1.reg());
  __ LoadFromOffset(kWord, tmp1.reg(), base_reg1, stack_offset1);
  __ LoadFromOffset(kWord, tmp2.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(kWord, tmp1.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(kWord, tmp2.reg(), base_reg1, stack_offset1);
}

void ParallelMoveResolver::SpillScratch(Register reg) {
  __ Push(reg);
}

void ParallelMoveResolver::RestoreScratch(Register reg) {
  __ Pop(reg);
}

void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
  DRegister dreg = EvenDRegisterOf(reg);
  __ vstrd(dreg, Address(SP, -kDoubleSize, Address::PreIndex));
}

void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
  DRegister dreg = EvenDRegisterOf(reg);
  __ vldrd(dreg, Address(SP, kDoubleSize, Address::PostIndex));
}
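
// Note: only the even D half of the Q scratch is saved, with a pre-indexed
// vstrd push and a post-indexed vldrd pop; one double appears to suffice
// because the resolver uses the FPU scratch only for double-width moves.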

#undef __

}  // namespace dart

#endif  // defined TARGET_ARCH_ARM