OLD | NEW |
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. | 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. |
6 #if defined(TARGET_ARCH_ARM64) | 6 #if defined(TARGET_ARCH_ARM64) |
7 | 7 |
8 #include "vm/flow_graph_compiler.h" | 8 #include "vm/flow_graph_compiler.h" |
9 | 9 |
10 #include "vm/ast_printer.h" | 10 #include "vm/ast_printer.h" |
11 #include "vm/compiler.h" | 11 #include "vm/compiler.h" |
12 #include "vm/cpu.h" | 12 #include "vm/cpu.h" |
13 #include "vm/dart_entry.h" | 13 #include "vm/dart_entry.h" |
14 #include "vm/deopt_instructions.h" | 14 #include "vm/deopt_instructions.h" |
15 #include "vm/il_printer.h" | 15 #include "vm/il_printer.h" |
16 #include "vm/instructions.h" | 16 #include "vm/instructions.h" |
17 #include "vm/locations.h" | 17 #include "vm/locations.h" |
18 #include "vm/object_store.h" | 18 #include "vm/object_store.h" |
19 #include "vm/parser.h" | 19 #include "vm/parser.h" |
20 #include "vm/stack_frame.h" | 20 #include "vm/stack_frame.h" |
21 #include "vm/stub_code.h" | 21 #include "vm/stub_code.h" |
22 #include "vm/symbols.h" | 22 #include "vm/symbols.h" |
23 | 23 |
24 namespace dart { | 24 namespace dart { |
25 | 25 |
26 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization."); | 26 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization."); |
27 DECLARE_FLAG(bool, enable_simd_inline); | 27 DECLARE_FLAG(bool, enable_simd_inline); |
28 | 28 |
29 | |
30 FlowGraphCompiler::~FlowGraphCompiler() { | 29 FlowGraphCompiler::~FlowGraphCompiler() { |
31 // BlockInfos are zone-allocated, so their destructors are not called. | 30 // BlockInfos are zone-allocated, so their destructors are not called. |
32 // Verify the labels explicitly here. | 31 // Verify the labels explicitly here. |
33 for (int i = 0; i < block_info_.length(); ++i) { | 32 for (int i = 0; i < block_info_.length(); ++i) { |
34 ASSERT(!block_info_[i]->jump_label()->IsLinked()); | 33 ASSERT(!block_info_[i]->jump_label()->IsLinked()); |
35 } | 34 } |
36 } | 35 } |
37 | 36 |
38 | |
39 bool FlowGraphCompiler::SupportsUnboxedDoubles() { | 37 bool FlowGraphCompiler::SupportsUnboxedDoubles() { |
40 return true; | 38 return true; |
41 } | 39 } |
42 | 40 |
43 | |
44 bool FlowGraphCompiler::SupportsUnboxedMints() { | 41 bool FlowGraphCompiler::SupportsUnboxedMints() { |
45 return false; | 42 return false; |
46 } | 43 } |
47 | 44 |
48 | |
49 bool FlowGraphCompiler::SupportsUnboxedSimd128() { | 45 bool FlowGraphCompiler::SupportsUnboxedSimd128() { |
50 return FLAG_enable_simd_inline; | 46 return FLAG_enable_simd_inline; |
51 } | 47 } |
52 | 48 |
53 | |
54 bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() { | 49 bool FlowGraphCompiler::CanConvertUnboxedMintToDouble() { |
55 // ARM does not have a short instruction sequence for converting int64 to | 50 // ARM does not have a short instruction sequence for converting int64 to |
56 // double. | 51 // double. |
57 return false; | 52 return false; |
58 } | 53 } |
59 | 54 |
60 | |
61 bool FlowGraphCompiler::SupportsHardwareDivision() { | 55 bool FlowGraphCompiler::SupportsHardwareDivision() { |
62 return true; | 56 return true; |
63 } | 57 } |
64 | 58 |
65 | |
66 void FlowGraphCompiler::EnterIntrinsicMode() { | 59 void FlowGraphCompiler::EnterIntrinsicMode() { |
67 ASSERT(!intrinsic_mode()); | 60 ASSERT(!intrinsic_mode()); |
68 intrinsic_mode_ = true; | 61 intrinsic_mode_ = true; |
69 ASSERT(!assembler()->constant_pool_allowed()); | 62 ASSERT(!assembler()->constant_pool_allowed()); |
70 } | 63 } |
71 | 64 |
72 | |
73 void FlowGraphCompiler::ExitIntrinsicMode() { | 65 void FlowGraphCompiler::ExitIntrinsicMode() { |
74 ASSERT(intrinsic_mode()); | 66 ASSERT(intrinsic_mode()); |
75 intrinsic_mode_ = false; | 67 intrinsic_mode_ = false; |
76 } | 68 } |
77 | 69 |
78 | |
79 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, | 70 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, |
80 DeoptInfoBuilder* builder, | 71 DeoptInfoBuilder* builder, |
81 const Array& deopt_table) { | 72 const Array& deopt_table) { |
82 if (deopt_env_ == NULL) { | 73 if (deopt_env_ == NULL) { |
83 ++builder->current_info_number_; | 74 ++builder->current_info_number_; |
84 return TypedData::null(); | 75 return TypedData::null(); |
85 } | 76 } |
86 | 77 |
87 intptr_t stack_height = compiler->StackSize(); | 78 intptr_t stack_height = compiler->StackSize(); |
88 AllocateIncomingParametersRecursive(deopt_env_, &stack_height); | 79 AllocateIncomingParametersRecursive(deopt_env_, &stack_height); |
(...skipping 66 matching lines...) |
155 builder->AddCallerPc(slot_ix++); | 146 builder->AddCallerPc(slot_ix++); |
156 | 147 |
157 // For the outermost environment, set the incoming arguments. | 148 // For the outermost environment, set the incoming arguments. |
158 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { | 149 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) { |
159 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++); | 150 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++); |
160 } | 151 } |
161 | 152 |
162 return builder->CreateDeoptInfo(deopt_table); | 153 return builder->CreateDeoptInfo(deopt_table); |
163 } | 154 } |
164 | 155 |
165 | |
166 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, | 156 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler, |
167 intptr_t stub_ix) { | 157 intptr_t stub_ix) { |
168 // Calls do not need stubs, they share a deoptimization trampoline. | 158 // Calls do not need stubs, they share a deoptimization trampoline. |
169 ASSERT(reason() != ICData::kDeoptAtCall); | 159 ASSERT(reason() != ICData::kDeoptAtCall); |
170 Assembler* assembler = compiler->assembler(); | 160 Assembler* assembler = compiler->assembler(); |
171 #define __ assembler-> | 161 #define __ assembler-> |
172 __ Comment("%s", Name()); | 162 __ Comment("%s", Name()); |
173 __ Bind(entry_label()); | 163 __ Bind(entry_label()); |
174 if (FLAG_trap_on_deoptimization) { | 164 if (FLAG_trap_on_deoptimization) { |
175 __ brk(0); | 165 __ brk(0); |
176 } | 166 } |
177 | 167 |
178 ASSERT(deopt_env() != NULL); | 168 ASSERT(deopt_env() != NULL); |
179 __ Push(CODE_REG); | 169 __ Push(CODE_REG); |
180 __ BranchLink(*StubCode::Deoptimize_entry()); | 170 __ BranchLink(*StubCode::Deoptimize_entry()); |
181 set_pc_offset(assembler->CodeSize()); | 171 set_pc_offset(assembler->CodeSize()); |
182 #undef __ | 172 #undef __ |
183 } | 173 } |
184 | 174 |
185 | |
186 #define __ assembler()-> | 175 #define __ assembler()-> |
187 | 176 |
188 | |
189 // Fall through if bool_register contains null. | 177 // Fall through if bool_register contains null. |
190 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, | 178 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, |
191 Label* is_true, | 179 Label* is_true, |
192 Label* is_false) { | 180 Label* is_false) { |
193 Label fall_through; | 181 Label fall_through; |
194 __ CompareObject(bool_register, Object::null_object()); | 182 __ CompareObject(bool_register, Object::null_object()); |
195 __ b(&fall_through, EQ); | 183 __ b(&fall_through, EQ); |
196 __ CompareObject(bool_register, Bool::True()); | 184 __ CompareObject(bool_register, Bool::True()); |
197 __ b(is_true, EQ); | 185 __ b(is_true, EQ); |
198 __ b(is_false); | 186 __ b(is_false); |
199 __ Bind(&fall_through); | 187 __ Bind(&fall_through); |
200 } | 188 } |
201 | 189 |
202 | |
203 // R0: instance (must be preserved). | 190 // R0: instance (must be preserved). |
204 // R1: instantiator type arguments (if used). | 191 // R1: instantiator type arguments (if used). |
205 // R2: function type arguments (if used). | 192 // R2: function type arguments (if used). |
206 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub( | 193 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub( |
207 TypeTestStubKind test_kind, | 194 TypeTestStubKind test_kind, |
208 Register instance_reg, | 195 Register instance_reg, |
209 Register instantiator_type_arguments_reg, | 196 Register instantiator_type_arguments_reg, |
210 Register function_type_arguments_reg, | 197 Register function_type_arguments_reg, |
211 Register temp_reg, | 198 Register temp_reg, |
212 Label* is_instance_lbl, | 199 Label* is_instance_lbl, |
(...skipping 16 matching lines...) |
229 ASSERT(function_type_arguments_reg == R2); | 216 ASSERT(function_type_arguments_reg == R2); |
230 __ BranchLink(*StubCode::Subtype4TestCache_entry()); | 217 __ BranchLink(*StubCode::Subtype4TestCache_entry()); |
231 } else { | 218 } else { |
232 UNREACHABLE(); | 219 UNREACHABLE(); |
233 } | 220 } |
234 // Result is in R1: null -> not found, otherwise Bool::True or Bool::False. | 221 // Result is in R1: null -> not found, otherwise Bool::True or Bool::False. |
235 GenerateBoolToJump(R1, is_instance_lbl, is_not_instance_lbl); | 222 GenerateBoolToJump(R1, is_instance_lbl, is_not_instance_lbl); |
236 return type_test_cache.raw(); | 223 return type_test_cache.raw(); |
237 } | 224 } |
238 | 225 |
239 | |
240 // Jumps to labels 'is_instance' or 'is_not_instance' respectively, if | 226 // Jumps to labels 'is_instance' or 'is_not_instance' respectively, if |
241 // type test is conclusive, otherwise fallthrough if a type test could not | 227 // type test is conclusive, otherwise fallthrough if a type test could not |
242 // be completed. | 228 // be completed. |
243 // R0: instance being type checked (preserved). | 229 // R0: instance being type checked (preserved). |
244 // Clobbers R1, R2. | 230 // Clobbers R1, R2. |
245 RawSubtypeTestCache* | 231 RawSubtypeTestCache* |
246 FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest( | 232 FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest( |
247 TokenPosition token_pos, | 233 TokenPosition token_pos, |
248 const AbstractType& type, | 234 const AbstractType& type, |
249 Label* is_instance_lbl, | 235 Label* is_instance_lbl, |
(...skipping 58 matching lines...) |
308 const Register kInstantiatorTypeArgumentsReg = kNoRegister; | 294 const Register kInstantiatorTypeArgumentsReg = kNoRegister; |
309 const Register kFunctionTypeArgumentsReg = kNoRegister; | 295 const Register kFunctionTypeArgumentsReg = kNoRegister; |
310 const Register kTempReg = kNoRegister; | 296 const Register kTempReg = kNoRegister; |
311 // R0: instance (must be preserved). | 297 // R0: instance (must be preserved). |
312 return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg, | 298 return GenerateCallSubtypeTestStub(kTestTypeTwoArgs, kInstanceReg, |
313 kInstantiatorTypeArgumentsReg, | 299 kInstantiatorTypeArgumentsReg, |
314 kFunctionTypeArgumentsReg, kTempReg, | 300 kFunctionTypeArgumentsReg, kTempReg, |
315 is_instance_lbl, is_not_instance_lbl); | 301 is_instance_lbl, is_not_instance_lbl); |
316 } | 302 } |
317 | 303 |
318 | |
319 void FlowGraphCompiler::CheckClassIds(Register class_id_reg, | 304 void FlowGraphCompiler::CheckClassIds(Register class_id_reg, |
320 const GrowableArray<intptr_t>& class_ids, | 305 const GrowableArray<intptr_t>& class_ids, |
321 Label* is_equal_lbl, | 306 Label* is_equal_lbl, |
322 Label* is_not_equal_lbl) { | 307 Label* is_not_equal_lbl) { |
323 for (intptr_t i = 0; i < class_ids.length(); i++) { | 308 for (intptr_t i = 0; i < class_ids.length(); i++) { |
324 __ CompareImmediate(class_id_reg, class_ids[i]); | 309 __ CompareImmediate(class_id_reg, class_ids[i]); |
325 __ b(is_equal_lbl, EQ); | 310 __ b(is_equal_lbl, EQ); |
326 } | 311 } |
327 __ b(is_not_equal_lbl); | 312 __ b(is_not_equal_lbl); |
328 } | 313 } |
329 | 314 |
330 | |
331 // Testing against an instantiated type with no arguments, without | 315 // Testing against an instantiated type with no arguments, without |
332 // SubtypeTestCache. | 316 // SubtypeTestCache. |
333 // R0: instance being type checked (preserved). | 317 // R0: instance being type checked (preserved). |
334 // Clobbers R2, R3. | 318 // Clobbers R2, R3. |
335 // Returns true if there is a fallthrough. | 319 // Returns true if there is a fallthrough. |
336 bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest( | 320 bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest( |
337 TokenPosition token_pos, | 321 TokenPosition token_pos, |
338 const AbstractType& type, | 322 const AbstractType& type, |
339 Label* is_instance_lbl, | 323 Label* is_instance_lbl, |
340 Label* is_not_instance_lbl) { | 324 Label* is_not_instance_lbl) { |
(...skipping 47 matching lines...) |
388 } | 372 } |
389 // Compare if the classes are equal. | 373 // Compare if the classes are equal. |
390 if (!type_class.is_abstract()) { | 374 if (!type_class.is_abstract()) { |
391 __ CompareImmediate(kClassIdReg, type_class.id()); | 375 __ CompareImmediate(kClassIdReg, type_class.id()); |
392 __ b(is_instance_lbl, EQ); | 376 __ b(is_instance_lbl, EQ); |
393 } | 377 } |
394 // Otherwise fallthrough. | 378 // Otherwise fallthrough. |
395 return true; | 379 return true; |
396 } | 380 } |
397 | 381 |
398 | |
399 // Uses SubtypeTestCache to store instance class and result. | 382 // Uses SubtypeTestCache to store instance class and result. |
400 // R0: instance to test. | 383 // R0: instance to test. |
401 // Clobbers R1-R5. | 384 // Clobbers R1-R5. |
402 // Immediate class test already done. | 385 // Immediate class test already done. |
403 // TODO(srdjan): Implement a quicker subtype check, as type test | 386 // TODO(srdjan): Implement a quicker subtype check, as type test |
404 // arrays can grow too high, but they may be useful when optimizing | 387 // arrays can grow too high, but they may be useful when optimizing |
405 // code (type-feedback). | 388 // code (type-feedback). |
406 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup( | 389 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup( |
407 TokenPosition token_pos, | 390 TokenPosition token_pos, |
408 const Class& type_class, | 391 const Class& type_class, |
(...skipping 11 matching lines...) |
420 | 403 |
421 const Register kInstantiatorTypeArgumentsReg = kNoRegister; | 404 const Register kInstantiatorTypeArgumentsReg = kNoRegister; |
422 const Register kFunctionTypeArgumentsReg = kNoRegister; | 405 const Register kFunctionTypeArgumentsReg = kNoRegister; |
423 const Register kTempReg = kNoRegister; | 406 const Register kTempReg = kNoRegister; |
424 return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg, | 407 return GenerateCallSubtypeTestStub(kTestTypeOneArg, kInstanceReg, |
425 kInstantiatorTypeArgumentsReg, | 408 kInstantiatorTypeArgumentsReg, |
426 kFunctionTypeArgumentsReg, kTempReg, | 409 kFunctionTypeArgumentsReg, kTempReg, |
427 is_instance_lbl, is_not_instance_lbl); | 410 is_instance_lbl, is_not_instance_lbl); |
428 } | 411 } |
429 | 412 |
430 | |
431 // Generates inlined check if 'type' is a type parameter or type itself | 413 // Generates inlined check if 'type' is a type parameter or type itself |
432 // R0: instance (preserved). | 414 // R0: instance (preserved). |
433 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest( | 415 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest( |
434 TokenPosition token_pos, | 416 TokenPosition token_pos, |
435 const AbstractType& type, | 417 const AbstractType& type, |
436 Label* is_instance_lbl, | 418 Label* is_instance_lbl, |
437 Label* is_not_instance_lbl) { | 419 Label* is_not_instance_lbl) { |
438 __ Comment("UninstantiatedTypeTest"); | 420 __ Comment("UninstantiatedTypeTest"); |
439 ASSERT(!type.IsInstantiated()); | 421 ASSERT(!type.IsInstantiated()); |
440 // Skip check if destination is a dynamic type. | 422 // Skip check if destination is a dynamic type. |
(...skipping 58 matching lines...) |
499 // arguments are determined at runtime by the instantiator. | 481 // arguments are determined at runtime by the instantiator. |
500 const Register kTempReg = kNoRegister; | 482 const Register kTempReg = kNoRegister; |
501 return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg, | 483 return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg, |
502 kInstantiatorTypeArgumentsReg, | 484 kInstantiatorTypeArgumentsReg, |
503 kFunctionTypeArgumentsReg, kTempReg, | 485 kFunctionTypeArgumentsReg, kTempReg, |
504 is_instance_lbl, is_not_instance_lbl); | 486 is_instance_lbl, is_not_instance_lbl); |
505 } | 487 } |
506 return SubtypeTestCache::null(); | 488 return SubtypeTestCache::null(); |
507 } | 489 } |
508 | 490 |
509 | |
510 // Inputs: | 491 // Inputs: |
511 // - R0: instance being type checked (preserved). | 492 // - R0: instance being type checked (preserved). |
512 // - R1: optional instantiator type arguments (preserved). | 493 // - R1: optional instantiator type arguments (preserved). |
513 // - R2: optional function type arguments (preserved). | 494 // - R2: optional function type arguments (preserved). |
514 // Clobbers R3, R4, R8, R9. | 495 // Clobbers R3, R4, R8, R9. |
515 // Returns: | 496 // Returns: |
516 // - preserved instance in R0, optional instantiator type arguments in R1, and | 497 // - preserved instance in R0, optional instantiator type arguments in R1, and |
517 // optional function type arguments in R2. | 498 // optional function type arguments in R2. |
518 // Note that this inlined code must be followed by the runtime_call code, as it | 499 // Note that this inlined code must be followed by the runtime_call code, as it |
519 // may fall through to it. Otherwise, this inline code will jump to the label | 500 // may fall through to it. Otherwise, this inline code will jump to the label |
(...skipping 22 matching lines...) |
542 return GenerateSubtype1TestCacheLookup( | 523 return GenerateSubtype1TestCacheLookup( |
543 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); | 524 token_pos, type_class, is_instance_lbl, is_not_instance_lbl); |
544 } else { | 525 } else { |
545 return SubtypeTestCache::null(); | 526 return SubtypeTestCache::null(); |
546 } | 527 } |
547 } | 528 } |
548 return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl, | 529 return GenerateUninstantiatedTypeTest(token_pos, type, is_instance_lbl, |
549 is_not_instance_lbl); | 530 is_not_instance_lbl); |
550 } | 531 } |
551 | 532 |
552 | |
553 // If instanceof type test cannot be performed successfully at compile time and | 533 // If instanceof type test cannot be performed successfully at compile time and |
554 // therefore eliminated, optimize it by adding inlined tests for: | 534 // therefore eliminated, optimize it by adding inlined tests for: |
555 // - NULL -> return type == Null (type is not Object or dynamic). | 535 // - NULL -> return type == Null (type is not Object or dynamic). |
556 // - Smi -> compile time subtype check (only if dst class is not parameterized). | 536 // - Smi -> compile time subtype check (only if dst class is not parameterized). |
557 // - Class equality (only if class is not parameterized). | 537 // - Class equality (only if class is not parameterized). |
558 // Inputs: | 538 // Inputs: |
559 // - R0: object. | 539 // - R0: object. |
560 // - R1: instantiator type arguments or raw_null. | 540 // - R1: instantiator type arguments or raw_null. |
561 // - R2: function type arguments or raw_null. | 541 // - R2: function type arguments or raw_null. |
562 // Returns: | 542 // Returns: |
(...skipping 52 matching lines...) |
615 __ LoadObject(R0, Bool::Get(false)); | 595 __ LoadObject(R0, Bool::Get(false)); |
616 __ b(&done); | 596 __ b(&done); |
617 | 597 |
618 __ Bind(&is_instance); | 598 __ Bind(&is_instance); |
619 __ LoadObject(R0, Bool::Get(true)); | 599 __ LoadObject(R0, Bool::Get(true)); |
620 __ Bind(&done); | 600 __ Bind(&done); |
621 // Remove instantiator type arguments and function type arguments. | 601 // Remove instantiator type arguments and function type arguments. |
622 __ Drop(2); | 602 __ Drop(2); |
623 } | 603 } |
624 | 604 |
625 | |
626 // Optimize assignable type check by adding inlined tests for: | 605 // Optimize assignable type check by adding inlined tests for: |
627 // - NULL -> return NULL. | 606 // - NULL -> return NULL. |
628 // - Smi -> compile time subtype check (only if dst class is not parameterized). | 607 // - Smi -> compile time subtype check (only if dst class is not parameterized). |
629 // - Class equality (only if class is not parameterized). | 608 // - Class equality (only if class is not parameterized). |
630 // Inputs: | 609 // Inputs: |
631 // - R0: instance being type checked. | 610 // - R0: instance being type checked. |
632 // - R1: instantiator type arguments or raw_null. | 611 // - R1: instantiator type arguments or raw_null. |
633 // - R2: function type arguments or raw_null. | 612 // - R2: function type arguments or raw_null. |
634 // Returns: | 613 // Returns: |
635 // - object in R0 for successful assignable check (or throws TypeError). | 614 // - object in R0 for successful assignable check (or throws TypeError). |
(...skipping 53 matching lines...) |
689 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs); | 668 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs); |
690 // Pop the parameters supplied to the runtime entry. The result of the | 669 // Pop the parameters supplied to the runtime entry. The result of the |
691 // type check runtime call is the checked value. | 670 // type check runtime call is the checked value. |
692 __ Drop(6); | 671 __ Drop(6); |
693 __ Pop(R0); | 672 __ Pop(R0); |
694 | 673 |
695 __ Bind(&is_assignable); | 674 __ Bind(&is_assignable); |
696 __ PopPair(kFunctionTypeArgumentsReg, kInstantiatorTypeArgumentsReg); | 675 __ PopPair(kFunctionTypeArgumentsReg, kInstantiatorTypeArgumentsReg); |
697 } | 676 } |
698 | 677 |
699 | |
700 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) { | 678 void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) { |
701 if (is_optimizing()) { | 679 if (is_optimizing()) { |
702 return; | 680 return; |
703 } | 681 } |
704 Definition* defn = instr->AsDefinition(); | 682 Definition* defn = instr->AsDefinition(); |
705 if ((defn != NULL) && defn->HasTemp()) { | 683 if ((defn != NULL) && defn->HasTemp()) { |
706 __ Push(defn->locs()->out(0).reg()); | 684 __ Push(defn->locs()->out(0).reg()); |
707 } | 685 } |
708 } | 686 } |
709 | 687 |
710 | |
711 // Input parameters: | 688 // Input parameters: |
712 // R4: arguments descriptor array. | 689 // R4: arguments descriptor array. |
713 void FlowGraphCompiler::CopyParameters() { | 690 void FlowGraphCompiler::CopyParameters() { |
714 __ Comment("Copy parameters"); | 691 __ Comment("Copy parameters"); |
715 const Function& function = parsed_function().function(); | 692 const Function& function = parsed_function().function(); |
716 LocalScope* scope = parsed_function().node_sequence()->scope(); | 693 LocalScope* scope = parsed_function().node_sequence()->scope(); |
717 const int num_fixed_params = function.num_fixed_parameters(); | 694 const int num_fixed_params = function.num_fixed_parameters(); |
718 const int num_opt_pos_params = function.NumOptionalPositionalParameters(); | 695 const int num_opt_pos_params = function.NumOptionalPositionalParameters(); |
719 const int num_opt_named_params = function.NumOptionalNamedParameters(); | 696 const int num_opt_named_params = function.NumOptionalNamedParameters(); |
720 const int num_params = | 697 const int num_params = |
(...skipping 73 matching lines...) |
794 opt_param[i + 1] = parameter; | 771 opt_param[i + 1] = parameter; |
795 opt_param_position[i + 1] = pos; | 772 opt_param_position[i + 1] = pos; |
796 } | 773 } |
797 // Generate code handling each optional parameter in alphabetical order. | 774 // Generate code handling each optional parameter in alphabetical order. |
798 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset()); | 775 __ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset()); |
799 // Let R7 point to the first passed argument, i.e. to | 776 // Let R7 point to the first passed argument, i.e. to |
800 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (R7) is Smi. | 777 // fp[kParamEndSlotFromFp + num_args - 0]; num_args (R7) is Smi. |
801 __ add(R7, FP, Operand(R7, LSL, 2)); | 778 __ add(R7, FP, Operand(R7, LSL, 2)); |
802 __ AddImmediate(R7, kParamEndSlotFromFp * kWordSize); | 779 __ AddImmediate(R7, kParamEndSlotFromFp * kWordSize); |
803 // Let R6 point to the entry of the first named argument. | 780 // Let R6 point to the entry of the first named argument. |
804 __ add(R6, R4, Operand(ArgumentsDescriptor::first_named_entry_offset() - | 781 __ add(R6, R4, |
805 kHeapObjectTag)); | 782 Operand(ArgumentsDescriptor::first_named_entry_offset() - |
| 783 kHeapObjectTag)); |
806 for (int i = 0; i < num_opt_named_params; i++) { | 784 for (int i = 0; i < num_opt_named_params; i++) { |
807 Label load_default_value, assign_optional_parameter; | 785 Label load_default_value, assign_optional_parameter; |
808 const int param_pos = opt_param_position[i]; | 786 const int param_pos = opt_param_position[i]; |
809 // Check if this named parameter was passed in. | 787 // Check if this named parameter was passed in. |
810 // Load R5 with the name of the argument. | 788 // Load R5 with the name of the argument. |
811 __ LoadFromOffset(R5, R6, ArgumentsDescriptor::name_offset()); | 789 __ LoadFromOffset(R5, R6, ArgumentsDescriptor::name_offset()); |
812 ASSERT(opt_param[i]->name().IsSymbol()); | 790 ASSERT(opt_param[i]->name().IsSymbol()); |
813 __ CompareObject(R5, opt_param[i]->name()); | 791 __ CompareObject(R5, opt_param[i]->name()); |
814 __ b(&load_default_value, NE); | 792 __ b(&load_default_value, NE); |
815 // Load R5 with passed-in argument at provided arg_pos, i.e. at | 793 // Load R5 with passed-in argument at provided arg_pos, i.e. at |
(...skipping 86 matching lines...) |
902 __ LoadObject(TMP, Object::null_object()); | 880 __ LoadObject(TMP, Object::null_object()); |
903 Label null_args_loop, null_args_loop_condition; | 881 Label null_args_loop, null_args_loop_condition; |
904 __ b(&null_args_loop_condition); | 882 __ b(&null_args_loop_condition); |
905 __ Bind(&null_args_loop); | 883 __ Bind(&null_args_loop); |
906 __ str(TMP, original_argument_addr); | 884 __ str(TMP, original_argument_addr); |
907 __ Bind(&null_args_loop_condition); | 885 __ Bind(&null_args_loop_condition); |
908 __ subs(R8, R8, Operand(1)); | 886 __ subs(R8, R8, Operand(1)); |
909 __ b(&null_args_loop, PL); | 887 __ b(&null_args_loop, PL); |
910 } | 888 } |
911 | 889 |
912 | |
913 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { | 890 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) { |
914 // LR: return address. | 891 // LR: return address. |
915 // SP: receiver. | 892 // SP: receiver. |
916 // Sequence node has one return node, its input is load field node. | 893 // Sequence node has one return node, its input is load field node. |
917 __ Comment("Inlined Getter"); | 894 __ Comment("Inlined Getter"); |
918 __ LoadFromOffset(R0, SP, 0 * kWordSize); | 895 __ LoadFromOffset(R0, SP, 0 * kWordSize); |
919 __ LoadFieldFromOffset(R0, R0, offset); | 896 __ LoadFieldFromOffset(R0, R0, offset); |
920 __ ret(); | 897 __ ret(); |
921 } | 898 } |
922 | 899 |
923 | |
924 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { | 900 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { |
925 // LR: return address. | 901 // LR: return address. |
926 // SP+1: receiver. | 902 // SP+1: receiver. |
927 // SP+0: value. | 903 // SP+0: value. |
928 // Sequence node has one store node and one return NULL node. | 904 // Sequence node has one store node and one return NULL node. |
929 __ Comment("Inlined Setter"); | 905 __ Comment("Inlined Setter"); |
930 __ LoadFromOffset(R0, SP, 1 * kWordSize); // Receiver. | 906 __ LoadFromOffset(R0, SP, 1 * kWordSize); // Receiver. |
931 __ LoadFromOffset(R1, SP, 0 * kWordSize); // Value. | 907 __ LoadFromOffset(R1, SP, 0 * kWordSize); // Value. |
932 __ StoreIntoObjectOffset(R0, offset, R1); | 908 __ StoreIntoObjectOffset(R0, offset, R1); |
933 __ LoadObject(R0, Object::null_object()); | 909 __ LoadObject(R0, Object::null_object()); |
934 __ ret(); | 910 __ ret(); |
935 } | 911 } |
936 | 912 |
937 | |
938 void FlowGraphCompiler::EmitFrameEntry() { | 913 void FlowGraphCompiler::EmitFrameEntry() { |
939 const Function& function = parsed_function().function(); | 914 const Function& function = parsed_function().function(); |
940 Register new_pp = kNoRegister; | 915 Register new_pp = kNoRegister; |
941 if (CanOptimizeFunction() && function.IsOptimizable() && | 916 if (CanOptimizeFunction() && function.IsOptimizable() && |
942 (!is_optimizing() || may_reoptimize())) { | 917 (!is_optimizing() || may_reoptimize())) { |
943 __ Comment("Invocation Count Check"); | 918 __ Comment("Invocation Count Check"); |
944 const Register function_reg = R6; | 919 const Register function_reg = R6; |
945 new_pp = R13; | 920 new_pp = R13; |
946 // The pool pointer is not setup before entering the Dart frame. | 921 // The pool pointer is not setup before entering the Dart frame. |
947 // Temporarily setup pool pointer for this dart function. | 922 // Temporarily setup pool pointer for this dart function. |
(...skipping 23 matching lines...) |
971 intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() - | 946 intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() - |
972 flow_graph().num_copied_params(); | 947 flow_graph().num_copied_params(); |
973 ASSERT(extra_slots >= 0); | 948 ASSERT(extra_slots >= 0); |
974 __ EnterOsrFrame(extra_slots * kWordSize, new_pp); | 949 __ EnterOsrFrame(extra_slots * kWordSize, new_pp); |
975 } else { | 950 } else { |
976 ASSERT(StackSize() >= 0); | 951 ASSERT(StackSize() >= 0); |
977 __ EnterDartFrame(StackSize() * kWordSize, new_pp); | 952 __ EnterDartFrame(StackSize() * kWordSize, new_pp); |
978 } | 953 } |
979 } | 954 } |
980 | 955 |
981 | |
982 // Input parameters: | 956 // Input parameters: |
983 // LR: return address. | 957 // LR: return address. |
984 // SP: address of last argument. | 958 // SP: address of last argument. |
985 // FP: caller's frame pointer. | 959 // FP: caller's frame pointer. |
986 // PP: caller's pool pointer. | 960 // PP: caller's pool pointer. |
987 // R5: ic-data. | 961 // R5: ic-data. |
988 // R4: arguments descriptor array. | 962 // R4: arguments descriptor array. |
989 void FlowGraphCompiler::CompileGraph() { | 963 void FlowGraphCompiler::CompileGraph() { |
990 InitCompiler(); | 964 InitCompiler(); |
991 const Function& function = parsed_function().function(); | 965 const Function& function = parsed_function().function(); |
(...skipping 113 matching lines...) |
1105 // checked during resolution. | 1079 // checked during resolution. |
1106 | 1080 |
1107 EndCodeSourceRange(TokenPosition::kDartCodePrologue); | 1081 EndCodeSourceRange(TokenPosition::kDartCodePrologue); |
1108 VisitBlocks(); | 1082 VisitBlocks(); |
1109 | 1083 |
1110 __ brk(0); | 1084 __ brk(0); |
1111 ASSERT(assembler()->constant_pool_allowed()); | 1085 ASSERT(assembler()->constant_pool_allowed()); |
1112 GenerateDeferredCode(); | 1086 GenerateDeferredCode(); |
1113 } | 1087 } |
1114 | 1088 |
1115 | |
1116 void FlowGraphCompiler::GenerateCall(TokenPosition token_pos, | 1089 void FlowGraphCompiler::GenerateCall(TokenPosition token_pos, |
1117 const StubEntry& stub_entry, | 1090 const StubEntry& stub_entry, |
1118 RawPcDescriptors::Kind kind, | 1091 RawPcDescriptors::Kind kind, |
1119 LocationSummary* locs) { | 1092 LocationSummary* locs) { |
1120 __ BranchLink(stub_entry); | 1093 __ BranchLink(stub_entry); |
1121 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs); | 1094 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs); |
1122 } | 1095 } |
1123 | 1096 |
1124 | |
1125 void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos, | 1097 void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos, |
1126 const StubEntry& stub_entry, | 1098 const StubEntry& stub_entry, |
1127 RawPcDescriptors::Kind kind, | 1099 RawPcDescriptors::Kind kind, |
1128 LocationSummary* locs) { | 1100 LocationSummary* locs) { |
1129 __ BranchLinkPatchable(stub_entry); | 1101 __ BranchLinkPatchable(stub_entry); |
1130 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs); | 1102 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, kind, locs); |
1131 } | 1103 } |
1132 | 1104 |
1133 | |
1134 void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id, | 1105 void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id, |
1135 TokenPosition token_pos, | 1106 TokenPosition token_pos, |
1136 const StubEntry& stub_entry, | 1107 const StubEntry& stub_entry, |
1137 RawPcDescriptors::Kind kind, | 1108 RawPcDescriptors::Kind kind, |
1138 LocationSummary* locs) { | 1109 LocationSummary* locs) { |
1139 __ BranchLinkPatchable(stub_entry); | 1110 __ BranchLinkPatchable(stub_entry); |
1140 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs); | 1111 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs); |
1141 // Marks either the continuation point in unoptimized code or the | 1112 // Marks either the continuation point in unoptimized code or the |
1142 // deoptimization point in optimized code, after call. | 1113 // deoptimization point in optimized code, after call. |
1143 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); | 1114 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); |
1144 if (is_optimizing()) { | 1115 if (is_optimizing()) { |
1145 AddDeoptIndexAtCall(deopt_id_after); | 1116 AddDeoptIndexAtCall(deopt_id_after); |
1146 } else { | 1117 } else { |
1147 // Add deoptimization continuation point after the call and before the | 1118 // Add deoptimization continuation point after the call and before the |
1148 // arguments are removed. | 1119 // arguments are removed. |
1149 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); | 1120 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); |
1150 } | 1121 } |
1151 } | 1122 } |
1152 | 1123 |
1153 | |
1154 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id, | 1124 void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id, |
1155 TokenPosition token_pos, | 1125 TokenPosition token_pos, |
1156 const StubEntry& stub_entry, | 1126 const StubEntry& stub_entry, |
1157 RawPcDescriptors::Kind kind, | 1127 RawPcDescriptors::Kind kind, |
1158 LocationSummary* locs, | 1128 LocationSummary* locs, |
1159 const Function& target) { | 1129 const Function& target) { |
1160 // Call sites to the same target can share object pool entries. These | 1130 // Call sites to the same target can share object pool entries. These |
1161 // call sites are never patched for breakpoints: the function is deoptimized | 1131 // call sites are never patched for breakpoints: the function is deoptimized |
1162 // and the unoptimized code with IC calls for static calls is patched instead. | 1132 // and the unoptimized code with IC calls for static calls is patched instead. |
1163 ASSERT(is_optimizing()); | 1133 ASSERT(is_optimizing()); |
1164 __ BranchLinkWithEquivalence(stub_entry, target); | 1134 __ BranchLinkWithEquivalence(stub_entry, target); |
1165 | 1135 |
1166 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs); | 1136 EmitCallsiteMetaData(token_pos, deopt_id, kind, locs); |
1167 // Marks either the continuation point in unoptimized code or the | 1137 // Marks either the continuation point in unoptimized code or the |
1168 // deoptimization point in optimized code, after call. | 1138 // deoptimization point in optimized code, after call. |
1169 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); | 1139 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); |
1170 if (is_optimizing()) { | 1140 if (is_optimizing()) { |
1171 AddDeoptIndexAtCall(deopt_id_after); | 1141 AddDeoptIndexAtCall(deopt_id_after); |
1172 } else { | 1142 } else { |
1173 // Add deoptimization continuation point after the call and before the | 1143 // Add deoptimization continuation point after the call and before the |
1174 // arguments are removed. | 1144 // arguments are removed. |
1175 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); | 1145 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); |
1176 } | 1146 } |
1177 AddStaticCallTarget(target); | 1147 AddStaticCallTarget(target); |
1178 } | 1148 } |
1179 | 1149 |
1180 | |
1181 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos, | 1150 void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos, |
1182 intptr_t deopt_id, | 1151 intptr_t deopt_id, |
1183 const RuntimeEntry& entry, | 1152 const RuntimeEntry& entry, |
1184 intptr_t argument_count, | 1153 intptr_t argument_count, |
1185 LocationSummary* locs) { | 1154 LocationSummary* locs) { |
1186 __ CallRuntime(entry, argument_count); | 1155 __ CallRuntime(entry, argument_count); |
1187 EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs); | 1156 EmitCallsiteMetaData(token_pos, deopt_id, RawPcDescriptors::kOther, locs); |
1188 if (deopt_id != Thread::kNoDeoptId) { | 1157 if (deopt_id != Thread::kNoDeoptId) { |
1189 // Marks either the continuation point in unoptimized code or the | 1158 // Marks either the continuation point in unoptimized code or the |
1190 // deoptimization point in optimized code, after call. | 1159 // deoptimization point in optimized code, after call. |
1191 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); | 1160 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); |
1192 if (is_optimizing()) { | 1161 if (is_optimizing()) { |
1193 AddDeoptIndexAtCall(deopt_id_after); | 1162 AddDeoptIndexAtCall(deopt_id_after); |
1194 } else { | 1163 } else { |
1195 // Add deoptimization continuation point after the call and before the | 1164 // Add deoptimization continuation point after the call and before the |
1196 // arguments are removed. | 1165 // arguments are removed. |
1197 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); | 1166 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); |
1198 } | 1167 } |
1199 } | 1168 } |
1200 } | 1169 } |
1201 | 1170 |
1202 | |
1203 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) { | 1171 void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) { |
1204 // We do not check for overflow when incrementing the edge counter. The | 1172 // We do not check for overflow when incrementing the edge counter. The |
1205 // function should normally be optimized long before the counter can | 1173 // function should normally be optimized long before the counter can |
1206 // overflow; and though we do not reset the counters when we optimize or | 1174 // overflow; and though we do not reset the counters when we optimize or |
1207 // deoptimize, there is a bound on the number of | 1175 // deoptimize, there is a bound on the number of |
1208 // optimization/deoptimization cycles we will attempt. | 1176 // optimization/deoptimization cycles we will attempt. |
1209 ASSERT(!edge_counters_array_.IsNull()); | 1177 ASSERT(!edge_counters_array_.IsNull()); |
1210 ASSERT(assembler_->constant_pool_allowed()); | 1178 ASSERT(assembler_->constant_pool_allowed()); |
1211 __ Comment("Edge counter"); | 1179 __ Comment("Edge counter"); |
1212 __ LoadObject(R0, edge_counters_array_); | 1180 __ LoadObject(R0, edge_counters_array_); |
1213 __ LoadFieldFromOffset(TMP, R0, Array::element_offset(edge_id)); | 1181 __ LoadFieldFromOffset(TMP, R0, Array::element_offset(edge_id)); |
1214 __ add(TMP, TMP, Operand(Smi::RawValue(1))); | 1182 __ add(TMP, TMP, Operand(Smi::RawValue(1))); |
1215 __ StoreFieldToOffset(TMP, R0, Array::element_offset(edge_id)); | 1183 __ StoreFieldToOffset(TMP, R0, Array::element_offset(edge_id)); |
1216 } | 1184 } |
1217 | 1185 |
1218 | |
1219 void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry, | 1186 void FlowGraphCompiler::EmitOptimizedInstanceCall(const StubEntry& stub_entry, |
1220 const ICData& ic_data, | 1187 const ICData& ic_data, |
1221 intptr_t argument_count, | 1188 intptr_t argument_count, |
1222 intptr_t deopt_id, | 1189 intptr_t deopt_id, |
1223 TokenPosition token_pos, | 1190 TokenPosition token_pos, |
1224 LocationSummary* locs) { | 1191 LocationSummary* locs) { |
1225 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); | 1192 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); |
1226 // Each ICData propagated from unoptimized to optimized code contains the | 1193 // Each ICData propagated from unoptimized to optimized code contains the |
1227 // function that corresponds to the Dart function of that IC call. Due | 1194 // function that corresponds to the Dart function of that IC call. Due |
1228 // to inlining in optimized code, that function may not correspond to the | 1195 // to inlining in optimized code, that function may not correspond to the |
1229 // top-level function (parsed_function().function()) which could be | 1196 // top-level function (parsed_function().function()) which could be |
1230 // reoptimized and which counter needs to be incremented. | 1197 // reoptimized and which counter needs to be incremented. |
1231 // Pass the function explicitly, it is used in IC stub. | 1198 // Pass the function explicitly, it is used in IC stub. |
1232 | 1199 |
1233 __ LoadObject(R6, parsed_function().function()); | 1200 __ LoadObject(R6, parsed_function().function()); |
1234 __ LoadUniqueObject(R5, ic_data); | 1201 __ LoadUniqueObject(R5, ic_data); |
1235 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall, | 1202 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall, |
1236 locs); | 1203 locs); |
1237 __ Drop(argument_count); | 1204 __ Drop(argument_count); |
1238 } | 1205 } |
1239 | 1206 |
1240 | |
1241 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry, | 1207 void FlowGraphCompiler::EmitInstanceCall(const StubEntry& stub_entry, |
1242 const ICData& ic_data, | 1208 const ICData& ic_data, |
1243 intptr_t argument_count, | 1209 intptr_t argument_count, |
1244 intptr_t deopt_id, | 1210 intptr_t deopt_id, |
1245 TokenPosition token_pos, | 1211 TokenPosition token_pos, |
1246 LocationSummary* locs) { | 1212 LocationSummary* locs) { |
1247 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); | 1213 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0); |
1248 __ LoadUniqueObject(R5, ic_data); | 1214 __ LoadUniqueObject(R5, ic_data); |
1249 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall, | 1215 GenerateDartCall(deopt_id, token_pos, stub_entry, RawPcDescriptors::kIcCall, |
1250 locs); | 1216 locs); |
1251 __ Drop(argument_count); | 1217 __ Drop(argument_count); |
1252 } | 1218 } |
1253 | 1219 |
1254 | |
1255 void FlowGraphCompiler::EmitMegamorphicInstanceCall( | 1220 void FlowGraphCompiler::EmitMegamorphicInstanceCall( |
1256 const String& name, | 1221 const String& name, |
1257 const Array& arguments_descriptor, | 1222 const Array& arguments_descriptor, |
1258 intptr_t argument_count, | 1223 intptr_t argument_count, |
1259 intptr_t deopt_id, | 1224 intptr_t deopt_id, |
1260 TokenPosition token_pos, | 1225 TokenPosition token_pos, |
1261 LocationSummary* locs, | 1226 LocationSummary* locs, |
1262 intptr_t try_index, | 1227 intptr_t try_index, |
1263 intptr_t slow_path_argument_count) { | 1228 intptr_t slow_path_argument_count) { |
1264 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); | 1229 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); |
(...skipping 27 matching lines...) |
1292 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId, | 1257 AddCurrentDescriptor(RawPcDescriptors::kOther, Thread::kNoDeoptId, |
1293 token_pos); | 1258 token_pos); |
1294 // Add deoptimization continuation point after the call and before the | 1259 // Add deoptimization continuation point after the call and before the |
1295 // arguments are removed. | 1260 // arguments are removed. |
1296 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); | 1261 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); |
1297 } | 1262 } |
1298 EmitCatchEntryState(pending_deoptimization_env_, try_index); | 1263 EmitCatchEntryState(pending_deoptimization_env_, try_index); |
1299 __ Drop(argument_count); | 1264 __ Drop(argument_count); |
1300 } | 1265 } |
1301 | 1266 |
1302 | |
1303 void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data, | 1267 void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data, |
1304 intptr_t argument_count, | 1268 intptr_t argument_count, |
1305 intptr_t deopt_id, | 1269 intptr_t deopt_id, |
1306 TokenPosition token_pos, | 1270 TokenPosition token_pos, |
1307 LocationSummary* locs) { | 1271 LocationSummary* locs) { |
1308 ASSERT(ic_data.NumArgsTested() == 1); | 1272 ASSERT(ic_data.NumArgsTested() == 1); |
1309 const Code& initial_stub = | 1273 const Code& initial_stub = |
1310 Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code()); | 1274 Code::ZoneHandle(StubCode::ICCallThroughFunction_entry()->code()); |
1311 __ Comment("SwitchableCall"); | 1275 __ Comment("SwitchableCall"); |
1312 | 1276 |
1313 __ LoadFromOffset(R0, SP, (argument_count - 1) * kWordSize); | 1277 __ LoadFromOffset(R0, SP, (argument_count - 1) * kWordSize); |
1314 __ LoadUniqueObject(CODE_REG, initial_stub); | 1278 __ LoadUniqueObject(CODE_REG, initial_stub); |
1315 __ ldr(TMP, FieldAddress(CODE_REG, Code::checked_entry_point_offset())); | 1279 __ ldr(TMP, FieldAddress(CODE_REG, Code::checked_entry_point_offset())); |
1316 __ LoadUniqueObject(R5, ic_data); | 1280 __ LoadUniqueObject(R5, ic_data); |
1317 __ blr(TMP); | 1281 __ blr(TMP); |
1318 | 1282 |
1319 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, RawPcDescriptors::kOther, | 1283 EmitCallsiteMetaData(token_pos, Thread::kNoDeoptId, RawPcDescriptors::kOther, |
1320 locs); | 1284 locs); |
1321 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); | 1285 const intptr_t deopt_id_after = Thread::ToDeoptAfter(deopt_id); |
1322 if (is_optimizing()) { | 1286 if (is_optimizing()) { |
1323 AddDeoptIndexAtCall(deopt_id_after); | 1287 AddDeoptIndexAtCall(deopt_id_after); |
1324 } else { | 1288 } else { |
1325 // Add deoptimization continuation point after the call and before the | 1289 // Add deoptimization continuation point after the call and before the |
1326 // arguments are removed. | 1290 // arguments are removed. |
1327 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); | 1291 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); |
1328 } | 1292 } |
1329 __ Drop(argument_count); | 1293 __ Drop(argument_count); |
1330 } | 1294 } |
1331 | 1295 |
1332 | |
1333 void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count, | 1296 void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t argument_count, |
1334 intptr_t deopt_id, | 1297 intptr_t deopt_id, |
1335 TokenPosition token_pos, | 1298 TokenPosition token_pos, |
1336 LocationSummary* locs, | 1299 LocationSummary* locs, |
1337 const ICData& ic_data) { | 1300 const ICData& ic_data) { |
1338 const StubEntry* stub_entry = | 1301 const StubEntry* stub_entry = |
1339 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested()); | 1302 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested()); |
1340 __ LoadObject(R5, ic_data); | 1303 __ LoadObject(R5, ic_data); |
1341 GenerateDartCall(deopt_id, token_pos, *stub_entry, | 1304 GenerateDartCall(deopt_id, token_pos, *stub_entry, |
1342 RawPcDescriptors::kUnoptStaticCall, locs); | 1305 RawPcDescriptors::kUnoptStaticCall, locs); |
1343 __ Drop(argument_count); | 1306 __ Drop(argument_count); |
1344 } | 1307 } |
1345 | 1308 |
1346 | |
1347 void FlowGraphCompiler::EmitOptimizedStaticCall( | 1309 void FlowGraphCompiler::EmitOptimizedStaticCall( |
1348 const Function& function, | 1310 const Function& function, |
1349 const Array& arguments_descriptor, | 1311 const Array& arguments_descriptor, |
1350 intptr_t argument_count, | 1312 intptr_t argument_count, |
1351 intptr_t deopt_id, | 1313 intptr_t deopt_id, |
1352 TokenPosition token_pos, | 1314 TokenPosition token_pos, |
1353 LocationSummary* locs) { | 1315 LocationSummary* locs) { |
1354 ASSERT(!function.IsClosureFunction()); | 1316 ASSERT(!function.IsClosureFunction()); |
1355 if (function.HasOptionalParameters() || | 1317 if (function.HasOptionalParameters() || |
1356 (FLAG_reify_generic_functions && function.IsGeneric())) { | 1318 (FLAG_reify_generic_functions && function.IsGeneric())) { |
1357 __ LoadObject(R4, arguments_descriptor); | 1319 __ LoadObject(R4, arguments_descriptor); |
1358 } else { | 1320 } else { |
1359 __ LoadImmediate(R4, 0); // GC safe smi zero because of stub. | 1321 __ LoadImmediate(R4, 0); // GC safe smi zero because of stub. |
1360 } | 1322 } |
1361 // Do not use the code from the function, but let the code be patched so that | 1323 // Do not use the code from the function, but let the code be patched so that |
1362 // we can record the outgoing edges to other code. | 1324 // we can record the outgoing edges to other code. |
1363 GenerateStaticDartCall(deopt_id, token_pos, | 1325 GenerateStaticDartCall(deopt_id, token_pos, |
1364 *StubCode::CallStaticFunction_entry(), | 1326 *StubCode::CallStaticFunction_entry(), |
1365 RawPcDescriptors::kOther, locs, function); | 1327 RawPcDescriptors::kOther, locs, function); |
1366 __ Drop(argument_count); | 1328 __ Drop(argument_count); |
1367 } | 1329 } |
1368 | 1330 |
1369 | |
1370 Condition FlowGraphCompiler::EmitEqualityRegConstCompare( | 1331 Condition FlowGraphCompiler::EmitEqualityRegConstCompare( |
1371 Register reg, | 1332 Register reg, |
1372 const Object& obj, | 1333 const Object& obj, |
1373 bool needs_number_check, | 1334 bool needs_number_check, |
1374 TokenPosition token_pos, | 1335 TokenPosition token_pos, |
1375 intptr_t deopt_id) { | 1336 intptr_t deopt_id) { |
1376 if (needs_number_check) { | 1337 if (needs_number_check) { |
1377 ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()); | 1338 ASSERT(!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()); |
1378 __ Push(reg); | 1339 __ Push(reg); |
1379 __ PushObject(obj); | 1340 __ PushObject(obj); |
1380 if (is_optimizing()) { | 1341 if (is_optimizing()) { |
1381 __ BranchLinkPatchable( | 1342 __ BranchLinkPatchable( |
1382 *StubCode::OptimizedIdenticalWithNumberCheck_entry()); | 1343 *StubCode::OptimizedIdenticalWithNumberCheck_entry()); |
1383 } else { | 1344 } else { |
1384 __ BranchLinkPatchable( | 1345 __ BranchLinkPatchable( |
1385 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); | 1346 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); |
1386 } | 1347 } |
1387 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos); | 1348 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos); |
1388 // Stub returns result in flags (result of a cmp, we need Z computed). | 1349 // Stub returns result in flags (result of a cmp, we need Z computed). |
1389 __ Drop(1); // Discard constant. | 1350 __ Drop(1); // Discard constant. |
1390 __ Pop(reg); // Restore 'reg'. | 1351 __ Pop(reg); // Restore 'reg'. |
1391 } else { | 1352 } else { |
1392 __ CompareObject(reg, obj); | 1353 __ CompareObject(reg, obj); |
1393 } | 1354 } |
1394 return EQ; | 1355 return EQ; |
1395 } | 1356 } |
1396 | 1357 |
1397 | |
1398 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left, | 1358 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left, |
1399 Register right, | 1359 Register right, |
1400 bool needs_number_check, | 1360 bool needs_number_check, |
1401 TokenPosition token_pos, | 1361 TokenPosition token_pos, |
1402 intptr_t deopt_id) { | 1362 intptr_t deopt_id) { |
1403 if (needs_number_check) { | 1363 if (needs_number_check) { |
1404 __ Push(left); | 1364 __ Push(left); |
1405 __ Push(right); | 1365 __ Push(right); |
1406 if (is_optimizing()) { | 1366 if (is_optimizing()) { |
1407 __ BranchLinkPatchable( | 1367 __ BranchLinkPatchable( |
1408 *StubCode::OptimizedIdenticalWithNumberCheck_entry()); | 1368 *StubCode::OptimizedIdenticalWithNumberCheck_entry()); |
1409 } else { | 1369 } else { |
1410 __ BranchLinkPatchable( | 1370 __ BranchLinkPatchable( |
1411 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); | 1371 *StubCode::UnoptimizedIdenticalWithNumberCheck_entry()); |
1412 } | 1372 } |
1413 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos); | 1373 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos); |
1414 // Stub returns result in flags (result of a cmp, we need Z computed). | 1374 // Stub returns result in flags (result of a cmp, we need Z computed). |
1415 __ Pop(right); | 1375 __ Pop(right); |
1416 __ Pop(left); | 1376 __ Pop(left); |
1417 } else { | 1377 } else { |
1418 __ CompareRegisters(left, right); | 1378 __ CompareRegisters(left, right); |
1419 } | 1379 } |
1420 return EQ; | 1380 return EQ; |
1421 } | 1381 } |
1422 | 1382 |
1423 | |
1424 // This function must be in sync with FlowGraphCompiler::RecordSafepoint and | 1383 // This function must be in sync with FlowGraphCompiler::RecordSafepoint and |
1425 // FlowGraphCompiler::SlowPathEnvironmentFor. | 1384 // FlowGraphCompiler::SlowPathEnvironmentFor. |
1426 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) { | 1385 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) { |
1427 #if defined(DEBUG) | 1386 #if defined(DEBUG) |
1428 locs->CheckWritableInputs(); | 1387 locs->CheckWritableInputs(); |
1429 ClobberDeadTempRegisters(locs); | 1388 ClobberDeadTempRegisters(locs); |
1430 #endif | 1389 #endif |
1431 | 1390 |
1432 // TODO(vegorov): consider saving only caller save (volatile) registers. | 1391 // TODO(vegorov): consider saving only caller save (volatile) registers. |
1433 const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount(); | 1392 const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount(); |
(...skipping 11 matching lines...) |
1445 // The order in which the registers are pushed must match the order | 1404 // The order in which the registers are pushed must match the order |
1446 // in which the registers are encoded in the safe point's stack map. | 1405 // in which the registers are encoded in the safe point's stack map. |
1447 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) { | 1406 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) { |
1448 Register reg = static_cast<Register>(i); | 1407 Register reg = static_cast<Register>(i); |
1449 if (locs->live_registers()->ContainsRegister(reg)) { | 1408 if (locs->live_registers()->ContainsRegister(reg)) { |
1450 __ Push(reg); | 1409 __ Push(reg); |
1451 } | 1410 } |
1452 } | 1411 } |
1453 } | 1412 } |
1454 | 1413 |
1455 | |
1456 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) { | 1414 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) { |
1457 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) { | 1415 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) { |
1458 Register reg = static_cast<Register>(i); | 1416 Register reg = static_cast<Register>(i); |
1459 if (locs->live_registers()->ContainsRegister(reg)) { | 1417 if (locs->live_registers()->ContainsRegister(reg)) { |
1460 __ Pop(reg); | 1418 __ Pop(reg); |
1461 } | 1419 } |
1462 } | 1420 } |
1463 | 1421 |
1464 const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount(); | 1422 const intptr_t fpu_regs_count = locs->live_registers()->FpuRegisterCount(); |
1465 if (fpu_regs_count > 0) { | 1423 if (fpu_regs_count > 0) { |
1466 // Fpu registers have the lowest register number at the lowest address. | 1424 // Fpu registers have the lowest register number at the lowest address. |
1467 for (intptr_t i = 0; i < kNumberOfVRegisters; ++i) { | 1425 for (intptr_t i = 0; i < kNumberOfVRegisters; ++i) { |
1468 VRegister fpu_reg = static_cast<VRegister>(i); | 1426 VRegister fpu_reg = static_cast<VRegister>(i); |
1469 if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) { | 1427 if (locs->live_registers()->ContainsFpuRegister(fpu_reg)) { |
1470 __ PopQuad(fpu_reg); | 1428 __ PopQuad(fpu_reg); |
1471 } | 1429 } |
1472 } | 1430 } |
1473 } | 1431 } |
1474 } | 1432 } |
1475 | 1433 |
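// A sketch of the stack layout the save/restore pair agrees on (editorial;
// the FPU-saving half of SaveLiveRegisters is elided above, so that part is
// inferred from the restore loop and its comment):
//
//   higher addresses
//     ...                      (frame before SaveLiveRegisters)
//     [live FPU quads]         lowest-numbered register at the lowest
//     ...                      address of the FPU block
//     [live CPU registers]     pushed from the highest index down, so the
//     ...                      lowest-numbered one ends up at SP
//   lower addresses            <- SP after SaveLiveRegisters
//
// RestoreLiveRegisters therefore pops the CPU registers from the lowest
// index up and then the FPU quads from the lowest index up, mirroring the
// save order; RecordSafepoint must encode the same order in the stack map.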
1476 | |
1477 #if defined(DEBUG) | 1434 #if defined(DEBUG) |
1478 void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) { | 1435 void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) { |
1479 // Clobber temporaries that have not been manually preserved. | 1436 // Clobber temporaries that have not been manually preserved. |
1480 for (intptr_t i = 0; i < locs->temp_count(); ++i) { | 1437 for (intptr_t i = 0; i < locs->temp_count(); ++i) { |
1481 Location tmp = locs->temp(i); | 1438 Location tmp = locs->temp(i); |
1482 // TODO(zerny): clobber non-live temporary FPU registers. | 1439 // TODO(zerny): clobber non-live temporary FPU registers. |
1483 if (tmp.IsRegister() && | 1440 if (tmp.IsRegister() && |
1484 !locs->live_registers()->ContainsRegister(tmp.reg())) { | 1441 !locs->live_registers()->ContainsRegister(tmp.reg())) { |
1485 __ movz(tmp.reg(), Immediate(0xf7), 0); | 1442 __ movz(tmp.reg(), Immediate(0xf7), 0); |
1486 } | 1443 } |
1487 } | 1444 } |
1488 } | 1445 } |
1489 #endif | 1446 #endif |
1490 | 1447 |
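// Editorial note on the DEBUG-only clobbering above: temps that the
// instruction did not ask to preserve may legally hold garbage after a
// runtime call, so filling them with a recognizable poison value makes any
// accidental later read fail loudly. movz with shift 0 writes the 16-bit
// immediate into bits [15:0] and zeroes the rest, so the register holds
// exactly 0xf7 afterwards.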
1491 | |
1492 void FlowGraphCompiler::EmitTestAndCallLoadReceiver( | 1448 void FlowGraphCompiler::EmitTestAndCallLoadReceiver( |
1493 intptr_t argument_count, | 1449 intptr_t argument_count, |
1494 const Array& arguments_descriptor) { | 1450 const Array& arguments_descriptor) { |
1495 __ Comment("EmitTestAndCall"); | 1451 __ Comment("EmitTestAndCall"); |
1496 // Load receiver into R0. | 1452 // Load receiver into R0. |
1497 __ LoadFromOffset(R0, SP, (argument_count - 1) * kWordSize); | 1453 __ LoadFromOffset(R0, SP, (argument_count - 1) * kWordSize); |
1498 __ LoadObject(R4, arguments_descriptor); | 1454 __ LoadObject(R4, arguments_descriptor); |
1499 } | 1455 } |
1500 | 1456 |
1501 | |
1502 void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) { | 1457 void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) { |
1503 __ tsti(R0, Immediate(kSmiTagMask)); | 1458 __ tsti(R0, Immediate(kSmiTagMask)); |
1504 // Jump if receiver is not Smi. | 1459 // Jump if receiver is not Smi. |
1505 __ b(label, if_smi ? EQ : NE); | 1460 __ b(label, if_smi ? EQ : NE); |
1506 } | 1461 } |
1507 | 1462 |
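// A minimal sketch of the tag test above (assuming the standard Dart VM
// tagging constants kSmiTag == 0 and kSmiTagMask == 1): a Smi keeps its low
// bit clear, so `tsti R0, #1` sets Z exactly when R0 holds a Smi.
//
//   bool IsRawSmi(intptr_t raw) {              // hypothetical helper
//     return (raw & kSmiTagMask) == kSmiTag;   // low bit 0 => Smi
//   }
//
// With if_smi true the branch condition is EQ (taken for a Smi receiver);
// with if_smi false it is NE (taken for a non-Smi receiver).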
1508 | |
1509 void FlowGraphCompiler::EmitTestAndCallLoadCid() { | 1463 void FlowGraphCompiler::EmitTestAndCallLoadCid() { |
1510 __ LoadClassId(R2, R0); | 1464 __ LoadClassId(R2, R0); |
1511 } | 1465 } |
1512 | 1466 |
1513 | |
1514 int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label, | 1467 int FlowGraphCompiler::EmitTestAndCallCheckCid(Label* next_label, |
1515 const CidRange& range, | 1468 const CidRange& range, |
1516 int bias) { | 1469 int bias) { |
1517 intptr_t cid_start = range.cid_start; | 1470 intptr_t cid_start = range.cid_start; |
1518 if (range.IsSingleCid()) { | 1471 if (range.IsSingleCid()) { |
1519 __ CompareImmediate(R2, cid_start - bias); | 1472 __ CompareImmediate(R2, cid_start - bias); |
1520 __ b(next_label, NE); | 1473 __ b(next_label, NE); |
1521 } else { | 1474 } else { |
1522 __ AddImmediate(R2, bias - cid_start); | 1475 __ AddImmediate(R2, bias - cid_start); |
1523 bias = cid_start; | 1476 bias = cid_start; |
1524 __ CompareImmediate(R2, range.Extent()); | 1477 __ CompareImmediate(R2, range.Extent()); |
1525 __ b(next_label, HI); // Unsigned higher. | 1478 __ b(next_label, HI); // Unsigned higher. |
1526 } | 1479 } |
1527 return bias; | 1480 return bias; |
1528 } | 1481 } |
1529 | 1482 |
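// A minimal sketch (editorial) of the biased range check above: testing
//
//   cid_start <= cid && cid <= cid_start + range.Extent()
//
// collapses to a single unsigned comparison once cid_start is subtracted,
// because values below cid_start wrap around to large unsigned numbers:
//
//   bool InCidRange(uword cid, uword cid_start, uword extent) {
//     return (cid - cid_start) <= extent;   // hypothetical helper
//   }
//
// R2 already holds `cid - bias` from the previous check in the chain, so
// AddImmediate(R2, bias - cid_start) rebases it to `cid - cid_start`, the
// new bias (cid_start) is returned for the next check, and the HI branch
// skips to the next range when the class id falls outside this one.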
1530 | |
1531 #undef __ | 1483 #undef __ |
1532 #define __ compiler_->assembler()-> | 1484 #define __ compiler_->assembler()-> |
1533 | 1485 |
1534 | |
1535 void ParallelMoveResolver::EmitMove(int index) { | 1486 void ParallelMoveResolver::EmitMove(int index) { |
1536 MoveOperands* move = moves_[index]; | 1487 MoveOperands* move = moves_[index]; |
1537 const Location source = move->src(); | 1488 const Location source = move->src(); |
1538 const Location destination = move->dest(); | 1489 const Location destination = move->dest(); |
1539 | 1490 |
1540 if (source.IsRegister()) { | 1491 if (source.IsRegister()) { |
1541 if (destination.IsRegister()) { | 1492 if (destination.IsRegister()) { |
1542 __ mov(destination.reg(), source.reg()); | 1493 __ mov(destination.reg(), source.reg()); |
1543 } else { | 1494 } else { |
1544 ASSERT(destination.IsStackSlot()); | 1495 ASSERT(destination.IsStackSlot()); |
(...skipping 92 matching lines...) |
1637 } else { | 1588 } else { |
1638 __ LoadObject(tmp.reg(), constant); | 1589 __ LoadObject(tmp.reg(), constant); |
1639 } | 1590 } |
1640 __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset); | 1591 __ StoreToOffset(tmp.reg(), destination.base_reg(), dest_offset); |
1641 } | 1592 } |
1642 } | 1593 } |
1643 | 1594 |
1644 move->Eliminate(); | 1595 move->Eliminate(); |
1645 } | 1596 } |
1646 | 1597 |
1647 | |
1648 void ParallelMoveResolver::EmitSwap(int index) { | 1598 void ParallelMoveResolver::EmitSwap(int index) { |
1649 MoveOperands* move = moves_[index]; | 1599 MoveOperands* move = moves_[index]; |
1650 const Location source = move->src(); | 1600 const Location source = move->src(); |
1651 const Location destination = move->dest(); | 1601 const Location destination = move->dest(); |
1652 | 1602 |
1653 if (source.IsRegister() && destination.IsRegister()) { | 1603 if (source.IsRegister() && destination.IsRegister()) { |
1654 ASSERT(source.reg() != TMP); | 1604 ASSERT(source.reg() != TMP); |
1655 ASSERT(destination.reg() != TMP); | 1605 ASSERT(destination.reg() != TMP); |
1656 __ mov(TMP, source.reg()); | 1606 __ mov(TMP, source.reg()); |
1657 __ mov(source.reg(), destination.reg()); | 1607 __ mov(source.reg(), destination.reg()); |
(...skipping 68 matching lines...) |
1726 for (int i = 0; i < moves_.length(); ++i) { | 1676 for (int i = 0; i < moves_.length(); ++i) { |
1727 const MoveOperands& other_move = *moves_[i]; | 1677 const MoveOperands& other_move = *moves_[i]; |
1728 if (other_move.Blocks(source)) { | 1678 if (other_move.Blocks(source)) { |
1729 moves_[i]->set_src(destination); | 1679 moves_[i]->set_src(destination); |
1730 } else if (other_move.Blocks(destination)) { | 1680 } else if (other_move.Blocks(destination)) { |
1731 moves_[i]->set_src(source); | 1681 moves_[i]->set_src(source); |
1732 } | 1682 } |
1733 } | 1683 } |
1734 } | 1684 } |
1735 | 1685 |
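// An editorial example of the rewrite loop above: EmitSwap has just
// exchanged the contents of `source` and `destination`, so any pending move
// that was going to read one of them must now read the other. With
//
//   A -> B   (emitted as a swap of A and B to break a cycle)
//   B -> C   (still pending)
//
// the value that used to be in B now sits in A, so the pending move's
// source is rewritten from B to A and it is later emitted as A -> C.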
1736 | |
1737 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst, | 1686 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst, |
1738 const Address& src) { | 1687 const Address& src) { |
1739 UNREACHABLE(); | 1688 UNREACHABLE(); |
1740 } | 1689 } |
1741 | 1690 |
1742 | |
1743 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) { | 1691 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) { |
1744 UNREACHABLE(); | 1692 UNREACHABLE(); |
1745 } | 1693 } |
1746 | 1694 |
1747 | |
1748 // Do not call or implement this function. Instead, use the form below that | 1695 // Do not call or implement this function. Instead, use the form below that |
1749 // uses an offset from the frame pointer instead of an Address. | 1696 // uses an offset from the frame pointer instead of an Address. |
1750 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { | 1697 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { |
1751 UNREACHABLE(); | 1698 UNREACHABLE(); |
1752 } | 1699 } |
1753 | 1700 |
1754 | |
1755 // Do not call or implement this function. Instead, use the form below that | 1701 // Do not call or implement this function. Instead, use the form below that |
1756 // uses offsets from the frame pointer instead of Addresses. | 1702 // uses offsets from the frame pointer instead of Addresses. |
1757 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { | 1703 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { |
1758 UNREACHABLE(); | 1704 UNREACHABLE(); |
1759 } | 1705 } |
1760 | 1706 |
1761 | |
1762 void ParallelMoveResolver::Exchange(Register reg, | 1707 void ParallelMoveResolver::Exchange(Register reg, |
1763 Register base_reg, | 1708 Register base_reg, |
1764 intptr_t stack_offset) { | 1709 intptr_t stack_offset) { |
1765 ScratchRegisterScope tmp(this, reg); | 1710 ScratchRegisterScope tmp(this, reg); |
1766 __ mov(tmp.reg(), reg); | 1711 __ mov(tmp.reg(), reg); |
1767 __ LoadFromOffset(reg, base_reg, stack_offset); | 1712 __ LoadFromOffset(reg, base_reg, stack_offset); |
1768 __ StoreToOffset(tmp.reg(), base_reg, stack_offset); | 1713 __ StoreToOffset(tmp.reg(), base_reg, stack_offset); |
1769 } | 1714 } |
1770 | 1715 |
1771 | |
1772 void ParallelMoveResolver::Exchange(Register base_reg1, | 1716 void ParallelMoveResolver::Exchange(Register base_reg1, |
1773 intptr_t stack_offset1, | 1717 intptr_t stack_offset1, |
1774 Register base_reg2, | 1718 Register base_reg2, |
1775 intptr_t stack_offset2) { | 1719 intptr_t stack_offset2) { |
1776 ScratchRegisterScope tmp1(this, kNoRegister); | 1720 ScratchRegisterScope tmp1(this, kNoRegister); |
1777 ScratchRegisterScope tmp2(this, tmp1.reg()); | 1721 ScratchRegisterScope tmp2(this, tmp1.reg()); |
1778 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1); | 1722 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1); |
1779 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2); | 1723 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2); |
1780 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2); | 1724 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2); |
1781 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1); | 1725 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1); |
1782 } | 1726 } |
1783 | 1727 |
1784 | |
1785 void ParallelMoveResolver::SpillScratch(Register reg) { | 1728 void ParallelMoveResolver::SpillScratch(Register reg) { |
1786 __ Push(reg); | 1729 __ Push(reg); |
1787 } | 1730 } |
1788 | 1731 |
1789 | |
1790 void ParallelMoveResolver::RestoreScratch(Register reg) { | 1732 void ParallelMoveResolver::RestoreScratch(Register reg) { |
1791 __ Pop(reg); | 1733 __ Pop(reg); |
1792 } | 1734 } |
1793 | 1735 |
1794 | |
1795 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { | 1736 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { |
1796 __ PushDouble(reg); | 1737 __ PushDouble(reg); |
1797 } | 1738 } |
1798 | 1739 |
1799 | |
1800 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { | 1740 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { |
1801 __ PopDouble(reg); | 1741 __ PopDouble(reg); |
1802 } | 1742 } |
1803 | 1743 |
1804 | |
1805 #undef __ | 1744 #undef __ |
1806 | 1745 |
1807 } // namespace dart | 1746 } // namespace dart |
1808 | 1747 |
1809 #endif // defined TARGET_ARCH_ARM64 | 1748 #endif // defined TARGET_ARCH_ARM64 |