Chromium Code Reviews

Side by Side Diff: runtime/vm/flow_graph_compiler_x64.cc

Issue 1268783003: Simplify constant pool usage in x64 code generator (by removing extra argument) (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Created 5 years, 4 months ago
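
The shape of the change, as a minimal sketch assembled from lines in this diff (the before/after pairing itself is illustrative): macro-assembler helpers that read the object pool no longer take an explicit pool-pointer register (PP), because the assembler now tracks pool availability itself via constant_pool_allowed().

    // Before: every pool-touching helper named the pool pointer explicitly.
    __ LoadObject(RAX, Bool::True(), PP);
    __ CompareObject(RDX, Object::null_object(), PP);

    // After: the assembler owns the pool-pointer bookkeeping.
    __ LoadObject(RAX, Bool::True());
    __ CompareObject(RDX, Object::null_object());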
OLD | NEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
6 #if defined(TARGET_ARCH_X64) 6 #if defined(TARGET_ARCH_X64)
7 7
8 #include "vm/flow_graph_compiler.h" 8 #include "vm/flow_graph_compiler.h"
9 9
10 #include "vm/ast_printer.h" 10 #include "vm/ast_printer.h"
(...skipping 49 matching lines...)
60 60
61 61
62 bool FlowGraphCompiler::SupportsHardwareDivision() { 62 bool FlowGraphCompiler::SupportsHardwareDivision() {
63 return true; 63 return true;
64 } 64 }
65 65
66 66
67 void FlowGraphCompiler::EnterIntrinsicMode() { 67 void FlowGraphCompiler::EnterIntrinsicMode() {
68 ASSERT(!intrinsic_mode()); 68 ASSERT(!intrinsic_mode());
69 intrinsic_mode_ = true; 69 intrinsic_mode_ = true;
70 assembler()->set_constant_pool_allowed(false); 70 ASSERT(!assembler()->constant_pool_allowed());
71 } 71 }
72 72
73 73
74 void FlowGraphCompiler::ExitIntrinsicMode() { 74 void FlowGraphCompiler::ExitIntrinsicMode() {
75 ASSERT(intrinsic_mode()); 75 ASSERT(intrinsic_mode());
76 intrinsic_mode_ = false; 76 intrinsic_mode_ = false;
77 assembler()->set_constant_pool_allowed(true);
srdjan 2015/07/30 22:44:11 Shouldn't this be set (if at all) to whatever it was before?
regis 2015/07/30 23:04:16 The improved code maintains the state of constant_pool_allowed.
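(A hedged reading of the exchange above, based on the asserts added in this file: constant_pool_allowed now follows the Dart frame rather than intrinsic mode, so Enter/ExitIntrinsicMode only assert the flag instead of toggling it. Sketch of the assumed invariant, with EnterDartFrame/LeaveDartFrame as the points where the flag flips:

    __ EnterDartFrame(...);   // Frame setup establishes PP; pool access on.
    ...                       // Pool-touching helpers are legal here.
    __ LeaveDartFrame();      // Caller state restored; pool access off again.
    ASSERT(!assembler()->constant_pool_allowed());
)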
78 } 77 }
79 78
80 79
81 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler, 80 RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
82 DeoptInfoBuilder* builder, 81 DeoptInfoBuilder* builder,
83 const Array& deopt_table) { 82 const Array& deopt_table) {
84 if (deopt_env_ == NULL) { 83 if (deopt_env_ == NULL) {
85 ++builder->current_info_number_; 84 ++builder->current_info_number_;
86 return TypedData::null(); 85 return TypedData::null();
87 } 86 }
(...skipping 94 matching lines...)
182 Assembler* assem = compiler->assembler(); 181 Assembler* assem = compiler->assembler();
183 #define __ assem-> 182 #define __ assem->
184 __ Comment("%s", Name()); 183 __ Comment("%s", Name());
185 __ Bind(entry_label()); 184 __ Bind(entry_label());
186 if (FLAG_trap_on_deoptimization) { 185 if (FLAG_trap_on_deoptimization) {
187 __ int3(); 186 __ int3();
188 } 187 }
189 188
190 ASSERT(deopt_env() != NULL); 189 ASSERT(deopt_env() != NULL);
191 190
192 __ Call(&StubCode::DeoptimizeLabel(), PP); 191 __ Call(&StubCode::DeoptimizeLabel());
193 set_pc_offset(assem->CodeSize()); 192 set_pc_offset(assem->CodeSize());
194 __ int3(); 193 __ int3();
195 #undef __ 194 #undef __
196 } 195 }
197 196
198 197
199 #define __ assembler()-> 198 #define __ assembler()->
200 199
201 200
202 // Fall through if bool_register contains null. 201 // Fall through if bool_register contains null.
203 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register, 202 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
204 Label* is_true, 203 Label* is_true,
205 Label* is_false) { 204 Label* is_false) {
206 Label fall_through; 205 Label fall_through;
207 __ CompareObject(bool_register, Object::null_object(), PP); 206 __ CompareObject(bool_register, Object::null_object());
208 __ j(EQUAL, &fall_through, Assembler::kNearJump); 207 __ j(EQUAL, &fall_through, Assembler::kNearJump);
209 __ CompareObject(bool_register, Bool::True(), PP); 208 __ CompareObject(bool_register, Bool::True());
210 __ j(EQUAL, is_true); 209 __ j(EQUAL, is_true);
211 __ jmp(is_false); 210 __ jmp(is_false);
212 __ Bind(&fall_through); 211 __ Bind(&fall_through);
213 } 212 }
214 213
215 214
216 // Clobbers RCX. 215 // Clobbers RCX.
217 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub( 216 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
218 TypeTestStubKind test_kind, 217 TypeTestStubKind test_kind,
219 Register instance_reg, 218 Register instance_reg,
220 Register type_arguments_reg, 219 Register type_arguments_reg,
221 Register temp_reg, 220 Register temp_reg,
222 Label* is_instance_lbl, 221 Label* is_instance_lbl,
223 Label* is_not_instance_lbl) { 222 Label* is_not_instance_lbl) {
224 const SubtypeTestCache& type_test_cache = 223 const SubtypeTestCache& type_test_cache =
225 SubtypeTestCache::ZoneHandle(SubtypeTestCache::New()); 224 SubtypeTestCache::ZoneHandle(SubtypeTestCache::New());
226 __ LoadUniqueObject(temp_reg, type_test_cache, PP); 225 __ LoadUniqueObject(temp_reg, type_test_cache);
227 __ pushq(temp_reg); // Subtype test cache. 226 __ pushq(temp_reg); // Subtype test cache.
228 __ pushq(instance_reg); // Instance. 227 __ pushq(instance_reg); // Instance.
229 if (test_kind == kTestTypeOneArg) { 228 if (test_kind == kTestTypeOneArg) {
230 ASSERT(type_arguments_reg == kNoRegister); 229 ASSERT(type_arguments_reg == kNoRegister);
231 __ PushObject(Object::null_object(), PP); 230 __ PushObject(Object::null_object());
232 __ Call(&StubCode::Subtype1TestCacheLabel(), PP); 231 __ Call(&StubCode::Subtype1TestCacheLabel());
233 } else if (test_kind == kTestTypeTwoArgs) { 232 } else if (test_kind == kTestTypeTwoArgs) {
234 ASSERT(type_arguments_reg == kNoRegister); 233 ASSERT(type_arguments_reg == kNoRegister);
235 __ PushObject(Object::null_object(), PP); 234 __ PushObject(Object::null_object());
236 __ Call(&StubCode::Subtype2TestCacheLabel(), PP); 235 __ Call(&StubCode::Subtype2TestCacheLabel());
237 } else if (test_kind == kTestTypeThreeArgs) { 236 } else if (test_kind == kTestTypeThreeArgs) {
238 __ pushq(type_arguments_reg); 237 __ pushq(type_arguments_reg);
239 __ Call(&StubCode::Subtype3TestCacheLabel(), PP); 238 __ Call(&StubCode::Subtype3TestCacheLabel());
240 } else { 239 } else {
241 UNREACHABLE(); 240 UNREACHABLE();
242 } 241 }
243 // Result is in RCX: null -> not found, otherwise Bool::True or Bool::False. 242 // Result is in RCX: null -> not found, otherwise Bool::True or Bool::False.
244 ASSERT(instance_reg != RCX); 243 ASSERT(instance_reg != RCX);
245 ASSERT(temp_reg != RCX); 244 ASSERT(temp_reg != RCX);
246 __ popq(instance_reg); // Discard. 245 __ popq(instance_reg); // Discard.
247 __ popq(instance_reg); // Restore receiver. 246 __ popq(instance_reg); // Restore receiver.
248 __ popq(temp_reg); // Discard. 247 __ popq(temp_reg); // Discard.
249 GenerateBoolToJump(RCX, is_instance_lbl, is_not_instance_lbl); 248 GenerateBoolToJump(RCX, is_instance_lbl, is_not_instance_lbl);
(...skipping 127 matching lines...)
377 // interfaces. 376 // interfaces.
378 // Bool interface can be implemented only by core class Bool. 377 // Bool interface can be implemented only by core class Bool.
379 if (type.IsBoolType()) { 378 if (type.IsBoolType()) {
380 __ cmpl(kClassIdReg, Immediate(kBoolCid)); 379 __ cmpl(kClassIdReg, Immediate(kBoolCid));
381 __ j(EQUAL, is_instance_lbl); 380 __ j(EQUAL, is_instance_lbl);
382 __ jmp(is_not_instance_lbl); 381 __ jmp(is_not_instance_lbl);
383 return false; 382 return false;
384 } 383 }
385 if (type.IsFunctionType()) { 384 if (type.IsFunctionType()) {
386 // Check if instance is a closure. 385 // Check if instance is a closure.
387 __ LoadClassById(R13, kClassIdReg, PP); 386 __ LoadClassById(R13, kClassIdReg);
388 __ movq(R13, FieldAddress(R13, Class::signature_function_offset())); 387 __ movq(R13, FieldAddress(R13, Class::signature_function_offset()));
389 __ CompareObject(R13, Object::null_object(), PP); 388 __ CompareObject(R13, Object::null_object());
390 __ j(NOT_EQUAL, is_instance_lbl); 389 __ j(NOT_EQUAL, is_instance_lbl);
391 } 390 }
392 // Custom checking for numbers (Smi, Mint, Bigint and Double). 391 // Custom checking for numbers (Smi, Mint, Bigint and Double).
393 // Note that instance is not Smi (checked above). 392 // Note that instance is not Smi (checked above).
394 if (type.IsSubtypeOf(Type::Handle(Type::Number()), NULL)) { 393 if (type.IsSubtypeOf(Type::Handle(Type::Number()), NULL)) {
395 GenerateNumberTypeCheck( 394 GenerateNumberTypeCheck(
396 kClassIdReg, type, is_instance_lbl, is_not_instance_lbl); 395 kClassIdReg, type, is_instance_lbl, is_not_instance_lbl);
397 return false; 396 return false;
398 } 397 }
399 if (type.IsStringType()) { 398 if (type.IsStringType()) {
(...skipping 12 matching lines...)
412 // TODO(srdjan): Implement a quicker subtype check, as type test 411 // TODO(srdjan): Implement a quicker subtype check, as type test
413 // arrays can grow too high, but they may be useful when optimizing 412 // arrays can grow too high, but they may be useful when optimizing
414 // code (type-feedback). 413 // code (type-feedback).
415 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup( 414 RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
416 intptr_t token_pos, 415 intptr_t token_pos,
417 const Class& type_class, 416 const Class& type_class,
418 Label* is_instance_lbl, 417 Label* is_instance_lbl,
419 Label* is_not_instance_lbl) { 418 Label* is_not_instance_lbl) {
420 __ Comment("Subtype1TestCacheLookup"); 419 __ Comment("Subtype1TestCacheLookup");
421 const Register kInstanceReg = RAX; 420 const Register kInstanceReg = RAX;
422 __ LoadClass(R10, kInstanceReg, PP); 421 __ LoadClass(R10, kInstanceReg);
423 // R10: instance class. 422 // R10: instance class.
424 // Check immediate superclass equality. 423 // Check immediate superclass equality.
425 __ movq(R13, FieldAddress(R10, Class::super_type_offset())); 424 __ movq(R13, FieldAddress(R10, Class::super_type_offset()));
426 __ movq(R13, FieldAddress(R13, Type::type_class_offset())); 425 __ movq(R13, FieldAddress(R13, Type::type_class_offset()));
427 __ CompareObject(R13, type_class, PP); 426 __ CompareObject(R13, type_class);
428 __ j(EQUAL, is_instance_lbl); 427 __ j(EQUAL, is_instance_lbl);
429 428
430 const Register kTypeArgumentsReg = kNoRegister; 429 const Register kTypeArgumentsReg = kNoRegister;
431 const Register kTempReg = R10; 430 const Register kTempReg = R10;
432 return GenerateCallSubtypeTestStub(kTestTypeOneArg, 431 return GenerateCallSubtypeTestStub(kTestTypeOneArg,
433 kInstanceReg, 432 kInstanceReg,
434 kTypeArgumentsReg, 433 kTypeArgumentsReg,
435 kTempReg, 434 kTempReg,
436 is_instance_lbl, 435 is_instance_lbl,
437 is_not_instance_lbl); 436 is_not_instance_lbl);
(...skipping 10 matching lines...)
448 Label* is_not_instance_lbl) { 447 Label* is_not_instance_lbl) {
449 __ Comment("UninstantiatedTypeTest"); 448 __ Comment("UninstantiatedTypeTest");
450 ASSERT(!type.IsInstantiated()); 449 ASSERT(!type.IsInstantiated());
451 // Skip check if destination is a dynamic type. 450 // Skip check if destination is a dynamic type.
452 if (type.IsTypeParameter()) { 451 if (type.IsTypeParameter()) {
453 const TypeParameter& type_param = TypeParameter::Cast(type); 452 const TypeParameter& type_param = TypeParameter::Cast(type);
454 // Load instantiator (or null) and instantiator type arguments on stack. 453 // Load instantiator (or null) and instantiator type arguments on stack.
455 __ movq(RDX, Address(RSP, 0)); // Get instantiator type arguments. 454 __ movq(RDX, Address(RSP, 0)); // Get instantiator type arguments.
456 // RDX: instantiator type arguments. 455 // RDX: instantiator type arguments.
457 // Check if type arguments are null, i.e. equivalent to vector of dynamic. 456 // Check if type arguments are null, i.e. equivalent to vector of dynamic.
458 __ CompareObject(RDX, Object::null_object(), PP); 457 __ CompareObject(RDX, Object::null_object());
459 __ j(EQUAL, is_instance_lbl); 458 __ j(EQUAL, is_instance_lbl);
460 __ movq(RDI, 459 __ movq(RDI,
461 FieldAddress(RDX, TypeArguments::type_at_offset(type_param.index()))); 460 FieldAddress(RDX, TypeArguments::type_at_offset(type_param.index())));
462 // RDI: Concrete type of type. 461 // RDI: Concrete type of type.
463 // Check if type argument is dynamic. 462 // Check if type argument is dynamic.
464 __ CompareObject(RDI, Type::ZoneHandle(Type::DynamicType()), PP); 463 __ CompareObject(RDI, Type::ZoneHandle(Type::DynamicType()));
465 __ j(EQUAL, is_instance_lbl); 464 __ j(EQUAL, is_instance_lbl);
466 const Type& object_type = Type::ZoneHandle(Type::ObjectType()); 465 const Type& object_type = Type::ZoneHandle(Type::ObjectType());
467 __ CompareObject(RDI, object_type, PP); 466 __ CompareObject(RDI, object_type);
468 __ j(EQUAL, is_instance_lbl); 467 __ j(EQUAL, is_instance_lbl);
469 468
470 // For Smi check quickly against int and num interfaces. 469 // For Smi check quickly against int and num interfaces.
471 Label not_smi; 470 Label not_smi;
472 __ testq(RAX, Immediate(kSmiTagMask)); // Value is Smi? 471 __ testq(RAX, Immediate(kSmiTagMask)); // Value is Smi?
473 __ j(NOT_ZERO, &not_smi, Assembler::kNearJump); 472 __ j(NOT_ZERO, &not_smi, Assembler::kNearJump);
474 __ CompareObject(RDI, Type::ZoneHandle(Type::IntType()), PP); 473 __ CompareObject(RDI, Type::ZoneHandle(Type::IntType()));
475 __ j(EQUAL, is_instance_lbl); 474 __ j(EQUAL, is_instance_lbl);
476 __ CompareObject(RDI, Type::ZoneHandle(Type::Number()), PP); 475 __ CompareObject(RDI, Type::ZoneHandle(Type::Number()));
477 __ j(EQUAL, is_instance_lbl); 476 __ j(EQUAL, is_instance_lbl);
478 // Smi must be handled in runtime. 477 // Smi must be handled in runtime.
479 Label fall_through; 478 Label fall_through;
480 __ jmp(&fall_through); 479 __ jmp(&fall_through);
481 480
482 __ Bind(&not_smi); 481 __ Bind(&not_smi);
483 // RDX: instantiator type arguments. 482 // RDX: instantiator type arguments.
484 // RAX: instance. 483 // RAX: instance.
485 const Register kInstanceReg = RAX; 484 const Register kInstanceReg = RAX;
486 const Register kTypeArgumentsReg = RDX; 485 const Register kTypeArgumentsReg = RDX;
(...skipping 107 matching lines...)
594 // If type is instantiated and non-parameterized, we can inline code 593 // If type is instantiated and non-parameterized, we can inline code
595 // checking whether the tested instance is a Smi. 594 // checking whether the tested instance is a Smi.
596 if (type.IsInstantiated()) { 595 if (type.IsInstantiated()) {
597 // A null object is only an instance of Object and dynamic, which has 596 // A null object is only an instance of Object and dynamic, which has
598 // already been checked above (if the type is instantiated). So we can 597 // already been checked above (if the type is instantiated). So we can
599 // return false here if the instance is null (and if the type is 598 // return false here if the instance is null (and if the type is
600 // instantiated). 599 // instantiated).
601 // We can only inline this null check if the type is instantiated at compile 600 // We can only inline this null check if the type is instantiated at compile
602 // time, since an uninstantiated type at compile time could be Object or 601 // time, since an uninstantiated type at compile time could be Object or
603 // dynamic at run time. 602 // dynamic at run time.
604 __ CompareObject(RAX, Object::null_object(), PP); 603 __ CompareObject(RAX, Object::null_object());
605 __ j(EQUAL, type.IsNullType() ? &is_instance : &is_not_instance); 604 __ j(EQUAL, type.IsNullType() ? &is_instance : &is_not_instance);
606 } 605 }
607 606
608 // Generate inline instanceof test. 607 // Generate inline instanceof test.
609 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(); 608 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle();
610 test_cache = GenerateInlineInstanceof(token_pos, type, 609 test_cache = GenerateInlineInstanceof(token_pos, type,
611 &is_instance, &is_not_instance); 610 &is_instance, &is_not_instance);
612 611
613 // test_cache is null if there is no fall-through. 612 // test_cache is null if there is no fall-through.
614 Label done; 613 Label done;
615 if (!test_cache.IsNull()) { 614 if (!test_cache.IsNull()) {
616 // Generate runtime call. 615 // Generate runtime call.
617 __ movq(RDX, Address(RSP, 0)); // Get instantiator type arguments. 616 __ movq(RDX, Address(RSP, 0)); // Get instantiator type arguments.
618 __ movq(RCX, Address(RSP, kWordSize)); // Get instantiator. 617 __ movq(RCX, Address(RSP, kWordSize)); // Get instantiator.
619 __ PushObject(Object::null_object(), PP); // Make room for the result. 618 __ PushObject(Object::null_object()); // Make room for the result.
620 __ pushq(RAX); // Push the instance. 619 __ pushq(RAX); // Push the instance.
621 __ PushObject(type, PP); // Push the type. 620 __ PushObject(type); // Push the type.
622 __ pushq(RCX); // TODO(srdjan): Pass instantiator instead of null. 621 __ pushq(RCX); // TODO(srdjan): Pass instantiator instead of null.
623 __ pushq(RDX); // Instantiator type arguments. 622 __ pushq(RDX); // Instantiator type arguments.
624 __ LoadUniqueObject(RAX, test_cache, PP); 623 __ LoadUniqueObject(RAX, test_cache);
625 __ pushq(RAX); 624 __ pushq(RAX);
626 GenerateRuntimeCall(token_pos, 625 GenerateRuntimeCall(token_pos,
627 deopt_id, 626 deopt_id,
628 kInstanceofRuntimeEntry, 627 kInstanceofRuntimeEntry,
629 5, 628 5,
630 locs); 629 locs);
631 // Pop the parameters supplied to the runtime entry. The result of the 630 // Pop the parameters supplied to the runtime entry. The result of the
632 // instanceof runtime call will be left as the result of the operation. 631 // instanceof runtime call will be left as the result of the operation.
633 __ Drop(5); 632 __ Drop(5);
634 if (negate_result) { 633 if (negate_result) {
635 __ popq(RDX); 634 __ popq(RDX);
636 __ LoadObject(RAX, Bool::True(), PP); 635 __ LoadObject(RAX, Bool::True());
637 __ cmpq(RDX, RAX); 636 __ cmpq(RDX, RAX);
638 __ j(NOT_EQUAL, &done, Assembler::kNearJump); 637 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
639 __ LoadObject(RAX, Bool::False(), PP); 638 __ LoadObject(RAX, Bool::False());
640 } else { 639 } else {
641 __ popq(RAX); 640 __ popq(RAX);
642 } 641 }
643 __ jmp(&done, Assembler::kNearJump); 642 __ jmp(&done, Assembler::kNearJump);
644 } 643 }
645 __ Bind(&is_not_instance); 644 __ Bind(&is_not_instance);
646 __ LoadObject(RAX, Bool::Get(negate_result), PP); 645 __ LoadObject(RAX, Bool::Get(negate_result));
647 __ jmp(&done, Assembler::kNearJump); 646 __ jmp(&done, Assembler::kNearJump);
648 647
649 __ Bind(&is_instance); 648 __ Bind(&is_instance);
650 __ LoadObject(RAX, Bool::Get(!negate_result), PP); 649 __ LoadObject(RAX, Bool::Get(!negate_result));
651 __ Bind(&done); 650 __ Bind(&done);
652 __ popq(RDX); // Remove pushed instantiator type arguments. 651 __ popq(RDX); // Remove pushed instantiator type arguments.
653 __ popq(RCX); // Remove pushed instantiator. 652 __ popq(RCX); // Remove pushed instantiator.
654 } 653 }
655 654
656 655
657 // Optimize assignable type check by adding inlined tests for: 656 // Optimize assignable type check by adding inlined tests for:
658 // - NULL -> return NULL. 657 // - NULL -> return NULL.
659 // - Smi -> compile time subtype check (only if dst class is not parameterized). 658 // - Smi -> compile time subtype check (only if dst class is not parameterized).
660 // - Class equality (only if class is not parameterized). 659 // - Class equality (only if class is not parameterized).
(...skipping 13 matching lines...)
674 ASSERT(token_pos >= 0); 673 ASSERT(token_pos >= 0);
675 ASSERT(!dst_type.IsNull()); 674 ASSERT(!dst_type.IsNull());
676 ASSERT(dst_type.IsFinalized()); 675 ASSERT(dst_type.IsFinalized());
677 // Assignable check is skipped in FlowGraphBuilder, not here. 676 // Assignable check is skipped in FlowGraphBuilder, not here.
678 ASSERT(dst_type.IsMalformedOrMalbounded() || 677 ASSERT(dst_type.IsMalformedOrMalbounded() ||
679 (!dst_type.IsDynamicType() && !dst_type.IsObjectType())); 678 (!dst_type.IsDynamicType() && !dst_type.IsObjectType()));
680 __ pushq(RCX); // Store instantiator. 679 __ pushq(RCX); // Store instantiator.
681 __ pushq(RDX); // Store instantiator type arguments. 680 __ pushq(RDX); // Store instantiator type arguments.
682 // A null object is always assignable and is returned as result. 681 // A null object is always assignable and is returned as result.
683 Label is_assignable, runtime_call; 682 Label is_assignable, runtime_call;
684 __ CompareObject(RAX, Object::null_object(), PP); 683 __ CompareObject(RAX, Object::null_object());
685 __ j(EQUAL, &is_assignable); 684 __ j(EQUAL, &is_assignable);
686 685
687 // Generate throw new TypeError() if the type is malformed or malbounded. 686 // Generate throw new TypeError() if the type is malformed or malbounded.
688 if (dst_type.IsMalformedOrMalbounded()) { 687 if (dst_type.IsMalformedOrMalbounded()) {
689 __ PushObject(Object::null_object(), PP); // Make room for the result. 688 __ PushObject(Object::null_object()); // Make room for the result.
690 __ pushq(RAX); // Push the source object. 689 __ pushq(RAX); // Push the source object.
691 __ PushObject(dst_name, PP); // Push the name of the destination. 690 __ PushObject(dst_name); // Push the name of the destination.
692 __ PushObject(dst_type, PP); // Push the type of the destination. 691 __ PushObject(dst_type); // Push the type of the destination.
693 GenerateRuntimeCall(token_pos, 692 GenerateRuntimeCall(token_pos,
694 deopt_id, 693 deopt_id,
695 kBadTypeErrorRuntimeEntry, 694 kBadTypeErrorRuntimeEntry,
696 3, 695 3,
697 locs); 696 locs);
698 // We should never return here. 697 // We should never return here.
699 __ int3(); 698 __ int3();
700 699
701 __ Bind(&is_assignable); // For a null object. 700 __ Bind(&is_assignable); // For a null object.
702 __ popq(RDX); // Remove pushed instantiator type arguments. 701 __ popq(RDX); // Remove pushed instantiator type arguments.
703 __ popq(RCX); // Remove pushed instantiator. 702 __ popq(RCX); // Remove pushed instantiator.
704 return; 703 return;
705 } 704 }
706 705
707 // Generate inline type check, linking to runtime call if not assignable. 706 // Generate inline type check, linking to runtime call if not assignable.
708 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(); 707 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle();
709 test_cache = GenerateInlineInstanceof(token_pos, dst_type, 708 test_cache = GenerateInlineInstanceof(token_pos, dst_type,
710 &is_assignable, &runtime_call); 709 &is_assignable, &runtime_call);
711 710
712 __ Bind(&runtime_call); 711 __ Bind(&runtime_call);
713 __ movq(RDX, Address(RSP, 0)); // Get instantiator type arguments. 712 __ movq(RDX, Address(RSP, 0)); // Get instantiator type arguments.
714 __ movq(RCX, Address(RSP, kWordSize)); // Get instantiator. 713 __ movq(RCX, Address(RSP, kWordSize)); // Get instantiator.
715 __ PushObject(Object::null_object(), PP); // Make room for the result. 714 __ PushObject(Object::null_object()); // Make room for the result.
716 __ pushq(RAX); // Push the source object. 715 __ pushq(RAX); // Push the source object.
717 __ PushObject(dst_type, PP); // Push the type of the destination. 716 __ PushObject(dst_type); // Push the type of the destination.
718 __ pushq(RCX); // Instantiator. 717 __ pushq(RCX); // Instantiator.
719 __ pushq(RDX); // Instantiator type arguments. 718 __ pushq(RDX); // Instantiator type arguments.
720 __ PushObject(dst_name, PP); // Push the name of the destination. 719 __ PushObject(dst_name); // Push the name of the destination.
721 __ LoadUniqueObject(RAX, test_cache, PP); 720 __ LoadUniqueObject(RAX, test_cache);
722 __ pushq(RAX); 721 __ pushq(RAX);
723 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs); 722 GenerateRuntimeCall(token_pos, deopt_id, kTypeCheckRuntimeEntry, 6, locs);
724 // Pop the parameters supplied to the runtime entry. The result of the 723 // Pop the parameters supplied to the runtime entry. The result of the
725 // type check runtime call is the checked value. 724 // type check runtime call is the checked value.
726 __ Drop(6); 725 __ Drop(6);
727 __ popq(RAX); 726 __ popq(RAX);
728 727
729 __ Bind(&is_assignable); 728 __ Bind(&is_assignable);
730 __ popq(RDX); // Remove pushed instantiator type arguments. 729 __ popq(RDX); // Remove pushed instantiator type arguments.
731 __ popq(RCX); // Remove pushed instantiator. 730 __ popq(RCX); // Remove pushed instantiator.
(...skipping 31 matching lines...)
763 762
764 // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args, 763 // Check that min_num_pos_args <= num_pos_args <= max_num_pos_args,
765 // where num_pos_args is the number of positional arguments passed in. 764 // where num_pos_args is the number of positional arguments passed in.
766 const int min_num_pos_args = num_fixed_params; 765 const int min_num_pos_args = num_fixed_params;
767 const int max_num_pos_args = num_fixed_params + num_opt_pos_params; 766 const int max_num_pos_args = num_fixed_params + num_opt_pos_params;
768 767
769 __ movq(RCX, 768 __ movq(RCX,
770 FieldAddress(R10, ArgumentsDescriptor::positional_count_offset())); 769 FieldAddress(R10, ArgumentsDescriptor::positional_count_offset()));
771 // Check that min_num_pos_args <= num_pos_args. 770 // Check that min_num_pos_args <= num_pos_args.
772 Label wrong_num_arguments; 771 Label wrong_num_arguments;
773 __ CompareImmediate(RCX, Immediate(Smi::RawValue(min_num_pos_args)), PP); 772 __ CompareImmediate(RCX, Immediate(Smi::RawValue(min_num_pos_args)));
774 __ j(LESS, &wrong_num_arguments); 773 __ j(LESS, &wrong_num_arguments);
775 // Check that num_pos_args <= max_num_pos_args. 774 // Check that num_pos_args <= max_num_pos_args.
776 __ CompareImmediate(RCX, Immediate(Smi::RawValue(max_num_pos_args)), PP); 775 __ CompareImmediate(RCX, Immediate(Smi::RawValue(max_num_pos_args)));
777 __ j(GREATER, &wrong_num_arguments); 776 __ j(GREATER, &wrong_num_arguments);
778 777
779 // Copy positional arguments. 778 // Copy positional arguments.
780 // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied 779 // Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied
781 // to fp[kFirstLocalSlotFromFp - i]. 780 // to fp[kFirstLocalSlotFromFp - i].
782 781
783 __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset())); 782 __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
784 // Since RBX and RCX are Smi, use TIMES_4 instead of TIMES_8. 783 // Since RBX and RCX are Smi, use TIMES_4 instead of TIMES_8.
785 // Let RBX point to the last passed positional argument, i.e. to 784 // Let RBX point to the last passed positional argument, i.e. to
786 // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)]. 785 // fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)].
(...skipping 61 matching lines...)
848 // Let RDI point to the entry of the first named argument. 847 // Let RDI point to the entry of the first named argument.
849 __ leaq(RDI, 848 __ leaq(RDI,
850 FieldAddress(R10, ArgumentsDescriptor::first_named_entry_offset())); 849 FieldAddress(R10, ArgumentsDescriptor::first_named_entry_offset()));
851 for (int i = 0; i < num_opt_named_params; i++) { 850 for (int i = 0; i < num_opt_named_params; i++) {
852 Label load_default_value, assign_optional_parameter; 851 Label load_default_value, assign_optional_parameter;
853 const int param_pos = opt_param_position[i]; 852 const int param_pos = opt_param_position[i];
854 // Check if this named parameter was passed in. 853 // Check if this named parameter was passed in.
855 // Load RAX with the name of the argument. 854 // Load RAX with the name of the argument.
856 __ movq(RAX, Address(RDI, ArgumentsDescriptor::name_offset())); 855 __ movq(RAX, Address(RDI, ArgumentsDescriptor::name_offset()));
857 ASSERT(opt_param[i]->name().IsSymbol()); 856 ASSERT(opt_param[i]->name().IsSymbol());
858 __ CompareObject(RAX, opt_param[i]->name(), PP); 857 __ CompareObject(RAX, opt_param[i]->name());
859 __ j(NOT_EQUAL, &load_default_value, Assembler::kNearJump); 858 __ j(NOT_EQUAL, &load_default_value, Assembler::kNearJump);
860 // Load RAX with passed-in argument at provided arg_pos, i.e. at 859 // Load RAX with passed-in argument at provided arg_pos, i.e. at
861 // fp[kParamEndSlotFromFp + num_args - arg_pos]. 860 // fp[kParamEndSlotFromFp + num_args - arg_pos].
862 __ movq(RAX, Address(RDI, ArgumentsDescriptor::position_offset())); 861 __ movq(RAX, Address(RDI, ArgumentsDescriptor::position_offset()));
863 // RAX is arg_pos as Smi. 862 // RAX is arg_pos as Smi.
864 // Point to next named entry. 863 // Point to next named entry.
865 __ AddImmediate( 864 __ AddImmediate(
866 RDI, Immediate(ArgumentsDescriptor::named_entry_size()), PP); 865 RDI, Immediate(ArgumentsDescriptor::named_entry_size()));
867 __ negq(RAX); 866 __ negq(RAX);
868 Address argument_addr(RBX, RAX, TIMES_4, 0); // RAX is a negative Smi. 867 Address argument_addr(RBX, RAX, TIMES_4, 0); // RAX is a negative Smi.
869 __ movq(RAX, argument_addr); 868 __ movq(RAX, argument_addr);
870 __ jmp(&assign_optional_parameter, Assembler::kNearJump); 869 __ jmp(&assign_optional_parameter, Assembler::kNearJump);
871 __ Bind(&load_default_value); 870 __ Bind(&load_default_value);
872 // Load RAX with default argument. 871 // Load RAX with default argument.
873 const Object& value = Object::ZoneHandle( 872 const Object& value = Object::ZoneHandle(
874 parsed_function().default_parameter_values().At( 873 parsed_function().default_parameter_values().At(
875 param_pos - num_fixed_params)); 874 param_pos - num_fixed_params));
876 __ LoadObject(RAX, value, PP); 875 __ LoadObject(RAX, value);
877 __ Bind(&assign_optional_parameter); 876 __ Bind(&assign_optional_parameter);
878 // Assign RAX to fp[kFirstLocalSlotFromFp - param_pos]. 877 // Assign RAX to fp[kFirstLocalSlotFromFp - param_pos].
879 // We do not use the final allocation index of the variable here, i.e. 878 // We do not use the final allocation index of the variable here, i.e.
880 // scope->VariableAt(i)->index(), because captured variables still need 879 // scope->VariableAt(i)->index(), because captured variables still need
881 // to be copied to the context that is not yet allocated. 880 // to be copied to the context that is not yet allocated.
882 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; 881 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
883 const Address param_addr(RBP, computed_param_pos * kWordSize); 882 const Address param_addr(RBP, computed_param_pos * kWordSize);
884 __ movq(param_addr, RAX); 883 __ movq(param_addr, RAX);
885 } 884 }
886 delete[] opt_param; 885 delete[] opt_param;
887 delete[] opt_param_position; 886 delete[] opt_param_position;
888 if (check_correct_named_args) { 887 if (check_correct_named_args) {
889 // Check that RDI now points to the null terminator in the arguments 888 // Check that RDI now points to the null terminator in the arguments
890 // descriptor. 889 // descriptor.
891 __ LoadObject(TMP, Object::null_object(), PP); 890 __ LoadObject(TMP, Object::null_object());
892 __ cmpq(Address(RDI, 0), TMP); 891 __ cmpq(Address(RDI, 0), TMP);
893 __ j(EQUAL, &all_arguments_processed, Assembler::kNearJump); 892 __ j(EQUAL, &all_arguments_processed, Assembler::kNearJump);
894 } 893 }
895 } else { 894 } else {
896 ASSERT(num_opt_pos_params > 0); 895 ASSERT(num_opt_pos_params > 0);
897 __ movq(RCX, 896 __ movq(RCX,
898 FieldAddress(R10, ArgumentsDescriptor::positional_count_offset())); 897 FieldAddress(R10, ArgumentsDescriptor::positional_count_offset()));
899 __ SmiUntag(RCX); 898 __ SmiUntag(RCX);
900 for (int i = 0; i < num_opt_pos_params; i++) { 899 for (int i = 0; i < num_opt_pos_params; i++) {
901 Label next_parameter; 900 Label next_parameter;
902 // Handle this optional positional parameter only if k or fewer positional 901 // Handle this optional positional parameter only if k or fewer positional
903 // arguments have been passed, where k is param_pos, the position of this 902 // arguments have been passed, where k is param_pos, the position of this
904 // optional parameter in the formal parameter list. 903 // optional parameter in the formal parameter list.
905 const int param_pos = num_fixed_params + i; 904 const int param_pos = num_fixed_params + i;
906 __ CompareImmediate(RCX, Immediate(param_pos), PP); 905 __ CompareImmediate(RCX, Immediate(param_pos));
907 __ j(GREATER, &next_parameter, Assembler::kNearJump); 906 __ j(GREATER, &next_parameter, Assembler::kNearJump);
908 // Load RAX with default argument. 907 // Load RAX with default argument.
909 const Object& value = Object::ZoneHandle( 908 const Object& value = Object::ZoneHandle(
910 parsed_function().default_parameter_values().At(i)); 909 parsed_function().default_parameter_values().At(i));
911 __ LoadObject(RAX, value, PP); 910 __ LoadObject(RAX, value);
912 // Assign RAX to fp[kFirstLocalSlotFromFp - param_pos]. 911 // Assign RAX to fp[kFirstLocalSlotFromFp - param_pos].
913 // We do not use the final allocation index of the variable here, i.e. 912 // We do not use the final allocation index of the variable here, i.e.
914 // scope->VariableAt(i)->index(), because captured variables still need 913 // scope->VariableAt(i)->index(), because captured variables still need
915 // to be copied to the context that is not yet allocated. 914 // to be copied to the context that is not yet allocated.
916 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos; 915 const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
917 const Address param_addr(RBP, computed_param_pos * kWordSize); 916 const Address param_addr(RBP, computed_param_pos * kWordSize);
918 __ movq(param_addr, RAX); 917 __ movq(param_addr, RAX);
919 __ Bind(&next_parameter); 918 __ Bind(&next_parameter);
920 } 919 }
921 if (check_correct_named_args) { 920 if (check_correct_named_args) {
922 __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset())); 921 __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
923 __ SmiUntag(RBX); 922 __ SmiUntag(RBX);
924 // Check that RCX equals RBX, i.e. no named arguments passed. 923 // Check that RCX equals RBX, i.e. no named arguments passed.
925 __ cmpq(RCX, RBX); 924 __ cmpq(RCX, RBX);
926 __ j(EQUAL, &all_arguments_processed, Assembler::kNearJump); 925 __ j(EQUAL, &all_arguments_processed, Assembler::kNearJump);
927 } 926 }
928 } 927 }
929 928
930 __ Bind(&wrong_num_arguments); 929 __ Bind(&wrong_num_arguments);
931 if (function.IsClosureFunction()) { 930 if (function.IsClosureFunction()) {
931 ASSERT(assembler()->constant_pool_allowed());
932 __ LeaveDartFrame(); // The arguments are still on the stack. 932 __ LeaveDartFrame(); // The arguments are still on the stack.
933 ASSERT(!assembler()->constant_pool_allowed());
933 __ jmp(&StubCode::CallClosureNoSuchMethodLabel()); 934 __ jmp(&StubCode::CallClosureNoSuchMethodLabel());
935 __ set_constant_pool_allowed(true);
934 // The noSuchMethod call may return to the caller, but not here. 936 // The noSuchMethod call may return to the caller, but not here.
935 } else if (check_correct_named_args) { 937 } else if (check_correct_named_args) {
936 __ Stop("Wrong arguments"); 938 __ Stop("Wrong arguments");
937 } 939 }
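(Aside, a hedged reading of the new lines in this branch: LeaveDartFrame drops pool access together with the frame, and the jump to CallClosureNoSuchMethodLabel never falls through, so set_constant_pool_allowed(true) emits no code; it only re-arms the assembler's bookkeeping for the framed code emitted after this branch:

    __ LeaveDartFrame();                                // Pool access off with the frame.
    __ jmp(&StubCode::CallClosureNoSuchMethodLabel());  // Does not fall through.
    __ set_constant_pool_allowed(true);                 // Bookkeeping only, for code below.
)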
938 940
939 __ Bind(&all_arguments_processed); 941 __ Bind(&all_arguments_processed);
940 // Nullify originally passed arguments only after they have been copied and 942 // Nullify originally passed arguments only after they have been copied and
941 // checked, otherwise noSuchMethod would not see their original values. 943 // checked, otherwise noSuchMethod would not see their original values.
942 // This step can be skipped in case we decide that formal parameters are 944 // This step can be skipped in case we decide that formal parameters are
943 // implicitly final, since garbage collecting the unmodified value is not 945 // implicitly final, since garbage collecting the unmodified value is not
944 // an issue anymore. 946 // an issue anymore.
945 947
946 // R10 : arguments descriptor array. 948 // R10 : arguments descriptor array.
947 __ movq(RCX, FieldAddress(R10, ArgumentsDescriptor::count_offset())); 949 __ movq(RCX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
948 __ SmiUntag(RCX); 950 __ SmiUntag(RCX);
949 __ LoadObject(R12, Object::null_object(), PP); 951 __ LoadObject(R12, Object::null_object());
950 Label null_args_loop, null_args_loop_condition; 952 Label null_args_loop, null_args_loop_condition;
951 __ jmp(&null_args_loop_condition, Assembler::kNearJump); 953 __ jmp(&null_args_loop_condition, Assembler::kNearJump);
952 const Address original_argument_addr( 954 const Address original_argument_addr(
953 RBP, RCX, TIMES_8, (kParamEndSlotFromFp + 1) * kWordSize); 955 RBP, RCX, TIMES_8, (kParamEndSlotFromFp + 1) * kWordSize);
954 __ Bind(&null_args_loop); 956 __ Bind(&null_args_loop);
955 __ movq(original_argument_addr, R12); 957 __ movq(original_argument_addr, R12);
956 __ Bind(&null_args_loop_condition); 958 __ Bind(&null_args_loop_condition);
957 __ decq(RCX); 959 __ decq(RCX);
958 __ j(POSITIVE, &null_args_loop, Assembler::kNearJump); 960 __ j(POSITIVE, &null_args_loop, Assembler::kNearJump);
959 } 961 }
(...skipping 12 matching lines...)
972 974
973 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) { 975 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
974 // TOS: return address. 976 // TOS: return address.
975 // +1 : value 977 // +1 : value
976 // +2 : receiver. 978 // +2 : receiver.
977 // Sequence node has one store node and one return NULL node. 979 // Sequence node has one store node and one return NULL node.
978 __ Comment("Inlined Setter"); 980 __ Comment("Inlined Setter");
979 __ movq(RAX, Address(RSP, 2 * kWordSize)); // Receiver. 981 __ movq(RAX, Address(RSP, 2 * kWordSize)); // Receiver.
980 __ movq(RBX, Address(RSP, 1 * kWordSize)); // Value. 982 __ movq(RBX, Address(RSP, 1 * kWordSize)); // Value.
981 __ StoreIntoObject(RAX, FieldAddress(RAX, offset), RBX); 983 __ StoreIntoObject(RAX, FieldAddress(RAX, offset), RBX);
982 __ LoadObject(RAX, Object::null_object(), PP); 984 __ LoadObject(RAX, Object::null_object());
983 __ ret(); 985 __ ret();
984 } 986 }
985 987
986 988
987 // NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc 989 // NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc
988 // needs to be updated to match. 990 // needs to be updated to match.
989 void FlowGraphCompiler::EmitFrameEntry() { 991 void FlowGraphCompiler::EmitFrameEntry() {
990 ASSERT(Assembler::EntryPointToPcMarkerOffset() == 0); 992 ASSERT(Assembler::EntryPointToPcMarkerOffset() == 0);
991 993
992 const Function& function = parsed_function().function(); 994 const Function& function = parsed_function().function();
(...skipping 16 matching lines...)
1009 - flow_graph().num_stack_locals() 1011 - flow_graph().num_stack_locals()
1010 - flow_graph().num_copied_params(); 1012 - flow_graph().num_copied_params();
1011 ASSERT(extra_slots >= 0); 1013 ASSERT(extra_slots >= 0);
1012 __ EnterOsrFrame(extra_slots * kWordSize, new_pp, new_pc); 1014 __ EnterOsrFrame(extra_slots * kWordSize, new_pp, new_pc);
1013 } else { 1015 } else {
1014 if (CanOptimizeFunction() && 1016 if (CanOptimizeFunction() &&
1015 function.IsOptimizable() && 1017 function.IsOptimizable() &&
1016 (!is_optimizing() || may_reoptimize())) { 1018 (!is_optimizing() || may_reoptimize())) {
1017 const Register function_reg = RDI; 1019 const Register function_reg = RDI;
1018 // Load function object using the callee's pool pointer. 1020 // Load function object using the callee's pool pointer.
1019 __ LoadObject(function_reg, function, new_pp); 1021 __ LoadFunctionFromNewPool(function_reg, function, new_pp);
1020 1022
1021 // Patch point is after the eventually inlined function object. 1023 // Patch point is after the eventually inlined function object.
1022 entry_patch_pc_offset_ = assembler()->CodeSize(); 1024 entry_patch_pc_offset_ = assembler()->CodeSize();
1023 1025
1024 // Reoptimization of an optimized function is triggered by counting in 1026 // Reoptimization of an optimized function is triggered by counting in
1025 // IC stubs, but not at the entry of the function. 1027 // IC stubs, but not at the entry of the function.
1026 if (!is_optimizing()) { 1028 if (!is_optimizing()) {
1027 __ incl(FieldAddress(function_reg, Function::usage_counter_offset())); 1029 __ incl(FieldAddress(function_reg, Function::usage_counter_offset()));
1028 } 1030 }
1029 __ cmpl( 1031 __ cmpl(
(...skipping 12 matching lines...)
1042 } 1044 }
1043 } 1045 }
1044 1046
1045 1047
1046 void FlowGraphCompiler::CompileGraph() { 1048 void FlowGraphCompiler::CompileGraph() {
1047 InitCompiler(); 1049 InitCompiler();
1048 1050
1049 TryIntrinsify(); 1051 TryIntrinsify();
1050 1052
1051 EmitFrameEntry(); 1053 EmitFrameEntry();
1054 ASSERT(assembler()->constant_pool_allowed());
1052 1055
1053 const Function& function = parsed_function().function(); 1056 const Function& function = parsed_function().function();
1054 1057
1055 const int num_fixed_params = function.num_fixed_parameters(); 1058 const int num_fixed_params = function.num_fixed_parameters();
1056 const int num_copied_params = parsed_function().num_copied_params(); 1059 const int num_copied_params = parsed_function().num_copied_params();
1057 const int num_locals = parsed_function().num_stack_locals(); 1060 const int num_locals = parsed_function().num_stack_locals();
1058 1061
1059 // We check the number of passed arguments when we have to copy them due to 1062 // We check the number of passed arguments when we have to copy them due to
1060 // the presence of optional parameters. 1063 // the presence of optional parameters.
1061 // No such checking code is generated if only fixed parameters are declared, 1064 // No such checking code is generated if only fixed parameters are declared,
1062 // unless we are in debug mode or unless we are compiling a closure. 1065 // unless we are in debug mode or unless we are compiling a closure.
1063 if (num_copied_params == 0) { 1066 if (num_copied_params == 0) {
1064 #ifdef DEBUG 1067 #ifdef DEBUG
1065 ASSERT(!parsed_function().function().HasOptionalParameters()); 1068 ASSERT(!parsed_function().function().HasOptionalParameters());
1066 const bool check_arguments = !flow_graph().IsCompiledForOsr(); 1069 const bool check_arguments = !flow_graph().IsCompiledForOsr();
1067 #else 1070 #else
1068 const bool check_arguments = 1071 const bool check_arguments =
1069 function.IsClosureFunction() && !flow_graph().IsCompiledForOsr(); 1072 function.IsClosureFunction() && !flow_graph().IsCompiledForOsr();
1070 #endif 1073 #endif
1071 if (check_arguments) { 1074 if (check_arguments) {
1072 __ Comment("Check argument count"); 1075 __ Comment("Check argument count");
1073 // Check that exactly num_fixed arguments are passed in. 1076 // Check that exactly num_fixed arguments are passed in.
1074 Label correct_num_arguments, wrong_num_arguments; 1077 Label correct_num_arguments, wrong_num_arguments;
1075 __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset())); 1078 __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
1076 __ CompareImmediate(RAX, Immediate(Smi::RawValue(num_fixed_params)), PP); 1079 __ CompareImmediate(RAX, Immediate(Smi::RawValue(num_fixed_params)));
1077 __ j(NOT_EQUAL, &wrong_num_arguments, Assembler::kNearJump); 1080 __ j(NOT_EQUAL, &wrong_num_arguments, Assembler::kNearJump);
1078 __ cmpq(RAX, 1081 __ cmpq(RAX,
1079 FieldAddress(R10, 1082 FieldAddress(R10,
1080 ArgumentsDescriptor::positional_count_offset())); 1083 ArgumentsDescriptor::positional_count_offset()));
1081 __ j(EQUAL, &correct_num_arguments, Assembler::kNearJump); 1084 __ j(EQUAL, &correct_num_arguments, Assembler::kNearJump);
1082 1085
1083 __ Bind(&wrong_num_arguments); 1086 __ Bind(&wrong_num_arguments);
1084 if (function.IsClosureFunction()) { 1087 if (function.IsClosureFunction()) {
1088 ASSERT(assembler()->constant_pool_allowed());
1085 __ LeaveDartFrame(); // The arguments are still on the stack. 1089 __ LeaveDartFrame(); // The arguments are still on the stack.
1090 ASSERT(!assembler()->constant_pool_allowed());
1086 __ jmp(&StubCode::CallClosureNoSuchMethodLabel()); 1091 __ jmp(&StubCode::CallClosureNoSuchMethodLabel());
1092 __ set_constant_pool_allowed(true);
1087 // The noSuchMethod call may return to the caller, but not here. 1093 // The noSuchMethod call may return to the caller, but not here.
1088 } else { 1094 } else {
1089 __ Stop("Wrong number of arguments"); 1095 __ Stop("Wrong number of arguments");
1090 } 1096 }
1091 __ Bind(&correct_num_arguments); 1097 __ Bind(&correct_num_arguments);
1092 } 1098 }
1093 } else if (!flow_graph().IsCompiledForOsr()) { 1099 } else if (!flow_graph().IsCompiledForOsr()) {
1094 CopyParameters(); 1100 CopyParameters();
1095 } 1101 }
1096 1102
(...skipping 15 matching lines...)
1112 1118
1113 // In unoptimized code, initialize (non-argument) stack allocated slots to 1119 // In unoptimized code, initialize (non-argument) stack allocated slots to
1114 // null. 1120 // null.
1115 if (!is_optimizing()) { 1121 if (!is_optimizing()) {
1116 ASSERT(num_locals > 0); // There is always at least context_var. 1122 ASSERT(num_locals > 0); // There is always at least context_var.
1117 __ Comment("Initialize spill slots"); 1123 __ Comment("Initialize spill slots");
1118 const intptr_t slot_base = parsed_function().first_stack_local_index(); 1124 const intptr_t slot_base = parsed_function().first_stack_local_index();
1119 const intptr_t context_index = 1125 const intptr_t context_index =
1120 parsed_function().current_context_var()->index(); 1126 parsed_function().current_context_var()->index();
1121 if (num_locals > 1) { 1127 if (num_locals > 1) {
1122 __ LoadObject(RAX, Object::null_object(), PP); 1128 __ LoadObject(RAX, Object::null_object());
1123 } 1129 }
1124 for (intptr_t i = 0; i < num_locals; ++i) { 1130 for (intptr_t i = 0; i < num_locals; ++i) {
1125 // Subtract index i (locals lie at lower addresses than RBP). 1131 // Subtract index i (locals lie at lower addresses than RBP).
1126 if (((slot_base - i) == context_index)) { 1132 if (((slot_base - i) == context_index)) {
1127 if (function.IsClosureFunction()) { 1133 if (function.IsClosureFunction()) {
1128 __ movq(Address(RBP, (slot_base - i) * kWordSize), CTX); 1134 __ movq(Address(RBP, (slot_base - i) * kWordSize), CTX);
1129 } else { 1135 } else {
1130 const Context& empty_context = Context::ZoneHandle( 1136 const Context& empty_context = Context::ZoneHandle(
1131 zone(), isolate()->object_store()->empty_context()); 1137 zone(), isolate()->object_store()->empty_context());
1132 __ StoreObject( 1138 __ StoreObject(
1133 Address(RBP, (slot_base - i) * kWordSize), empty_context, PP); 1139 Address(RBP, (slot_base - i) * kWordSize), empty_context);
1134 } 1140 }
1135 } else { 1141 } else {
1136 ASSERT(num_locals > 1); 1142 ASSERT(num_locals > 1);
1137 __ movq(Address(RBP, (slot_base - i) * kWordSize), RAX); 1143 __ movq(Address(RBP, (slot_base - i) * kWordSize), RAX);
1138 } 1144 }
1139 } 1145 }
1140 } 1146 }
1141 1147
1142 ASSERT(!block_order().is_empty()); 1148 ASSERT(!block_order().is_empty());
1143 VisitBlocks(); 1149 VisitBlocks();
1144 1150
1145 __ int3(); 1151 __ int3();
1152 ASSERT(assembler()->constant_pool_allowed());
1146 GenerateDeferredCode(); 1153 GenerateDeferredCode();
1147 // Emit function patching code. This will be swapped with the first 13 bytes 1154 // Emit function patching code. This will be swapped with the first 13 bytes
1148 // at entry point. 1155 // at entry point.
1149 patch_code_pc_offset_ = assembler()->CodeSize(); 1156 patch_code_pc_offset_ = assembler()->CodeSize();
1150 // This is patched up to a point in FrameEntry where the PP for the 1157 // This is patched up to a point in FrameEntry where the PP for the
1151 // current function is in R13 instead of PP. 1158 // current function is in R13 instead of PP.
1152 __ JmpPatchable(&StubCode::FixCallersTargetLabel(), R13); 1159 __ JmpPatchable(&StubCode::FixCallersTargetLabel(), R13);
1153 1160
1154 if (is_optimizing()) { 1161 if (is_optimizing()) {
1155 lazy_deopt_pc_offset_ = assembler()->CodeSize(); 1162 lazy_deopt_pc_offset_ = assembler()->CodeSize();
1156 __ Jmp(&StubCode::DeoptimizeLazyLabel(), PP); 1163 __ Jmp(&StubCode::DeoptimizeLazyLabel(), PP);
1157 } 1164 }
1158 } 1165 }
1159 1166
1160 1167
1161 void FlowGraphCompiler::GenerateCall(intptr_t token_pos, 1168 void FlowGraphCompiler::GenerateCall(intptr_t token_pos,
1162 const ExternalLabel* label, 1169 const ExternalLabel* label,
1163 RawPcDescriptors::Kind kind, 1170 RawPcDescriptors::Kind kind,
1164 LocationSummary* locs) { 1171 LocationSummary* locs) {
1165 __ Call(label, PP); 1172 __ Call(label);
1166 AddCurrentDescriptor(kind, Isolate::kNoDeoptId, token_pos); 1173 AddCurrentDescriptor(kind, Isolate::kNoDeoptId, token_pos);
1167 RecordSafepoint(locs); 1174 RecordSafepoint(locs);
1168 } 1175 }
1169 1176
1170 1177
1171 void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id, 1178 void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
1172 intptr_t token_pos, 1179 intptr_t token_pos,
1173 const ExternalLabel* label, 1180 const ExternalLabel* label,
1174 RawPcDescriptors::Kind kind, 1181 RawPcDescriptors::Kind kind,
1175 LocationSummary* locs) { 1182 LocationSummary* locs) {
(...skipping 38 matching lines...)
1214 1221
1215 void FlowGraphCompiler::EmitUnoptimizedStaticCall( 1222 void FlowGraphCompiler::EmitUnoptimizedStaticCall(
1216 intptr_t argument_count, 1223 intptr_t argument_count,
1217 intptr_t deopt_id, 1224 intptr_t deopt_id,
1218 intptr_t token_pos, 1225 intptr_t token_pos,
1219 LocationSummary* locs, 1226 LocationSummary* locs,
1220 const ICData& ic_data) { 1227 const ICData& ic_data) {
1221 const uword label_address = 1228 const uword label_address =
1222 StubCode::UnoptimizedStaticCallEntryPoint(ic_data.NumArgsTested()); 1229 StubCode::UnoptimizedStaticCallEntryPoint(ic_data.NumArgsTested());
1223 ExternalLabel target_label(label_address); 1230 ExternalLabel target_label(label_address);
1224 __ LoadObject(RBX, ic_data, PP); 1231 __ LoadObject(RBX, ic_data);
1225 GenerateDartCall(deopt_id, 1232 GenerateDartCall(deopt_id,
1226 token_pos, 1233 token_pos,
1227 &target_label, 1234 &target_label,
1228 RawPcDescriptors::kUnoptStaticCall, 1235 RawPcDescriptors::kUnoptStaticCall,
1229 locs); 1236 locs);
1230 __ Drop(argument_count, RCX); 1237 __ Drop(argument_count, RCX);
1231 } 1238 }
1232 1239
1233 1240
1234 void FlowGraphCompiler::EmitEdgeCounter() { 1241 void FlowGraphCompiler::EmitEdgeCounter() {
1235 // We do not check for overflow when incrementing the edge counter. The 1242 // We do not check for overflow when incrementing the edge counter. The
1236 // function should normally be optimized long before the counter can 1243 // function should normally be optimized long before the counter can
1237 // overflow; and though we do not reset the counters when we optimize or 1244 // overflow; and though we do not reset the counters when we optimize or
1238 // deoptimize, there is a bound on the number of 1245 // deoptimize, there is a bound on the number of
1239 // optimization/deoptimization cycles we will attempt. 1246 // optimization/deoptimization cycles we will attempt.
1247 ASSERT(assembler_->constant_pool_allowed());
1240 const Array& counter = Array::ZoneHandle(Array::New(1, Heap::kOld)); 1248 const Array& counter = Array::ZoneHandle(Array::New(1, Heap::kOld));
1241 counter.SetAt(0, Smi::Handle(Smi::New(0))); 1249 counter.SetAt(0, Smi::Handle(Smi::New(0)));
1242 __ Comment("Edge counter"); 1250 __ Comment("Edge counter");
1243 __ LoadUniqueObject(RAX, counter, PP); 1251 __ LoadUniqueObject(RAX, counter);
1244 intptr_t increment_start = assembler_->CodeSize(); 1252 intptr_t increment_start = assembler_->CodeSize();
1245 __ IncrementSmiField(FieldAddress(RAX, Array::element_offset(0)), 1); 1253 __ IncrementSmiField(FieldAddress(RAX, Array::element_offset(0)), 1);
1246 int32_t size = assembler_->CodeSize() - increment_start; 1254 int32_t size = assembler_->CodeSize() - increment_start;
1247 if (isolate()->edge_counter_increment_size() == -1) { 1255 if (isolate()->edge_counter_increment_size() == -1) {
1248 isolate()->set_edge_counter_increment_size(size); 1256 isolate()->set_edge_counter_increment_size(size);
1249 } else { 1257 } else {
1250 ASSERT(size == isolate()->edge_counter_increment_size()); 1258 ASSERT(size == isolate()->edge_counter_increment_size());
1251 } 1259 }
1252 } 1260 }
1253 1261
(...skipping 12 matching lines...)
1266 intptr_t deopt_id, 1274 intptr_t deopt_id,
1267 intptr_t token_pos, 1275 intptr_t token_pos,
1268 LocationSummary* locs) { 1276 LocationSummary* locs) {
1269 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0); 1277 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
1270 // Each ICData propagated from unoptimized to optimized code contains the 1278 // Each ICData propagated from unoptimized to optimized code contains the
1271 // function that corresponds to the Dart function of that IC call. Due 1279 // function that corresponds to the Dart function of that IC call. Due
1272 // to inlining in optimized code, that function may not correspond to the 1280 // to inlining in optimized code, that function may not correspond to the
1273 // top-level function (parsed_function().function()) which could be 1281 // top-level function (parsed_function().function()) which could be
1274 // reoptimized and which counter needs to be incremented. 1282 // reoptimized and which counter needs to be incremented.
1275 // Pass the function explicitly, it is used in IC stub. 1283 // Pass the function explicitly, it is used in IC stub.
1276 __ LoadObject(RDI, parsed_function().function(), PP); 1284 __ LoadObject(RDI, parsed_function().function());
1277 __ LoadUniqueObject(RBX, ic_data, PP); 1285 __ LoadUniqueObject(RBX, ic_data);
1278 GenerateDartCall(deopt_id, 1286 GenerateDartCall(deopt_id,
1279 token_pos, 1287 token_pos,
1280 target_label, 1288 target_label,
1281 RawPcDescriptors::kIcCall, 1289 RawPcDescriptors::kIcCall,
1282 locs); 1290 locs);
1283 __ Drop(argument_count, RCX); 1291 __ Drop(argument_count, RCX);
1284 } 1292 }
1285 1293
1286 1294
1287 void FlowGraphCompiler::EmitInstanceCall(ExternalLabel* target_label, 1295 void FlowGraphCompiler::EmitInstanceCall(ExternalLabel* target_label,
1288 const ICData& ic_data, 1296 const ICData& ic_data,
1289 intptr_t argument_count, 1297 intptr_t argument_count,
1290 intptr_t deopt_id, 1298 intptr_t deopt_id,
1291 intptr_t token_pos, 1299 intptr_t token_pos,
1292 LocationSummary* locs) { 1300 LocationSummary* locs) {
1293 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0); 1301 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
1294 __ LoadUniqueObject(RBX, ic_data, PP); 1302 __ LoadUniqueObject(RBX, ic_data);
1295 GenerateDartCall(deopt_id, 1303 GenerateDartCall(deopt_id,
1296 token_pos, 1304 token_pos,
1297 target_label, 1305 target_label,
1298 RawPcDescriptors::kIcCall, 1306 RawPcDescriptors::kIcCall,
1299 locs); 1307 locs);
1300 __ Drop(argument_count, RCX); 1308 __ Drop(argument_count, RCX);
1301 } 1309 }
1302 1310
1303 1311
1304 void FlowGraphCompiler::EmitMegamorphicInstanceCall( 1312 void FlowGraphCompiler::EmitMegamorphicInstanceCall(
1305 const ICData& ic_data, 1313 const ICData& ic_data,
1306 intptr_t argument_count, 1314 intptr_t argument_count,
1307 intptr_t deopt_id, 1315 intptr_t deopt_id,
1308 intptr_t token_pos, 1316 intptr_t token_pos,
1309 LocationSummary* locs) { 1317 LocationSummary* locs) {
1310 MegamorphicCacheTable* table = isolate()->megamorphic_cache_table(); 1318 MegamorphicCacheTable* table = isolate()->megamorphic_cache_table();
1311 const String& name = String::Handle(ic_data.target_name()); 1319 const String& name = String::Handle(ic_data.target_name());
1312 const Array& arguments_descriptor = 1320 const Array& arguments_descriptor =
1313 Array::ZoneHandle(ic_data.arguments_descriptor()); 1321 Array::ZoneHandle(ic_data.arguments_descriptor());
1314 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0)); 1322 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
1315 const MegamorphicCache& cache = 1323 const MegamorphicCache& cache =
1316 MegamorphicCache::ZoneHandle(table->Lookup(name, arguments_descriptor)); 1324 MegamorphicCache::ZoneHandle(table->Lookup(name, arguments_descriptor));
1317 const Register receiverR = RDI; 1325 const Register receiverR = RDI;
1318 const Register cacheR = RBX; 1326 const Register cacheR = RBX;
1319 const Register targetR = RCX; 1327 const Register targetR = RCX;
1320 __ movq(receiverR, Address(RSP, (argument_count - 1) * kWordSize)); 1328 __ movq(receiverR, Address(RSP, (argument_count - 1) * kWordSize));
1321 __ LoadObject(cacheR, cache, PP); 1329 __ LoadObject(cacheR, cache);
1322 1330
1323 if (FLAG_use_megamorphic_stub) { 1331 if (FLAG_use_megamorphic_stub) {
1324 __ call(&StubCode::MegamorphicLookupLabel()); 1332 __ call(&StubCode::MegamorphicLookupLabel());
1325 } else { 1333 } else {
1326 StubCode::EmitMegamorphicLookup(assembler(), receiverR, cacheR, targetR); 1334 StubCode::EmitMegamorphicLookup(assembler(), receiverR, cacheR, targetR);
1327 } 1335 }
1328 __ LoadObject(RBX, ic_data, PP); 1336 __ LoadObject(RBX, ic_data);
1329 __ LoadObject(R10, arguments_descriptor, PP); 1337 __ LoadObject(R10, arguments_descriptor);
1330 __ call(targetR); 1338 __ call(targetR);
1331 AddCurrentDescriptor(RawPcDescriptors::kOther, 1339 AddCurrentDescriptor(RawPcDescriptors::kOther,
1332 Isolate::kNoDeoptId, token_pos); 1340 Isolate::kNoDeoptId, token_pos);
1333 RecordSafepoint(locs); 1341 RecordSafepoint(locs);
1334 const intptr_t deopt_id_after = Isolate::ToDeoptAfter(deopt_id); 1342 const intptr_t deopt_id_after = Isolate::ToDeoptAfter(deopt_id);
1335 if (is_optimizing()) { 1343 if (is_optimizing()) {
1336 AddDeoptIndexAtCall(deopt_id_after, token_pos); 1344 AddDeoptIndexAtCall(deopt_id_after, token_pos);
1337 } else { 1345 } else {
1338 // Add a deoptimization continuation point after the call and before the 1346 // Add a deoptimization continuation point after the call and before the
1339 // arguments are removed. 1347 // arguments are removed.
1340 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos); 1348 AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
1341 } 1349 }
1342 __ Drop(argument_count, RCX); 1350 __ Drop(argument_count, RCX);
1343 } 1351 }
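For readers unfamiliar with megamorphic dispatch: the cache maps the receiver's class id to a call target, and a miss falls through to a runtime lookup that fills the cache. A rough standalone sketch of that probe, assuming a plain hash map where the VM uses a fixed-layout probing table:

#include <cstdint>
#include <iostream>
#include <unordered_map>

using ClassId = intptr_t;
using Target = void (*)();

void SmiTarget() { std::cout << "Smi handler\n"; }
void SlowPathLookup() { std::cout << "runtime lookup, then cache fill\n"; }

struct MegamorphicCacheModel {  // Invented; real cache is a probing table.
  std::unordered_map<ClassId, Target> buckets;
  void Dispatch(ClassId cid) const {
    auto it = buckets.find(cid);
    Target target = (it != buckets.end()) ? it->second : &SlowPathLookup;
    target();  // Corresponds to `call targetR` above.
  }
};

int main() {
  MegamorphicCacheModel cache;
  cache.buckets[/*hypothetical kSmiCid*/ 1] = &SmiTarget;
  cache.Dispatch(1);   // Hit: calls the cached target.
  cache.Dispatch(42);  // Miss: falls back to the slow path.
}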
1344 1352
1345 1353
1346 void FlowGraphCompiler::EmitOptimizedStaticCall( 1354 void FlowGraphCompiler::EmitOptimizedStaticCall(
1347 const Function& function, 1355 const Function& function,
1348 const Array& arguments_descriptor, 1356 const Array& arguments_descriptor,
1349 intptr_t argument_count, 1357 intptr_t argument_count,
1350 intptr_t deopt_id, 1358 intptr_t deopt_id,
1351 intptr_t token_pos, 1359 intptr_t token_pos,
1352 LocationSummary* locs) { 1360 LocationSummary* locs) {
1353 __ LoadObject(R10, arguments_descriptor, PP); 1361 __ LoadObject(R10, arguments_descriptor);
1354 // Do not use the code from the function, but let the code be patched so that 1362 // Do not use the code from the function, but let the code be patched so that
1355 // we can record the outgoing edges to other code. 1363 // we can record the outgoing edges to other code.
1356 GenerateDartCall(deopt_id, 1364 GenerateDartCall(deopt_id,
1357 token_pos, 1365 token_pos,
1358 &StubCode::CallStaticFunctionLabel(), 1366 &StubCode::CallStaticFunctionLabel(),
1359 RawPcDescriptors::kOther, 1367 RawPcDescriptors::kOther,
1360 locs); 1368 locs);
1361 AddStaticCallTarget(function); 1369 AddStaticCallTarget(function);
1362 __ Drop(argument_count, RCX); 1370 __ Drop(argument_count, RCX);
1363 } 1371 }
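The "let the code be patched" comment describes call sites that initially route through the CallStaticFunction stub and are later redirected; keeping the call indirect is also what makes the outgoing edges recordable. A hypothetical model of such a patchable call site (names invented):

#include <iostream>

using Target = void (*)();

void CallStaticFunctionStub() { std::cout << "resolve target, patch site\n"; }
void ResolvedTarget() { std::cout << "direct call\n"; }

struct PatchableCallSite {  // Invented model of a patchable call.
  Target target = &CallStaticFunctionStub;  // Every site starts at the stub.
  void Invoke() const { target(); }
};

int main() {
  PatchableCallSite site;
  site.Invoke();                  // First call: goes through the stub.
  site.target = &ResolvedTarget;  // Runtime patches in the real target.
  site.Invoke();                  // Later calls: direct, and the slot
                                  // doubles as a record of the call edge.
}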
1364 1372
1365 1373
1366 Condition FlowGraphCompiler::EmitEqualityRegConstCompare( 1374 Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
1367 Register reg, 1375 Register reg,
1368 const Object& obj, 1376 const Object& obj,
1369 bool needs_number_check, 1377 bool needs_number_check,
1370 intptr_t token_pos) { 1378 intptr_t token_pos) {
1371 ASSERT(!needs_number_check || 1379 ASSERT(!needs_number_check ||
1372 (!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint())); 1380 (!obj.IsMint() && !obj.IsDouble() && !obj.IsBigint()));
1373 1381
1374 if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) { 1382 if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
1375 ASSERT(!needs_number_check); 1383 ASSERT(!needs_number_check);
1376 __ testq(reg, reg); 1384 __ testq(reg, reg);
1377 return EQUAL; 1385 return EQUAL;
1378 } 1386 }
1379 1387
1380 if (needs_number_check) { 1388 if (needs_number_check) {
1381 __ pushq(reg); 1389 __ pushq(reg);
1382 __ PushObject(obj, PP); 1390 __ PushObject(obj);
1383 if (is_optimizing()) { 1391 if (is_optimizing()) {
1384 __ CallPatchable(&StubCode::OptimizedIdenticalWithNumberCheckLabel()); 1392 __ CallPatchable(&StubCode::OptimizedIdenticalWithNumberCheckLabel());
1385 } else { 1393 } else {
1386 __ CallPatchable(&StubCode::UnoptimizedIdenticalWithNumberCheckLabel()); 1394 __ CallPatchable(&StubCode::UnoptimizedIdenticalWithNumberCheckLabel());
1387 } 1395 }
1388 if (token_pos != Scanner::kNoSourcePos) { 1396 if (token_pos != Scanner::kNoSourcePos) {
1389 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, 1397 AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall,
1390 Isolate::kNoDeoptId, 1398 Isolate::kNoDeoptId,
1391 token_pos); 1399 token_pos);
1392 } 1400 }
1393 // Stub returns result in flags (result of a cmpq, we need ZF computed). 1401 // Stub returns result in flags (result of a cmpq, we need ZF computed).
1394 __ popq(reg); // Discard constant. 1402 __ popq(reg); // Discard constant.
1395 __ popq(reg); // Restore 'reg'. 1403 __ popq(reg); // Restore 'reg'.
1396 } else { 1404 } else {
1397 __ CompareObject(reg, obj, PP); 1405 __ CompareObject(reg, obj);
1398 } 1406 }
1399 return EQUAL; 1407 return EQUAL;
1400 } 1408 }
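The zero-test shortcut at the top of this function works because of Smi tagging: a Smi is its value shifted left by one with a clear tag bit, so Smi(0) is the all-zero word and equality against it reduces to `testq reg, reg`. A small sketch of that encoding (kSmiTagSize as in the VM; the rest is illustrative):

#include <cassert>
#include <cstdint>

constexpr intptr_t kSmiTagSize = 1;  // Low bit is the tag; clear means Smi.

constexpr intptr_t SmiEncode(intptr_t value) { return value << kSmiTagSize; }

int main() {
  static_assert(SmiEncode(0) == 0, "Smi 0 is the all-zero bit pattern");
  assert(SmiEncode(7) == 14);  // Non-zero Smis have non-zero encodings.
  intptr_t reg = SmiEncode(0);
  assert(reg == 0);  // So `testq reg, reg` computes exactly this check.
}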
1401 1409
1402 1410
1403 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left, 1411 Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
1404 Register right, 1412 Register right,
1405 bool needs_number_check, 1413 bool needs_number_check,
1406 intptr_t token_pos) { 1414 intptr_t token_pos) {
1407 if (needs_number_check) { 1415 if (needs_number_check) {
(...skipping 64 matching lines...)
1472 LocationSummary* locs) { 1480 LocationSummary* locs) {
1473 ASSERT(is_optimizing()); 1481 ASSERT(is_optimizing());
1474 1482
1475 __ Comment("EmitTestAndCall"); 1483 __ Comment("EmitTestAndCall");
1476 const Array& arguments_descriptor = 1484 const Array& arguments_descriptor =
1477 Array::ZoneHandle(ArgumentsDescriptor::New(argument_count, 1485 Array::ZoneHandle(ArgumentsDescriptor::New(argument_count,
1478 argument_names)); 1486 argument_names));
1479 // Load receiver into RAX. 1487 // Load receiver into RAX.
1480 __ movq(RAX, 1488 __ movq(RAX,
1481 Address(RSP, (argument_count - 1) * kWordSize)); 1489 Address(RSP, (argument_count - 1) * kWordSize));
1482 __ LoadObject(R10, arguments_descriptor, PP); 1490 __ LoadObject(R10, arguments_descriptor);
1483 1491
1484 const bool kFirstCheckIsSmi = ic_data.GetReceiverClassIdAt(0) == kSmiCid; 1492 const bool kFirstCheckIsSmi = ic_data.GetReceiverClassIdAt(0) == kSmiCid;
1485 const intptr_t kNumChecks = ic_data.NumberOfChecks(); 1493 const intptr_t kNumChecks = ic_data.NumberOfChecks();
1486 1494
1487 ASSERT(!ic_data.IsNull() && (kNumChecks > 0)); 1495 ASSERT(!ic_data.IsNull() && (kNumChecks > 0));
1488 1496
1489 Label after_smi_test; 1497 Label after_smi_test;
1490 __ testq(RAX, Immediate(kSmiTagMask)); 1498 __ testq(RAX, Immediate(kSmiTagMask));
1491 if (kFirstCheckIsSmi) { 1499 if (kFirstCheckIsSmi) {
1492 // Jump if receiver is not Smi. 1500 // Jump if receiver is not Smi.
(...skipping 117 matching lines...)
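The elided body of EmitTestAndCall continues the pattern visible above: an optional Smi fast path guarded by kSmiTagMask, then one class-id comparison per ICData check, and a fallthrough when nothing matches. A schematic C++ rendering of that dispatch shape, with invented class ids and handlers:

#include <cstdint>
#include <iostream>

constexpr intptr_t kSmiTagMask = 1;  // Low bit clear means Smi.

struct Check {
  intptr_t cid;
  void (*target)();
};

void SmiCase() { std::cout << "smi case\n"; }
void DoubleCase() { std::cout << "double case\n"; }
void NoMatch() { std::cout << "deopt or megamorphic fallthrough\n"; }

void TestAndCall(intptr_t receiver_bits, intptr_t receiver_cid,
                 const Check* checks, int n) {
  if ((receiver_bits & kSmiTagMask) == 0) {  // testq RAX, kSmiTagMask
    SmiCase();  // Smi fast path (when the first check is kSmiCid).
    return;
  }
  for (int i = 0; i < n; ++i) {  // One cmp + conditional call per cid.
    if (receiver_cid == checks[i].cid) {
      checks[i].target();
      return;
    }
  }
  NoMatch();
}

int main() {
  const Check checks[] = {{/*hypothetical kDoubleCid*/ 62, &DoubleCase}};
  TestAndCall(/*bits*/ 2, 0, checks, 1);   // Low bit clear: Smi path.
  TestAndCall(/*bits*/ 1, 62, checks, 1);  // Matches the double check.
  TestAndCall(/*bits*/ 1, 99, checks, 1);  // No match: fallthrough.
}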
1610 } else { 1618 } else {
1611 ASSERT(source.IsConstant()); 1619 ASSERT(source.IsConstant());
1612 const Object& constant = source.constant(); 1620 const Object& constant = source.constant();
1613 if (destination.IsRegister()) { 1621 if (destination.IsRegister()) {
1614 if (constant.IsSmi() && (Smi::Cast(constant).Value() == 0)) { 1622 if (constant.IsSmi() && (Smi::Cast(constant).Value() == 0)) {
1615 __ xorq(destination.reg(), destination.reg()); 1623 __ xorq(destination.reg(), destination.reg());
1616 } else if (constant.IsSmi() && 1624 } else if (constant.IsSmi() &&
1617 (source.constant_instruction()->representation() == kUnboxedInt32)) { 1625 (source.constant_instruction()->representation() == kUnboxedInt32)) {
1618 __ movl(destination.reg(), Immediate(Smi::Cast(constant).Value())); 1626 __ movl(destination.reg(), Immediate(Smi::Cast(constant).Value()));
1619 } else { 1627 } else {
1620 __ LoadObject(destination.reg(), constant, PP); 1628 __ LoadObject(destination.reg(), constant);
1621 } 1629 }
1622 } else if (destination.IsFpuRegister()) { 1630 } else if (destination.IsFpuRegister()) {
1623 if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0)) { 1631 if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0)) {
1624 __ xorps(destination.fpu_reg(), destination.fpu_reg()); 1632 __ xorps(destination.fpu_reg(), destination.fpu_reg());
1625 } else { 1633 } else {
1626 __ LoadObject(TMP, constant, PP); 1634 __ LoadObject(TMP, constant);
1627 __ movsd(destination.fpu_reg(), 1635 __ movsd(destination.fpu_reg(),
1628 FieldAddress(TMP, Double::value_offset())); 1636 FieldAddress(TMP, Double::value_offset()));
1629 } 1637 }
1630 } else if (destination.IsDoubleStackSlot()) { 1638 } else if (destination.IsDoubleStackSlot()) {
1631 if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0)) { 1639 if (Utils::DoublesBitEqual(Double::Cast(constant).value(), 0.0)) {
1632 __ xorps(XMM0, XMM0); 1640 __ xorps(XMM0, XMM0);
1633 } else { 1641 } else {
1634 __ LoadObject(TMP, constant, PP); 1642 __ LoadObject(TMP, constant);
1635 __ movsd(XMM0, FieldAddress(TMP, Double::value_offset())); 1643 __ movsd(XMM0, FieldAddress(TMP, Double::value_offset()));
1636 } 1644 }
1637 __ movsd(destination.ToStackSlotAddress(), XMM0); 1645 __ movsd(destination.ToStackSlotAddress(), XMM0);
1638 } else { 1646 } else {
1639 ASSERT(destination.IsStackSlot()); 1647 ASSERT(destination.IsStackSlot());
1640 if (constant.IsSmi() && 1648 if (constant.IsSmi() &&
1641 (source.constant_instruction()->representation() == kUnboxedInt32)) { 1649 (source.constant_instruction()->representation() == kUnboxedInt32)) {
1642 __ movl(destination.ToStackSlotAddress(), 1650 __ movl(destination.ToStackSlotAddress(),
1643 Immediate(Smi::Cast(constant).Value())); 1651 Immediate(Smi::Cast(constant).Value()));
1644 } else { 1652 } else {
(...skipping 84 matching lines...)
1729 } 1737 }
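In the constant-move cases above, the xorq/xorps shortcut fires only for constants whose bit pattern is exactly zero; notably -0.0 compares equal to 0.0 numerically but not bitwise, so it must still be loaded from the object pool. A sketch in the spirit of Utils::DoublesBitEqual (the real helper's signature may differ):

#include <cassert>
#include <cstdint>
#include <cstring>

bool DoublesBitEqualModel(double a, double b) {
  uint64_t ia, ib;
  std::memcpy(&ia, &a, sizeof(ia));  // Bit-cast without aliasing UB.
  std::memcpy(&ib, &b, sizeof(ib));
  return ia == ib;
}

int main() {
  assert(DoublesBitEqualModel(0.0, 0.0));    // Zero bits: xorps is valid.
  assert(!DoublesBitEqualModel(-0.0, 0.0));  // Numerically equal, but the
                                             // sign bit differs, so -0.0
                                             // still loads from the pool.
}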
1730 1738
1731 1739
1732 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst, 1740 void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
1733 const Address& src) { 1741 const Address& src) {
1734 __ MoveMemoryToMemory(dst, src); 1742 __ MoveMemoryToMemory(dst, src);
1735 } 1743 }
1736 1744
1737 1745
1738 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) { 1746 void ParallelMoveResolver::StoreObject(const Address& dst, const Object& obj) {
1739 __ StoreObject(dst, obj, PP); 1747 __ StoreObject(dst, obj);
1740 } 1748 }
1741 1749
1742 1750
1743 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) { 1751 void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
1744 __ Exchange(reg, mem); 1752 __ Exchange(reg, mem);
1745 } 1753 }
1746 1754
1747 1755
1748 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) { 1756 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
1749 __ Exchange(mem1, mem2); 1757 __ Exchange(mem1, mem2);
(...skipping 19 matching lines...)
1769 __ pushq(reg); 1777 __ pushq(reg);
1770 } 1778 }
1771 1779
1772 1780
1773 void ParallelMoveResolver::RestoreScratch(Register reg) { 1781 void ParallelMoveResolver::RestoreScratch(Register reg) {
1774 __ popq(reg); 1782 __ popq(reg);
1775 } 1783 }
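SpillScratch/RestoreScratch bracket each use of a scratch register with pushq/popq so the register's previous contents survive the move; the FPU variant below does the same with a 16-byte stack slot and movups. The same discipline expressed as an RAII guard over a toy machine model (purely illustrative; the resolver uses explicit calls, not a guard):

#include <iostream>
#include <stack>

struct MachineModel {      // Toy machine: one register and a stack.
  long rax = 42;
  std::stack<long> stack;  // Models pushq/popq through RSP.
};

class ScopedScratch {  // RAII version of SpillScratch/RestoreScratch.
 public:
  explicit ScopedScratch(MachineModel& m) : m_(m) { m_.stack.push(m_.rax); }
  ~ScopedScratch() {
    m_.rax = m_.stack.top();  // popq: the original value comes back.
    m_.stack.pop();
  }
 private:
  MachineModel& m_;
};

int main() {
  MachineModel m;
  {
    ScopedScratch guard(m);    // pushq rax
    m.rax = 7;                 // Use rax as scratch during the exchange.
  }                            // popq rax
  std::cout << m.rax << "\n";  // Prints 42.
}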
1776 1784
1777 1785
1778 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) { 1786 void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
1779 __ AddImmediate(RSP, Immediate(-kFpuRegisterSize), PP); 1787 __ AddImmediate(RSP, Immediate(-kFpuRegisterSize));
1780 __ movups(Address(RSP, 0), reg); 1788 __ movups(Address(RSP, 0), reg);
1781 } 1789 }
1782 1790
1783 1791
1784 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) { 1792 void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
1785 __ movups(reg, Address(RSP, 0)); 1793 __ movups(reg, Address(RSP, 0));
1786 __ AddImmediate(RSP, Immediate(kFpuRegisterSize), PP); 1794 __ AddImmediate(RSP, Immediate(kFpuRegisterSize));
1787 } 1795 }
1788 1796
1789 1797
1790 #undef __ 1798 #undef __
1791 1799
1792 } // namespace dart 1800 } // namespace dart
1793 1801
1794 #endif // defined TARGET_ARCH_X64 1802 #endif // defined TARGET_ARCH_X64