Chromium Code Reviews

Diff: runtime/vm/flow_graph_compiler_x64.cc

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed)
Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
-// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_X64.
 #if defined(TARGET_ARCH_X64)

 #include "vm/flow_graph_compiler.h"

 #include "lib/error.h"
 #include "vm/ast_printer.h"
 #include "vm/dart_entry.h"
 #include "vm/il_printer.h"
 #include "vm/locations.h"
 #include "vm/object_store.h"
 #include "vm/parser.h"
 #include "vm/stub_code.h"
 #include "vm/symbols.h"

 namespace dart {

 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
 DECLARE_FLAG(int, optimization_counter_threshold);
 DECLARE_FLAG(bool, print_ast);
 DECLARE_FLAG(bool, print_scopes);
 DECLARE_FLAG(bool, use_sse41);


+FlowGraphCompiler::~FlowGraphCompiler() {
+  // BlockInfos are zone-allocated, so their destructors are not called.
+  // Verify the labels explicitly here.
+  for (int i = 0; i < block_info_.length(); ++i) {
+    ASSERT(!block_info_[i]->label.IsLinked());
+    ASSERT(!block_info_[i]->label.HasNear());
+  }
+}
+
+
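A note on the caveat the new destructor works around: zone (arena) allocated objects are reclaimed in bulk when their zone is torn down, so their destructors never run, and any invariant a destructor would have checked must be asserted by hand, as above. A minimal sketch of the allocation pattern, with a hypothetical type standing in for the VM's Zone API:

    #include <cassert>
    #include <cstddef>

    // Toy arena: objects are carved out of one buffer and released all at
    // once, so destructors of objects placed in it are never invoked.
    class Zone {
     public:
      void* Allocate(size_t size) {
        assert(used_ + size <= sizeof(buffer_));
        void* result = buffer_ + used_;
        used_ += size;
        return result;
      }
      // No per-object Free(): the whole buffer is dropped when the Zone dies.
     private:
      char buffer_[4096];
      size_t used_ = 0;
    };
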
 bool FlowGraphCompiler::SupportsUnboxedMints() {
   return false;
 }


 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                              intptr_t stub_ix) {
   // Calls do not need stubs, they share a deoptimization trampoline.
   ASSERT(reason() != kDeoptAtCall);
   Assembler* assem = compiler->assembler();
(...skipping 11 matching lines...)
 }


 #define __ assembler()->


 // Fall through if bool_register contains null.
 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                            Label* is_true,
                                            Label* is_false) {
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label fall_through;
   __ cmpq(bool_register, raw_null);
   __ j(EQUAL, &fall_through, Assembler::kNearJump);
   __ CompareObject(bool_register, Bool::True());
   __ j(EQUAL, is_true);
   __ jmp(is_false);
   __ Bind(&fall_through);
 }


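The `const Immediate` to `const Immediate&` change recurs throughout this patch. It leans on a C++ guarantee: binding a temporary to a const reference extends the temporary's lifetime to that of the reference, so the value stays valid for the whole function without requiring a copy. A small standalone illustration (the `Immediate` here is a stand-in, not the VM's class):

    struct Immediate {
      explicit Immediate(long v) : value_(v) {}
      long value_;
    };

    void Example() {
      // The temporary Immediate(0) lives until the end of this scope because
      // it is bound to a const reference; no copy constructor is needed.
      const Immediate& raw_null = Immediate(0);
      (void)raw_null.value_;
    }
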
 // Clobbers RCX.
 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
     TypeTestStubKind test_kind,
     Register instance_reg,
     Register type_arguments_reg,
     Register temp_reg,
     Label* is_instance_lbl,
     Label* is_not_instance_lbl) {
   const SubtypeTestCache& type_test_cache =
       SubtypeTestCache::ZoneHandle(SubtypeTestCache::New());
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   __ LoadObject(temp_reg, type_test_cache);
   __ pushq(temp_reg);  // Subtype test cache.
   __ pushq(instance_reg);  // Instance.
   if (test_kind == kTestTypeOneArg) {
     ASSERT(type_arguments_reg == kNoRegister);
     __ pushq(raw_null);
     __ call(&StubCode::Subtype1TestCacheLabel());
   } else if (test_kind == kTestTypeTwoArgs) {
     ASSERT(type_arguments_reg == kNoRegister);
(...skipping 127 matching lines...)
   // Bool interface can be implemented only by core class Bool.
   // (see ClassFinalizer::ResolveInterfaces for list of restricted interfaces).
   if (type.IsBoolType()) {
     __ cmpl(kClassIdReg, Immediate(kBoolCid));
     __ j(EQUAL, is_instance_lbl);
     __ jmp(is_not_instance_lbl);
     return false;
   }
   if (type.IsFunctionType()) {
     // Check if instance is a closure.
-    const Immediate raw_null =
+    const Immediate& raw_null =
         Immediate(reinterpret_cast<intptr_t>(Object::null()));
     __ LoadClassById(R13, kClassIdReg);
     __ movq(R13, FieldAddress(R13, Class::signature_function_offset()));
     __ cmpq(R13, raw_null);
     __ j(NOT_EQUAL, is_instance_lbl);
   }
   // Custom checking for numbers (Smi, Mint, Bigint and Double).
   // Note that instance is not Smi (checked above).
   if (type.IsSubtypeOf(Type::Handle(Type::Number()), NULL)) {
     GenerateNumberTypeCheck(
(...skipping 46 matching lines...)
 // RAX: instance (preserved).
 // Clobbers RDI, RDX, R10.
 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
     intptr_t token_pos,
     const AbstractType& type,
     Label* is_instance_lbl,
     Label* is_not_instance_lbl) {
   __ Comment("UninstantiatedTypeTest");
   ASSERT(!type.IsInstantiated());
   // Skip check if destination is a dynamic type.
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   if (type.IsTypeParameter()) {
     const TypeParameter& type_param = TypeParameter::Cast(type);
     // Load instantiator (or null) and instantiator type arguments on stack.
     __ movq(RDX, Address(RSP, 0));  // Get instantiator type arguments.
     // RDX: instantiator type arguments.
     // Check if type argument is dynamic.
     __ cmpq(RDX, raw_null);
     __ j(EQUAL, is_instance_lbl);
     // Can handle only type arguments that are instances of TypeArguments.
(...skipping 139 matching lines...)
 // - RCX: instantiator or raw_null.
 // Clobbers RCX and RDX.
 // Returns:
 // - true or false in RAX.
 void FlowGraphCompiler::GenerateInstanceOf(intptr_t token_pos,
                                            const AbstractType& type,
                                            bool negate_result,
                                            LocationSummary* locs) {
   ASSERT(type.IsFinalized() && !type.IsMalformed());

-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label is_instance, is_not_instance;
   __ pushq(RCX);  // Store instantiator on stack.
   __ pushq(RDX);  // Store instantiator type arguments.
   // If type is instantiated and non-parameterized, we can inline code
   // checking whether the tested instance is a Smi.
   if (type.IsInstantiated()) {
     // A null object is only an instance of Object and dynamic, which has
     // already been checked above (if the type is instantiated). So we can
     // return false here if the instance is null (and if the type is
(...skipping 68 matching lines...)
                                            LocationSummary* locs) {
   ASSERT(token_pos >= 0);
   ASSERT(!dst_type.IsNull());
   ASSERT(dst_type.IsFinalized());
   // Assignable check is skipped in FlowGraphBuilder, not here.
   ASSERT(dst_type.IsMalformed() ||
          (!dst_type.IsDynamicType() && !dst_type.IsObjectType()));
   __ pushq(RCX);  // Store instantiator.
   __ pushq(RDX);  // Store instantiator type arguments.
   // A null object is always assignable and is returned as result.
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label is_assignable, runtime_call;
   __ cmpq(RAX, raw_null);
   __ j(EQUAL, &is_assignable);

   // Generate throw new TypeError() if the type is malformed.
   if (dst_type.IsMalformed()) {
     const Error& error = Error::Handle(dst_type.malformed_error());
     const String& error_message = String::ZoneHandle(
         Symbols::New(error.ToErrorCString()));
(...skipping 113 matching lines...)
   const Address argument_addr(RBX, RCX, TIMES_8, 0);
   const Address copy_addr(RDI, RCX, TIMES_8, 0);
   __ Bind(&loop);
   __ movq(RAX, argument_addr);
   __ movq(copy_addr, RAX);
   __ Bind(&loop_condition);
   __ decq(RCX);
   __ j(POSITIVE, &loop, Assembler::kNearJump);

   // Copy or initialize optional named arguments.
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label all_arguments_processed;
   if (num_opt_named_params > 0) {
     // Start by alphabetically sorting the names of the optional parameters.
     LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
     int* opt_param_position = new int[num_opt_named_params];
     for (int pos = num_fixed_params; pos < num_params; pos++) {
       LocalVariable* parameter = scope->VariableAt(pos);
       const String& opt_param_name = parameter->name();
       int i = pos - num_fixed_params;
(...skipping 161 matching lines...)


 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
   // TOS: return address.
   // +1 : value
   // +2 : receiver.
   // Sequence node has one store node and one return NULL node.
   __ movq(RAX, Address(RSP, 2 * kWordSize));  // Receiver.
   __ movq(RBX, Address(RSP, 1 * kWordSize));  // Value.
   __ StoreIntoObject(RAX, FieldAddress(RAX, offset), RBX);
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   __ movq(RAX, raw_null);
   __ ret();
 }


 void FlowGraphCompiler::EmitFrameEntry() {
   const Function& function = parsed_function().function();
   if (CanOptimizeFunction() && function.is_optimizable()) {
     const bool can_optimize = !is_optimizing() || may_reoptimize();
(...skipping 135 matching lines...)
       __ movq(Address(RBP, slot * kWordSize), kArgumentsDescriptorReg);
     }
     CopyParameters();
   }

   // In unoptimized code, initialize (non-argument) stack allocated slots to
   // null. This does not cover the saved_args_desc_var slot.
   if (!is_optimizing() && (num_locals > 0)) {
     __ Comment("Initialize spill slots");
     const intptr_t slot_base = parsed_function().first_stack_local_index();
-    const Immediate raw_null =
+    const Immediate& raw_null =
         Immediate(reinterpret_cast<intptr_t>(Object::null()));
     __ movq(RAX, raw_null);
     for (intptr_t i = 0; i < num_locals; ++i) {
       // Subtract index i (locals lie at lower addresses than RBP).
       __ movq(Address(RBP, (slot_base - i) * kWordSize), RAX);
     }
   }

   if (FLAG_print_scopes) {
     // Print the function scope (again) after generating the prologue in order
(...skipping 235 matching lines...)
   } else {
     __ cmpl(left, right);
   }
 }


 // Implement equality spec: if any of the arguments is null do identity check.
 // Fallthrough calls super equality.
 void FlowGraphCompiler::EmitSuperEqualityCallPrologue(Register result,
                                                       Label* skip_call) {
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label check_identity, fall_through;
   __ cmpq(Address(RSP, 0 * kWordSize), raw_null);
   __ j(EQUAL, &check_identity, Assembler::kNearJump);
   __ cmpq(Address(RSP, 1 * kWordSize), raw_null);
   __ j(NOT_EQUAL, &fall_through, Assembler::kNearJump);

   __ Bind(&check_identity);
   __ popq(result);
   __ cmpq(result, Address(RSP, 0 * kWordSize));
   Label is_false;
   __ j(NOT_EQUAL, &is_false, Assembler::kNearJump);
   __ LoadObject(result, Bool::True());
   __ Drop(1);
   __ jmp(skip_call);
   __ Bind(&is_false);
   __ LoadObject(result, Bool::False());
   __ Drop(1);
   __ jmp(skip_call);
   __ Bind(&fall_through);
 }


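The prologue above encodes the equality spec's null shortcut: when either operand is null, the result is decided by identity alone and the super `==` call is skipped. A hedged high-level analogue of the control flow (plain C++, not the generated code):

    // If either argument is null, equality degenerates to an identity test.
    // Returns true when a result was produced, false to fall through to the
    // real (super) equality call.
    bool SuperEqualityPrologue(const void* left, const void* right,
                               bool* result) {
      if (left == nullptr || right == nullptr) {
        *result = (left == right);
        return true;   // Skip the call.
      }
      return false;    // Fall through.
    }
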
-void FlowGraphCompiler::LoadDoubleOrSmiToXmm(XmmRegister result,
+void FlowGraphCompiler::LoadDoubleOrSmiToFpu(FpuRegister result,
                                              Register reg,
                                              Register temp,
                                              Label* not_double_or_smi) {
   Label is_smi, done;
   __ testq(reg, Immediate(kSmiTagMask));
   __ j(ZERO, &is_smi);
   __ CompareClassId(reg, kDoubleCid);
   __ j(NOT_EQUAL, not_double_or_smi);
   __ movsd(result, FieldAddress(reg, Double::value_offset()));
   __ jmp(&done);
   __ Bind(&is_smi);
   __ movq(temp, reg);
   __ SmiUntag(temp);
   __ cvtsi2sd(result, temp);
   __ Bind(&done);
 }


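LoadDoubleOrSmiToFpu relies on the VM's Smi tagging: small integers carry a 0 in the low bit and are stored shifted left by one, so `testq reg, kSmiTagMask` distinguishes them from heap-object pointers and untagging is an arithmetic shift right. A compact sketch of the scheme (constants mirror the x64 VM's convention, stated here as an assumption):

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;

    // Low bit 0 => Smi; low bit 1 => tagged heap object pointer.
    inline bool IsSmi(intptr_t raw) { return (raw & kSmiTagMask) == 0; }
    inline intptr_t SmiUntag(intptr_t raw) { return raw >> 1; }
    inline intptr_t SmiTag(intptr_t value) { return value << 1; }
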
 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
   // TODO(vegorov): consider saving only caller save (volatile) registers.
-  const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count();
+  const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
   if (xmm_regs_count > 0) {
     __ subq(RSP, Immediate(xmm_regs_count * kDoubleSize));
     // Store XMM registers with the lowest register number at the lowest
     // address.
     intptr_t offset = 0;
     for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
-      if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) {
+      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
         __ movsd(Address(RSP, offset), xmm_reg);
         offset += kDoubleSize;
       }
     }
     ASSERT(offset == (xmm_regs_count * kDoubleSize));
   }

   // Store general purpose registers with the highest register number at the
   // lowest address.
   for (intptr_t reg_idx = 0; reg_idx < kNumberOfCpuRegisters; ++reg_idx) {
     Register reg = static_cast<Register>(reg_idx);
     if (locs->live_registers()->ContainsRegister(reg)) {
       __ pushq(reg);
     }
   }
 }


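The `xmm_regs_count`/`ContainsXmmRegister` to `fpu_regs_count`/`ContainsFpuRegister` renames (like the `FpuRegister` parameter type above) serve the CL's stated goal of building for --arch=simarm: shared flow-graph code speaks of FPU registers generically, and each backend maps the name onto its native set. A hypothetical sketch of the shape of that abstraction (stand-in declarations, not the VM's actual headers):

    #if defined(TARGET_ARCH_X64)
    enum XmmRegister { XMM0 = 0, XMM1 = 1 /* ... */ };
    typedef XmmRegister FpuRegister;  // x64: FPU registers are XMM registers.
    #elif defined(TARGET_ARCH_ARM)
    enum DRegister { D0 = 0, D1 = 1 /* ... */ };
    typedef DRegister FpuRegister;    // ARM: VFP D registers fill the role.
    #endif
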
 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
   // General purpose registers have the highest register number at the
   // lowest address.
   for (intptr_t reg_idx = kNumberOfCpuRegisters - 1; reg_idx >= 0; --reg_idx) {
     Register reg = static_cast<Register>(reg_idx);
     if (locs->live_registers()->ContainsRegister(reg)) {
       __ popq(reg);
     }
   }

-  const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count();
+  const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
   if (xmm_regs_count > 0) {
     // XMM registers have the lowest register number at the lowest address.
     intptr_t offset = 0;
     for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
-      if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) {
+      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
         __ movsd(xmm_reg, Address(RSP, offset));
         offset += kDoubleSize;
       }
     }
     ASSERT(offset == (xmm_regs_count * kDoubleSize));
     __ addq(RSP, Immediate(offset));
   }
 }


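Save and restore must walk the register set in mirror-image order: SaveLiveRegisters pushes CPU registers in ascending index order, so RestoreLiveRegisters pops in descending order and each register comes back from the slot it went into. A toy demonstration of the invariant (hypothetical 4-register machine, not VM code):

    #include <stack>

    void SaveThenRestore(unsigned live_mask, long regs[4]) {
      std::stack<long> stack;
      for (int i = 0; i < 4; ++i) {    // Save: lowest index pushed first.
        if (live_mask & (1u << i)) stack.push(regs[i]);
      }
      for (int i = 3; i >= 0; --i) {   // Restore: highest index popped first.
        if (live_mask & (1u << i)) {
          regs[i] = stack.top();
          stack.pop();
        }
      }
    }
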
+struct CidTarget {
+  intptr_t cid;
+  Function* target;
+  intptr_t count;
+  CidTarget(intptr_t cid_arg,
+            Function* target_arg,
+            intptr_t count_arg)
+      : cid(cid_arg), target(target_arg), count(count_arg) {}
+};
+
+
+// TODO(regis): Make static member, move to shared FlowGraphCompiler, and merge
+// all 3 header files.
+// Returns 'sorted' array in decreasing count order.
+// The expected number of elements to sort is less than 10.
+static void SortICDataByCount(const ICData& ic_data,
+                              GrowableArray<CidTarget>* sorted) {
+  ASSERT(ic_data.num_args_tested() == 1);
+  const intptr_t len = ic_data.NumberOfChecks();
+  sorted->Clear();
+
+  for (int i = 0; i < len; i++) {
+    sorted->Add(CidTarget(ic_data.GetReceiverClassIdAt(i),
+                          &Function::ZoneHandle(ic_data.GetTargetAt(i)),
+                          ic_data.GetCountAt(i)));
+  }
+  for (int i = 0; i < len; i++) {
+    intptr_t largest_ix = i;
+    for (int k = i + 1; k < len; k++) {
+      if ((*sorted)[largest_ix].count < (*sorted)[k].count) {
+        largest_ix = k;
+      }
+    }
+    if (i != largest_ix) {
+      // Swap.
+      CidTarget temp = (*sorted)[i];
+      (*sorted)[i] = (*sorted)[largest_ix];
+      (*sorted)[largest_ix] = temp;
+    }
+  }
+}
+
+
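SortICDataByCount is a plain selection sort in decreasing count order: quadratic, but the comment bounds the input below 10 elements, so simplicity beats asymptotics. The same algorithm in standalone form (illustrative Entry type, not the VM's):

    #include <cstddef>
    #include <utility>
    #include <vector>

    struct Entry {
      long cid;
      long count;
    };

    // Selection sort, decreasing by count: find the largest remaining count
    // and swap it into position i.
    void SortByCountDescending(std::vector<Entry>* v) {
      for (size_t i = 0; i < v->size(); ++i) {
        size_t largest = i;
        for (size_t k = i + 1; k < v->size(); ++k) {
          if ((*v)[largest].count < (*v)[k].count) largest = k;
        }
        if (i != largest) std::swap((*v)[i], (*v)[largest]);
      }
    }
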
+void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
+                                        Register class_id_reg,
+                                        intptr_t arg_count,
+                                        const Array& arg_names,
+                                        Label* deopt,
+                                        intptr_t deopt_id,
+                                        intptr_t token_index,
+                                        LocationSummary* locs) {
+  ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0));
+  Label match_found;
+  const intptr_t len = ic_data.NumberOfChecks();
+  GrowableArray<CidTarget> sorted(len);
+  SortICDataByCount(ic_data, &sorted);
+  for (intptr_t i = 0; i < len; i++) {
+    const bool is_last_check = (i == (len - 1));
+    Label next_test;
+    assembler()->cmpl(class_id_reg, Immediate(sorted[i].cid));
+    if (is_last_check) {
+      assembler()->j(NOT_EQUAL, deopt);
+    } else {
+      assembler()->j(NOT_EQUAL, &next_test);
+    }
+    GenerateStaticCall(deopt_id,
+                       token_index,
+                       *sorted[i].target,
+                       arg_count,
+                       arg_names,
+                       locs);
+    if (!is_last_check) {
+      assembler()->jmp(&match_found);
+    }
+    assembler()->Bind(&next_test);
+  }
+  assembler()->Bind(&match_found);
+}
+
+
+void FlowGraphCompiler::EmitDoubleCompareBranch(Condition true_condition,
+                                                FpuRegister left,
+                                                FpuRegister right,
+                                                BranchInstr* branch) {
+  ASSERT(branch != NULL);
+  assembler()->comisd(left, right);
+  BlockEntryInstr* nan_result = (true_condition == NOT_EQUAL) ?
+      branch->true_successor() : branch->false_successor();
+  assembler()->j(PARITY_EVEN, GetBlockLabel(nan_result));
+  branch->EmitBranchOnCondition(this, true_condition);
+}
+
+
+void FlowGraphCompiler::EmitDoubleCompareBool(Condition true_condition,
+                                              FpuRegister left,
+                                              FpuRegister right,
+                                              Register result) {
+  assembler()->comisd(left, right);
+  Label is_false, is_true, done;
+  assembler()->j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN false;
+  assembler()->j(true_condition, &is_true, Assembler::kNearJump);
+  assembler()->Bind(&is_false);
+  assembler()->LoadObject(result, Bool::False());
+  assembler()->jmp(&done);
+  assembler()->Bind(&is_true);
+  assembler()->LoadObject(result, Bool::True());
+  assembler()->Bind(&done);
+}
+
+
+Condition FlowGraphCompiler::FlipCondition(Condition condition) {
+  switch (condition) {
+    case EQUAL: return EQUAL;
+    case NOT_EQUAL: return NOT_EQUAL;
+    case LESS: return GREATER;
+    case LESS_EQUAL: return GREATER_EQUAL;
+    case GREATER: return LESS;
+    case GREATER_EQUAL: return LESS_EQUAL;
+    case BELOW: return ABOVE;
+    case BELOW_EQUAL: return ABOVE_EQUAL;
+    case ABOVE: return BELOW;
+    case ABOVE_EQUAL: return BELOW_EQUAL;
+    default:
+      UNIMPLEMENTED();
+      return EQUAL;
+  }
+}
+
+
+bool FlowGraphCompiler::EvaluateCondition(Condition condition,
+                                          intptr_t left,
+                                          intptr_t right) {
+  const uintptr_t unsigned_left = static_cast<uintptr_t>(left);
+  const uintptr_t unsigned_right = static_cast<uintptr_t>(right);
+  switch (condition) {
+    case EQUAL: return left == right;
+    case NOT_EQUAL: return left != right;
+    case LESS: return left < right;
+    case LESS_EQUAL: return left <= right;
+    case GREATER: return left > right;
+    case GREATER_EQUAL: return left >= right;
+    case BELOW: return unsigned_left < unsigned_right;
+    case BELOW_EQUAL: return unsigned_left <= unsigned_right;
+    case ABOVE: return unsigned_left > unsigned_right;
+    case ABOVE_EQUAL: return unsigned_left >= unsigned_right;
+    default:
+      UNIMPLEMENTED();
+      return false;
+  }
+}
+
+
+FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
+                                                          Register array,
+                                                          intptr_t index) {
+  const int64_t disp =
+      static_cast<int64_t>(index) * ElementSizeFor(cid) + DataOffsetFor(cid);
+  ASSERT(Utils::IsInt(32, disp));
+  return FieldAddress(array, static_cast<int32_t>(disp));
+}
+
+
+FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
+                                                          Register array,
+                                                          Register index) {
+  // Note that index is smi-tagged, (i.e, times 2) for all arrays with element
+  // size > 1. For Uint8Array and OneByteString the index is expected to be
+  // untagged before accessing.
+  ASSERT(kSmiTagShift == 1);
+  switch (cid) {
+    case kArrayCid:
+    case kImmutableArrayCid:
+      return FieldAddress(
+          array, index, TIMES_HALF_WORD_SIZE, Array::data_offset());
+    case kFloat32ArrayCid:
+      return FieldAddress(array, index, TIMES_2, Float32Array::data_offset());
+    case kFloat64ArrayCid:
+      return FieldAddress(array, index, TIMES_4, Float64Array::data_offset());
+    case kUint8ArrayCid:
+      return FieldAddress(array, index, TIMES_1, Uint8Array::data_offset());
+    case kUint8ClampedArrayCid:
+      return
+          FieldAddress(array, index, TIMES_1, Uint8ClampedArray::data_offset());
+    case kOneByteStringCid:
+      return FieldAddress(array, index, TIMES_1, OneByteString::data_offset());
+    case kTwoByteStringCid:
+      return FieldAddress(array, index, TIMES_1, TwoByteString::data_offset());
+    default:
+      UNIMPLEMENTED();
+      return FieldAddress(SPREG, 0);
+  }
+}
+
+
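The halved scale factors follow from the smi-tagged index: the register already holds index*2, so an 8-byte Float64 element needs TIMES_4 (2*4 = 8 bytes per step), word-sized Array elements need TIMES_HALF_WORD_SIZE, and the byte-sized cases take an untagged index with TIMES_1. A worked sketch of the arithmetic (hypothetical helper, not VM code):

    #include <cstdint>

    // Effective displacement of a Float64 element given an untagged index:
    // the smi tag contributes a factor of 2, the TIMES_4 scale contributes 4,
    // and together they produce the 8-byte stride of a double.
    inline intptr_t Float64ElementDisp(intptr_t untagged_index,
                                       intptr_t data_offset) {
      const intptr_t tagged_index = untagged_index << 1;  // Smi-tag: * 2.
      return tagged_index * 4 + data_offset;              // * 8 overall.
    }
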
 #undef __
 #define __ compiler_->assembler()->


 void ParallelMoveResolver::EmitMove(int index) {
   MoveOperands* move = moves_[index];
   const Location source = move->src();
   const Location destination = move->dest();

   if (source.IsRegister()) {
     if (destination.IsRegister()) {
       __ movq(destination.reg(), source.reg());
     } else {
       ASSERT(destination.IsStackSlot());
       __ movq(destination.ToStackSlotAddress(), source.reg());
     }
   } else if (source.IsStackSlot()) {
     if (destination.IsRegister()) {
       __ movq(destination.reg(), source.ToStackSlotAddress());
     } else {
       ASSERT(destination.IsStackSlot());
       MoveMemoryToMemory(destination.ToStackSlotAddress(),
                          source.ToStackSlotAddress());
     }
-  } else if (source.IsXmmRegister()) {
-    if (destination.IsXmmRegister()) {
+  } else if (source.IsFpuRegister()) {
+    if (destination.IsFpuRegister()) {
       // Optimization manual recommends using MOVAPS for register
       // to register moves.
-      __ movaps(destination.xmm_reg(), source.xmm_reg());
+      __ movaps(destination.fpu_reg(), source.fpu_reg());
     } else {
       ASSERT(destination.IsDoubleStackSlot());
-      __ movsd(destination.ToStackSlotAddress(), source.xmm_reg());
+      __ movsd(destination.ToStackSlotAddress(), source.fpu_reg());
     }
   } else if (source.IsDoubleStackSlot()) {
-    if (destination.IsXmmRegister()) {
-      __ movsd(destination.xmm_reg(), source.ToStackSlotAddress());
+    if (destination.IsFpuRegister()) {
+      __ movsd(destination.fpu_reg(), source.ToStackSlotAddress());
     } else {
       ASSERT(destination.IsDoubleStackSlot());
       __ movsd(XMM0, source.ToStackSlotAddress());
       __ movsd(destination.ToStackSlotAddress(), XMM0);
     }
   } else {
     ASSERT(source.IsConstant());
     if (destination.IsRegister()) {
       const Object& constant = source.constant();
       if (constant.IsSmi() && (Smi::Cast(constant).Value() == 0)) {
(...skipping 17 matching lines...)
   const Location destination = move->dest();

   if (source.IsRegister() && destination.IsRegister()) {
     __ xchgq(destination.reg(), source.reg());
   } else if (source.IsRegister() && destination.IsStackSlot()) {
     Exchange(source.reg(), destination.ToStackSlotAddress());
   } else if (source.IsStackSlot() && destination.IsRegister()) {
     Exchange(destination.reg(), source.ToStackSlotAddress());
   } else if (source.IsStackSlot() && destination.IsStackSlot()) {
     Exchange(destination.ToStackSlotAddress(), source.ToStackSlotAddress());
-  } else if (source.IsXmmRegister() && destination.IsXmmRegister()) {
-    __ movaps(XMM0, source.xmm_reg());
-    __ movaps(source.xmm_reg(), destination.xmm_reg());
-    __ movaps(destination.xmm_reg(), XMM0);
-  } else if (source.IsXmmRegister() || destination.IsXmmRegister()) {
+  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
+    __ movaps(XMM0, source.fpu_reg());
+    __ movaps(source.fpu_reg(), destination.fpu_reg());
+    __ movaps(destination.fpu_reg(), XMM0);
+  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
     ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot());
-    XmmRegister reg = source.IsXmmRegister() ? source.xmm_reg()
-                                             : destination.xmm_reg();
-    Address slot_address = source.IsXmmRegister()
+    XmmRegister reg = source.IsFpuRegister() ? source.fpu_reg()
+                                             : destination.fpu_reg();
+    Address slot_address = source.IsFpuRegister()
                                ? destination.ToStackSlotAddress()
                                : source.ToStackSlotAddress();

     __ movsd(XMM0, slot_address);
     __ movsd(slot_address, reg);
     __ movaps(reg, XMM0);
   } else {
     UNREACHABLE();
   }

(...skipping 34 matching lines...)
 void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
   __ Exchange(mem1, mem2);
 }


 #undef __

 }  // namespace dart

 #endif  // defined TARGET_ARCH_X64