Chromium Code Reviews

Unified Diff: runtime/vm/flow_graph_compiler_ia32.cc

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
--- a/runtime/vm/flow_graph_compiler_ia32.cc
+++ b/runtime/vm/flow_graph_compiler_ia32.cc
-// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_IA32.
 #if defined(TARGET_ARCH_IA32)
 
 #include "vm/flow_graph_compiler.h"
 
 #include "lib/error.h"
 #include "vm/ast_printer.h"
 #include "vm/dart_entry.h"
 #include "vm/il_printer.h"
 #include "vm/locations.h"
 #include "vm/object_store.h"
 #include "vm/parser.h"
 #include "vm/stub_code.h"
 #include "vm/symbols.h"
 
 namespace dart {
 
 DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
 DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
 DECLARE_FLAG(int, optimization_counter_threshold);
 DECLARE_FLAG(bool, print_ast);
 DECLARE_FLAG(bool, print_scopes);
 
 
+FlowGraphCompiler::~FlowGraphCompiler() {
+  // BlockInfos are zone-allocated, so their destructors are not called.
+  // Verify the labels explicitly here.
+  for (int i = 0; i < block_info_.length(); ++i) {
+    ASSERT(!block_info_[i]->label.IsLinked());
+    ASSERT(!block_info_[i]->label.HasNear());
+  }
+}
+
+
 bool FlowGraphCompiler::SupportsUnboxedMints() {
   // Support unboxed mints when SSE 4.1 is available.
   return FLAG_unbox_mints && CPUFeatures::sse4_1_supported();
 }
 
 
 void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                              intptr_t stub_ix) {
   // Calls do not need stubs, they share a deoptimization trampoline.
   ASSERT(reason() != kDeoptAtCall);
(...skipping 11 matching lines...)
 }
 
 
 #define __ assembler()->
 
 
 // Fall through if bool_register contains null.
 void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                            Label* is_true,
                                            Label* is_false) {
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label fall_through;
   __ cmpl(bool_register, raw_null);
   __ j(EQUAL, &fall_through, Assembler::kNearJump);
   __ CompareObject(bool_register, Bool::True());
   __ j(EQUAL, is_true);
   __ jmp(is_false);
   __ Bind(&fall_through);
 }
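
GenerateBoolToJump emits a three-way dispatch on a Bool register: null falls through, the canonical true object branches to is_true, and anything else branches to is_false. A minimal C++ sketch of that shape (hypothetical ClassifyBool helper, not VM code):

enum class BoolDispatch { kFallThrough, kTrue, kFalse };

// Null falls through; the canonical true object jumps to is_true;
// any other value (i.e. the canonical false object) jumps to is_false.
BoolDispatch ClassifyBool(const void* value,
                          const void* null_obj,
                          const void* true_obj) {
  if (value == null_obj) return BoolDispatch::kFallThrough;
  if (value == true_obj) return BoolDispatch::kTrue;
  return BoolDispatch::kFalse;
}
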
 
 
 // Clobbers ECX.
 RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
     TypeTestStubKind test_kind,
     Register instance_reg,
     Register type_arguments_reg,
     Register temp_reg,
     Label* is_instance_lbl,
     Label* is_not_instance_lbl) {
   const SubtypeTestCache& type_test_cache =
       SubtypeTestCache::ZoneHandle(SubtypeTestCache::New());
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   __ LoadObject(temp_reg, type_test_cache);
   __ pushl(temp_reg);  // Subtype test cache.
   __ pushl(instance_reg);  // Instance.
   if (test_kind == kTestTypeOneArg) {
     ASSERT(type_arguments_reg == kNoRegister);
     __ pushl(raw_null);
     __ call(&StubCode::Subtype1TestCacheLabel());
   } else if (test_kind == kTestTypeTwoArgs) {
     ASSERT(type_arguments_reg == kNoRegister);
(...skipping 127 matching lines...)
   // Bool interface can be implemented only by core class Bool.
   // (see ClassFinalizer::ResolveInterfaces for list of restricted interfaces).
   if (type.IsBoolType()) {
     __ cmpl(kClassIdReg, Immediate(kBoolCid));
     __ j(EQUAL, is_instance_lbl);
     __ jmp(is_not_instance_lbl);
     return false;
   }
   if (type.IsFunctionType()) {
     // Check if instance is a closure.
-    const Immediate raw_null =
+    const Immediate& raw_null =
         Immediate(reinterpret_cast<intptr_t>(Object::null()));
     __ LoadClassById(EDI, kClassIdReg);
     __ movl(EDI, FieldAddress(EDI, Class::signature_function_offset()));
     __ cmpl(EDI, raw_null);
     __ j(NOT_EQUAL, is_instance_lbl);
   }
   // Custom checking for numbers (Smi, Mint, Bigint and Double).
   // Note that instance is not Smi (checked above).
   if (type.IsSubtypeOf(Type::Handle(Type::Number()), NULL)) {
     GenerateNumberTypeCheck(
(...skipping 46 matching lines...)
 // EAX: instance (preserved).
 // Clobbers EDX, EDI, ECX.
 RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
     intptr_t token_pos,
     const AbstractType& type,
     Label* is_instance_lbl,
     Label* is_not_instance_lbl) {
   __ Comment("UninstantiatedTypeTest");
   ASSERT(!type.IsInstantiated());
   // Skip check if destination is a dynamic type.
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   if (type.IsTypeParameter()) {
     const TypeParameter& type_param = TypeParameter::Cast(type);
     // Load instantiator (or null) and instantiator type arguments on stack.
     __ movl(EDX, Address(ESP, 0));  // Get instantiator type arguments.
     // EDX: instantiator type arguments.
     // Check if type argument is dynamic.
     __ cmpl(EDX, raw_null);
     __ j(EQUAL, is_instance_lbl);
     // Can handle only type arguments that are instances of TypeArguments.
(...skipping 139 matching lines...)
 // - ECX: instantiator or raw_null.
 // Clobbers ECX and EDX.
 // Returns:
 // - true or false in EAX.
 void FlowGraphCompiler::GenerateInstanceOf(intptr_t token_pos,
                                            const AbstractType& type,
                                            bool negate_result,
                                            LocationSummary* locs) {
   ASSERT(type.IsFinalized() && !type.IsMalformed());
 
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label is_instance, is_not_instance;
   __ pushl(ECX);  // Store instantiator on stack.
   __ pushl(EDX);  // Store instantiator type arguments.
   // If type is instantiated and non-parameterized, we can inline code
   // checking whether the tested instance is a Smi.
   if (type.IsInstantiated()) {
     // A null object is only an instance of Object and dynamic, which has
     // already been checked above (if the type is instantiated). So we can
     // return false here if the instance is null (and if the type is
(...skipping 68 matching lines...)
                                            LocationSummary* locs) {
   ASSERT(token_pos >= 0);
   ASSERT(!dst_type.IsNull());
   ASSERT(dst_type.IsFinalized());
   // Assignable check is skipped in FlowGraphBuilder, not here.
   ASSERT(dst_type.IsMalformed() ||
          (!dst_type.IsDynamicType() && !dst_type.IsObjectType()));
   __ pushl(ECX);  // Store instantiator.
   __ pushl(EDX);  // Store instantiator type arguments.
   // A null object is always assignable and is returned as result.
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label is_assignable, runtime_call;
   __ cmpl(EAX, raw_null);
   __ j(EQUAL, &is_assignable);
 
   // Generate throw new TypeError() if the type is malformed.
   if (dst_type.IsMalformed()) {
     const Error& error = Error::Handle(dst_type.malformed_error());
     const String& error_message = String::ZoneHandle(
         Symbols::New(error.ToErrorCString()));
(...skipping 111 matching lines...)
   const Address argument_addr(EBX, ECX, TIMES_4, 0);
   const Address copy_addr(EDI, ECX, TIMES_4, 0);
   __ Bind(&loop);
   __ movl(EAX, argument_addr);
   __ movl(copy_addr, EAX);
   __ Bind(&loop_condition);
   __ decl(ECX);
   __ j(POSITIVE, &loop, Assembler::kNearJump);
 
   // Copy or initialize optional named arguments.
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label all_arguments_processed;
   if (num_opt_named_params > 0) {
     // Start by alphabetically sorting the names of the optional parameters.
     LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
     int* opt_param_position = new int[num_opt_named_params];
     for (int pos = num_fixed_params; pos < num_params; pos++) {
       LocalVariable* parameter = scope->VariableAt(pos);
       const String& opt_param_name = parameter->name();
       int i = pos - num_fixed_params;
(...skipping 161 matching lines...)
 
 
 void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
   // TOS: return address.
   // +1 : value
   // +2 : receiver.
   // Sequence node has one store node and one return NULL node.
   __ movl(EAX, Address(ESP, 2 * kWordSize));  // Receiver.
   __ movl(EBX, Address(ESP, 1 * kWordSize));  // Value.
   __ StoreIntoObject(EAX, FieldAddress(EAX, offset), EBX);
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   __ movl(EAX, raw_null);
   __ ret();
 }
 
 
 void FlowGraphCompiler::EmitFrameEntry() {
   const Function& function = parsed_function().function();
   if (CanOptimizeFunction() && function.is_optimizable()) {
     const bool can_optimize = !is_optimizing() || may_reoptimize();
(...skipping 134 matching lines...)
       __ movl(Address(EBP, slot * kWordSize), kArgumentsDescriptorReg);
     }
     CopyParameters();
   }
 
   // In unoptimized code, initialize (non-argument) stack allocated slots to
   // null. This does not cover the saved_args_desc_var slot.
   if (!is_optimizing() && (num_locals > 0)) {
     __ Comment("Initialize spill slots");
     const intptr_t slot_base = parsed_function().first_stack_local_index();
-    const Immediate raw_null =
+    const Immediate& raw_null =
         Immediate(reinterpret_cast<intptr_t>(Object::null()));
     __ movl(EAX, raw_null);
     for (intptr_t i = 0; i < num_locals; ++i) {
       // Subtract index i (locals lie at lower addresses than EBP).
       __ movl(Address(EBP, (slot_base - i) * kWordSize), EAX);
     }
   }
 
   if (FLAG_print_scopes) {
     // Print the function scope (again) after generating the prologue in order
(...skipping 234 matching lines...)
   } else {
     __ cmpl(left, right);
   }
 }
 
 
 // Implement equality spec: if any of the arguments is null do identity check.
 // Fallthrough calls super equality.
 void FlowGraphCompiler::EmitSuperEqualityCallPrologue(Register result,
                                                       Label* skip_call) {
-  const Immediate raw_null =
+  const Immediate& raw_null =
       Immediate(reinterpret_cast<intptr_t>(Object::null()));
   Label check_identity, fall_through;
   __ cmpl(Address(ESP, 0 * kWordSize), raw_null);
   __ j(EQUAL, &check_identity, Assembler::kNearJump);
   __ cmpl(Address(ESP, 1 * kWordSize), raw_null);
   __ j(NOT_EQUAL, &fall_through, Assembler::kNearJump);
 
   __ Bind(&check_identity);
   __ popl(result);
   __ cmpl(result, Address(ESP, 0 * kWordSize));
   Label is_false;
   __ j(NOT_EQUAL, &is_false, Assembler::kNearJump);
   __ LoadObject(result, Bool::True());
   __ Drop(1);
   __ jmp(skip_call);
   __ Bind(&is_false);
   __ LoadObject(result, Bool::False());
   __ Drop(1);
   __ jmp(skip_call);
   __ Bind(&fall_through);
 }
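
In other words, the prologue short-circuits the call whenever either stack operand is null, reducing equality to identity in that case. A minimal sketch of the intended semantics (hypothetical helper; the real code operates on the two operands at the top of the stack):

// Returns true when the call can be skipped, leaving the answer in *result.
bool SuperEqualityPrologue(const void* a, const void* b, bool* result) {
  if (a != nullptr && b != nullptr) {
    return false;        // Fall through: invoke the super equality call.
  }
  *result = (a == b);    // Identity check when either side is null.
  return true;           // skip_call
}
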
 
 
-void FlowGraphCompiler::LoadDoubleOrSmiToXmm(XmmRegister result,
+void FlowGraphCompiler::LoadDoubleOrSmiToFpu(FpuRegister result,
                                              Register reg,
                                              Register temp,
                                              Label* not_double_or_smi) {
   Label is_smi, done;
   __ testl(reg, Immediate(kSmiTagMask));
   __ j(ZERO, &is_smi);
   __ CompareClassId(reg, kDoubleCid, temp);
   __ j(NOT_EQUAL, not_double_or_smi);
   __ movsd(result, FieldAddress(reg, Double::value_offset()));
   __ jmp(&done);
   __ Bind(&is_smi);
   __ movl(temp, reg);
   __ SmiUntag(temp);
   __ cvtsi2sd(result, temp);
   __ Bind(&done);
 }
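
The helper accepts either representation of a number: a boxed Double, whose payload is loaded with movsd, or a Smi, which is untagged and converted with cvtsi2sd. A semantic sketch, assuming the one-bit Smi tag in the low bit (zero means Smi, matching the j(ZERO, &is_smi) above):

#include <cstdint>

// raw is the untyped word: a Smi has a 0 tag bit and stores 2 * value;
// boxed_value stands in for the Double's payload in this sketch.
double DoubleOrSmiValue(intptr_t raw, double boxed_value) {
  const intptr_t kSmiTagMask = 1;
  if ((raw & kSmiTagMask) == 0) {
    return static_cast<double>(raw >> 1);  // SmiUntag, then cvtsi2sd.
  }
  return boxed_value;  // movsd from Double::value_offset().
}
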
 
 
 void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
   // TODO(vegorov): consider saving only caller save (volatile) registers.
-  const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count();
+  const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
   if (xmm_regs_count > 0) {
     __ subl(ESP, Immediate(xmm_regs_count * kDoubleSize));
     // Store XMM registers with the lowest register number at the lowest
     // address.
     intptr_t offset = 0;
     for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
-      if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) {
+      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
         __ movsd(Address(ESP, offset), xmm_reg);
         offset += kDoubleSize;
       }
     }
     ASSERT(offset == (xmm_regs_count * kDoubleSize));
   }
 
   // Store general purpose registers with the highest register number at the
   // lowest address.
   for (intptr_t reg_idx = 0; reg_idx < kNumberOfCpuRegisters; ++reg_idx) {
     Register reg = static_cast<Register>(reg_idx);
     if (locs->live_registers()->ContainsRegister(reg)) {
       __ pushl(reg);
     }
   }
 }
 
 
 void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
   // General purpose registers have the highest register number at the
   // lowest address.
   for (intptr_t reg_idx = kNumberOfCpuRegisters - 1; reg_idx >= 0; --reg_idx) {
     Register reg = static_cast<Register>(reg_idx);
     if (locs->live_registers()->ContainsRegister(reg)) {
       __ popl(reg);
     }
   }
 
-  const intptr_t xmm_regs_count = locs->live_registers()->xmm_regs_count();
+  const intptr_t xmm_regs_count = locs->live_registers()->fpu_regs_count();
   if (xmm_regs_count > 0) {
     // XMM registers have the lowest register number at the lowest address.
     intptr_t offset = 0;
     for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
       XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
-      if (locs->live_registers()->ContainsXmmRegister(xmm_reg)) {
+      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
         __ movsd(xmm_reg, Address(ESP, offset));
         offset += kDoubleSize;
       }
     }
     ASSERT(offset == (xmm_regs_count * kDoubleSize));
     __ addl(ESP, Immediate(offset));
   }
 }
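
Save and restore must walk the register sets in mirrored order for the stack to balance: pushl in ascending register index leaves the highest-numbered register at the lowest address, so popl runs the indices in descending order, and the XMM block below the pushes is written and read back at identical offsets. A small self-contained sketch of that invariant:

#include <cassert>
#include <vector>

int main() {
  const std::vector<int> live = {0, 2, 5};  // Hypothetical live register set.
  std::vector<int> stack;
  for (int r : live) stack.push_back(r);    // SaveLiveRegisters: ascending.
  for (auto it = live.rbegin(); it != live.rend(); ++it) {
    assert(stack.back() == *it);            // RestoreLiveRegisters: descending.
    stack.pop_back();
  }
  assert(stack.empty());                    // ESP is back where it started.
  return 0;
}
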
 
 
+void FlowGraphCompiler::EmitTestAndCall(const ICData& ic_data,
+                                        Register class_id_reg,
+                                        intptr_t arg_count,
+                                        const Array& arg_names,
+                                        Label* deopt,
+                                        intptr_t deopt_id,
+                                        intptr_t token_index,
+                                        LocationSummary* locs) {
+  ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0));
+  Label match_found;
+  const intptr_t len = ic_data.NumberOfChecks();
+  GrowableArray<CidTarget> sorted(len);
+  SortICDataByCount(ic_data, &sorted);
+  for (intptr_t i = 0; i < len; i++) {
+    const bool is_last_check = (i == (len - 1));
+    Label next_test;
+    assembler()->cmpl(class_id_reg, Immediate(sorted[i].cid));
+    if (is_last_check) {
+      assembler()->j(NOT_EQUAL, deopt);
+    } else {
+      assembler()->j(NOT_EQUAL, &next_test);
+    }
+    GenerateStaticCall(deopt_id,
+                       token_index,
+                       *sorted[i].target,
+                       arg_count,
+                       arg_names,
+                       locs);
+    if (!is_last_check) {
+      assembler()->jmp(&match_found);
+    }
+    assembler()->Bind(&next_test);
+  }
+  assembler()->Bind(&match_found);
+}
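
The emitted sequence is a linear class-id dispatch, ordered by call-site frequency so the hottest receiver class is tested first; only the final mismatch deoptimizes. A sketch of the dispatch shape (plain C++ model, not the emitted code):

#include <cstddef>
#include <cstdint>
#include <vector>

// Returns the index of the matching check, or -1 for the deopt path.
int TestAndCall(intptr_t class_id, const std::vector<intptr_t>& sorted_cids) {
  for (std::size_t i = 0; i < sorted_cids.size(); ++i) {
    if (class_id == sorted_cids[i]) {
      return static_cast<int>(i);  // GenerateStaticCall on target i.
    }
  }
  return -1;  // The last check failed: jump to deopt.
}
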
+
+
+void FlowGraphCompiler::EmitDoubleCompareBranch(Condition true_condition,
+                                                FpuRegister left,
+                                                FpuRegister right,
+                                                BranchInstr* branch) {
+  ASSERT(branch != NULL);
+  assembler()->comisd(left, right);
+  BlockEntryInstr* nan_result = (true_condition == NOT_EQUAL) ?
+      branch->true_successor() : branch->false_successor();
+  assembler()->j(PARITY_EVEN, GetBlockLabel(nan_result));
+  branch->EmitBranchOnCondition(this, true_condition);
+}
+
+
+
+void FlowGraphCompiler::EmitDoubleCompareBool(Condition true_condition,
+                                              FpuRegister left,
+                                              FpuRegister right,
+                                              Register result) {
+  assembler()->comisd(left, right);
+  Label is_false, is_true, done;
+  assembler()->j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN false;
+  assembler()->j(true_condition, &is_true, Assembler::kNearJump);
+  assembler()->Bind(&is_false);
+  assembler()->LoadObject(result, Bool::False());
+  assembler()->jmp(&done);
+  assembler()->Bind(&is_true);
+  assembler()->LoadObject(result, Bool::True());
+  assembler()->Bind(&done);
+}
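
comisd raises the parity flag for unordered operands, so a NaN input takes the PARITY_EVEN branch: the bool variant answers false, and the branch variant routes NaN to the true successor only for NOT_EQUAL. That matches IEEE-754 comparison semantics as observable in C++:

#include <cassert>
#include <cmath>

int main() {
  const double nan = std::nan("");
  assert(!(nan == 1.0));  // EQUAL with a NaN operand is false.
  assert(!(nan < 1.0));   // Ordered comparisons with NaN are false.
  assert(nan != 1.0);     // NOT_EQUAL is the one condition NaN satisfies.
  return 0;
}
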
+
+
+Condition FlowGraphCompiler::FlipCondition(Condition condition) {
+  switch (condition) {
+    case EQUAL: return EQUAL;
+    case NOT_EQUAL: return NOT_EQUAL;
+    case LESS: return GREATER;
+    case LESS_EQUAL: return GREATER_EQUAL;
+    case GREATER: return LESS;
+    case GREATER_EQUAL: return LESS_EQUAL;
+    case BELOW: return ABOVE;
+    case BELOW_EQUAL: return ABOVE_EQUAL;
+    case ABOVE: return BELOW;
+    case ABOVE_EQUAL: return BELOW_EQUAL;
+    default:
+      UNIMPLEMENTED();
+      return EQUAL;
+  }
+}
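
Note that flipping compensates for swapped operands rather than negating the condition: EQUAL and NOT_EQUAL are symmetric and map to themselves, while LESS becomes GREATER. A tiny self-contained model of the property:

#include <cassert>

enum Cond { kLess, kGreater };

Cond Flip(Cond c) { return (c == kLess) ? kGreater : kLess; }
bool Eval(Cond c, int a, int b) { return (c == kLess) ? (a < b) : (a > b); }

int main() {
  // a OP b is equivalent to b Flip(OP) a.
  assert(Eval(kLess, 1, 2) == Eval(Flip(kLess), 2, 1));
  assert(Eval(kGreater, 1, 2) == Eval(Flip(kGreater), 2, 1));
  return 0;
}
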
+
+
+bool FlowGraphCompiler::EvaluateCondition(Condition condition,
+                                          intptr_t left,
+                                          intptr_t right) {
+  const uintptr_t unsigned_left = static_cast<uintptr_t>(left);
+  const uintptr_t unsigned_right = static_cast<uintptr_t>(right);
+  switch (condition) {
+    case EQUAL: return left == right;
+    case NOT_EQUAL: return left != right;
+    case LESS: return left < right;
+    case LESS_EQUAL: return left <= right;
+    case GREATER: return left > right;
+    case GREATER_EQUAL: return left >= right;
+    case BELOW: return unsigned_left < unsigned_right;
+    case BELOW_EQUAL: return unsigned_left <= unsigned_right;
+    case ABOVE: return unsigned_left > unsigned_right;
+    case ABOVE_EQUAL: return unsigned_left >= unsigned_right;
+    default:
+      UNIMPLEMENTED();
+      return false;
+  }
+}
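
The casts matter for the unsigned conditions: BELOW and ABOVE compare the same bit patterns as LESS and GREATER but reinterpreted as uintptr_t, so a negative left operand becomes the largest unsigned value. For example:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t left = -1;
  const intptr_t right = 1;
  assert(left < right);  // LESS: signed comparison, -1 < 1.
  // BELOW: -1 reinterprets as 0xFFFFFFFF (on ia32), the largest uintptr_t.
  assert(!(static_cast<uintptr_t>(left) < static_cast<uintptr_t>(right)));
  return 0;
}
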
+
+
+FieldAddress FlowGraphCompiler::ElementAddressForIntIndex(intptr_t cid,
+                                                          Register array,
+                                                          intptr_t index) {
+  const int64_t disp =
+      static_cast<int64_t>(index) * ElementSizeFor(cid) + DataOffsetFor(cid);
+  ASSERT(Utils::IsInt(32, disp));
+  return FieldAddress(array, static_cast<int32_t>(disp));
+}
+
+
+FieldAddress FlowGraphCompiler::ElementAddressForRegIndex(intptr_t cid,
+                                                          Register array,
+                                                          Register index) {
+  // Note that index is smi-tagged, (i.e, times 2) for all arrays with element
+  // size > 1. For Uint8Array and OneByteString the index is expected to be
+  // untagged before accessing.
+  ASSERT(kSmiTagShift == 1);
+  switch (cid) {
+    case kArrayCid:
+    case kImmutableArrayCid:
+      return FieldAddress(
+          array, index, TIMES_HALF_WORD_SIZE, Array::data_offset());
+    case kFloat32ArrayCid:
+      return FieldAddress(array, index, TIMES_2, Float32Array::data_offset());
+    case kFloat64ArrayCid:
+      return FieldAddress(array, index, TIMES_4, Float64Array::data_offset());
+    case kInt8ArrayCid:
+      return FieldAddress(array, index, TIMES_1, Int8Array::data_offset());
+    case kUint8ArrayCid:
+      return FieldAddress(array, index, TIMES_1, Uint8Array::data_offset());
+    case kUint8ClampedArrayCid:
+      return
+          FieldAddress(array, index, TIMES_1, Uint8ClampedArray::data_offset());
+    case kInt16ArrayCid:
+      return FieldAddress(array, index, TIMES_1, Int16Array::data_offset());
+    case kUint16ArrayCid:
+      return FieldAddress(array, index, TIMES_1, Uint16Array::data_offset());
+    case kOneByteStringCid:
+      return FieldAddress(array, index, TIMES_1, OneByteString::data_offset());
+    case kTwoByteStringCid:
+      return FieldAddress(array, index, TIMES_1, TwoByteString::data_offset());
+    default:
+      UNIMPLEMENTED();
+      return FieldAddress(SPREG, 0);
+  }
+}
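
Because the index register holds the smi-tagged value 2 * i (kSmiTagShift == 1), the hardware scale factor is element_size / 2 for element sizes greater than one: TIMES_HALF_WORD_SIZE for word-sized Array slots, TIMES_2 for 4-byte and TIMES_4 for 8-byte elements, while byte-sized arrays untag the index first. The arithmetic checks out:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t i = 3;               // Logical element index.
  const intptr_t smi_index = i << 1;  // Smi-tagged index: 2 * i.
  assert(smi_index * 2 == i * 4);     // TIMES_2 addresses 4-byte Float32 slots.
  assert(smi_index * 4 == i * 8);     // TIMES_4 addresses 8-byte Float64 slots.
  assert((smi_index >> 1) * 1 == i);  // Byte arrays untag, then TIMES_1.
  return 0;
}
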
+
+
+Address FlowGraphCompiler::ExternalElementAddressForIntIndex(intptr_t cid,
+                                                             Register array,
+                                                             intptr_t index) {
+  return Address(array, index * ElementSizeFor(cid));
+}
+
+
+Address FlowGraphCompiler::ExternalElementAddressForRegIndex(intptr_t cid,
+                                                             Register array,
+                                                             Register index) {
+  switch (cid) {
+    case kExternalUint8ArrayCid:
+      return Address(array, index, TIMES_1, 0);
+    default:
+      UNIMPLEMENTED();
+      return Address(SPREG, 0);
+  }
+}
+
+
 #undef __
 #define __ compiler_->assembler()->
 
 
 void ParallelMoveResolver::EmitMove(int index) {
   MoveOperands* move = moves_[index];
   const Location source = move->src();
   const Location destination = move->dest();
 
   if (source.IsRegister()) {
     if (destination.IsRegister()) {
       __ movl(destination.reg(), source.reg());
     } else {
       ASSERT(destination.IsStackSlot());
       __ movl(destination.ToStackSlotAddress(), source.reg());
     }
   } else if (source.IsStackSlot()) {
     if (destination.IsRegister()) {
       __ movl(destination.reg(), source.ToStackSlotAddress());
     } else {
       ASSERT(destination.IsStackSlot());
       MoveMemoryToMemory(destination.ToStackSlotAddress(),
                          source.ToStackSlotAddress());
     }
-  } else if (source.IsXmmRegister()) {
-    if (destination.IsXmmRegister()) {
+  } else if (source.IsFpuRegister()) {
+    if (destination.IsFpuRegister()) {
       // Optimization manual recommends using MOVAPS for register
       // to register moves.
-      __ movaps(destination.xmm_reg(), source.xmm_reg());
+      __ movaps(destination.fpu_reg(), source.fpu_reg());
     } else {
       ASSERT(destination.IsDoubleStackSlot());
-      __ movsd(destination.ToStackSlotAddress(), source.xmm_reg());
+      __ movsd(destination.ToStackSlotAddress(), source.fpu_reg());
     }
   } else if (source.IsDoubleStackSlot()) {
-    if (destination.IsXmmRegister()) {
-      __ movsd(destination.xmm_reg(), source.ToStackSlotAddress());
+    if (destination.IsFpuRegister()) {
+      __ movsd(destination.fpu_reg(), source.ToStackSlotAddress());
     } else {
       ASSERT(destination.IsDoubleStackSlot());
       __ movsd(XMM0, source.ToStackSlotAddress());
       __ movsd(destination.ToStackSlotAddress(), XMM0);
     }
   } else {
     ASSERT(source.IsConstant());
     if (destination.IsRegister()) {
       const Object& constant = source.constant();
       if (constant.IsSmi() && (Smi::Cast(constant).Value() == 0)) {
(...skipping 17 matching lines...)
   const Location destination = move->dest();
 
   if (source.IsRegister() && destination.IsRegister()) {
     __ xchgl(destination.reg(), source.reg());
   } else if (source.IsRegister() && destination.IsStackSlot()) {
     Exchange(source.reg(), destination.ToStackSlotAddress());
   } else if (source.IsStackSlot() && destination.IsRegister()) {
     Exchange(destination.reg(), source.ToStackSlotAddress());
   } else if (source.IsStackSlot() && destination.IsStackSlot()) {
     Exchange(destination.ToStackSlotAddress(), source.ToStackSlotAddress());
-  } else if (source.IsXmmRegister() && destination.IsXmmRegister()) {
-    __ movaps(XMM0, source.xmm_reg());
-    __ movaps(source.xmm_reg(), destination.xmm_reg());
-    __ movaps(destination.xmm_reg(), XMM0);
-  } else if (source.IsXmmRegister() || destination.IsXmmRegister()) {
+  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
+    __ movaps(XMM0, source.fpu_reg());
+    __ movaps(source.fpu_reg(), destination.fpu_reg());
+    __ movaps(destination.fpu_reg(), XMM0);
+  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
     ASSERT(destination.IsDoubleStackSlot() || source.IsDoubleStackSlot());
-    XmmRegister reg = source.IsXmmRegister() ? source.xmm_reg()
-                                             : destination.xmm_reg();
-    Address slot_address = source.IsXmmRegister()
-        ? destination.ToStackSlotAddress()
-        : source.ToStackSlotAddress();
+    XmmRegister reg = source.IsFpuRegister() ? source.fpu_reg()
+                                             : destination.fpu_reg();
+    const Address& slot_address = source.IsFpuRegister()
+        ? destination.ToStackSlotAddress()
+        : source.ToStackSlotAddress();
 
     __ movsd(XMM0, slot_address);
     __ movsd(slot_address, reg);
     __ movaps(reg, XMM0);
   } else {
     UNREACHABLE();
   }
 
(...skipping 60 matching lines...)
   __ popl(ECX);
   __ popl(EAX);
 }
 
 
 #undef __
 
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_IA32