Chromium Code Reviews

Side by Side Diff: runtime/vm/intermediate_language_x64.cc

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
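
The bulk of this patch renames XMM-specific names in the x64 backend to architecture-neutral FPU names so the instruction-selection code can be shared with the new simarm backend: Location::RequiresXmmRegister() becomes Location::RequiresFpuRegister(), Location::xmm_reg() becomes Location::fpu_reg(), and LoadDoubleOrSmiToXmm becomes LoadDoubleOrSmiToFpu, while the emitted x64 instructions (movsd, movss, cvtsd2ss, sqrtsd, ...) are unchanged. A minimal standalone sketch of that idea, using hypothetical stand-in types rather than the VM's real Location and assembler classes:

// Hypothetical stand-ins, not the Dart VM's actual classes: illustrates how a
// generic "FPU register" constraint lets shared code stay architecture-neutral
// while each backend maps it onto its own register file.
#include <cstdio>

#if defined(TARGET_ARCH_X64)
enum FpuRegister { XMM0, XMM1, XMM2 };   // x64 backend: SSE registers.
#else
enum FpuRegister { D0, D1, D2 };         // simarm backend: VFP double registers.
#endif

class Location {
 public:
  // Architecture-neutral constraint, replacing RequiresXmmRegister().
  static Location RequiresFpuRegister() { return Location(kFpuRegister); }
  bool IsFpuRegister() const { return kind_ == kFpuRegister; }

 private:
  enum Kind { kFpuRegister };
  explicit Location(Kind kind) : kind_(kind) {}
  Kind kind_;
};

int main() {
  const Location loc = Location::RequiresFpuRegister();
  std::printf("needs an FPU register: %s\n", loc.IsFpuRegister() ? "yes" : "no");
  return 0;
}

On x64 the generic FPU location is still backed by XMM registers, so none of the generated code in this file changes; only the names used at the IL level become portable.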
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
6 #if defined(TARGET_ARCH_X64) 6 #if defined(TARGET_ARCH_X64)
7 7
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 9
10 #include "lib/error.h" 10 #include "lib/error.h"
11 #include "vm/dart_entry.h"
11 #include "vm/flow_graph_compiler.h" 12 #include "vm/flow_graph_compiler.h"
12 #include "vm/locations.h" 13 #include "vm/locations.h"
13 #include "vm/object_store.h" 14 #include "vm/object_store.h"
14 #include "vm/parser.h" 15 #include "vm/parser.h"
15 #include "vm/stub_code.h" 16 #include "vm/stub_code.h"
16 #include "vm/symbols.h" 17 #include "vm/symbols.h"
17 18
18 #define __ compiler->assembler()-> 19 #define __ compiler->assembler()->
19 20
20 namespace dart { 21 namespace dart {
(...skipping 246 matching lines...)
267 268
268 269
269 LocationSummary* EqualityCompareInstr::MakeLocationSummary() const { 270 LocationSummary* EqualityCompareInstr::MakeLocationSummary() const {
270 const intptr_t kNumInputs = 2; 271 const intptr_t kNumInputs = 2;
271 const bool is_checked_strict_equal = 272 const bool is_checked_strict_equal =
272 HasICData() && ic_data()->AllTargetsHaveSameOwner(kInstanceCid); 273 HasICData() && ic_data()->AllTargetsHaveSameOwner(kInstanceCid);
273 if (receiver_class_id() == kDoubleCid) { 274 if (receiver_class_id() == kDoubleCid) {
274 const intptr_t kNumTemps = 0; 275 const intptr_t kNumTemps = 0;
275 LocationSummary* locs = 276 LocationSummary* locs =
276 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 277 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
277 locs->set_in(0, Location::RequiresXmmRegister()); 278 locs->set_in(0, Location::RequiresFpuRegister());
278 locs->set_in(1, Location::RequiresXmmRegister()); 279 locs->set_in(1, Location::RequiresFpuRegister());
279 locs->set_out(Location::RequiresRegister()); 280 locs->set_out(Location::RequiresRegister());
280 return locs; 281 return locs;
281 } 282 }
282 if (receiver_class_id() == kSmiCid) { 283 if (receiver_class_id() == kSmiCid) {
283 const intptr_t kNumTemps = 0; 284 const intptr_t kNumTemps = 0;
284 LocationSummary* locs = 285 LocationSummary* locs =
285 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 286 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
286 locs->set_in(0, Location::RegisterOrConstant(left())); 287 locs->set_in(0, Location::RegisterOrConstant(left()));
287 // Only one input can be a constant operand. The case of two constant 288 // Only one input can be a constant operand. The case of two constant
288 // operands should be handled by constant propagation. 289 // operands should be handled by constant propagation.
(...skipping 42 matching lines...)
331 const ICData& original_ic_data) { 332 const ICData& original_ic_data) {
332 if (!compiler->is_optimizing()) { 333 if (!compiler->is_optimizing()) {
333 compiler->AddCurrentDescriptor(PcDescriptors::kDeoptBefore, 334 compiler->AddCurrentDescriptor(PcDescriptors::kDeoptBefore,
334 deopt_id, 335 deopt_id,
335 token_pos); 336 token_pos);
336 } 337 }
337 const int kNumberOfArguments = 2; 338 const int kNumberOfArguments = 2;
338 const Array& kNoArgumentNames = Array::Handle(); 339 const Array& kNoArgumentNames = Array::Handle();
339 const int kNumArgumentsChecked = 2; 340 const int kNumArgumentsChecked = 2;
340 341
341 const Immediate raw_null = 342 const Immediate& raw_null =
342 Immediate(reinterpret_cast<intptr_t>(Object::null())); 343 Immediate(reinterpret_cast<intptr_t>(Object::null()));
343 Label check_identity; 344 Label check_identity;
344 __ cmpq(Address(RSP, 0 * kWordSize), raw_null); 345 __ cmpq(Address(RSP, 0 * kWordSize), raw_null);
345 __ j(EQUAL, &check_identity); 346 __ j(EQUAL, &check_identity);
346 __ cmpq(Address(RSP, 1 * kWordSize), raw_null); 347 __ cmpq(Address(RSP, 1 * kWordSize), raw_null);
347 __ j(EQUAL, &check_identity); 348 __ j(EQUAL, &check_identity);
348 349
349 ICData& equality_ic_data = ICData::ZoneHandle(original_ic_data.raw()); 350 ICData& equality_ic_data = ICData::ZoneHandle(original_ic_data.raw());
350 if (compiler->is_optimizing() && FLAG_propagate_ic_data) { 351 if (compiler->is_optimizing() && FLAG_propagate_ic_data) {
351 ASSERT(!original_ic_data.IsNull()); 352 ASSERT(!original_ic_data.IsNull());
(...skipping 166 matching lines...)
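
Several chunks in this file also change const Immediate raw_null to const Immediate& raw_null (and similarly for arguments_length further down). A minimal sketch, assuming a hypothetical Imm type rather than the VM's assembler Immediate, of the C++ rule that makes this safe: a temporary bound to a reference-to-const has its lifetime extended to that of the reference, so raw_null stays valid for the cmpq calls that follow.

// Hypothetical Imm type standing in for the assembler's Immediate; shows the
// lifetime-extension rule for temporaries bound to references-to-const.
#include <cstdint>
#include <cstdio>

struct Imm {
  explicit Imm(std::intptr_t v) : value(v) {}
  std::intptr_t value;
};

int main() {
  // The temporary Imm(0) lives as long as raw_null does (end of this scope).
  const Imm& raw_null = Imm(0);
  std::printf("raw_null.value = %ld\n", static_cast<long>(raw_null.value));
  return 0;
}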
518 BranchInstr* branch, 519 BranchInstr* branch,
519 intptr_t deopt_id) { 520 intptr_t deopt_id) {
520 ASSERT((kind == Token::kEQ) || (kind == Token::kNE)); 521 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
521 Register left = locs.in(0).reg(); 522 Register left = locs.in(0).reg();
522 Register right = locs.in(1).reg(); 523 Register right = locs.in(1).reg();
523 Register temp = locs.temp(0).reg(); 524 Register temp = locs.temp(0).reg();
524 Label* deopt = compiler->AddDeoptStub(deopt_id, kDeoptEquality); 525 Label* deopt = compiler->AddDeoptStub(deopt_id, kDeoptEquality);
525 __ testq(left, Immediate(kSmiTagMask)); 526 __ testq(left, Immediate(kSmiTagMask));
526 __ j(ZERO, deopt); 527 __ j(ZERO, deopt);
527 // 'left' is not Smi. 528 // 'left' is not Smi.
528 const Immediate raw_null = 529 const Immediate& raw_null =
529 Immediate(reinterpret_cast<intptr_t>(Object::null())); 530 Immediate(reinterpret_cast<intptr_t>(Object::null()));
530 Label identity_compare; 531 Label identity_compare;
531 __ cmpq(right, raw_null); 532 __ cmpq(right, raw_null);
532 __ j(EQUAL, &identity_compare); 533 __ j(EQUAL, &identity_compare);
533 __ cmpq(left, raw_null); 534 __ cmpq(left, raw_null);
534 __ j(EQUAL, &identity_compare); 535 __ j(EQUAL, &identity_compare);
535 536
536 __ LoadClassId(temp, left); 537 __ LoadClassId(temp, left);
537 const intptr_t len = ic_data.NumberOfChecks(); 538 const intptr_t len = ic_data.NumberOfChecks();
538 for (intptr_t i = 0; i < len; i++) { 539 for (intptr_t i = 0; i < len; i++) {
(...skipping 30 matching lines...)
569 LocationSummary* locs, 570 LocationSummary* locs,
570 Token::Kind kind, 571 Token::Kind kind,
571 BranchInstr* branch, 572 BranchInstr* branch,
572 const ICData& ic_data, 573 const ICData& ic_data,
573 intptr_t deopt_id, 574 intptr_t deopt_id,
574 intptr_t token_pos) { 575 intptr_t token_pos) {
575 ASSERT((kind == Token::kEQ) || (kind == Token::kNE)); 576 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
576 ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0)); 577 ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0));
577 Register left = locs->in(0).reg(); 578 Register left = locs->in(0).reg();
578 Register right = locs->in(1).reg(); 579 Register right = locs->in(1).reg();
579 const Immediate raw_null = 580 const Immediate& raw_null =
580 Immediate(reinterpret_cast<intptr_t>(Object::null())); 581 Immediate(reinterpret_cast<intptr_t>(Object::null()));
581 Label done, identity_compare, non_null_compare; 582 Label done, identity_compare, non_null_compare;
582 __ cmpq(right, raw_null); 583 __ cmpq(right, raw_null);
583 __ j(EQUAL, &identity_compare, Assembler::kNearJump); 584 __ j(EQUAL, &identity_compare, Assembler::kNearJump);
584 __ cmpq(left, raw_null); 585 __ cmpq(left, raw_null);
585 __ j(NOT_EQUAL, &non_null_compare, Assembler::kNearJump); 586 __ j(NOT_EQUAL, &non_null_compare, Assembler::kNearJump);
586 // Comparison with NULL is "===". 587 // Comparison with NULL is "===".
587 __ Bind(&identity_compare); 588 __ Bind(&identity_compare);
588 __ cmpq(left, right); 589 __ cmpq(left, right);
589 Condition cond = TokenKindToSmiCondition(kind); 590 Condition cond = TokenKindToSmiCondition(kind);
(...skipping 64 matching lines...)
654 UNREACHABLE(); 655 UNREACHABLE();
655 return OVERFLOW; 656 return OVERFLOW;
656 } 657 }
657 } 658 }
658 659
659 660
660 static void EmitDoubleComparisonOp(FlowGraphCompiler* compiler, 661 static void EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
661 const LocationSummary& locs, 662 const LocationSummary& locs,
662 Token::Kind kind, 663 Token::Kind kind,
663 BranchInstr* branch) { 664 BranchInstr* branch) {
664 XmmRegister left = locs.in(0).xmm_reg(); 665 XmmRegister left = locs.in(0).fpu_reg();
665 XmmRegister right = locs.in(1).xmm_reg(); 666 XmmRegister right = locs.in(1).fpu_reg();
666 667
667 Condition true_condition = TokenKindToDoubleCondition(kind); 668 Condition true_condition = TokenKindToDoubleCondition(kind);
668 if (branch != NULL) { 669 if (branch != NULL) {
669 compiler->EmitDoubleCompareBranch( 670 compiler->EmitDoubleCompareBranch(
670 true_condition, left, right, branch); 671 true_condition, left, right, branch);
671 } else { 672 } else {
672 compiler->EmitDoubleCompareBool( 673 compiler->EmitDoubleCompareBool(
673 true_condition, left, right, locs.out().reg()); 674 true_condition, left, right, locs.out().reg());
674 } 675 }
675 } 676 }
(...skipping 81 matching lines...)
757 branch->EmitBranchOnCondition(compiler, branch_condition); 758 branch->EmitBranchOnCondition(compiler, branch_condition);
758 } 759 }
759 760
760 761
761 LocationSummary* RelationalOpInstr::MakeLocationSummary() const { 762 LocationSummary* RelationalOpInstr::MakeLocationSummary() const {
762 const intptr_t kNumInputs = 2; 763 const intptr_t kNumInputs = 2;
763 const intptr_t kNumTemps = 0; 764 const intptr_t kNumTemps = 0;
764 if (operands_class_id() == kDoubleCid) { 765 if (operands_class_id() == kDoubleCid) {
765 LocationSummary* summary = 766 LocationSummary* summary =
766 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 767 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
767 summary->set_in(0, Location::RequiresXmmRegister()); 768 summary->set_in(0, Location::RequiresFpuRegister());
768 summary->set_in(1, Location::RequiresXmmRegister()); 769 summary->set_in(1, Location::RequiresFpuRegister());
769 summary->set_out(Location::RequiresRegister()); 770 summary->set_out(Location::RequiresRegister());
770 return summary; 771 return summary;
771 } else if (operands_class_id() == kSmiCid) { 772 } else if (operands_class_id() == kSmiCid) {
772 LocationSummary* summary = 773 LocationSummary* summary =
773 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 774 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
774 summary->set_in(0, Location::RegisterOrConstant(left())); 775 summary->set_in(0, Location::RegisterOrConstant(left()));
775 // Only one input can be a constant operand. The case of two constant 776 // Only one input can be a constant operand. The case of two constant
776 // operands should be handled by constant propagation. 777 // operands should be handled by constant propagation.
777 summary->set_in(1, summary->in(0).IsConstant() 778 summary->set_in(1, summary->in(0).IsConstant()
778 ? Location::RequiresRegister() 779 ? Location::RequiresRegister()
(...skipping 229 matching lines...)
1008 LocationSummary* locs = 1009 LocationSummary* locs =
1009 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 1010 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
1010 locs->set_in(0, Location::RequiresRegister()); 1011 locs->set_in(0, Location::RequiresRegister());
1011 // The smi index is either untagged and tagged again at the end of the 1012 // The smi index is either untagged and tagged again at the end of the
1012 // operation (element size == 1), or it is left smi tagged (for all element 1013 // operation (element size == 1), or it is left smi tagged (for all element
1013 // sizes > 1). 1014 // sizes > 1).
1014 locs->set_in(1, CanBeImmediateIndex(index(), class_id()) 1015 locs->set_in(1, CanBeImmediateIndex(index(), class_id())
1015 ? Location::RegisterOrSmiConstant(index()) 1016 ? Location::RegisterOrSmiConstant(index())
1016 : Location::RequiresRegister()); 1017 : Location::RequiresRegister());
1017 if (representation() == kUnboxedDouble) { 1018 if (representation() == kUnboxedDouble) {
1018 locs->set_out(Location::RequiresXmmRegister()); 1019 locs->set_out(Location::RequiresFpuRegister());
1019 } else { 1020 } else {
1020 locs->set_out(Location::RequiresRegister()); 1021 locs->set_out(Location::RequiresRegister());
1021 } 1022 }
1022 return locs; 1023 return locs;
1023 } 1024 }
1024 1025
1025 1026
1026 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1027 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1027 Register array = locs()->in(0).reg(); 1028 Register array = locs()->in(0).reg();
1028 Location index = locs()->in(1); 1029 Location index = locs()->in(1);
(...skipping 20 matching lines...)
1049 return; 1050 return;
1050 } 1051 }
1051 1052
1052 FieldAddress element_address = index.IsRegister() ? 1053 FieldAddress element_address = index.IsRegister() ?
1053 FlowGraphCompiler::ElementAddressForRegIndex( 1054 FlowGraphCompiler::ElementAddressForRegIndex(
1054 class_id(), array, index.reg()) : 1055 class_id(), array, index.reg()) :
1055 FlowGraphCompiler::ElementAddressForIntIndex( 1056 FlowGraphCompiler::ElementAddressForIntIndex(
1056 class_id(), array, Smi::Cast(index.constant()).Value()); 1057 class_id(), array, Smi::Cast(index.constant()).Value());
1057 1058
1058 if (representation() == kUnboxedDouble) { 1059 if (representation() == kUnboxedDouble) {
1059 XmmRegister result = locs()->out().xmm_reg(); 1060 XmmRegister result = locs()->out().fpu_reg();
1060 if (class_id() == kFloat32ArrayCid) { 1061 if (class_id() == kFloat32ArrayCid) {
1061 // Load single precision float. 1062 // Load single precision float.
1062 __ movss(result, element_address); 1063 __ movss(result, element_address);
1063 // Promote to double. 1064 // Promote to double.
1064 __ cvtss2sd(result, locs()->out().xmm_reg()); 1065 __ cvtss2sd(result, locs()->out().fpu_reg());
1065 } else { 1066 } else {
1066 ASSERT(class_id() == kFloat64ArrayCid); 1067 ASSERT(class_id() == kFloat64ArrayCid);
1067 __ movsd(result, element_address); 1068 __ movsd(result, element_address);
1068 } 1069 }
1069 return; 1070 return;
1070 } 1071 }
1071 1072
1072 Register result = locs()->out().reg(); 1073 Register result = locs()->out().reg();
1073 switch (class_id()) { 1074 switch (class_id()) {
1074 case kInt8ArrayCid: 1075 case kInt8ArrayCid:
(...skipping 53 matching lines...)
1128 // RBX, RCX, RDX) instead of using a fixed register. 1129 // RBX, RCX, RDX) instead of using a fixed register.
1129 locs->set_in(2, Location::FixedRegisterOrSmiConstant(value(), RAX)); 1130 locs->set_in(2, Location::FixedRegisterOrSmiConstant(value(), RAX));
1130 break; 1131 break;
1131 case kInt16ArrayCid: 1132 case kInt16ArrayCid:
1132 case kUint16ArrayCid: 1133 case kUint16ArrayCid:
1133 // Writable register because the value must be untagged before storing. 1134 // Writable register because the value must be untagged before storing.
1134 locs->set_in(2, Location::WritableRegister()); 1135 locs->set_in(2, Location::WritableRegister());
1135 break; 1136 break;
1136 case kFloat32ArrayCid: 1137 case kFloat32ArrayCid:
1137 // Need temp register for float-to-double conversion. 1138 // Need temp register for float-to-double conversion.
1138 locs->AddTemp(Location::RequiresXmmRegister()); 1139 locs->AddTemp(Location::RequiresFpuRegister());
1139 // Fall through. 1140 // Fall through.
1140 case kFloat64ArrayCid: 1141 case kFloat64ArrayCid:
1141 // TODO(srdjan): Support Float64 constants. 1142 // TODO(srdjan): Support Float64 constants.
1142 locs->set_in(2, Location::RequiresXmmRegister()); 1143 locs->set_in(2, Location::RequiresFpuRegister());
1143 break; 1144 break;
1144 default: 1145 default:
1145 UNREACHABLE(); 1146 UNREACHABLE();
1146 return NULL; 1147 return NULL;
1147 } 1148 }
1148 return locs; 1149 return locs;
1149 } 1150 }
1150 1151
1151 1152
1152 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1153 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
(...skipping 74 matching lines...)
1227 } 1228 }
1228 case kInt16ArrayCid: 1229 case kInt16ArrayCid:
1229 case kUint16ArrayCid: { 1230 case kUint16ArrayCid: {
1230 Register value = locs()->in(2).reg(); 1231 Register value = locs()->in(2).reg();
1231 __ SmiUntag(value); 1232 __ SmiUntag(value);
1232 __ movw(element_address, value); 1233 __ movw(element_address, value);
1233 break; 1234 break;
1234 } 1235 }
1235 case kFloat32ArrayCid: 1236 case kFloat32ArrayCid:
1236 // Convert to single precision. 1237 // Convert to single precision.
1237 __ cvtsd2ss(locs()->temp(0).xmm_reg(), locs()->in(2).xmm_reg()); 1238 __ cvtsd2ss(locs()->temp(0).fpu_reg(), locs()->in(2).fpu_reg());
1238 // Store. 1239 // Store.
1239 __ movss(element_address, locs()->temp(0).xmm_reg()); 1240 __ movss(element_address, locs()->temp(0).fpu_reg());
1240 break; 1241 break;
1241 case kFloat64ArrayCid: 1242 case kFloat64ArrayCid:
1242 __ movsd(element_address, locs()->in(2).xmm_reg()); 1243 __ movsd(element_address, locs()->in(2).fpu_reg());
1243 break; 1244 break;
1244 default: 1245 default:
1245 UNREACHABLE(); 1246 UNREACHABLE();
1246 } 1247 }
1247 } 1248 }
1248 1249
1249 1250
1250 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary() const { 1251 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary() const {
1251 const intptr_t kNumInputs = 2; 1252 const intptr_t kNumInputs = 2;
1252 const intptr_t num_temps = 0; 1253 const intptr_t num_temps = 0;
(...skipping 184 matching lines...)
1437 Register result_reg = locs()->out().reg(); 1438 Register result_reg = locs()->out().reg();
1438 1439
1439 // 'instantiator_reg' is the instantiator AbstractTypeArguments object 1440 // 'instantiator_reg' is the instantiator AbstractTypeArguments object
1440 // (or null). 1441 // (or null).
1441 // If the instantiator is null and if the type argument vector 1442 // If the instantiator is null and if the type argument vector
1442 // instantiated from null becomes a vector of dynamic, then use null as 1443 // instantiated from null becomes a vector of dynamic, then use null as
1443 // the type arguments. 1444 // the type arguments.
1444 Label type_arguments_instantiated; 1445 Label type_arguments_instantiated;
1445 const intptr_t len = type_arguments().Length(); 1446 const intptr_t len = type_arguments().Length();
1446 if (type_arguments().IsRawInstantiatedRaw(len)) { 1447 if (type_arguments().IsRawInstantiatedRaw(len)) {
1447 const Immediate raw_null = 1448 const Immediate& raw_null =
1448 Immediate(reinterpret_cast<intptr_t>(Object::null())); 1449 Immediate(reinterpret_cast<intptr_t>(Object::null()));
1449 __ cmpq(instantiator_reg, raw_null); 1450 __ cmpq(instantiator_reg, raw_null);
1450 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump); 1451 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump);
1451 } 1452 }
1452 // Instantiate non-null type arguments. 1453 // Instantiate non-null type arguments.
1453 if (type_arguments().IsUninstantiatedIdentity()) { 1454 if (type_arguments().IsUninstantiatedIdentity()) {
1454 // Check if the instantiator type argument vector is a TypeArguments of a 1455 // Check if the instantiator type argument vector is a TypeArguments of a
1455 // matching length and, if so, use it as the instantiated type_arguments. 1456 // matching length and, if so, use it as the instantiated type_arguments.
1456 // No need to check the instantiator ('instantiator_reg') for null here, 1457 // No need to check the instantiator ('instantiator_reg') for null here,
1457 // because a null instantiator will have the wrong class (Null instead of 1458 // because a null instantiator will have the wrong class (Null instead of
(...skipping 40 matching lines...)
1498 ASSERT(instantiator_reg == result_reg); 1499 ASSERT(instantiator_reg == result_reg);
1499 1500
1500 // instantiator_reg is the instantiator type argument vector, i.e. an 1501 // instantiator_reg is the instantiator type argument vector, i.e. an
1501 // AbstractTypeArguments object (or null). 1502 // AbstractTypeArguments object (or null).
1502 // If the instantiator is null and if the type argument vector 1503 // If the instantiator is null and if the type argument vector
1503 // instantiated from null becomes a vector of dynamic, then use null as 1504 // instantiated from null becomes a vector of dynamic, then use null as
1504 // the type arguments. 1505 // the type arguments.
1505 Label type_arguments_instantiated; 1506 Label type_arguments_instantiated;
1506 const intptr_t len = type_arguments().Length(); 1507 const intptr_t len = type_arguments().Length();
1507 if (type_arguments().IsRawInstantiatedRaw(len)) { 1508 if (type_arguments().IsRawInstantiatedRaw(len)) {
1508 const Immediate raw_null = 1509 const Immediate& raw_null =
1509 Immediate(reinterpret_cast<intptr_t>(Object::null())); 1510 Immediate(reinterpret_cast<intptr_t>(Object::null()));
1510 __ cmpq(instantiator_reg, raw_null); 1511 __ cmpq(instantiator_reg, raw_null);
1511 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump); 1512 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump);
1512 } 1513 }
1513 // Instantiate non-null type arguments. 1514 // Instantiate non-null type arguments.
1514 if (type_arguments().IsUninstantiatedIdentity()) { 1515 if (type_arguments().IsUninstantiatedIdentity()) {
1515 // Check if the instantiator type argument vector is a TypeArguments of a 1516 // Check if the instantiator type argument vector is a TypeArguments of a
1516 // matching length and, if so, use it as the instantiated type_arguments. 1517 // matching length and, if so, use it as the instantiated type_arguments.
1517 // No need to check instantiator_reg for null here, because a null 1518 // No need to check instantiator_reg for null here, because a null
1518 // instantiator will have the wrong class (Null instead of TypeArguments). 1519 // instantiator will have the wrong class (Null instead of TypeArguments).
1519 Label type_arguments_uninstantiated; 1520 Label type_arguments_uninstantiated;
1520 __ CompareClassId(instantiator_reg, kTypeArgumentsCid); 1521 __ CompareClassId(instantiator_reg, kTypeArgumentsCid);
1521 __ j(NOT_EQUAL, &type_arguments_uninstantiated, Assembler::kNearJump); 1522 __ j(NOT_EQUAL, &type_arguments_uninstantiated, Assembler::kNearJump);
1522 Immediate arguments_length = 1523 const Immediate& arguments_length =
1523 Immediate(Smi::RawValue(type_arguments().Length())); 1524 Immediate(Smi::RawValue(type_arguments().Length()));
1524 __ cmpq(FieldAddress(instantiator_reg, TypeArguments::length_offset()), 1525 __ cmpq(FieldAddress(instantiator_reg, TypeArguments::length_offset()),
1525 arguments_length); 1526 arguments_length);
1526 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump); 1527 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump);
1527 __ Bind(&type_arguments_uninstantiated); 1528 __ Bind(&type_arguments_uninstantiated);
1528 } 1529 }
1529 // In the non-factory case, we rely on the allocation stub to 1530 // In the non-factory case, we rely on the allocation stub to
1530 // instantiate the type arguments. 1531 // instantiate the type arguments.
1531 __ LoadObject(result_reg, type_arguments()); 1532 __ LoadObject(result_reg, type_arguments());
1532 // result_reg: uninstantiated type arguments. 1533 // result_reg: uninstantiated type arguments.
(...skipping 19 matching lines...)
1552 Register instantiator_reg = locs()->in(0).reg(); 1553 Register instantiator_reg = locs()->in(0).reg();
1553 ASSERT(locs()->out().reg() == instantiator_reg); 1554 ASSERT(locs()->out().reg() == instantiator_reg);
1554 1555
1555 // instantiator_reg is the instantiator AbstractTypeArguments object 1556 // instantiator_reg is the instantiator AbstractTypeArguments object
1556 // (or null). If the instantiator is null and if the type argument vector 1557 // (or null). If the instantiator is null and if the type argument vector
1557 // instantiated from null becomes a vector of dynamic, then use null as 1558 // instantiated from null becomes a vector of dynamic, then use null as
1558 // the type arguments and do not pass the instantiator. 1559 // the type arguments and do not pass the instantiator.
1559 Label done; 1560 Label done;
1560 const intptr_t len = type_arguments().Length(); 1561 const intptr_t len = type_arguments().Length();
1561 if (type_arguments().IsRawInstantiatedRaw(len)) { 1562 if (type_arguments().IsRawInstantiatedRaw(len)) {
1562 const Immediate raw_null = 1563 const Immediate& raw_null =
1563 Immediate(reinterpret_cast<intptr_t>(Object::null())); 1564 Immediate(reinterpret_cast<intptr_t>(Object::null()));
1564 Label instantiator_not_null; 1565 Label instantiator_not_null;
1565 __ cmpq(instantiator_reg, raw_null); 1566 __ cmpq(instantiator_reg, raw_null);
1566 __ j(NOT_EQUAL, &instantiator_not_null, Assembler::kNearJump); 1567 __ j(NOT_EQUAL, &instantiator_not_null, Assembler::kNearJump);
1567 // Null was used in VisitExtractConstructorTypeArguments as the 1568 // Null was used in VisitExtractConstructorTypeArguments as the
1568 // instantiated type arguments, no proper instantiator needed. 1569 // instantiated type arguments, no proper instantiator needed.
1569 __ movq(instantiator_reg, 1570 __ movq(instantiator_reg,
1570 Immediate(Smi::RawValue(StubCode::kNoInstantiator))); 1571 Immediate(Smi::RawValue(StubCode::kNoInstantiator)));
1571 __ jmp(&done); 1572 __ jmp(&done);
1572 __ Bind(&instantiator_not_null); 1573 __ Bind(&instantiator_not_null);
1573 } 1574 }
1574 // Instantiate non-null type arguments. 1575 // Instantiate non-null type arguments.
1575 if (type_arguments().IsUninstantiatedIdentity()) { 1576 if (type_arguments().IsUninstantiatedIdentity()) {
1576 // TODO(regis): The following emitted code is duplicated in 1577 // TODO(regis): The following emitted code is duplicated in
1577 // VisitExtractConstructorTypeArguments above. The reason is that the code 1578 // VisitExtractConstructorTypeArguments above. The reason is that the code
1578 // is split between two computations, so that each one produces a 1579 // is split between two computations, so that each one produces a
1579 // single value, rather than producing a pair of values. 1580 // single value, rather than producing a pair of values.
1580 // If this becomes an issue, we should expose these tests at the IL level. 1581 // If this becomes an issue, we should expose these tests at the IL level.
1581 1582
1582 // Check if the instantiator type argument vector is a TypeArguments of a 1583 // Check if the instantiator type argument vector is a TypeArguments of a
1583 // matching length and, if so, use it as the instantiated type_arguments. 1584 // matching length and, if so, use it as the instantiated type_arguments.
1584 // No need to check the instantiator (RAX) for null here, because a null 1585 // No need to check the instantiator (RAX) for null here, because a null
1585 // instantiator will have the wrong class (Null instead of TypeArguments). 1586 // instantiator will have the wrong class (Null instead of TypeArguments).
1586 __ CompareClassId(instantiator_reg, kTypeArgumentsCid); 1587 __ CompareClassId(instantiator_reg, kTypeArgumentsCid);
1587 __ j(NOT_EQUAL, &done, Assembler::kNearJump); 1588 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
1588 Immediate arguments_length = 1589 const Immediate& arguments_length =
1589 Immediate(Smi::RawValue(type_arguments().Length())); 1590 Immediate(Smi::RawValue(type_arguments().Length()));
1590 __ cmpq(FieldAddress(instantiator_reg, TypeArguments::length_offset()), 1591 __ cmpq(FieldAddress(instantiator_reg, TypeArguments::length_offset()),
1591 arguments_length); 1592 arguments_length);
1592 __ j(NOT_EQUAL, &done, Assembler::kNearJump); 1593 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
1593 // The instantiator was used in VisitExtractConstructorTypeArguments as the 1594 // The instantiator was used in VisitExtractConstructorTypeArguments as the
1594 // instantiated type arguments, no proper instantiator needed. 1595 // instantiated type arguments, no proper instantiator needed.
1595 __ movq(instantiator_reg, 1596 __ movq(instantiator_reg,
1596 Immediate(Smi::RawValue(StubCode::kNoInstantiator))); 1597 Immediate(Smi::RawValue(StubCode::kNoInstantiator)));
1597 } 1598 }
1598 __ Bind(&done); 1599 __ Bind(&done);
(...skipping 439 matching lines...)
2038 } 2039 }
2039 2040
2040 2041
2041 LocationSummary* BoxDoubleInstr::MakeLocationSummary() const { 2042 LocationSummary* BoxDoubleInstr::MakeLocationSummary() const {
2042 const intptr_t kNumInputs = 1; 2043 const intptr_t kNumInputs = 1;
2043 const intptr_t kNumTemps = 0; 2044 const intptr_t kNumTemps = 0;
2044 LocationSummary* summary = 2045 LocationSummary* summary =
2045 new LocationSummary(kNumInputs, 2046 new LocationSummary(kNumInputs,
2046 kNumTemps, 2047 kNumTemps,
2047 LocationSummary::kCallOnSlowPath); 2048 LocationSummary::kCallOnSlowPath);
2048 summary->set_in(0, Location::RequiresXmmRegister()); 2049 summary->set_in(0, Location::RequiresFpuRegister());
2049 summary->set_out(Location::RequiresRegister()); 2050 summary->set_out(Location::RequiresRegister());
2050 return summary; 2051 return summary;
2051 } 2052 }
2052 2053
2053 2054
2054 class BoxDoubleSlowPath : public SlowPathCode { 2055 class BoxDoubleSlowPath : public SlowPathCode {
2055 public: 2056 public:
2056 explicit BoxDoubleSlowPath(BoxDoubleInstr* instruction) 2057 explicit BoxDoubleSlowPath(BoxDoubleInstr* instruction)
2057 : instruction_(instruction) { } 2058 : instruction_(instruction) { }
2058 2059
(...skipping 22 matching lines...)
2081 private: 2082 private:
2082 BoxDoubleInstr* instruction_; 2083 BoxDoubleInstr* instruction_;
2083 }; 2084 };
2084 2085
2085 2086
2086 void BoxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2087 void BoxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2087 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this); 2088 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
2088 compiler->AddSlowPathCode(slow_path); 2089 compiler->AddSlowPathCode(slow_path);
2089 2090
2090 Register out_reg = locs()->out().reg(); 2091 Register out_reg = locs()->out().reg();
2091 XmmRegister value = locs()->in(0).xmm_reg(); 2092 XmmRegister value = locs()->in(0).fpu_reg();
2092 2093
2093 AssemblerMacros::TryAllocate(compiler->assembler(), 2094 AssemblerMacros::TryAllocate(compiler->assembler(),
2094 compiler->double_class(), 2095 compiler->double_class(),
2095 slow_path->entry_label(), 2096 slow_path->entry_label(),
2096 Assembler::kFarJump, 2097 Assembler::kFarJump,
2097 out_reg); 2098 out_reg);
2098 __ Bind(slow_path->exit_label()); 2099 __ Bind(slow_path->exit_label());
2099 __ movsd(FieldAddress(out_reg, Double::value_offset()), value); 2100 __ movsd(FieldAddress(out_reg, Double::value_offset()), value);
2100 } 2101 }
2101 2102
2102 2103
2103 LocationSummary* UnboxDoubleInstr::MakeLocationSummary() const { 2104 LocationSummary* UnboxDoubleInstr::MakeLocationSummary() const {
2104 const intptr_t kNumInputs = 1; 2105 const intptr_t kNumInputs = 1;
2105 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0; 2106 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
2106 LocationSummary* summary = 2107 LocationSummary* summary =
2107 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2108 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2108 summary->set_in(0, Location::RequiresRegister()); 2109 summary->set_in(0, Location::RequiresRegister());
2109 if (CanDeoptimize()) summary->set_temp(0, Location::RequiresRegister()); 2110 if (CanDeoptimize()) summary->set_temp(0, Location::RequiresRegister());
2110 summary->set_out(Location::RequiresXmmRegister()); 2111 summary->set_out(Location::RequiresFpuRegister());
2111 return summary; 2112 return summary;
2112 } 2113 }
2113 2114
2114 2115
2115 void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2116 void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2116 const intptr_t value_cid = value()->ResultCid(); 2117 const intptr_t value_cid = value()->ResultCid();
2117 const Register value = locs()->in(0).reg(); 2118 const Register value = locs()->in(0).reg();
2118 const XmmRegister result = locs()->out().xmm_reg(); 2119 const XmmRegister result = locs()->out().fpu_reg();
2119 2120
2120 if (value_cid == kDoubleCid) { 2121 if (value_cid == kDoubleCid) {
2121 __ movsd(result, FieldAddress(value, Double::value_offset())); 2122 __ movsd(result, FieldAddress(value, Double::value_offset()));
2122 } else if (value_cid == kSmiCid) { 2123 } else if (value_cid == kSmiCid) {
2123 __ SmiUntag(value); // Untag input before conversion. 2124 __ SmiUntag(value); // Untag input before conversion.
2124 __ cvtsi2sd(result, value); 2125 __ cvtsi2sd(result, value);
2125 __ SmiTag(value); // Restore input register. 2126 __ SmiTag(value); // Restore input register.
2126 } else { 2127 } else {
2127 Label* deopt = compiler->AddDeoptStub(deopt_id_, kDeoptBinaryDoubleOp); 2128 Label* deopt = compiler->AddDeoptStub(deopt_id_, kDeoptBinaryDoubleOp);
2128 compiler->LoadDoubleOrSmiToXmm(result, 2129 compiler->LoadDoubleOrSmiToFpu(result,
2129 value, 2130 value,
2130 locs()->temp(0).reg(), 2131 locs()->temp(0).reg(),
2131 deopt); 2132 deopt);
2132 } 2133 }
2133 } 2134 }
2134 2135
2135 2136
2136 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary() const { 2137 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary() const {
2137 const intptr_t kNumInputs = 2; 2138 const intptr_t kNumInputs = 2;
2138 const intptr_t kNumTemps = 0; 2139 const intptr_t kNumTemps = 0;
2139 LocationSummary* summary = 2140 LocationSummary* summary =
2140 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2141 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2141 summary->set_in(0, Location::RequiresXmmRegister()); 2142 summary->set_in(0, Location::RequiresFpuRegister());
2142 summary->set_in(1, Location::RequiresXmmRegister()); 2143 summary->set_in(1, Location::RequiresFpuRegister());
2143 summary->set_out(Location::SameAsFirstInput()); 2144 summary->set_out(Location::SameAsFirstInput());
2144 return summary; 2145 return summary;
2145 } 2146 }
2146 2147
2147 2148
2148 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2149 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2149 XmmRegister left = locs()->in(0).xmm_reg(); 2150 XmmRegister left = locs()->in(0).fpu_reg();
2150 XmmRegister right = locs()->in(1).xmm_reg(); 2151 XmmRegister right = locs()->in(1).fpu_reg();
2151 2152
2152 ASSERT(locs()->out().xmm_reg() == left); 2153 ASSERT(locs()->out().fpu_reg() == left);
2153 2154
2154 switch (op_kind()) { 2155 switch (op_kind()) {
2155 case Token::kADD: __ addsd(left, right); break; 2156 case Token::kADD: __ addsd(left, right); break;
2156 case Token::kSUB: __ subsd(left, right); break; 2157 case Token::kSUB: __ subsd(left, right); break;
2157 case Token::kMUL: __ mulsd(left, right); break; 2158 case Token::kMUL: __ mulsd(left, right); break;
2158 case Token::kDIV: __ divsd(left, right); break; 2159 case Token::kDIV: __ divsd(left, right); break;
2159 default: UNREACHABLE(); 2160 default: UNREACHABLE();
2160 } 2161 }
2161 } 2162 }
2162 2163
2163 2164
2164 LocationSummary* MathSqrtInstr::MakeLocationSummary() const { 2165 LocationSummary* MathSqrtInstr::MakeLocationSummary() const {
2165 const intptr_t kNumInputs = 1; 2166 const intptr_t kNumInputs = 1;
2166 const intptr_t kNumTemps = 0; 2167 const intptr_t kNumTemps = 0;
2167 LocationSummary* summary = 2168 LocationSummary* summary =
2168 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2169 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2169 summary->set_in(0, Location::RequiresXmmRegister()); 2170 summary->set_in(0, Location::RequiresFpuRegister());
2170 summary->set_out(Location::RequiresXmmRegister()); 2171 summary->set_out(Location::RequiresFpuRegister());
2171 return summary; 2172 return summary;
2172 } 2173 }
2173 2174
2174 2175
2175 void MathSqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2176 void MathSqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2176 __ sqrtsd(locs()->out().xmm_reg(), locs()->in(0).xmm_reg()); 2177 __ sqrtsd(locs()->out().fpu_reg(), locs()->in(0).fpu_reg());
2177 } 2178 }
2178 2179
2179 2180
2180 LocationSummary* UnarySmiOpInstr::MakeLocationSummary() const { 2181 LocationSummary* UnarySmiOpInstr::MakeLocationSummary() const {
2181 const intptr_t kNumInputs = 1; 2182 const intptr_t kNumInputs = 1;
2182 const intptr_t kNumTemps = 0; 2183 const intptr_t kNumTemps = 0;
2183 LocationSummary* summary = 2184 LocationSummary* summary =
2184 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2185 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2185 summary->set_in(0, Location::RequiresRegister()); 2186 summary->set_in(0, Location::RequiresRegister());
2186 summary->set_out(Location::SameAsFirstInput()); 2187 summary->set_out(Location::SameAsFirstInput());
(...skipping 104 matching lines...)
2291 locs()); 2292 locs());
2292 __ Bind(&done); 2293 __ Bind(&done);
2293 } 2294 }
2294 2295
2295 2296
2296 LocationSummary* DoubleToSmiInstr::MakeLocationSummary() const { 2297 LocationSummary* DoubleToSmiInstr::MakeLocationSummary() const {
2297 const intptr_t kNumInputs = 1; 2298 const intptr_t kNumInputs = 1;
2298 const intptr_t kNumTemps = 1; 2299 const intptr_t kNumTemps = 1;
2299 LocationSummary* result = new LocationSummary( 2300 LocationSummary* result = new LocationSummary(
2300 kNumInputs, kNumTemps, LocationSummary::kNoCall); 2301 kNumInputs, kNumTemps, LocationSummary::kNoCall);
2301 result->set_in(0, Location::RequiresXmmRegister()); 2302 result->set_in(0, Location::RequiresFpuRegister());
2302 result->set_out(Location:: Location::RequiresRegister()); 2303 result->set_out(Location:: Location::RequiresRegister());
2303 result->set_temp(0, Location::RequiresRegister()); 2304 result->set_temp(0, Location::RequiresRegister());
2304 return result; 2305 return result;
2305 } 2306 }
2306 2307
2307 2308
2308 void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2309 void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2309 Label* deopt = compiler->AddDeoptStub(deopt_id(), kDeoptDoubleToSmi); 2310 Label* deopt = compiler->AddDeoptStub(deopt_id(), kDeoptDoubleToSmi);
2310 Register result = locs()->out().reg(); 2311 Register result = locs()->out().reg();
2311 XmmRegister value = locs()->in(0).xmm_reg(); 2312 XmmRegister value = locs()->in(0).fpu_reg();
2312 Register temp = locs()->temp(0).reg(); 2313 Register temp = locs()->temp(0).reg();
2313 2314
2314 __ cvttsd2siq(result, value); 2315 __ cvttsd2siq(result, value);
2315 // Overflow is signalled with minint. 2316 // Overflow is signalled with minint.
2316 Label do_call, done; 2317 Label do_call, done;
2317 // Check for overflow and that it fits into Smi. 2318 // Check for overflow and that it fits into Smi.
2318 __ movq(temp, result); 2319 __ movq(temp, result);
2319 __ shlq(temp, Immediate(1)); 2320 __ shlq(temp, Immediate(1));
2320 __ j(OVERFLOW, deopt); 2321 __ j(OVERFLOW, deopt);
2321 __ SmiTag(result); 2322 __ SmiTag(result);
2322 } 2323 }
2323 2324
2324 2325
2325 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary() const { 2326 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary() const {
2326 const intptr_t kNumInputs = 1; 2327 const intptr_t kNumInputs = 1;
2327 const intptr_t kNumTemps = 2328 const intptr_t kNumTemps =
2328 (recognized_kind() == MethodRecognizer::kDoubleRound) ? 1 : 0; 2329 (recognized_kind() == MethodRecognizer::kDoubleRound) ? 1 : 0;
2329 LocationSummary* result = 2330 LocationSummary* result =
2330 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2331 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2331 result->set_in(0, Location::RequiresXmmRegister()); 2332 result->set_in(0, Location::RequiresFpuRegister());
2332 result->set_out(Location::RequiresXmmRegister()); 2333 result->set_out(Location::RequiresFpuRegister());
2333 if (recognized_kind() == MethodRecognizer::kDoubleRound) { 2334 if (recognized_kind() == MethodRecognizer::kDoubleRound) {
2334 result->set_temp(0, Location::RequiresXmmRegister()); 2335 result->set_temp(0, Location::RequiresFpuRegister());
2335 } 2336 }
2336 return result; 2337 return result;
2337 } 2338 }
2338 2339
2339 2340
2340 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2341 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2341 XmmRegister value = locs()->in(0).xmm_reg(); 2342 XmmRegister value = locs()->in(0).fpu_reg();
2342 XmmRegister result = locs()->out().xmm_reg(); 2343 XmmRegister result = locs()->out().fpu_reg();
2343 if (recognized_kind() == MethodRecognizer::kDoubleTruncate) { 2344 if (recognized_kind() == MethodRecognizer::kDoubleTruncate) {
2344 __ roundsd(result, value, Assembler::kRoundToZero); 2345 __ roundsd(result, value, Assembler::kRoundToZero);
2345 } else { 2346 } else {
2346 XmmRegister temp = locs()->temp(0).xmm_reg(); 2347 XmmRegister temp = locs()->temp(0).fpu_reg();
2347 __ DoubleRound(result, value, temp); 2348 __ DoubleRound(result, value, temp);
2348 } 2349 }
2349 } 2350 }
2350 2351
2351 2352
2352 LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary() const { 2353 LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary() const {
2353 return MakeCallSummary(); 2354 return MakeCallSummary();
2354 } 2355 }
2355 2356
2356 2357
(...skipping 218 matching lines...)
2575 LocationSummary* ShiftMintOpInstr::MakeLocationSummary() const { 2576 LocationSummary* ShiftMintOpInstr::MakeLocationSummary() const {
2576 UNIMPLEMENTED(); 2577 UNIMPLEMENTED();
2577 return NULL; 2578 return NULL;
2578 } 2579 }
2579 2580
2580 2581
2581 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2582 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2582 UNIMPLEMENTED(); 2583 UNIMPLEMENTED();
2583 } 2584 }
2584 2585
2586
2587 LocationSummary* ThrowInstr::MakeLocationSummary() const {
2588 return new LocationSummary(0, 0, LocationSummary::kCall);
2589 }
2590
2591
2592 void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2593 compiler->GenerateCallRuntime(token_pos(),
2594 kThrowRuntimeEntry,
2595 locs());
2596 __ int3();
2597 }
2598
2599
2600 LocationSummary* ReThrowInstr::MakeLocationSummary() const {
2601 return new LocationSummary(0, 0, LocationSummary::kCall);
2602 }
2603
2604
2605 void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2606 compiler->GenerateCallRuntime(token_pos(),
2607 kReThrowRuntimeEntry,
2608 locs());
2609 __ int3();
2610 }
2611
2612
2613 LocationSummary* GotoInstr::MakeLocationSummary() const {
2614 return new LocationSummary(0, 0, LocationSummary::kNoCall);
2615 }
2616
2617
2618 void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2619 // Add deoptimization descriptor for deoptimizing instructions
2620 // that may be inserted before this instruction.
2621 if (!compiler->is_optimizing()) {
2622 compiler->AddCurrentDescriptor(PcDescriptors::kDeoptBefore,
2623 GetDeoptId(),
2624 0); // No token position.
2625 }
2626
2627 if (HasParallelMove()) {
2628 compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
2629 }
2630
2631 // We can fall through if the successor is the next block in the list.
2632 // Otherwise, we need a jump.
2633 if (!compiler->IsNextBlock(successor())) {
2634 __ jmp(compiler->GetBlockLabel(successor()));
2635 }
2636 }
2637
2638
2639 static Condition NegateCondition(Condition condition) {
2640 switch (condition) {
2641 case EQUAL: return NOT_EQUAL;
2642 case NOT_EQUAL: return EQUAL;
2643 case LESS: return GREATER_EQUAL;
2644 case LESS_EQUAL: return GREATER;
2645 case GREATER: return LESS_EQUAL;
2646 case GREATER_EQUAL: return LESS;
2647 case BELOW: return ABOVE_EQUAL;
2648 case BELOW_EQUAL: return ABOVE;
2649 case ABOVE: return BELOW_EQUAL;
2650 case ABOVE_EQUAL: return BELOW;
2651 default:
2652 OS::Print("Error %d\n", condition);
2653 UNIMPLEMENTED();
2654 return EQUAL;
2655 }
2656 }
2657
2658
2659 void ControlInstruction::EmitBranchOnValue(FlowGraphCompiler* compiler,
2660 bool value) {
2661 if (value && compiler->IsNextBlock(false_successor())) {
2662 __ jmp(compiler->GetBlockLabel(true_successor()));
2663 } else if (!value && compiler->IsNextBlock(true_successor())) {
2664 __ jmp(compiler->GetBlockLabel(false_successor()));
2665 }
2666 }
2667
2668
2669 void ControlInstruction::EmitBranchOnCondition(FlowGraphCompiler* compiler,
2670 Condition true_condition) {
2671 if (compiler->IsNextBlock(false_successor())) {
2672 // If the next block is the false successor we will fall through to it.
2673 __ j(true_condition, compiler->GetBlockLabel(true_successor()));
2674 } else {
2675 // If the next block is the true successor we negate comparison and fall
2676 // through to it.
2677 ASSERT(compiler->IsNextBlock(true_successor()));
2678 Condition false_condition = NegateCondition(true_condition);
2679 __ j(false_condition, compiler->GetBlockLabel(false_successor()));
2680 }
2681 }
2682
2683
2684 LocationSummary* CurrentContextInstr::MakeLocationSummary() const {
2685 return LocationSummary::Make(0,
2686 Location::RequiresRegister(),
2687 LocationSummary::kNoCall);
2688 }
2689
2690
2691 void CurrentContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2692 __ MoveRegister(locs()->out().reg(), CTX);
2693 }
2694
2695
2696 LocationSummary* StrictCompareInstr::MakeLocationSummary() const {
2697 const intptr_t kNumInputs = 2;
2698 const intptr_t kNumTemps = 0;
2699 LocationSummary* locs =
2700 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2701 locs->set_in(0, Location::RegisterOrConstant(left()));
2702 locs->set_in(1, Location::RegisterOrConstant(right()));
2703 locs->set_out(Location::RequiresRegister());
2704 return locs;
2705 }
2706
2707
2708 // Special code for numbers (compare values instead of references.)
2709 void StrictCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2710 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
2711 Location left = locs()->in(0);
2712 Location right = locs()->in(1);
2713 if (left.IsConstant() && right.IsConstant()) {
2714 // TODO(vegorov): should be eliminated earlier by constant propagation.
2715 const bool result = (kind() == Token::kEQ_STRICT) ?
2716 left.constant().raw() == right.constant().raw() :
2717 left.constant().raw() != right.constant().raw();
2718 __ LoadObject(locs()->out().reg(), result ? Bool::True() : Bool::False());
2719 return;
2720 }
2721 if (left.IsConstant()) {
2722 compiler->EmitEqualityRegConstCompare(right.reg(),
2723 left.constant(),
2724 needs_number_check());
2725 } else if (right.IsConstant()) {
2726 compiler->EmitEqualityRegConstCompare(left.reg(),
2727 right.constant(),
2728 needs_number_check());
2729 } else {
2730 compiler->EmitEqualityRegRegCompare(left.reg(),
2731 right.reg(),
2732 needs_number_check());
2733 }
2734
2735 Register result = locs()->out().reg();
2736 Label load_true, done;
2737 Condition true_condition = (kind() == Token::kEQ_STRICT) ? EQUAL : NOT_EQUAL;
2738 __ j(true_condition, &load_true, Assembler::kNearJump);
2739 __ LoadObject(result, Bool::False());
2740 __ jmp(&done, Assembler::kNearJump);
2741 __ Bind(&load_true);
2742 __ LoadObject(result, Bool::True());
2743 __ Bind(&done);
2744 }
2745
2746
2747 void StrictCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler,
2748 BranchInstr* branch) {
2749 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
2750 Location left = locs()->in(0);
2751 Location right = locs()->in(1);
2752 if (left.IsConstant() && right.IsConstant()) {
2753 // TODO(vegorov): should be eliminated earlier by constant propagation.
2754 const bool result = (kind() == Token::kEQ_STRICT) ?
2755 left.constant().raw() == right.constant().raw() :
2756 left.constant().raw() != right.constant().raw();
2757 branch->EmitBranchOnValue(compiler, result);
2758 return;
2759 }
2760 if (left.IsConstant()) {
2761 compiler->EmitEqualityRegConstCompare(right.reg(),
2762 left.constant(),
2763 needs_number_check());
2764 } else if (right.IsConstant()) {
2765 compiler->EmitEqualityRegConstCompare(left.reg(),
2766 right.constant(),
2767 needs_number_check());
2768 } else {
2769 compiler->EmitEqualityRegRegCompare(left.reg(),
2770 right.reg(),
2771 needs_number_check());
2772 }
2773
2774 Condition true_condition = (kind() == Token::kEQ_STRICT) ? EQUAL : NOT_EQUAL;
2775 branch->EmitBranchOnCondition(compiler, true_condition);
2776 }
2777
2778
2779 void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2780 // The arguments to the stub include the closure, as does the arguments
2781 // descriptor.
2782 Register temp_reg = locs()->temp(0).reg();
2783 int argument_count = ArgumentCount();
2784 const Array& arguments_descriptor =
2785 Array::ZoneHandle(ArgumentsDescriptor::New(argument_count,
2786 argument_names()));
2787 __ LoadObject(temp_reg, arguments_descriptor);
2788 compiler->GenerateDartCall(deopt_id(),
2789 token_pos(),
2790 &StubCode::CallClosureFunctionLabel(),
2791 PcDescriptors::kOther,
2792 locs());
2793 __ Drop(argument_count);
2794 }
2795
2796
2797 LocationSummary* BooleanNegateInstr::MakeLocationSummary() const {
2798 return LocationSummary::Make(1,
2799 Location::RequiresRegister(),
2800 LocationSummary::kNoCall);
2801 }
2802
2803
2804 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2805 Register value = locs()->in(0).reg();
2806 Register result = locs()->out().reg();
2807
2808 Label done;
2809 __ LoadObject(result, Bool::True());
2810 __ CompareRegisters(result, value);
2811 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
2812 __ LoadObject(result, Bool::False());
2813 __ Bind(&done);
2814 }
2815
2816
2817 LocationSummary* ChainContextInstr::MakeLocationSummary() const {
2818 return LocationSummary::Make(1,
2819 Location::NoLocation(),
2820 LocationSummary::kNoCall);
2821 }
2822
2823
2824 void ChainContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2825 Register context_value = locs()->in(0).reg();
2826
2827 // Chain the new context in context_value to its parent in CTX.
2828 __ StoreIntoObject(context_value,
2829 FieldAddress(context_value, Context::parent_offset()),
2830 CTX);
2831 // Set new context as current context.
2832 __ MoveRegister(CTX, context_value);
2833 }
2834
2835
2836 LocationSummary* StoreVMFieldInstr::MakeLocationSummary() const {
2837 const intptr_t kNumInputs = 2;
2838 const intptr_t kNumTemps = 0;
2839 LocationSummary* locs =
2840 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2841 locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister()
2842 : Location::RequiresRegister());
2843 locs->set_in(1, Location::RequiresRegister());
2844 return locs;
2845 }
2846
2847
2848 void StoreVMFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2849 Register value_reg = locs()->in(0).reg();
2850 Register dest_reg = locs()->in(1).reg();
2851
2852 if (value()->NeedsStoreBuffer()) {
2853 __ StoreIntoObject(dest_reg, FieldAddress(dest_reg, offset_in_bytes()),
2854 value_reg);
2855 } else {
2856 __ StoreIntoObjectNoBarrier(
2857 dest_reg, FieldAddress(dest_reg, offset_in_bytes()), value_reg);
2858 }
2859 }
2860
2861
2862 LocationSummary* AllocateObjectInstr::MakeLocationSummary() const {
2863 return MakeCallSummary();
2864 }
2865
2866
2867 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2868 const Class& cls = Class::ZoneHandle(constructor().Owner());
2869 const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls));
2870 const ExternalLabel label(cls.ToCString(), stub.EntryPoint());
2871 compiler->GenerateCall(token_pos(),
2872 &label,
2873 PcDescriptors::kOther,
2874 locs());
2875 __ Drop(ArgumentCount()); // Discard arguments.
2876 }
2877
2878
2879 LocationSummary* CreateClosureInstr::MakeLocationSummary() const {
2880 return MakeCallSummary();
2881 }
2882
2883
2884 void CreateClosureInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2885 const Function& closure_function = function();
2886 ASSERT(!closure_function.IsImplicitStaticClosureFunction());
2887 const Code& stub = Code::Handle(
2888 StubCode::GetAllocationStubForClosure(closure_function));
2889 const ExternalLabel label(closure_function.ToCString(), stub.EntryPoint());
2890 compiler->GenerateCall(token_pos(),
2891 &label,
2892 PcDescriptors::kOther,
2893 locs());
2894 __ Drop(2); // Discard type arguments and receiver.
2895 }
2896
2585 } // namespace dart 2897 } // namespace dart
2586 2898
2587 #undef __ 2899 #undef __
2588 2900
2589 #endif // defined TARGET_ARCH_X64 2901 #endif // defined TARGET_ARCH_X64