Chromium Code Reviews

Side by Side Diff: runtime/vm/intermediate_language_ia32.cc

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
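Most of the changes in this file are mechanical: the IA32-specific location helpers Location::RequiresXmmRegister() and xmm_reg() are renamed to the architecture-neutral Location::RequiresFpuRegister() and fpu_reg(), LoadDoubleOrSmiToXmm becomes LoadDoubleOrSmiToFpu, and stack-allocated Immediate values are now bound through const references. A minimal, hypothetical sketch of the aliasing idea behind the rename (the enum values and header layout below are illustrative, not the real vm/constants_*.h):

// Hypothetical sketch only -- not the actual vm/constants_ia32.h or ARM headers:
// an architecture-neutral FpuRegister alias is what lets shared IL code ask for
// Location::RequiresFpuRegister() on either backend.
#include <cstdio>

#if defined(TARGET_ARCH_IA32)
enum XmmRegister { XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7 };
typedef XmmRegister FpuRegister;  // On IA32 the FPU registers are SSE registers.
#else
enum DRegister { D0, D1, D2, D3, D4, D5, D6, D7 };
typedef DRegister FpuRegister;    // On ARM they would be VFP double registers.
#endif

int main() {
  FpuRegister scratch = static_cast<FpuRegister>(0);
  std::printf("first FPU register id: %d\n", static_cast<int>(scratch));
  return 0;
}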
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32.
6 #if defined(TARGET_ARCH_IA32) 6 #if defined(TARGET_ARCH_IA32)
7 7
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 9
10 #include "lib/error.h" 10 #include "lib/error.h"
11 #include "vm/dart_entry.h"
11 #include "vm/flow_graph_compiler.h" 12 #include "vm/flow_graph_compiler.h"
12 #include "vm/locations.h" 13 #include "vm/locations.h"
13 #include "vm/object_store.h" 14 #include "vm/object_store.h"
14 #include "vm/parser.h" 15 #include "vm/parser.h"
15 #include "vm/stub_code.h" 16 #include "vm/stub_code.h"
16 #include "vm/symbols.h" 17 #include "vm/symbols.h"
17 18
18 #define __ compiler->assembler()-> 19 #define __ compiler->assembler()->
19 20
20 namespace dart { 21 namespace dart {
(...skipping 239 matching lines...)
260 261
261 262
262 LocationSummary* EqualityCompareInstr::MakeLocationSummary() const { 263 LocationSummary* EqualityCompareInstr::MakeLocationSummary() const {
263 const intptr_t kNumInputs = 2; 264 const intptr_t kNumInputs = 2;
264 const bool is_checked_strict_equal = 265 const bool is_checked_strict_equal =
265 HasICData() && ic_data()->AllTargetsHaveSameOwner(kInstanceCid); 266 HasICData() && ic_data()->AllTargetsHaveSameOwner(kInstanceCid);
266 if (receiver_class_id() == kMintCid) { 267 if (receiver_class_id() == kMintCid) {
267 const intptr_t kNumTemps = 1; 268 const intptr_t kNumTemps = 1;
268 LocationSummary* locs = 269 LocationSummary* locs =
269 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 270 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
270 locs->set_in(0, Location::RequiresXmmRegister()); 271 locs->set_in(0, Location::RequiresFpuRegister());
271 locs->set_in(1, Location::RequiresXmmRegister()); 272 locs->set_in(1, Location::RequiresFpuRegister());
272 locs->set_temp(0, Location::RequiresRegister()); 273 locs->set_temp(0, Location::RequiresRegister());
273 locs->set_out(Location::RequiresRegister()); 274 locs->set_out(Location::RequiresRegister());
274 return locs; 275 return locs;
275 } 276 }
276 if (receiver_class_id() == kDoubleCid) { 277 if (receiver_class_id() == kDoubleCid) {
277 const intptr_t kNumTemps = 0; 278 const intptr_t kNumTemps = 0;
278 LocationSummary* locs = 279 LocationSummary* locs =
279 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 280 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
280 locs->set_in(0, Location::RequiresXmmRegister()); 281 locs->set_in(0, Location::RequiresFpuRegister());
281 locs->set_in(1, Location::RequiresXmmRegister()); 282 locs->set_in(1, Location::RequiresFpuRegister());
282 locs->set_out(Location::RequiresRegister()); 283 locs->set_out(Location::RequiresRegister());
283 return locs; 284 return locs;
284 } 285 }
285 if (receiver_class_id() == kSmiCid) { 286 if (receiver_class_id() == kSmiCid) {
286 const intptr_t kNumTemps = 0; 287 const intptr_t kNumTemps = 0;
287 LocationSummary* locs = 288 LocationSummary* locs =
288 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 289 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
289 locs->set_in(0, Location::RegisterOrConstant(left())); 290 locs->set_in(0, Location::RegisterOrConstant(left()));
290 // Only one input can be a constant operand. The case of two constant 291 // Only one input can be a constant operand. The case of two constant
291 // operands should be handled by constant propagation. 292 // operands should be handled by constant propagation.
(...skipping 42 matching lines...)
334 const ICData& original_ic_data) { 335 const ICData& original_ic_data) {
335 if (!compiler->is_optimizing()) { 336 if (!compiler->is_optimizing()) {
336 compiler->AddCurrentDescriptor(PcDescriptors::kDeoptBefore, 337 compiler->AddCurrentDescriptor(PcDescriptors::kDeoptBefore,
337 deopt_id, 338 deopt_id,
338 token_pos); 339 token_pos);
339 } 340 }
340 const int kNumberOfArguments = 2; 341 const int kNumberOfArguments = 2;
341 const Array& kNoArgumentNames = Array::Handle(); 342 const Array& kNoArgumentNames = Array::Handle();
342 const int kNumArgumentsChecked = 2; 343 const int kNumArgumentsChecked = 2;
343 344
344 const Immediate raw_null = 345 const Immediate& raw_null =
345 Immediate(reinterpret_cast<intptr_t>(Object::null())); 346 Immediate(reinterpret_cast<intptr_t>(Object::null()));
346 Label check_identity; 347 Label check_identity;
347 __ cmpl(Address(ESP, 0 * kWordSize), raw_null); 348 __ cmpl(Address(ESP, 0 * kWordSize), raw_null);
348 __ j(EQUAL, &check_identity); 349 __ j(EQUAL, &check_identity);
349 __ cmpl(Address(ESP, 1 * kWordSize), raw_null); 350 __ cmpl(Address(ESP, 1 * kWordSize), raw_null);
350 __ j(EQUAL, &check_identity); 351 __ j(EQUAL, &check_identity);
351 352
352 ICData& equality_ic_data = ICData::ZoneHandle(); 353 ICData& equality_ic_data = ICData::ZoneHandle();
353 if (compiler->is_optimizing() && FLAG_propagate_ic_data) { 354 if (compiler->is_optimizing() && FLAG_propagate_ic_data) {
354 ASSERT(!original_ic_data.IsNull()); 355 ASSERT(!original_ic_data.IsNull());
(...skipping 165 matching lines...)
520 BranchInstr* branch, 521 BranchInstr* branch,
521 intptr_t deopt_id) { 522 intptr_t deopt_id) {
522 ASSERT((kind == Token::kEQ) || (kind == Token::kNE)); 523 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
523 Register left = locs.in(0).reg(); 524 Register left = locs.in(0).reg();
524 Register right = locs.in(1).reg(); 525 Register right = locs.in(1).reg();
525 Register temp = locs.temp(0).reg(); 526 Register temp = locs.temp(0).reg();
526 Label* deopt = compiler->AddDeoptStub(deopt_id, kDeoptEquality); 527 Label* deopt = compiler->AddDeoptStub(deopt_id, kDeoptEquality);
527 __ testl(left, Immediate(kSmiTagMask)); 528 __ testl(left, Immediate(kSmiTagMask));
528 __ j(ZERO, deopt); 529 __ j(ZERO, deopt);
529 // 'left' is not Smi. 530 // 'left' is not Smi.
530 const Immediate raw_null = 531 const Immediate& raw_null =
531 Immediate(reinterpret_cast<intptr_t>(Object::null())); 532 Immediate(reinterpret_cast<intptr_t>(Object::null()));
532 Label identity_compare; 533 Label identity_compare;
533 __ cmpl(right, raw_null); 534 __ cmpl(right, raw_null);
534 __ j(EQUAL, &identity_compare); 535 __ j(EQUAL, &identity_compare);
535 __ cmpl(left, raw_null); 536 __ cmpl(left, raw_null);
536 __ j(EQUAL, &identity_compare); 537 __ j(EQUAL, &identity_compare);
537 538
538 __ LoadClassId(temp, left); 539 __ LoadClassId(temp, left);
539 const intptr_t len = ic_data.NumberOfChecks(); 540 const intptr_t len = ic_data.NumberOfChecks();
540 for (intptr_t i = 0; i < len; i++) { 541 for (intptr_t i = 0; i < len; i++) {
(...skipping 30 matching lines...)
571 LocationSummary* locs, 572 LocationSummary* locs,
572 Token::Kind kind, 573 Token::Kind kind,
573 BranchInstr* branch, 574 BranchInstr* branch,
574 const ICData& ic_data, 575 const ICData& ic_data,
575 intptr_t deopt_id, 576 intptr_t deopt_id,
576 intptr_t token_pos) { 577 intptr_t token_pos) {
577 ASSERT((kind == Token::kEQ) || (kind == Token::kNE)); 578 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
578 ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0)); 579 ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0));
579 Register left = locs->in(0).reg(); 580 Register left = locs->in(0).reg();
580 Register right = locs->in(1).reg(); 581 Register right = locs->in(1).reg();
581 const Immediate raw_null = 582 const Immediate& raw_null =
582 Immediate(reinterpret_cast<intptr_t>(Object::null())); 583 Immediate(reinterpret_cast<intptr_t>(Object::null()));
583 Label done, identity_compare, non_null_compare; 584 Label done, identity_compare, non_null_compare;
584 __ cmpl(right, raw_null); 585 __ cmpl(right, raw_null);
585 __ j(EQUAL, &identity_compare, Assembler::kNearJump); 586 __ j(EQUAL, &identity_compare, Assembler::kNearJump);
586 __ cmpl(left, raw_null); 587 __ cmpl(left, raw_null);
587 __ j(NOT_EQUAL, &non_null_compare, Assembler::kNearJump); 588 __ j(NOT_EQUAL, &non_null_compare, Assembler::kNearJump);
588 // Comparison with NULL is "===". 589 // Comparison with NULL is "===".
589 __ Bind(&identity_compare); 590 __ Bind(&identity_compare);
590 __ cmpl(left, right); 591 __ cmpl(left, right);
591 Condition cond = TokenKindToSmiCondition(kind); 592 Condition cond = TokenKindToSmiCondition(kind);
(...skipping 65 matching lines...)
657 return OVERFLOW; 658 return OVERFLOW;
658 } 659 }
659 } 660 }
660 661
661 662
662 static void EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler, 663 static void EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
663 const LocationSummary& locs, 664 const LocationSummary& locs,
664 Token::Kind kind, 665 Token::Kind kind,
665 BranchInstr* branch) { 666 BranchInstr* branch) {
666 ASSERT(Token::IsEqualityOperator(kind)); 667 ASSERT(Token::IsEqualityOperator(kind));
667 XmmRegister left = locs.in(0).xmm_reg(); 668 XmmRegister left = locs.in(0).fpu_reg();
668 XmmRegister right = locs.in(1).xmm_reg(); 669 XmmRegister right = locs.in(1).fpu_reg();
669 Register temp = locs.temp(0).reg(); 670 Register temp = locs.temp(0).reg();
670 __ movaps(XMM0, left); 671 __ movaps(XMM0, left);
671 __ pcmpeqq(XMM0, right); 672 __ pcmpeqq(XMM0, right);
672 __ movd(temp, XMM0); 673 __ movd(temp, XMM0);
673 674
674 Condition true_condition = TokenKindToMintCondition(kind); 675 Condition true_condition = TokenKindToMintCondition(kind);
675 __ cmpl(temp, Immediate(-1)); 676 __ cmpl(temp, Immediate(-1));
676 677
677 if (branch != NULL) { 678 if (branch != NULL) {
678 branch->EmitBranchOnCondition(compiler, true_condition); 679 branch->EmitBranchOnCondition(compiler, true_condition);
679 } else { 680 } else {
680 Register result = locs.out().reg(); 681 Register result = locs.out().reg();
681 Label done, is_true; 682 Label done, is_true;
682 __ j(true_condition, &is_true); 683 __ j(true_condition, &is_true);
683 __ LoadObject(result, Bool::False()); 684 __ LoadObject(result, Bool::False());
684 __ jmp(&done); 685 __ jmp(&done);
685 __ Bind(&is_true); 686 __ Bind(&is_true);
686 __ LoadObject(result, Bool::True()); 687 __ LoadObject(result, Bool::True());
687 __ Bind(&done); 688 __ Bind(&done);
688 } 689 }
689 } 690 }
690 691
691 692
692 static void EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler, 693 static void EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
693 const LocationSummary& locs, 694 const LocationSummary& locs,
694 Token::Kind kind, 695 Token::Kind kind,
695 BranchInstr* branch) { 696 BranchInstr* branch) {
696 XmmRegister left = locs.in(0).xmm_reg(); 697 XmmRegister left = locs.in(0).fpu_reg();
697 XmmRegister right = locs.in(1).xmm_reg(); 698 XmmRegister right = locs.in(1).fpu_reg();
698 Register left_tmp = locs.temp(0).reg(); 699 Register left_tmp = locs.temp(0).reg();
699 Register right_tmp = locs.temp(1).reg(); 700 Register right_tmp = locs.temp(1).reg();
700 Register result = branch == NULL ? locs.out().reg() : kNoRegister; 701 Register result = branch == NULL ? locs.out().reg() : kNoRegister;
701 702
702 Condition hi_cond = OVERFLOW, lo_cond = OVERFLOW; 703 Condition hi_cond = OVERFLOW, lo_cond = OVERFLOW;
703 switch (kind) { 704 switch (kind) {
704 case Token::kLT: 705 case Token::kLT:
705 hi_cond = LESS; 706 hi_cond = LESS;
706 lo_cond = BELOW; 707 lo_cond = BELOW;
707 break; 708 break;
(...skipping 58 matching lines...)
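A hedged sketch of the 64-bit equality test used in EmitUnboxedMintEqualityOp above, written with SSE4.1 intrinsics instead of the VM assembler (compile with -msse4.1; the function name is made up): pcmpeqq fills the 64-bit lane with all ones exactly when the two mints are equal, so inspecting any 32 bits of the lane, as the movd / cmpl(temp, -1) pair does, decides equality.

#include <cstdint>
#include <cstdio>
#include <smmintrin.h>  // SSE4.1: _mm_cmpeq_epi64 is pcmpeqq

static bool MintEquals(int64_t a, int64_t b) {
  __m128i left  = _mm_set_epi64x(0, a);          // a in the low 64-bit lane
  __m128i right = _mm_set_epi64x(0, b);
  __m128i eq    = _mm_cmpeq_epi64(left, right);  // lane = all ones iff a == b
  // Mirrors `movd temp, XMM0; cmpl temp, -1`: the low 32 bits are either
  // 0xFFFFFFFF (equal) or 0x00000000 (not equal).
  return _mm_cvtsi128_si32(eq) == -1;
}

int main() {
  std::printf("%d %d\n", MintEquals(42, 42), MintEquals(42, -42));
  return 0;
}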
766 UNREACHABLE(); 767 UNREACHABLE();
767 return OVERFLOW; 768 return OVERFLOW;
768 } 769 }
769 } 770 }
770 771
771 772
772 static void EmitDoubleComparisonOp(FlowGraphCompiler* compiler, 773 static void EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
773 const LocationSummary& locs, 774 const LocationSummary& locs,
774 Token::Kind kind, 775 Token::Kind kind,
775 BranchInstr* branch) { 776 BranchInstr* branch) {
776 XmmRegister left = locs.in(0).xmm_reg(); 777 XmmRegister left = locs.in(0).fpu_reg();
777 XmmRegister right = locs.in(1).xmm_reg(); 778 XmmRegister right = locs.in(1).fpu_reg();
778 779
779 Condition true_condition = TokenKindToDoubleCondition(kind); 780 Condition true_condition = TokenKindToDoubleCondition(kind);
780 if (branch != NULL) { 781 if (branch != NULL) {
781 compiler->EmitDoubleCompareBranch( 782 compiler->EmitDoubleCompareBranch(
782 true_condition, left, right, branch); 783 true_condition, left, right, branch);
783 } else { 784 } else {
784 compiler->EmitDoubleCompareBool( 785 compiler->EmitDoubleCompareBool(
785 true_condition, left, right, locs.out().reg()); 786 true_condition, left, right, locs.out().reg());
786 } 787 }
787 } 788 }
(...skipping 88 matching lines...)
876 } 877 }
877 878
878 879
879 LocationSummary* RelationalOpInstr::MakeLocationSummary() const { 880 LocationSummary* RelationalOpInstr::MakeLocationSummary() const {
880 const intptr_t kNumInputs = 2; 881 const intptr_t kNumInputs = 2;
881 const intptr_t kNumTemps = 0; 882 const intptr_t kNumTemps = 0;
882 if (operands_class_id() == kMintCid) { 883 if (operands_class_id() == kMintCid) {
883 const intptr_t kNumTemps = 2; 884 const intptr_t kNumTemps = 2;
884 LocationSummary* locs = 885 LocationSummary* locs =
885 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 886 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
886 locs->set_in(0, Location::RequiresXmmRegister()); 887 locs->set_in(0, Location::RequiresFpuRegister());
887 locs->set_in(1, Location::RequiresXmmRegister()); 888 locs->set_in(1, Location::RequiresFpuRegister());
888 locs->set_temp(0, Location::RequiresRegister()); 889 locs->set_temp(0, Location::RequiresRegister());
889 locs->set_temp(1, Location::RequiresRegister()); 890 locs->set_temp(1, Location::RequiresRegister());
890 locs->set_out(Location::RequiresRegister()); 891 locs->set_out(Location::RequiresRegister());
891 return locs; 892 return locs;
892 } 893 }
893 if (operands_class_id() == kDoubleCid) { 894 if (operands_class_id() == kDoubleCid) {
894 LocationSummary* summary = 895 LocationSummary* summary =
895 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 896 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
896 summary->set_in(0, Location::RequiresXmmRegister()); 897 summary->set_in(0, Location::RequiresFpuRegister());
897 summary->set_in(1, Location::RequiresXmmRegister()); 898 summary->set_in(1, Location::RequiresFpuRegister());
898 summary->set_out(Location::RequiresRegister()); 899 summary->set_out(Location::RequiresRegister());
899 return summary; 900 return summary;
900 } else if (operands_class_id() == kSmiCid) { 901 } else if (operands_class_id() == kSmiCid) {
901 LocationSummary* summary = 902 LocationSummary* summary =
902 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 903 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
903 summary->set_in(0, Location::RegisterOrConstant(left())); 904 summary->set_in(0, Location::RegisterOrConstant(left()));
904 // Only one input can be a constant operand. The case of two constant 905 // Only one input can be a constant operand. The case of two constant
905 // operands should be handled by constant propagation. 906 // operands should be handled by constant propagation.
906 summary->set_in(1, summary->in(0).IsConstant() 907 summary->set_in(1, summary->in(0).IsConstant()
907 ? Location::RequiresRegister() 908 ? Location::RequiresRegister()
(...skipping 236 matching lines...)
1144 LocationSummary* locs = 1145 LocationSummary* locs =
1145 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 1146 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
1146 locs->set_in(0, Location::RequiresRegister()); 1147 locs->set_in(0, Location::RequiresRegister());
1147 // The smi index is either untagged and tagged again at the end of the 1148 // The smi index is either untagged and tagged again at the end of the
1148 // operation (element size == 1), or it is left smi tagged (for all element 1149 // operation (element size == 1), or it is left smi tagged (for all element
1149 // sizes > 1). 1150 // sizes > 1).
1150 locs->set_in(1, CanBeImmediateIndex(index(), class_id()) 1151 locs->set_in(1, CanBeImmediateIndex(index(), class_id())
1151 ? Location::RegisterOrSmiConstant(index()) 1152 ? Location::RegisterOrSmiConstant(index())
1152 : Location::RequiresRegister()); 1153 : Location::RequiresRegister());
1153 if (representation() == kUnboxedDouble) { 1154 if (representation() == kUnboxedDouble) {
1154 locs->set_out(Location::RequiresXmmRegister()); 1155 locs->set_out(Location::RequiresFpuRegister());
1155 } else { 1156 } else {
1156 locs->set_out(Location::RequiresRegister()); 1157 locs->set_out(Location::RequiresRegister());
1157 } 1158 }
1158 return locs; 1159 return locs;
1159 } 1160 }
1160 1161
1161 1162
1162 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1163 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1163 Register array = locs()->in(0).reg(); 1164 Register array = locs()->in(0).reg();
1164 Location index = locs()->in(1); 1165 Location index = locs()->in(1);
1165 1166
1166 if (class_id() == kExternalUint8ArrayCid) { 1167 if (class_id() == kExternalUint8ArrayCid) {
1167 Register result = locs()->out().reg(); 1168 Register result = locs()->out().reg();
1168 Address element_address = index.IsRegister() 1169 const Address& element_address = index.IsRegister()
1169 ? FlowGraphCompiler::ExternalElementAddressForRegIndex( 1170 ? FlowGraphCompiler::ExternalElementAddressForRegIndex(
1170 class_id(), result, index.reg()) 1171 class_id(), result, index.reg())
1171 : FlowGraphCompiler::ExternalElementAddressForIntIndex( 1172 : FlowGraphCompiler::ExternalElementAddressForIntIndex(
1172 class_id(), result, Smi::Cast(index.constant()).Value()); 1173 class_id(), result, Smi::Cast(index.constant()).Value());
1173 if (index.IsRegister()) { 1174 if (index.IsRegister()) {
1174 __ SmiUntag(index.reg()); 1175 __ SmiUntag(index.reg());
1175 } 1176 }
1176 __ movl(result, 1177 __ movl(result,
1177 FieldAddress(array, ExternalUint8Array::external_data_offset())); 1178 FieldAddress(array, ExternalUint8Array::external_data_offset()));
1178 __ movl(result, 1179 __ movl(result,
1179 Address(result, ExternalByteArrayData<uint8_t>::data_offset())); 1180 Address(result, ExternalByteArrayData<uint8_t>::data_offset()));
1180 __ movzxb(result, element_address); 1181 __ movzxb(result, element_address);
1181 __ SmiTag(result); 1182 __ SmiTag(result);
1182 if (index.IsRegister()) { 1183 if (index.IsRegister()) {
1183 __ SmiTag(index.reg()); // Re-tag. 1184 __ SmiTag(index.reg()); // Re-tag.
1184 } 1185 }
1185 return; 1186 return;
1186 } 1187 }
1187 1188
1188 FieldAddress element_address = index.IsRegister() 1189 FieldAddress element_address = index.IsRegister()
1189 ? FlowGraphCompiler::ElementAddressForRegIndex( 1190 ? FlowGraphCompiler::ElementAddressForRegIndex(
1190 class_id(), array, index.reg()) 1191 class_id(), array, index.reg())
1191 : FlowGraphCompiler::ElementAddressForIntIndex( 1192 : FlowGraphCompiler::ElementAddressForIntIndex(
1192 class_id(), array, Smi::Cast(index.constant()).Value()); 1193 class_id(), array, Smi::Cast(index.constant()).Value());
1193 1194
1194 if (representation() == kUnboxedDouble) { 1195 if (representation() == kUnboxedDouble) {
1195 XmmRegister result = locs()->out().xmm_reg(); 1196 XmmRegister result = locs()->out().fpu_reg();
1196 if (class_id() == kFloat32ArrayCid) { 1197 if (class_id() == kFloat32ArrayCid) {
1197 // Load single precision float. 1198 // Load single precision float.
1198 __ movss(result, element_address); 1199 __ movss(result, element_address);
1199 // Promote to double. 1200 // Promote to double.
1200 __ cvtss2sd(result, locs()->out().xmm_reg()); 1201 __ cvtss2sd(result, locs()->out().fpu_reg());
1201 } else { 1202 } else {
1202 ASSERT(class_id() == kFloat64ArrayCid); 1203 ASSERT(class_id() == kFloat64ArrayCid);
1203 __ movsd(result, element_address); 1204 __ movsd(result, element_address);
1204 } 1205 }
1205 return; 1206 return;
1206 } 1207 }
1207 1208
1208 Register result = locs()->out().reg(); 1209 Register result = locs()->out().reg();
1209 switch (class_id()) { 1210 switch (class_id()) {
1210 case kInt8ArrayCid: 1211 case kInt8ArrayCid:
(...skipping 53 matching lines...)
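The element loads above untag the smi index before using it in an address computation and re-tag it afterwards. A hedged sketch of the tagging scheme these SmiTag/SmiUntag/kSmiTagMask helpers rely on (assuming the usual Dart layout on IA32: one low tag bit, tag value 0 for smis, 31-bit payload, arithmetic right shift):

#include <cstdint>
#include <cstdio>

static int32_t SmiTag(int32_t value) {  // payload -> tagged smi
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}
static int32_t SmiUntag(int32_t smi) { return smi >> 1; }  // tagged smi -> payload
static bool IsSmi(int32_t raw) { return (raw & 1) == 0; }  // testl(reg, kSmiTagMask)

int main() {
  const int32_t raw = SmiTag(21);
  std::printf("tagged=%d untagged=%d is_smi=%d\n", raw, SmiUntag(raw), IsSmi(raw));
  return 0;
}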
1264 // EBX, ECX, EDX) instead of using a fixed register. 1265 // EBX, ECX, EDX) instead of using a fixed register.
1265 locs->set_in(2, Location::FixedRegisterOrSmiConstant(value(), EAX)); 1266 locs->set_in(2, Location::FixedRegisterOrSmiConstant(value(), EAX));
1266 break; 1267 break;
1267 case kInt16ArrayCid: 1268 case kInt16ArrayCid:
1268 case kUint16ArrayCid: 1269 case kUint16ArrayCid:
1269 // Writable register because the value must be untagged before storing. 1270 // Writable register because the value must be untagged before storing.
1270 locs->set_in(2, Location::WritableRegister()); 1271 locs->set_in(2, Location::WritableRegister());
1271 break; 1272 break;
1272 case kFloat32ArrayCid: 1273 case kFloat32ArrayCid:
1273 // Need temp register for float-to-double conversion. 1274 // Need temp register for float-to-double conversion.
1274 locs->AddTemp(Location::RequiresXmmRegister()); 1275 locs->AddTemp(Location::RequiresFpuRegister());
1275 // Fall through. 1276 // Fall through.
1276 case kFloat64ArrayCid: 1277 case kFloat64ArrayCid:
1277 // TODO(srdjan): Support Float64 constants. 1278 // TODO(srdjan): Support Float64 constants.
1278 locs->set_in(2, Location::RequiresXmmRegister()); 1279 locs->set_in(2, Location::RequiresFpuRegister());
1279 break; 1280 break;
1280 default: 1281 default:
1281 UNREACHABLE(); 1282 UNREACHABLE();
1282 return NULL; 1283 return NULL;
1283 } 1284 }
1284 return locs; 1285 return locs;
1285 } 1286 }
1286 1287
1287 1288
1288 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1289 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
(...skipping 74 matching lines...)
1363 } 1364 }
1364 case kInt16ArrayCid: 1365 case kInt16ArrayCid:
1365 case kUint16ArrayCid: { 1366 case kUint16ArrayCid: {
1366 Register value = locs()->in(2).reg(); 1367 Register value = locs()->in(2).reg();
1367 __ SmiUntag(value); 1368 __ SmiUntag(value);
1368 __ movw(element_address, value); 1369 __ movw(element_address, value);
1369 break; 1370 break;
1370 } 1371 }
1371 case kFloat32ArrayCid: 1372 case kFloat32ArrayCid:
1372 // Convert to single precision. 1373 // Convert to single precision.
1373 __ cvtsd2ss(locs()->temp(0).xmm_reg(), locs()->in(2).xmm_reg()); 1374 __ cvtsd2ss(locs()->temp(0).fpu_reg(), locs()->in(2).fpu_reg());
1374 // Store. 1375 // Store.
1375 __ movss(element_address, locs()->temp(0).xmm_reg()); 1376 __ movss(element_address, locs()->temp(0).fpu_reg());
1376 break; 1377 break;
1377 case kFloat64ArrayCid: 1378 case kFloat64ArrayCid:
1378 __ movsd(element_address, locs()->in(2).xmm_reg()); 1379 __ movsd(element_address, locs()->in(2).fpu_reg());
1379 break; 1380 break;
1380 default: 1381 default:
1381 UNREACHABLE(); 1382 UNREACHABLE();
1382 } 1383 }
1383 } 1384 }
1384 1385
1385 1386
1386 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary() const { 1387 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary() const {
1387 const intptr_t kNumInputs = 2; 1388 const intptr_t kNumInputs = 2;
1388 const intptr_t num_temps = 0; 1389 const intptr_t num_temps = 0;
(...skipping 189 matching lines...)
1578 Register result_reg = locs()->out().reg(); 1579 Register result_reg = locs()->out().reg();
1579 1580
1580 // 'instantiator_reg' is the instantiator AbstractTypeArguments object 1581 // 'instantiator_reg' is the instantiator AbstractTypeArguments object
1581 // (or null). 1582 // (or null).
1582 // If the instantiator is null and if the type argument vector 1583 // If the instantiator is null and if the type argument vector
1583 // instantiated from null becomes a vector of dynamic, then use null as 1584 // instantiated from null becomes a vector of dynamic, then use null as
1584 // the type arguments. 1585 // the type arguments.
1585 Label type_arguments_instantiated; 1586 Label type_arguments_instantiated;
1586 const intptr_t len = type_arguments().Length(); 1587 const intptr_t len = type_arguments().Length();
1587 if (type_arguments().IsRawInstantiatedRaw(len)) { 1588 if (type_arguments().IsRawInstantiatedRaw(len)) {
1588 const Immediate raw_null = 1589 const Immediate& raw_null =
1589 Immediate(reinterpret_cast<intptr_t>(Object::null())); 1590 Immediate(reinterpret_cast<intptr_t>(Object::null()));
1590 __ cmpl(instantiator_reg, raw_null); 1591 __ cmpl(instantiator_reg, raw_null);
1591 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump); 1592 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump);
1592 } 1593 }
1593 // Instantiate non-null type arguments. 1594 // Instantiate non-null type arguments.
1594 if (type_arguments().IsUninstantiatedIdentity()) { 1595 if (type_arguments().IsUninstantiatedIdentity()) {
1595 // Check if the instantiator type argument vector is a TypeArguments of a 1596 // Check if the instantiator type argument vector is a TypeArguments of a
1596 // matching length and, if so, use it as the instantiated type_arguments. 1597 // matching length and, if so, use it as the instantiated type_arguments.
1597 // No need to check the instantiator ('instantiator_reg') for null here, 1598 // No need to check the instantiator ('instantiator_reg') for null here,
1598 // because a null instantiator will have the wrong class (Null instead of 1599 // because a null instantiator will have the wrong class (Null instead of
(...skipping 42 matching lines...)
1641 Register temp_reg = locs()->temp(0).reg(); 1642 Register temp_reg = locs()->temp(0).reg();
1642 1643
1643 // instantiator_reg is the instantiator type argument vector, i.e. an 1644 // instantiator_reg is the instantiator type argument vector, i.e. an
1644 // AbstractTypeArguments object (or null). 1645 // AbstractTypeArguments object (or null).
1645 // If the instantiator is null and if the type argument vector 1646 // If the instantiator is null and if the type argument vector
1646 // instantiated from null becomes a vector of dynamic, then use null as 1647 // instantiated from null becomes a vector of dynamic, then use null as
1647 // the type arguments. 1648 // the type arguments.
1648 Label type_arguments_instantiated; 1649 Label type_arguments_instantiated;
1649 const intptr_t len = type_arguments().Length(); 1650 const intptr_t len = type_arguments().Length();
1650 if (type_arguments().IsRawInstantiatedRaw(len)) { 1651 if (type_arguments().IsRawInstantiatedRaw(len)) {
1651 const Immediate raw_null = 1652 const Immediate& raw_null =
1652 Immediate(reinterpret_cast<intptr_t>(Object::null())); 1653 Immediate(reinterpret_cast<intptr_t>(Object::null()));
1653 __ cmpl(instantiator_reg, raw_null); 1654 __ cmpl(instantiator_reg, raw_null);
1654 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump); 1655 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump);
1655 } 1656 }
1656 // Instantiate non-null type arguments. 1657 // Instantiate non-null type arguments.
1657 if (type_arguments().IsUninstantiatedIdentity()) { 1658 if (type_arguments().IsUninstantiatedIdentity()) {
1658 // Check if the instantiator type argument vector is a TypeArguments of a 1659 // Check if the instantiator type argument vector is a TypeArguments of a
1659 // matching length and, if so, use it as the instantiated type_arguments. 1660 // matching length and, if so, use it as the instantiated type_arguments.
1660 // No need to check instantiator_reg for null here, because a null 1661 // No need to check instantiator_reg for null here, because a null
1661 // instantiator will have the wrong class (Null instead of TypeArguments). 1662 // instantiator will have the wrong class (Null instead of TypeArguments).
1662 Label type_arguments_uninstantiated; 1663 Label type_arguments_uninstantiated;
1663 __ CompareClassId(instantiator_reg, kTypeArgumentsCid, temp_reg); 1664 __ CompareClassId(instantiator_reg, kTypeArgumentsCid, temp_reg);
1664 __ j(NOT_EQUAL, &type_arguments_uninstantiated, Assembler::kNearJump); 1665 __ j(NOT_EQUAL, &type_arguments_uninstantiated, Assembler::kNearJump);
1665 Immediate arguments_length = 1666 const Immediate& arguments_length =
1666 Immediate(Smi::RawValue(type_arguments().Length())); 1667 Immediate(Smi::RawValue(type_arguments().Length()));
1667 __ cmpl(FieldAddress(instantiator_reg, TypeArguments::length_offset()), 1668 __ cmpl(FieldAddress(instantiator_reg, TypeArguments::length_offset()),
1668 arguments_length); 1669 arguments_length);
1669 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump); 1670 __ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump);
1670 __ Bind(&type_arguments_uninstantiated); 1671 __ Bind(&type_arguments_uninstantiated);
1671 } 1672 }
1672 // In the non-factory case, we rely on the allocation stub to 1673 // In the non-factory case, we rely on the allocation stub to
1673 // instantiate the type arguments. 1674 // instantiate the type arguments.
1674 __ LoadObject(result_reg, type_arguments()); 1675 __ LoadObject(result_reg, type_arguments());
1675 // result_reg: uninstantiated type arguments. 1676 // result_reg: uninstantiated type arguments.
(...skipping 21 matching lines...)
1697 ASSERT(locs()->out().reg() == instantiator_reg); 1698 ASSERT(locs()->out().reg() == instantiator_reg);
1698 Register temp_reg = locs()->temp(0).reg(); 1699 Register temp_reg = locs()->temp(0).reg();
1699 1700
1700 // instantiator_reg is the instantiator AbstractTypeArguments object 1701 // instantiator_reg is the instantiator AbstractTypeArguments object
1701 // (or null). If the instantiator is null and if the type argument vector 1702 // (or null). If the instantiator is null and if the type argument vector
1702 // instantiated from null becomes a vector of dynamic, then use null as 1703 // instantiated from null becomes a vector of dynamic, then use null as
1703 // the type arguments and do not pass the instantiator. 1704 // the type arguments and do not pass the instantiator.
1704 Label done; 1705 Label done;
1705 const intptr_t len = type_arguments().Length(); 1706 const intptr_t len = type_arguments().Length();
1706 if (type_arguments().IsRawInstantiatedRaw(len)) { 1707 if (type_arguments().IsRawInstantiatedRaw(len)) {
1707 const Immediate raw_null = 1708 const Immediate& raw_null =
1708 Immediate(reinterpret_cast<intptr_t>(Object::null())); 1709 Immediate(reinterpret_cast<intptr_t>(Object::null()));
1709 Label instantiator_not_null; 1710 Label instantiator_not_null;
1710 __ cmpl(instantiator_reg, raw_null); 1711 __ cmpl(instantiator_reg, raw_null);
1711 __ j(NOT_EQUAL, &instantiator_not_null, Assembler::kNearJump); 1712 __ j(NOT_EQUAL, &instantiator_not_null, Assembler::kNearJump);
1712 // Null was used in VisitExtractConstructorTypeArguments as the 1713 // Null was used in VisitExtractConstructorTypeArguments as the
1713 // instantiated type arguments, no proper instantiator needed. 1714 // instantiated type arguments, no proper instantiator needed.
1714 __ movl(instantiator_reg, 1715 __ movl(instantiator_reg,
1715 Immediate(Smi::RawValue(StubCode::kNoInstantiator))); 1716 Immediate(Smi::RawValue(StubCode::kNoInstantiator)));
1716 __ jmp(&done); 1717 __ jmp(&done);
1717 __ Bind(&instantiator_not_null); 1718 __ Bind(&instantiator_not_null);
1718 } 1719 }
1719 // Instantiate non-null type arguments. 1720 // Instantiate non-null type arguments.
1720 if (type_arguments().IsUninstantiatedIdentity()) { 1721 if (type_arguments().IsUninstantiatedIdentity()) {
1721 // TODO(regis): The following emitted code is duplicated in 1722 // TODO(regis): The following emitted code is duplicated in
1722 // VisitExtractConstructorTypeArguments above. The reason is that the code 1723 // VisitExtractConstructorTypeArguments above. The reason is that the code
1723 // is split between two computations, so that each one produces a 1724 // is split between two computations, so that each one produces a
1724 // single value, rather than producing a pair of values. 1725 // single value, rather than producing a pair of values.
1725 // If this becomes an issue, we should expose these tests at the IL level. 1726 // If this becomes an issue, we should expose these tests at the IL level.
1726 1727
1727 // Check if the instantiator type argument vector is a TypeArguments of a 1728 // Check if the instantiator type argument vector is a TypeArguments of a
1728 // matching length and, if so, use it as the instantiated type_arguments. 1729 // matching length and, if so, use it as the instantiated type_arguments.
1729 // No need to check the instantiator (RAX) for null here, because a null 1730 // No need to check the instantiator (RAX) for null here, because a null
1730 // instantiator will have the wrong class (Null instead of TypeArguments). 1731 // instantiator will have the wrong class (Null instead of TypeArguments).
1731 __ CompareClassId(instantiator_reg, kTypeArgumentsCid, temp_reg); 1732 __ CompareClassId(instantiator_reg, kTypeArgumentsCid, temp_reg);
1732 __ j(NOT_EQUAL, &done, Assembler::kNearJump); 1733 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
1733 Immediate arguments_length = 1734 const Immediate& arguments_length =
1734 Immediate(Smi::RawValue(type_arguments().Length())); 1735 Immediate(Smi::RawValue(type_arguments().Length()));
1735 __ cmpl(FieldAddress(instantiator_reg, TypeArguments::length_offset()), 1736 __ cmpl(FieldAddress(instantiator_reg, TypeArguments::length_offset()),
1736 arguments_length); 1737 arguments_length);
1737 __ j(NOT_EQUAL, &done, Assembler::kNearJump); 1738 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
1738 // The instantiator was used in VisitExtractConstructorTypeArguments as the 1739 // The instantiator was used in VisitExtractConstructorTypeArguments as the
1739 // instantiated type arguments, no proper instantiator needed. 1740 // instantiated type arguments, no proper instantiator needed.
1740 __ movl(instantiator_reg, 1741 __ movl(instantiator_reg,
1741 Immediate(Smi::RawValue(StubCode::kNoInstantiator))); 1742 Immediate(Smi::RawValue(StubCode::kNoInstantiator)));
1742 } 1743 }
1743 __ Bind(&done); 1744 __ Bind(&done);
(...skipping 413 matching lines...)
2157 } 2158 }
2158 2159
2159 2160
2160 LocationSummary* BoxDoubleInstr::MakeLocationSummary() const { 2161 LocationSummary* BoxDoubleInstr::MakeLocationSummary() const {
2161 const intptr_t kNumInputs = 1; 2162 const intptr_t kNumInputs = 1;
2162 const intptr_t kNumTemps = 0; 2163 const intptr_t kNumTemps = 0;
2163 LocationSummary* summary = 2164 LocationSummary* summary =
2164 new LocationSummary(kNumInputs, 2165 new LocationSummary(kNumInputs,
2165 kNumTemps, 2166 kNumTemps,
2166 LocationSummary::kCallOnSlowPath); 2167 LocationSummary::kCallOnSlowPath);
2167 summary->set_in(0, Location::RequiresXmmRegister()); 2168 summary->set_in(0, Location::RequiresFpuRegister());
2168 summary->set_out(Location::RequiresRegister()); 2169 summary->set_out(Location::RequiresRegister());
2169 return summary; 2170 return summary;
2170 } 2171 }
2171 2172
2172 2173
2173 class BoxDoubleSlowPath : public SlowPathCode { 2174 class BoxDoubleSlowPath : public SlowPathCode {
2174 public: 2175 public:
2175 explicit BoxDoubleSlowPath(BoxDoubleInstr* instruction) 2176 explicit BoxDoubleSlowPath(BoxDoubleInstr* instruction)
2176 : instruction_(instruction) { } 2177 : instruction_(instruction) { }
2177 2178
(...skipping 22 matching lines...)
2200 private: 2201 private:
2201 BoxDoubleInstr* instruction_; 2202 BoxDoubleInstr* instruction_;
2202 }; 2203 };
2203 2204
2204 2205
2205 void BoxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2206 void BoxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2206 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this); 2207 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
2207 compiler->AddSlowPathCode(slow_path); 2208 compiler->AddSlowPathCode(slow_path);
2208 2209
2209 Register out_reg = locs()->out().reg(); 2210 Register out_reg = locs()->out().reg();
2210 XmmRegister value = locs()->in(0).xmm_reg(); 2211 XmmRegister value = locs()->in(0).fpu_reg();
2211 2212
2212 AssemblerMacros::TryAllocate(compiler->assembler(), 2213 AssemblerMacros::TryAllocate(compiler->assembler(),
2213 compiler->double_class(), 2214 compiler->double_class(),
2214 slow_path->entry_label(), 2215 slow_path->entry_label(),
2215 Assembler::kFarJump, 2216 Assembler::kFarJump,
2216 out_reg); 2217 out_reg);
2217 __ Bind(slow_path->exit_label()); 2218 __ Bind(slow_path->exit_label());
2218 __ movsd(FieldAddress(out_reg, Double::value_offset()), value); 2219 __ movsd(FieldAddress(out_reg, Double::value_offset()), value);
2219 } 2220 }
2220 2221
2221 2222
2222 LocationSummary* UnboxDoubleInstr::MakeLocationSummary() const { 2223 LocationSummary* UnboxDoubleInstr::MakeLocationSummary() const {
2223 const intptr_t kNumInputs = 1; 2224 const intptr_t kNumInputs = 1;
2224 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0; 2225 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
2225 LocationSummary* summary = 2226 LocationSummary* summary =
2226 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2227 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2227 summary->set_in(0, Location::RequiresRegister()); 2228 summary->set_in(0, Location::RequiresRegister());
2228 if (CanDeoptimize()) summary->set_temp(0, Location::RequiresRegister()); 2229 if (CanDeoptimize()) summary->set_temp(0, Location::RequiresRegister());
2229 summary->set_out(Location::RequiresXmmRegister()); 2230 summary->set_out(Location::RequiresFpuRegister());
2230 return summary; 2231 return summary;
2231 } 2232 }
2232 2233
2233 2234
2234 void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2235 void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2235 const intptr_t value_cid = value()->ResultCid(); 2236 const intptr_t value_cid = value()->ResultCid();
2236 const Register value = locs()->in(0).reg(); 2237 const Register value = locs()->in(0).reg();
2237 const XmmRegister result = locs()->out().xmm_reg(); 2238 const XmmRegister result = locs()->out().fpu_reg();
2238 2239
2239 if (value_cid == kDoubleCid) { 2240 if (value_cid == kDoubleCid) {
2240 __ movsd(result, FieldAddress(value, Double::value_offset())); 2241 __ movsd(result, FieldAddress(value, Double::value_offset()));
2241 } else if (value_cid == kSmiCid) { 2242 } else if (value_cid == kSmiCid) {
2242 __ SmiUntag(value); // Untag input before conversion. 2243 __ SmiUntag(value); // Untag input before conversion.
2243 __ cvtsi2sd(result, value); 2244 __ cvtsi2sd(result, value);
2244 __ SmiTag(value); // Restore input register. 2245 __ SmiTag(value); // Restore input register.
2245 } else { 2246 } else {
2246 Label* deopt = compiler->AddDeoptStub(deopt_id_, kDeoptBinaryDoubleOp); 2247 Label* deopt = compiler->AddDeoptStub(deopt_id_, kDeoptBinaryDoubleOp);
2247 compiler->LoadDoubleOrSmiToXmm(result, 2248 compiler->LoadDoubleOrSmiToFpu(result,
2248 value, 2249 value,
2249 locs()->temp(0).reg(), 2250 locs()->temp(0).reg(),
2250 deopt); 2251 deopt);
2251 } 2252 }
2252 } 2253 }
2253 2254
2254 2255
2255 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary() const { 2256 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary() const {
2256 const intptr_t kNumInputs = 2; 2257 const intptr_t kNumInputs = 2;
2257 const intptr_t kNumTemps = 0; 2258 const intptr_t kNumTemps = 0;
2258 LocationSummary* summary = 2259 LocationSummary* summary =
2259 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2260 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2260 summary->set_in(0, Location::RequiresXmmRegister()); 2261 summary->set_in(0, Location::RequiresFpuRegister());
2261 summary->set_in(1, Location::RequiresXmmRegister()); 2262 summary->set_in(1, Location::RequiresFpuRegister());
2262 summary->set_out(Location::SameAsFirstInput()); 2263 summary->set_out(Location::SameAsFirstInput());
2263 return summary; 2264 return summary;
2264 } 2265 }
2265 2266
2266 2267
2267 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2268 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2268 XmmRegister left = locs()->in(0).xmm_reg(); 2269 XmmRegister left = locs()->in(0).fpu_reg();
2269 XmmRegister right = locs()->in(1).xmm_reg(); 2270 XmmRegister right = locs()->in(1).fpu_reg();
2270 2271
2271 ASSERT(locs()->out().xmm_reg() == left); 2272 ASSERT(locs()->out().fpu_reg() == left);
2272 2273
2273 switch (op_kind()) { 2274 switch (op_kind()) {
2274 case Token::kADD: __ addsd(left, right); break; 2275 case Token::kADD: __ addsd(left, right); break;
2275 case Token::kSUB: __ subsd(left, right); break; 2276 case Token::kSUB: __ subsd(left, right); break;
2276 case Token::kMUL: __ mulsd(left, right); break; 2277 case Token::kMUL: __ mulsd(left, right); break;
2277 case Token::kDIV: __ divsd(left, right); break; 2278 case Token::kDIV: __ divsd(left, right); break;
2278 default: UNREACHABLE(); 2279 default: UNREACHABLE();
2279 } 2280 }
2280 } 2281 }
2281 2282
2282 2283
2283 LocationSummary* MathSqrtInstr::MakeLocationSummary() const { 2284 LocationSummary* MathSqrtInstr::MakeLocationSummary() const {
2284 const intptr_t kNumInputs = 1; 2285 const intptr_t kNumInputs = 1;
2285 const intptr_t kNumTemps = 0; 2286 const intptr_t kNumTemps = 0;
2286 LocationSummary* summary = 2287 LocationSummary* summary =
2287 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2288 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2288 summary->set_in(0, Location::RequiresXmmRegister()); 2289 summary->set_in(0, Location::RequiresFpuRegister());
2289 summary->set_out(Location::RequiresXmmRegister()); 2290 summary->set_out(Location::RequiresFpuRegister());
2290 return summary; 2291 return summary;
2291 } 2292 }
2292 2293
2293 2294
2294 void MathSqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2295 void MathSqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2295 __ sqrtsd(locs()->out().xmm_reg(), locs()->in(0).xmm_reg()); 2296 __ sqrtsd(locs()->out().fpu_reg(), locs()->in(0).fpu_reg());
2296 } 2297 }
2297 2298
2298 2299
2299 LocationSummary* UnarySmiOpInstr::MakeLocationSummary() const { 2300 LocationSummary* UnarySmiOpInstr::MakeLocationSummary() const {
2300 const intptr_t kNumInputs = 1; 2301 const intptr_t kNumInputs = 1;
2301 const intptr_t kNumTemps = 0; 2302 const intptr_t kNumTemps = 0;
2302 LocationSummary* summary = 2303 LocationSummary* summary =
2303 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2304 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2304 summary->set_in(0, Location::RequiresRegister()); 2305 summary->set_in(0, Location::RequiresRegister());
2305 summary->set_out(Location::SameAsFirstInput()); 2306 summary->set_out(Location::SameAsFirstInput());
(...skipping 99 matching lines...)
2405 locs()); 2406 locs());
2406 __ Bind(&done); 2407 __ Bind(&done);
2407 } 2408 }
2408 2409
2409 2410
2410 LocationSummary* DoubleToSmiInstr::MakeLocationSummary() const { 2411 LocationSummary* DoubleToSmiInstr::MakeLocationSummary() const {
2411 const intptr_t kNumInputs = 1; 2412 const intptr_t kNumInputs = 1;
2412 const intptr_t kNumTemps = 0; 2413 const intptr_t kNumTemps = 0;
2413 LocationSummary* result = new LocationSummary( 2414 LocationSummary* result = new LocationSummary(
2414 kNumInputs, kNumTemps, LocationSummary::kNoCall); 2415 kNumInputs, kNumTemps, LocationSummary::kNoCall);
2415 result->set_in(0, Location::RequiresXmmRegister()); 2416 result->set_in(0, Location::RequiresFpuRegister());
2416 result->set_out(Location::RequiresRegister()); 2417 result->set_out(Location::RequiresRegister());
2417 return result; 2418 return result;
2418 } 2419 }
2419 2420
2420 2421
2421 void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2422 void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2422 Label* deopt = compiler->AddDeoptStub(deopt_id(), kDeoptDoubleToSmi); 2423 Label* deopt = compiler->AddDeoptStub(deopt_id(), kDeoptDoubleToSmi);
2423 Register result = locs()->out().reg(); 2424 Register result = locs()->out().reg();
2424 XmmRegister value = locs()->in(0).xmm_reg(); 2425 XmmRegister value = locs()->in(0).fpu_reg();
2425 __ cvttsd2si(result, value); 2426 __ cvttsd2si(result, value);
2426 // Overflow is signalled with minint. 2427 // Overflow is signalled with minint.
2427 Label do_call, done; 2428 Label do_call, done;
2428 // Check for overflow and that it fits into Smi. 2429 // Check for overflow and that it fits into Smi.
2429 __ cmpl(result, Immediate(0xC0000000)); 2430 __ cmpl(result, Immediate(0xC0000000));
2430 __ j(NEGATIVE, deopt); 2431 __ j(NEGATIVE, deopt);
2431 __ SmiTag(result); 2432 __ SmiTag(result);
2432 } 2433 }
2433 2434
2434 2435
2435 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary() const { 2436 LocationSummary* DoubleToDoubleInstr::MakeLocationSummary() const {
2436 const intptr_t kNumInputs = 1; 2437 const intptr_t kNumInputs = 1;
2437 const intptr_t kNumTemps = 2438 const intptr_t kNumTemps =
2438 (recognized_kind() == MethodRecognizer::kDoubleRound) ? 1 : 0; 2439 (recognized_kind() == MethodRecognizer::kDoubleRound) ? 1 : 0;
2439 LocationSummary* result = 2440 LocationSummary* result =
2440 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2441 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2441 result->set_in(0, Location::RequiresXmmRegister()); 2442 result->set_in(0, Location::RequiresFpuRegister());
2442 result->set_out(Location::RequiresXmmRegister()); 2443 result->set_out(Location::RequiresFpuRegister());
2443 if (recognized_kind() == MethodRecognizer::kDoubleRound) { 2444 if (recognized_kind() == MethodRecognizer::kDoubleRound) {
2444 result->set_temp(0, Location::RequiresXmmRegister()); 2445 result->set_temp(0, Location::RequiresFpuRegister());
2445 } 2446 }
2446 return result; 2447 return result;
2447 } 2448 }
2448 2449
2449 2450
2450 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2451 void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2451 XmmRegister value = locs()->in(0).xmm_reg(); 2452 XmmRegister value = locs()->in(0).fpu_reg();
2452 XmmRegister result = locs()->out().xmm_reg(); 2453 XmmRegister result = locs()->out().fpu_reg();
2453 if (recognized_kind() == MethodRecognizer::kDoubleTruncate) { 2454 if (recognized_kind() == MethodRecognizer::kDoubleTruncate) {
2454 __ roundsd(result, value, Assembler::kRoundToZero); 2455 __ roundsd(result, value, Assembler::kRoundToZero);
2455 } else { 2456 } else {
2456 XmmRegister temp = locs()->temp(0).xmm_reg(); 2457 XmmRegister temp = locs()->temp(0).fpu_reg();
2457 __ DoubleRound(result, value, temp); 2458 __ DoubleRound(result, value, temp);
2458 } 2459 }
2459 } 2460 }
2460 2461
2461 2462
2462 LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary() const { 2463 LocationSummary* PolymorphicInstanceCallInstr::MakeLocationSummary() const {
2463 return MakeCallSummary(); 2464 return MakeCallSummary();
2464 } 2465 }
2465 2466
2466 2467
(...skipping 172 matching lines...)
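In DoubleToSmiInstr::EmitNativeCode above, cvttsd2si reports overflow by producing INT32_MIN, and the single cmpl(result, Immediate(0xC0000000)) / j(NEGATIVE, deopt) pair rejects both that sentinel and any truncated value outside the 31-bit smi range. A hedged sketch of the same test in portable C++ (the helper name is made up):

#include <cstdint>
#include <cstdio>

static bool TruncatedFitsSmi(int32_t r) {
  // cmpl computes r - 0xC0000000 (= r + 2^30 mod 2^32) and j(NEGATIVE) looks
  // at its sign bit: the bit is clear exactly when r is in [-2^30, 2^30 - 1].
  return (static_cast<uint32_t>(r) - 0xC0000000u) < 0x80000000u;
}

int main() {
  std::printf("%d %d %d %d\n",
              TruncatedFitsSmi((1 << 30) - 1),  // 1: kMaxSmi
              TruncatedFitsSmi(-(1 << 30)),     // 1: kMinSmi
              TruncatedFitsSmi(1 << 30),        // 0: one past kMaxSmi
              TruncatedFitsSmi(INT32_MIN));     // 0: cvttsd2si overflow marker
  return 0;
}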
2639 } 2640 }
2640 2641
2641 2642
2642 LocationSummary* UnboxIntegerInstr::MakeLocationSummary() const { 2643 LocationSummary* UnboxIntegerInstr::MakeLocationSummary() const {
2643 const intptr_t kNumInputs = 1; 2644 const intptr_t kNumInputs = 1;
2644 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0; 2645 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
2645 LocationSummary* summary = 2646 LocationSummary* summary =
2646 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2647 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2647 summary->set_in(0, Location::RequiresRegister()); 2648 summary->set_in(0, Location::RequiresRegister());
2648 if (CanDeoptimize()) summary->set_temp(0, Location::RequiresRegister()); 2649 if (CanDeoptimize()) summary->set_temp(0, Location::RequiresRegister());
2649 summary->set_out(Location::RequiresXmmRegister()); 2650 summary->set_out(Location::RequiresFpuRegister());
2650 return summary; 2651 return summary;
2651 } 2652 }
2652 2653
2653 2654
2654 void UnboxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2655 void UnboxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2655 const intptr_t value_cid = value()->ResultCid(); 2656 const intptr_t value_cid = value()->ResultCid();
2656 const Register value = locs()->in(0).reg(); 2657 const Register value = locs()->in(0).reg();
2657 const XmmRegister result = locs()->out().xmm_reg(); 2658 const XmmRegister result = locs()->out().fpu_reg();
2658 2659
2659 if (value_cid == kMintCid) { 2660 if (value_cid == kMintCid) {
2660 __ movsd(result, FieldAddress(value, Mint::value_offset())); 2661 __ movsd(result, FieldAddress(value, Mint::value_offset()));
2661 } else if (value_cid == kSmiCid) { 2662 } else if (value_cid == kSmiCid) {
2662 __ SmiUntag(value); // Untag input before conversion. 2663 __ SmiUntag(value); // Untag input before conversion.
2663 __ movd(result, value); 2664 __ movd(result, value);
2664 __ pmovsxdq(result, result); 2665 __ pmovsxdq(result, result);
2665 __ SmiTag(value); // Restore input register. 2666 __ SmiTag(value); // Restore input register.
2666 } else { 2667 } else {
2667 Register temp = locs()->temp(0).reg(); 2668 Register temp = locs()->temp(0).reg();
(...skipping 15 matching lines...)
2683 } 2684 }
2684 2685
2685 2686
2686 LocationSummary* BoxIntegerInstr::MakeLocationSummary() const { 2687 LocationSummary* BoxIntegerInstr::MakeLocationSummary() const {
2687 const intptr_t kNumInputs = 1; 2688 const intptr_t kNumInputs = 1;
2688 const intptr_t kNumTemps = 2; 2689 const intptr_t kNumTemps = 2;
2689 LocationSummary* summary = 2690 LocationSummary* summary =
2690 new LocationSummary(kNumInputs, 2691 new LocationSummary(kNumInputs,
2691 kNumTemps, 2692 kNumTemps,
2692 LocationSummary::kCallOnSlowPath); 2693 LocationSummary::kCallOnSlowPath);
2693 summary->set_in(0, Location::RequiresXmmRegister()); 2694 summary->set_in(0, Location::RequiresFpuRegister());
2694 summary->set_temp(0, Location::RegisterLocation(EAX)); 2695 summary->set_temp(0, Location::RegisterLocation(EAX));
2695 summary->set_temp(1, Location::RegisterLocation(EDX)); 2696 summary->set_temp(1, Location::RegisterLocation(EDX));
2696 // TODO(fschneider): Save one temp by using result register as a temp. 2697 // TODO(fschneider): Save one temp by using result register as a temp.
2697 summary->set_out(Location::RequiresRegister()); 2698 summary->set_out(Location::RequiresRegister());
2698 return summary; 2699 return summary;
2699 } 2700 }
2700 2701
2701 2702
2702 class BoxIntegerSlowPath : public SlowPathCode { 2703 class BoxIntegerSlowPath : public SlowPathCode {
2703 public: 2704 public:
(...skipping 26 matching lines...)
2730 private: 2731 private:
2731 BoxIntegerInstr* instruction_; 2732 BoxIntegerInstr* instruction_;
2732 }; 2733 };
2733 2734
2734 2735
2735 void BoxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2736 void BoxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2736 BoxIntegerSlowPath* slow_path = new BoxIntegerSlowPath(this); 2737 BoxIntegerSlowPath* slow_path = new BoxIntegerSlowPath(this);
2737 compiler->AddSlowPathCode(slow_path); 2738 compiler->AddSlowPathCode(slow_path);
2738 2739
2739 Register out_reg = locs()->out().reg(); 2740 Register out_reg = locs()->out().reg();
2740 XmmRegister value = locs()->in(0).xmm_reg(); 2741 XmmRegister value = locs()->in(0).fpu_reg();
2741 2742
2742 // Unboxed operations produce smis or mint-sized values. 2743 // Unboxed operations produce smis or mint-sized values.
2743 // Check if value fits into a smi. 2744 // Check if value fits into a smi.
2744 Label not_smi, done; 2745 Label not_smi, done;
2745 __ pextrd(EDX, value, Immediate(1)); // Upper half. 2746 __ pextrd(EDX, value, Immediate(1)); // Upper half.
2746 __ pextrd(EAX, value, Immediate(0)); // Lower half. 2747 __ pextrd(EAX, value, Immediate(0)); // Lower half.
2747 // 1. Compute (x + -kMinSmi) which has to be in the range 2748 // 1. Compute (x + -kMinSmi) which has to be in the range
2748 // 0 .. -kMinSmi+kMaxSmi for x to fit into a smi. 2749 // 0 .. -kMinSmi+kMaxSmi for x to fit into a smi.
2749 __ addl(EAX, Immediate(0x40000000)); 2750 __ addl(EAX, Immediate(0x40000000));
2750 __ adcl(EDX, Immediate(0)); 2751 __ adcl(EDX, Immediate(0));
(...skipping 23 matching lines...)
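The BoxInteger fast path above biases the 64-bit value by -kMinSmi (the addl 0x40000000 / adcl 0 pair); per the comment, x fits a smi exactly when (x + -kMinSmi) lies in 0 .. -kMinSmi+kMaxSmi. A hedged sketch of that range test, assuming 31-bit smis on IA32 (kMinSmi = -2^30, kMaxSmi = 2^30 - 1); the helper name is made up:

#include <cstdint>
#include <cstdio>

static bool FitsInSmi(int64_t x) {
  // Bias by -kMinSmi = 2^30, wrapping mod 2^64 just like the addl/adcl pair;
  // x is a smi exactly when the biased value lands in [0, 2^31 - 1].
  const uint64_t biased = static_cast<uint64_t>(x) + (uint64_t{1} << 30);
  return biased <= 0x7FFFFFFFu;
}

int main() {
  std::printf("%d %d %d\n",
              FitsInSmi((int64_t{1} << 30) - 1),  // 1: kMaxSmi
              FitsInSmi(-(int64_t{1} << 30)),     // 1: kMinSmi
              FitsInSmi(int64_t{1} << 30));       // 0: one past kMaxSmi
  return 0;
}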
2774 2775
2775 LocationSummary* BinaryMintOpInstr::MakeLocationSummary() const { 2776 LocationSummary* BinaryMintOpInstr::MakeLocationSummary() const {
2776 const intptr_t kNumInputs = 2; 2777 const intptr_t kNumInputs = 2;
2777 switch (op_kind()) { 2778 switch (op_kind()) {
2778 case Token::kBIT_AND: 2779 case Token::kBIT_AND:
2779 case Token::kBIT_OR: 2780 case Token::kBIT_OR:
2780 case Token::kBIT_XOR: { 2781 case Token::kBIT_XOR: {
2781 const intptr_t kNumTemps = 0; 2782 const intptr_t kNumTemps = 0;
2782 LocationSummary* summary = 2783 LocationSummary* summary =
2783 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2784 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2784 summary->set_in(0, Location::RequiresXmmRegister()); 2785 summary->set_in(0, Location::RequiresFpuRegister());
2785 summary->set_in(1, Location::RequiresXmmRegister()); 2786 summary->set_in(1, Location::RequiresFpuRegister());
2786 summary->set_out(Location::SameAsFirstInput()); 2787 summary->set_out(Location::SameAsFirstInput());
2787 return summary; 2788 return summary;
2788 } 2789 }
2789 case Token::kADD: 2790 case Token::kADD:
2790 case Token::kSUB: { 2791 case Token::kSUB: {
2791 const intptr_t kNumTemps = 2; 2792 const intptr_t kNumTemps = 2;
2792 LocationSummary* summary = 2793 LocationSummary* summary =
2793 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2794 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2794 summary->set_in(0, Location::RequiresXmmRegister()); 2795 summary->set_in(0, Location::RequiresFpuRegister());
2795 summary->set_in(1, Location::RequiresXmmRegister()); 2796 summary->set_in(1, Location::RequiresFpuRegister());
2796 summary->set_temp(0, Location::RequiresRegister()); 2797 summary->set_temp(0, Location::RequiresRegister());
2797 summary->set_temp(1, Location::RequiresRegister()); 2798 summary->set_temp(1, Location::RequiresRegister());
2798 summary->set_out(Location::SameAsFirstInput()); 2799 summary->set_out(Location::SameAsFirstInput());
2799 return summary; 2800 return summary;
2800 } 2801 }
2801 default: 2802 default:
2802 UNREACHABLE(); 2803 UNREACHABLE();
2803 return NULL; 2804 return NULL;
2804 } 2805 }
2805 } 2806 }
2806 2807
2807 2808
2808 void BinaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2809 void BinaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2809 XmmRegister left = locs()->in(0).xmm_reg(); 2810 XmmRegister left = locs()->in(0).fpu_reg();
2810 XmmRegister right = locs()->in(1).xmm_reg(); 2811 XmmRegister right = locs()->in(1).fpu_reg();
2811 2812
2812 ASSERT(locs()->out().xmm_reg() == left); 2813 ASSERT(locs()->out().fpu_reg() == left);
2813 2814
2814 switch (op_kind()) { 2815 switch (op_kind()) {
2815 case Token::kBIT_AND: __ andpd(left, right); break; 2816 case Token::kBIT_AND: __ andpd(left, right); break;
2816 case Token::kBIT_OR: __ orpd(left, right); break; 2817 case Token::kBIT_OR: __ orpd(left, right); break;
2817 case Token::kBIT_XOR: __ xorpd(left, right); break; 2818 case Token::kBIT_XOR: __ xorpd(left, right); break;
2818 case Token::kADD: 2819 case Token::kADD:
2819 case Token::kSUB: { 2820 case Token::kSUB: {
2820 Register lo = locs()->temp(0).reg(); 2821 Register lo = locs()->temp(0).reg();
2821 Register hi = locs()->temp(1).reg(); 2822 Register hi = locs()->temp(1).reg();
2822 Label* deopt = compiler->AddDeoptStub(deopt_id(), 2823 Label* deopt = compiler->AddDeoptStub(deopt_id(),
(...skipping 23 matching lines...)
2846 default: UNREACHABLE(); 2847 default: UNREACHABLE();
2847 } 2848 }
2848 } 2849 }
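
Note on the kADD/kSUB case: the lo/hi temp registers exist so the 64-bit mint value can be operated on as two 32-bit halves, with the carry folded from the low word into the high word. The elided body lives in the skipped lines above; the sketch below shows the general technique only, not the VM's exact code, and models overflow as the condition that would send execution to the deopt stub.

#include <cassert>
#include <cstdint>

// Sketch of 64-bit addition performed on 32-bit halves (addl for the low
// words, adcl to fold the carry into the high words). Returns true on
// signed 64-bit overflow.
static bool Add64ViaHalves(int64_t a, int64_t b, int64_t* result) {
  uint32_t a_lo = static_cast<uint32_t>(a);
  uint32_t b_lo = static_cast<uint32_t>(b);
  uint32_t a_hi = static_cast<uint32_t>(static_cast<uint64_t>(a) >> 32);
  uint32_t b_hi = static_cast<uint32_t>(static_cast<uint64_t>(b) >> 32);

  uint32_t lo = a_lo + b_lo;               // addl
  uint32_t carry = (lo < a_lo) ? 1u : 0u;  // carry flag
  uint32_t hi = a_hi + b_hi + carry;       // adcl

  *result = static_cast<int64_t>((static_cast<uint64_t>(hi) << 32) | lo);
  // Signed overflow: both operands share a sign that the result does not.
  return ((a ^ b) >= 0) && ((a ^ *result) < 0);
}

int main() {
  int64_t r = 0;
  assert(!Add64ViaHalves(1, 2, &r) && r == 3);
  assert(!Add64ViaHalves(-5, 3, &r) && r == -2);
  assert(Add64ViaHalves(INT64_MAX, 1, &r));  // would deoptimize
  return 0;
}
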
2849 2850
2850 2851
2851 LocationSummary* ShiftMintOpInstr::MakeLocationSummary() const { 2852 LocationSummary* ShiftMintOpInstr::MakeLocationSummary() const {
2852 const intptr_t kNumInputs = 2; 2853 const intptr_t kNumInputs = 2;
2853 const intptr_t kNumTemps = op_kind() == Token::kSHL ? 2 : 1; 2854 const intptr_t kNumTemps = op_kind() == Token::kSHL ? 2 : 1;
2854 LocationSummary* summary = 2855 LocationSummary* summary =
2855 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2856 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2856 summary->set_in(0, Location::RequiresXmmRegister()); 2857 summary->set_in(0, Location::RequiresFpuRegister());
2857 summary->set_in(1, Location::RegisterLocation(ECX)); 2858 summary->set_in(1, Location::RegisterLocation(ECX));
2858 summary->set_temp(0, Location::RequiresRegister()); 2859 summary->set_temp(0, Location::RequiresRegister());
2859 if (op_kind() == Token::kSHL) { 2860 if (op_kind() == Token::kSHL) {
2860 summary->set_temp(1, Location::RequiresRegister()); 2861 summary->set_temp(1, Location::RequiresRegister());
2861 } 2862 }
2862 summary->set_out(Location::SameAsFirstInput()); 2863 summary->set_out(Location::SameAsFirstInput());
2863 return summary; 2864 return summary;
2864 } 2865 }
2865 2866
2866 2867
2867 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2868 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2868 XmmRegister left = locs()->in(0).xmm_reg(); 2869 XmmRegister left = locs()->in(0).fpu_reg();
2869 ASSERT(locs()->in(1).reg() == ECX); 2870 ASSERT(locs()->in(1).reg() == ECX);
2870 ASSERT(locs()->out().xmm_reg() == left); 2871 ASSERT(locs()->out().fpu_reg() == left);
2871 2872
2872 Label* deopt = compiler->AddDeoptStub(deopt_id(), 2873 Label* deopt = compiler->AddDeoptStub(deopt_id(),
2873 kDeoptShiftMintOp); 2874 kDeoptShiftMintOp);
2874 Label done; 2875 Label done;
2875 __ testl(ECX, ECX); 2876 __ testl(ECX, ECX);
2876 __ j(ZERO, &done); // Shift by 0 is a nop. 2877 __ j(ZERO, &done); // Shift by 0 is a nop.
2877 __ subl(ESP, Immediate(2 * kWordSize)); 2878 __ subl(ESP, Immediate(2 * kWordSize));
2878 __ movq(Address(ESP, 0), left); 2879 __ movq(Address(ESP, 0), left);
2879 // Deoptimize if shift count is > 31. 2880 // Deoptimize if shift count is > 31.
2880 // sarl operation masks the count to 5 bits and 2881 // sarl operation masks the count to 5 bits and
2881 // shrd is undefined with count > operand size (32) 2882 // shrd is undefined with count > operand size (32)
2882 // TODO(fschneider): Support shift counts > 31 without deoptimization. 2883 // TODO(fschneider): Support shift counts > 31 without deoptimization.
2883 __ SmiUntag(ECX); 2884 __ SmiUntag(ECX);
2884 const Immediate kCountLimit = Immediate(31); 2885 const Immediate& kCountLimit = Immediate(31);
2885 __ cmpl(ECX, kCountLimit); 2886 __ cmpl(ECX, kCountLimit);
2886 __ j(ABOVE, deopt); 2887 __ j(ABOVE, deopt);
2887 switch (op_kind()) { 2888 switch (op_kind()) {
2888 case Token::kSHR: { 2889 case Token::kSHR: {
2889 Register temp = locs()->temp(0).reg(); 2890 Register temp = locs()->temp(0).reg();
2890 __ movl(temp, Address(ESP, 1 * kWordSize)); // High half. 2891 __ movl(temp, Address(ESP, 1 * kWordSize)); // High half.
2891 __ shrd(Address(ESP, 0), temp); // Shift count in CL. 2892 __ shrd(Address(ESP, 0), temp); // Shift count in CL.
2892 __ sarl(Address(ESP, 1 * kWordSize), ECX); // Shift count in CL. 2893 __ sarl(Address(ESP, 1 * kWordSize), ECX); // Shift count in CL.
2893 break; 2894 break;
2894 } 2895 }
(...skipping 21 matching lines...)
2916 __ addl(ESP, Immediate(2 * kWordSize)); 2917 __ addl(ESP, Immediate(2 * kWordSize));
2917 __ Bind(&done); 2918 __ Bind(&done);
2918 } 2919 }
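
Note on the kSHR path above: a 64-bit arithmetic right shift is composed from two 32-bit operations, shrd shifting the low word while pulling vacated bits from the high word, and sarl shifting the high word while preserving the sign. A hedged standalone restatement, valid only for the 1..31 counts the generated code accepts (larger counts deoptimize); it assumes arithmetic right shift of negative signed values, as on the targeted platforms:

#include <cassert>
#include <cstdint>

// 64-bit arithmetic shift right by a count in [1, 31], built from 32-bit
// pieces the way shrd (low word) and sarl (high word) build it.
static int64_t ArithmeticShiftRight64(int64_t value, int count) {
  assert(count >= 1 && count <= 31);
  uint32_t lo = static_cast<uint32_t>(value);
  int32_t hi = static_cast<int32_t>(static_cast<uint64_t>(value) >> 32);

  // shrd: vacated high bits of the low word are filled from the high word.
  uint32_t new_lo = (lo >> count) | (static_cast<uint32_t>(hi) << (32 - count));
  // sarl: the high word keeps its sign bit (arithmetic shift assumed).
  int32_t new_hi = hi >> count;

  return static_cast<int64_t>(
      (static_cast<uint64_t>(static_cast<uint32_t>(new_hi)) << 32) | new_lo);
}

int main() {
  assert(ArithmeticShiftRight64(-256, 4) == -16);
  assert(ArithmeticShiftRight64(1LL << 40, 8) == (1LL << 32));
  assert(ArithmeticShiftRight64(-1, 31) == -1);
  return 0;
}
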
2919 2920
2920 2921
2921 LocationSummary* UnaryMintOpInstr::MakeLocationSummary() const { 2922 LocationSummary* UnaryMintOpInstr::MakeLocationSummary() const {
2922 const intptr_t kNumInputs = 1; 2923 const intptr_t kNumInputs = 1;
2923 const intptr_t kNumTemps = 0; 2924 const intptr_t kNumTemps = 0;
2924 LocationSummary* summary = 2925 LocationSummary* summary =
2925 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2926 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2926 summary->set_in(0, Location::RequiresXmmRegister()); 2927 summary->set_in(0, Location::RequiresFpuRegister());
2927 summary->set_out(Location::SameAsFirstInput()); 2928 summary->set_out(Location::SameAsFirstInput());
2928 return summary; 2929 return summary;
2929 } 2930 }
2930 2931
2931 2932
2932 void UnaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2933 void UnaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2933 ASSERT(op_kind() == Token::kBIT_NOT); 2934 ASSERT(op_kind() == Token::kBIT_NOT);
2934 XmmRegister value = locs()->in(0).xmm_reg(); 2935 XmmRegister value = locs()->in(0).fpu_reg();
2935 ASSERT(value == locs()->out().xmm_reg()); 2936 ASSERT(value == locs()->out().fpu_reg());
2936 __ pcmpeqq(XMM0, XMM0); // Generate all 1's. 2937 __ pcmpeqq(XMM0, XMM0); // Generate all 1's.
2937 __ pxor(value, XMM0); 2938 __ pxor(value, XMM0);
2938 } 2939 }
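
The pcmpeqq/pxor pair above is the SSE idiom for bitwise NOT: comparing a register with itself yields all 1-bits, and XOR with all ones flips every bit. A scalar equivalent, for reference only:

#include <cassert>
#include <cstdint>

static int64_t MintBitNot(int64_t value) {
  const uint64_t all_ones = ~static_cast<uint64_t>(0);                    // pcmpeqq XMM0, XMM0
  return static_cast<int64_t>(static_cast<uint64_t>(value) ^ all_ones);   // pxor value, XMM0
}

int main() {
  assert(MintBitNot(0) == -1);
  assert(MintBitNot(-1) == 0);
  assert(MintBitNot(5) == ~static_cast<int64_t>(5));
  return 0;
}
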
2939 2940
2940 2941
2942 LocationSummary* ThrowInstr::MakeLocationSummary() const {
2943 return new LocationSummary(0, 0, LocationSummary::kCall);
2944 }
2945
2946
2947
2948 void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2949 compiler->GenerateCallRuntime(token_pos(),
2950 kThrowRuntimeEntry,
2951 locs());
2952 __ int3();
2953 }
2954
2955
2956 LocationSummary* ReThrowInstr::MakeLocationSummary() const {
2957 return new LocationSummary(0, 0, LocationSummary::kCall);
2958 }
2959
2960
2961 void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2962 compiler->GenerateCallRuntime(token_pos(),
2963 kReThrowRuntimeEntry,
2964 locs());
2965 __ int3();
2966 }
2967
2968
2969 LocationSummary* GotoInstr::MakeLocationSummary() const {
2970 return new LocationSummary(0, 0, LocationSummary::kNoCall);
2971 }
2972
2973
2974 void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2975 // Add deoptimization descriptor for deoptimizing instructions
2976 // that may be inserted before this instruction.
2977 if (!compiler->is_optimizing()) {
2978 compiler->AddCurrentDescriptor(PcDescriptors::kDeoptBefore,
2979 GetDeoptId(),
2980 0); // No token position.
2981 }
2982
2983 if (HasParallelMove()) {
2984 compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
2985 }
2986
2987 // We can fall through if the successor is the next block in the list.
2988 // Otherwise, we need a jump.
2989 if (!compiler->IsNextBlock(successor())) {
2990 __ jmp(compiler->GetBlockLabel(successor()));
2991 }
2992 }
2993
2994
2995 static Condition NegateCondition(Condition condition) {
2996 switch (condition) {
2997 case EQUAL: return NOT_EQUAL;
2998 case NOT_EQUAL: return EQUAL;
2999 case LESS: return GREATER_EQUAL;
3000 case LESS_EQUAL: return GREATER;
3001 case GREATER: return LESS_EQUAL;
3002 case GREATER_EQUAL: return LESS;
3003 case BELOW: return ABOVE_EQUAL;
3004 case BELOW_EQUAL: return ABOVE;
3005 case ABOVE: return BELOW_EQUAL;
3006 case ABOVE_EQUAL: return BELOW;
3007 default:
3008 OS::Print("Error %d\n", condition);
3009 UNIMPLEMENTED();
3010 return EQUAL;
3011 }
3012 }
3013
3014
3015 void ControlInstruction::EmitBranchOnValue(FlowGraphCompiler* compiler,
3016 bool value) {
3017 if (value && compiler->IsNextBlock(false_successor())) {
3018 __ jmp(compiler->GetBlockLabel(true_successor()));
3019 } else if (!value && compiler->IsNextBlock(true_successor())) {
3020 __ jmp(compiler->GetBlockLabel(false_successor()));
3021 }
3022 }
3023
3024
3025 void ControlInstruction::EmitBranchOnCondition(FlowGraphCompiler* compiler,
3026 Condition true_condition) {
3027 if (compiler->IsNextBlock(false_successor())) {
3028 // If the next block is the false successor we will fall through to it.
3029 __ j(true_condition, compiler->GetBlockLabel(true_successor()));
3030 } else {
3031 // If the next block is the true successor, we negate the comparison and fall
3032 // through to it.
3033 ASSERT(compiler->IsNextBlock(true_successor()));
3034 Condition false_condition = NegateCondition(true_condition);
3035 __ j(false_condition, compiler->GetBlockLabel(false_successor()));
3036 }
3037 }
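
EmitBranchOnCondition (together with NegateCondition above) is a standard block-layout trick: only one jump is ever emitted, and whichever successor immediately follows in code order is reached by falling through. A small hedged sketch of the decision, with block ids and conditions simplified to ints and a two-value enum; names here are illustrative, not VM API:

#include <cstdio>

enum Cond { kLess, kGreaterEqual };  // illustrative subset of conditions

static Cond Negate(Cond c) { return (c == kLess) ? kGreaterEqual : kLess; }
static const char* Name(Cond c) { return (c == kLess) ? "LESS" : "GREATER_EQUAL"; }

// next_block is the block laid out immediately after the current one.
static void EmitBranch(int next_block, int true_block, int false_block,
                       Cond true_condition) {
  if (next_block == false_block) {
    // Jump on the true condition, fall through to the false successor.
    std::printf("j(%s) -> B%d; fall through to B%d\n",
                Name(true_condition), true_block, false_block);
  } else {
    // The true successor is next: jump on the negated condition instead.
    std::printf("j(%s) -> B%d; fall through to B%d\n",
                Name(Negate(true_condition)), false_block, true_block);
  }
}

int main() {
  EmitBranch(/*next_block=*/2, /*true_block=*/3, /*false_block=*/2, kLess);
  EmitBranch(/*next_block=*/3, /*true_block=*/3, /*false_block=*/2, kLess);
  return 0;
}
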
3038
3039
3040 LocationSummary* CurrentContextInstr::MakeLocationSummary() const {
3041 return LocationSummary::Make(0,
3042 Location::RequiresRegister(),
3043 LocationSummary::kNoCall);
3044 }
3045
3046
3047 void CurrentContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3048 __ MoveRegister(locs()->out().reg(), CTX);
3049 }
3050
3051
3052 LocationSummary* StrictCompareInstr::MakeLocationSummary() const {
3053 const intptr_t kNumInputs = 2;
3054 const intptr_t kNumTemps = 0;
3055 LocationSummary* locs =
3056 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3057 locs->set_in(0, Location::RegisterOrConstant(left()));
3058 locs->set_in(1, Location::RegisterOrConstant(right()));
3059 locs->set_out(Location::RequiresRegister());
3060 return locs;
3061 }
3062
3063
3064 // Special code for numbers (compare values instead of references).
3065 void StrictCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3066 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
3067 Location left = locs()->in(0);
3068 Location right = locs()->in(1);
3069 if (left.IsConstant() && right.IsConstant()) {
3070 // TODO(vegorov): should be eliminated earlier by constant propagation.
3071 const bool result = (kind() == Token::kEQ_STRICT) ?
3072 left.constant().raw() == right.constant().raw() :
3073 left.constant().raw() != right.constant().raw();
3074 __ LoadObject(locs()->out().reg(), result ? Bool::True() : Bool::False());
3075 return;
3076 }
3077 if (left.IsConstant()) {
3078 compiler->EmitEqualityRegConstCompare(right.reg(),
3079 left.constant(),
3080 needs_number_check());
3081 } else if (right.IsConstant()) {
3082 compiler->EmitEqualityRegConstCompare(left.reg(),
3083 right.constant(),
3084 needs_number_check());
3085 } else {
3086 compiler->EmitEqualityRegRegCompare(left.reg(),
3087 right.reg(),
3088 needs_number_check());
3089 }
3090
3091 Register result = locs()->out().reg();
3092 Label load_true, done;
3093 Condition true_condition = (kind() == Token::kEQ_STRICT) ? EQUAL : NOT_EQUAL;
3094 __ j(true_condition, &load_true, Assembler::kNearJump);
3095 __ LoadObject(result, Bool::False());
3096 __ jmp(&done, Assembler::kNearJump);
3097 __ Bind(&load_true);
3098 __ LoadObject(result, Bool::True());
3099 __ Bind(&done);
3100 }
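
Two details in the strict-compare code above are worth spelling out: constant operands on both sides are folded directly to a Bool object, and needs_number_check() selects the comparison helpers that compare numbers by value rather than by reference (strict equality is otherwise identity). A hedged model of the intended semantics, using a deliberately simplified object representation that is purely illustrative:

#include <cassert>

// Purely illustrative object model: strict equality is reference identity,
// except that numbers with equal values must compare equal even when they
// are distinct heap objects (the needs_number_check() case).
struct Obj {
  bool is_number;
  double value;  // only meaningful when is_number is true
};

static bool StrictEquals(const Obj* left, const Obj* right,
                         bool needs_number_check) {
  if (left == right) return true;  // same reference
  if (needs_number_check && left->is_number && right->is_number) {
    return left->value == right->value;  // compare by value
  }
  return false;
}

int main() {
  Obj a = {true, 3.0};
  Obj b = {true, 3.0};
  Obj c = {false, 0.0};
  assert(StrictEquals(&a, &a, /*needs_number_check=*/false));
  assert(!StrictEquals(&a, &b, /*needs_number_check=*/false));  // distinct objects
  assert(StrictEquals(&a, &b, /*needs_number_check=*/true));    // equal numbers
  assert(!StrictEquals(&a, &c, /*needs_number_check=*/true));
  return 0;
}
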
3101
3102
3103 void StrictCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler,
3104 BranchInstr* branch) {
3105 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
3106 Location left = locs()->in(0);
3107 Location right = locs()->in(1);
3108 if (left.IsConstant() && right.IsConstant()) {
3109 // TODO(vegorov): should be eliminated earlier by constant propagation.
3110 const bool result = (kind() == Token::kEQ_STRICT) ?
3111 left.constant().raw() == right.constant().raw() :
3112 left.constant().raw() != right.constant().raw();
3113 branch->EmitBranchOnValue(compiler, result);
3114 return;
3115 }
3116 if (left.IsConstant()) {
3117 compiler->EmitEqualityRegConstCompare(right.reg(),
3118 left.constant(),
3119 needs_number_check());
3120 } else if (right.IsConstant()) {
3121 compiler->EmitEqualityRegConstCompare(left.reg(),
3122 right.constant(),
3123 needs_number_check());
3124 } else {
3125 compiler->EmitEqualityRegRegCompare(left.reg(),
3126 right.reg(),
3127 needs_number_check());
3128 }
3129
3130 Condition true_condition = (kind() == Token::kEQ_STRICT) ? EQUAL : NOT_EQUAL;
3131 branch->EmitBranchOnCondition(compiler, true_condition);
3132 }
3133
3134
3135 void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3136 // The arguments passed to the stub include the closure itself, and the
3137 // arguments descriptor counts the closure as well.
3138 Register temp_reg = locs()->temp(0).reg();
3139 int argument_count = ArgumentCount();
3140 const Array& arguments_descriptor =
3141 Array::ZoneHandle(ArgumentsDescriptor::New(argument_count,
3142 argument_names()));
3143 __ LoadObject(temp_reg, arguments_descriptor);
3144 compiler->GenerateDartCall(deopt_id(),
3145 token_pos(),
3146 &StubCode::CallClosureFunctionLabel(),
3147 PcDescriptors::kOther,
3148 locs());
3149 __ Drop(argument_count);
3150 }
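
The closure call above describes the call site with an arguments descriptor built from the argument count (closure included) and the named-argument names. The following is a deliberately simplified, illustrative model of what such a descriptor conveys to the callee; the real ArgumentsDescriptor is a VM Array with its own layout, and these names are not VM API:

#include <cassert>
#include <string>
#include <vector>

// Illustrative only: the information a descriptor carries so the callee can
// match the actual arguments (including the closure) to its formals.
struct ArgumentsDescriptorModel {
  int argument_count;                       // total actuals, closure included here
  std::vector<std::string> argument_names;  // names of named arguments, if any
};

static ArgumentsDescriptorModel NewDescriptor(
    int argument_count, const std::vector<std::string>& names) {
  ArgumentsDescriptorModel desc;
  desc.argument_count = argument_count;
  desc.argument_names = names;
  return desc;
}

int main() {
  // e.g. closure(1, 2, key: 3): the closure plus two positional and one named.
  ArgumentsDescriptorModel desc = NewDescriptor(4, {"key"});
  assert(desc.argument_count == 4);
  assert(desc.argument_names.size() == 1);
  return 0;
}
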
3151
3152
3153 LocationSummary* BooleanNegateInstr::MakeLocationSummary() const {
3154 return LocationSummary::Make(1,
3155 Location::RequiresRegister(),
3156 LocationSummary::kNoCall);
3157 }
3158
3159
3160 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3161 Register value = locs()->in(0).reg();
3162 Register result = locs()->out().reg();
3163
3164 Label done;
3165 __ LoadObject(result, Bool::True());
3166 __ CompareRegisters(result, value);
3167 __ j(NOT_EQUAL, &done, Assembler::kNearJump);
3168 __ LoadObject(result, Bool::False());
3169 __ Bind(&done);
3170 }
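
Boolean negation above preloads the result register with Bool::True() and overwrites it with Bool::False() only when the input compares equal to true, so no value is ever inspected beyond an identity comparison. A scalar restatement of that logic, with Bool objects modeled as addresses of canonical constants (illustrative only):

#include <cassert>

// Canonical true/false objects, standing in for Bool::True()/Bool::False().
static const int kTrueObj = 1;
static const int kFalseObj = 0;

static const int* BooleanNegate(const int* value) {
  const int* result = &kTrueObj;  // LoadObject(result, Bool::True())
  if (result == value) {          // CompareRegisters(result, value)
    result = &kFalseObj;          // LoadObject(result, Bool::False())
  }
  return result;
}

int main() {
  assert(BooleanNegate(&kTrueObj) == &kFalseObj);
  assert(BooleanNegate(&kFalseObj) == &kTrueObj);
  return 0;
}
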
3171
3172
3173 LocationSummary* ChainContextInstr::MakeLocationSummary() const {
3174 return LocationSummary::Make(1,
3175 Location::NoLocation(),
3176 LocationSummary::kNoCall);
3177 }
3178
3179
3180 void ChainContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3181 Register context_value = locs()->in(0).reg();
3182
3183 // Chain the new context in context_value to its parent in CTX.
3184 __ StoreIntoObject(context_value,
3185 FieldAddress(context_value, Context::parent_offset()),
3186 CTX);
3187 // Set new context as current context.
3188 __ MoveRegister(CTX, context_value);
3189 }
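
Chaining a context is prepending to the singly linked list of Context objects that forms the scope chain: the new context's parent slot is set to the current CTX (through the write-barriered StoreIntoObject), and CTX then becomes the new context. A minimal illustrative model, where 'Context' and the global 'ctx' merely stand in for the VM's Context objects and the CTX register:

// Minimal illustrative model of the scope-chain update.
struct Context {
  Context* parent;
};

static Context* ctx = nullptr;  // models the CTX register

static void ChainContext(Context* context_value) {
  context_value->parent = ctx;  // StoreIntoObject(..., parent_offset(), CTX)
  ctx = context_value;          // MoveRegister(CTX, context_value)
}

int main() {
  Context outer = {nullptr};
  Context inner = {nullptr};
  ChainContext(&outer);
  ChainContext(&inner);
  return (ctx == &inner && ctx->parent == &outer) ? 0 : 1;
}
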
3190
3191
3192 LocationSummary* StoreVMFieldInstr::MakeLocationSummary() const {
3193 const intptr_t kNumInputs = 2;
3194 const intptr_t kNumTemps = 0;
3195 LocationSummary* locs =
3196 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3197 locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister()
3198 : Location::RequiresRegister());
3199 locs->set_in(1, Location::RequiresRegister());
3200 return locs;
3201 }
3202
3203
3204 void StoreVMFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3205 Register value_reg = locs()->in(0).reg();
3206 Register dest_reg = locs()->in(1).reg();
3207
3208 if (value()->NeedsStoreBuffer()) {
3209 __ StoreIntoObject(dest_reg, FieldAddress(dest_reg, offset_in_bytes()),
3210 value_reg);
3211 } else {
3212 __ StoreIntoObjectNoBarrier(
3213 dest_reg, FieldAddress(dest_reg, offset_in_bytes()), value_reg);
3214 }
3215 }
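
The NeedsStoreBuffer() test above chooses between the write-barriered store (StoreIntoObject) and the cheaper no-barrier variant: the barrier is only needed when the stored value could require the target object to be remembered for the garbage collector. A hedged sketch of that decision in a toy generational heap; the struct, field names, and predicate below are invented for illustration and do not reflect the VM's actual heap layout:

#include <cstdio>

// Toy model: only stores of possibly-new-space values into old-space
// objects need to be recorded in the store buffer / remembered set.
struct ToyObject {
  bool in_old_space;
};

static void RememberInStoreBuffer(ToyObject* target) {
  std::printf("store buffer: remember %p\n", static_cast<void*>(target));
}

static void StoreField(ToyObject* target, ToyObject** slot, ToyObject* value,
                       bool value_needs_store_buffer) {
  *slot = value;  // the raw store itself is the same in both cases
  if (value_needs_store_buffer && target->in_old_space && !value->in_old_space) {
    RememberInStoreBuffer(target);  // StoreIntoObject path
  }
  // else: StoreIntoObjectNoBarrier path, nothing extra to do.
}

int main() {
  ToyObject old_obj = {true};
  ToyObject new_obj = {false};
  ToyObject* field = nullptr;
  StoreField(&old_obj, &field, &new_obj, /*value_needs_store_buffer=*/true);
  StoreField(&old_obj, &field, &old_obj, /*value_needs_store_buffer=*/false);
  return 0;
}
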
3216
3217
3218 LocationSummary* AllocateObjectInstr::MakeLocationSummary() const {
3219 return MakeCallSummary();
3220 }
3221
3222
3223 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3224 const Class& cls = Class::ZoneHandle(constructor().Owner());
3225 const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls));
3226 const ExternalLabel label(cls.ToCString(), stub.EntryPoint());
3227 compiler->GenerateCall(token_pos(),
3228 &label,
3229 PcDescriptors::kOther,
3230 locs());
3231 __ Drop(ArgumentCount()); // Discard arguments.
3232 }
3233
3234
3235 LocationSummary* CreateClosureInstr::MakeLocationSummary() const {
3236 return MakeCallSummary();
3237 }
3238
3239
3240 void CreateClosureInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3241 const Function& closure_function = function();
3242 ASSERT(!closure_function.IsImplicitStaticClosureFunction());
3243 const Code& stub = Code::Handle(
3244 StubCode::GetAllocationStubForClosure(closure_function));
3245 const ExternalLabel label(closure_function.ToCString(), stub.EntryPoint());
3246 compiler->GenerateCall(token_pos(),
3247 &label,
3248 PcDescriptors::kOther,
3249 locs());
3250 __ Drop(2); // Discard type arguments and receiver.
3251 }
3252
2941 } // namespace dart 3253 } // namespace dart
2942 3254
2943 #undef __ 3255 #undef __
2944 3256
2945 #endif // defined TARGET_ARCH_X64 3257 #endif // defined TARGET_ARCH_IA32