Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(67)

Side by Side Diff: runtime/vm/intermediate_language_arm.cc

Issue 293993013: Beings adding SIMD support to arm64. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « runtime/vm/flow_graph_compiler_arm64.cc ('k') | runtime/vm/intermediate_language_arm64.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 9
10 #include "vm/cpu.h" 10 #include "vm/cpu.h"
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
71 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 71 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
72 locs->set_in(0, Location::RegisterLocation(R0)); 72 locs->set_in(0, Location::RegisterLocation(R0));
73 return locs; 73 return locs;
74 } 74 }
75 75
76 76
77 // Attempt optimized compilation at return instruction instead of at the entry. 77 // Attempt optimized compilation at return instruction instead of at the entry.
78 // The entry needs to be patchable, no inlined objects are allowed in the area 78 // The entry needs to be patchable, no inlined objects are allowed in the area
79 // that will be overwritten by the patch instructions: a branch macro sequence. 79 // that will be overwritten by the patch instructions: a branch macro sequence.
80 void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 80 void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
81 Register result = locs()->in(0).reg(); 81 const Register result = locs()->in(0).reg();
82 ASSERT(result == R0); 82 ASSERT(result == R0);
83 #if defined(DEBUG) 83 #if defined(DEBUG)
84 Label stack_ok; 84 Label stack_ok;
85 __ Comment("Stack Check"); 85 __ Comment("Stack Check");
86 const intptr_t fp_sp_dist = 86 const intptr_t fp_sp_dist =
87 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; 87 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize;
88 ASSERT(fp_sp_dist <= 0); 88 ASSERT(fp_sp_dist <= 0);
89 __ sub(R2, SP, ShifterOperand(FP)); 89 __ sub(R2, SP, ShifterOperand(FP));
90 __ CompareImmediate(R2, fp_sp_dist); 90 __ CompareImmediate(R2, fp_sp_dist);
91 __ b(&stack_ok, EQ); 91 __ b(&stack_ok, EQ);
(...skipping 141 matching lines...) Expand 10 before | Expand all | Expand 10 after
233 233
234 234
235 LocationSummary* LoadLocalInstr::MakeLocationSummary(bool opt) const { 235 LocationSummary* LoadLocalInstr::MakeLocationSummary(bool opt) const {
236 return LocationSummary::Make(0, 236 return LocationSummary::Make(0,
237 Location::RequiresRegister(), 237 Location::RequiresRegister(),
238 LocationSummary::kNoCall); 238 LocationSummary::kNoCall);
239 } 239 }
240 240
241 241
242 void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 242 void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
243 Register result = locs()->out(0).reg(); 243 const Register result = locs()->out(0).reg();
244 __ LoadFromOffset(kWord, result, FP, local().index() * kWordSize); 244 __ LoadFromOffset(kWord, result, FP, local().index() * kWordSize);
245 } 245 }
246 246
247 247
248 LocationSummary* StoreLocalInstr::MakeLocationSummary(bool opt) const { 248 LocationSummary* StoreLocalInstr::MakeLocationSummary(bool opt) const {
249 return LocationSummary::Make(1, 249 return LocationSummary::Make(1,
250 Location::SameAsFirstInput(), 250 Location::SameAsFirstInput(),
251 LocationSummary::kNoCall); 251 LocationSummary::kNoCall);
252 } 252 }
253 253
254 254
255 void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 255 void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
256 Register value = locs()->in(0).reg(); 256 const Register value = locs()->in(0).reg();
257 Register result = locs()->out(0).reg(); 257 const Register result = locs()->out(0).reg();
258 ASSERT(result == value); // Assert that register assignment is correct. 258 ASSERT(result == value); // Assert that register assignment is correct.
259 __ str(value, Address(FP, local().index() * kWordSize)); 259 __ str(value, Address(FP, local().index() * kWordSize));
260 } 260 }
261 261
262 262
263 LocationSummary* ConstantInstr::MakeLocationSummary(bool opt) const { 263 LocationSummary* ConstantInstr::MakeLocationSummary(bool opt) const {
264 return LocationSummary::Make(0, 264 return LocationSummary::Make(0,
265 Location::RequiresRegister(), 265 Location::RequiresRegister(),
266 LocationSummary::kNoCall); 266 LocationSummary::kNoCall);
267 } 267 }
268 268
269 269
270 void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 270 void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
271 // The register allocator drops constant definitions that have no uses. 271 // The register allocator drops constant definitions that have no uses.
272 if (!locs()->out(0).IsInvalid()) { 272 if (!locs()->out(0).IsInvalid()) {
273 Register result = locs()->out(0).reg(); 273 const Register result = locs()->out(0).reg();
274 __ LoadObject(result, value()); 274 __ LoadObject(result, value());
275 } 275 }
276 } 276 }
277 277
278 278
279 LocationSummary* UnboxedConstantInstr::MakeLocationSummary(bool opt) const { 279 LocationSummary* UnboxedConstantInstr::MakeLocationSummary(bool opt) const {
280 const intptr_t kNumInputs = 0; 280 const intptr_t kNumInputs = 0;
281 const intptr_t kNumTemps = 1; 281 const intptr_t kNumTemps = 1;
282 LocationSummary* locs = 282 LocationSummary* locs =
283 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 283 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after
347 kNonBoolTypeErrorRuntimeEntry, 347 kNonBoolTypeErrorRuntimeEntry,
348 1, 348 1,
349 locs); 349 locs);
350 // We should never return here. 350 // We should never return here.
351 __ bkpt(0); 351 __ bkpt(0);
352 __ Bind(&done); 352 __ Bind(&done);
353 } 353 }
354 354
355 355
356 void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 356 void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
357 Register obj = locs()->in(0).reg(); 357 const Register obj = locs()->in(0).reg();
358 Register result = locs()->out(0).reg(); 358 const Register result = locs()->out(0).reg();
359 359
360 EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler); 360 EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
361 ASSERT(obj == result); 361 ASSERT(obj == result);
362 } 362 }
363 363
364 364
365 static Condition TokenKindToSmiCondition(Token::Kind kind) { 365 static Condition TokenKindToSmiCondition(Token::Kind kind) {
366 switch (kind) { 366 switch (kind) {
367 case Token::kEQ: return EQ; 367 case Token::kEQ: return EQ;
368 case Token::kNE: return NE; 368 case Token::kNE: return NE;
(...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after
508 UNREACHABLE(); 508 UNREACHABLE();
509 return VS; 509 return VS;
510 } 510 }
511 } 511 }
512 512
513 513
514 static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler, 514 static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
515 LocationSummary* locs, 515 LocationSummary* locs,
516 Token::Kind kind) { 516 Token::Kind kind) {
517 ASSERT(Token::IsEqualityOperator(kind)); 517 ASSERT(Token::IsEqualityOperator(kind));
518 QRegister left = locs->in(0).fpu_reg(); 518 const QRegister left = locs->in(0).fpu_reg();
519 QRegister right = locs->in(1).fpu_reg(); 519 const QRegister right = locs->in(1).fpu_reg();
520 QRegister tmpq = locs->temp(0).fpu_reg(); 520 const QRegister tmpq = locs->temp(0).fpu_reg();
521 Register tmp_lo = locs->temp(1).reg(); 521 const Register tmp_lo = locs->temp(1).reg();
522 Register tmp_hi = locs->temp(2).reg(); 522 const Register tmp_hi = locs->temp(2).reg();
523 523
524 __ vceqqi(kWord, tmpq, left, right); 524 __ vceqqi(kWord, tmpq, left, right);
525 __ vmovrrd(tmp_lo, tmp_hi, EvenDRegisterOf(tmpq)); 525 __ vmovrrd(tmp_lo, tmp_hi, EvenDRegisterOf(tmpq));
526 // tmp_lo and tmp_hi must both be 0xffffffff. 526 // tmp_lo and tmp_hi must both be 0xffffffff.
527 __ and_(tmp_lo, tmp_lo, ShifterOperand(tmp_hi)); 527 __ and_(tmp_lo, tmp_lo, ShifterOperand(tmp_hi));
528 528
529 Condition true_condition = TokenKindToMintCondition(kind); 529 Condition true_condition = TokenKindToMintCondition(kind);
530 __ CompareImmediate(tmp_lo, 0xffffffff); 530 __ CompareImmediate(tmp_lo, 0xffffffff);
531 return true_condition; 531 return true_condition;
532 } 532 }
533 533
534 534
535 static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler, 535 static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
536 LocationSummary* locs, 536 LocationSummary* locs,
537 Token::Kind kind) { 537 Token::Kind kind) {
538 QRegister left = locs->in(0).fpu_reg(); 538 const QRegister left = locs->in(0).fpu_reg();
539 QRegister right = locs->in(1).fpu_reg(); 539 const QRegister right = locs->in(1).fpu_reg();
540 DRegister dleft0 = EvenDRegisterOf(left); 540 const DRegister dleft0 = EvenDRegisterOf(left);
541 DRegister dright0 = EvenDRegisterOf(right); 541 const DRegister dright0 = EvenDRegisterOf(right);
542 SRegister sleft0 = EvenSRegisterOf(dleft0); 542 const SRegister sleft0 = EvenSRegisterOf(dleft0);
543 SRegister sleft1 = OddSRegisterOf(dleft0); 543 const SRegister sleft1 = OddSRegisterOf(dleft0);
544 SRegister sright0 = EvenSRegisterOf(dright0); 544 const SRegister sright0 = EvenSRegisterOf(dright0);
545 SRegister sright1 = OddSRegisterOf(dright0); 545 const SRegister sright1 = OddSRegisterOf(dright0);
546 546
547 Register tmp_left = locs->temp(0).reg(); 547 const Register tmp_left = locs->temp(0).reg();
548 Register tmp_right = locs->temp(1).reg(); 548 const Register tmp_right = locs->temp(1).reg();
549 549
550 // 64-bit comparison 550 // 64-bit comparison
551 Condition hi_true_cond, hi_false_cond, lo_false_cond; 551 Condition hi_true_cond, hi_false_cond, lo_false_cond;
552 switch (kind) { 552 switch (kind) {
553 case Token::kLT: 553 case Token::kLT:
554 case Token::kLTE: 554 case Token::kLTE:
555 hi_true_cond = LT; 555 hi_true_cond = LT;
556 hi_false_cond = GT; 556 hi_false_cond = GT;
557 lo_false_cond = (kind == Token::kLT) ? CS : HI; 557 lo_false_cond = (kind == Token::kLT) ? CS : HI;
558 break; 558 break;
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
602 default: 602 default:
603 UNREACHABLE(); 603 UNREACHABLE();
604 return VS; 604 return VS;
605 } 605 }
606 } 606 }
607 607
608 608
609 static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler, 609 static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
610 LocationSummary* locs, 610 LocationSummary* locs,
611 Token::Kind kind) { 611 Token::Kind kind) {
612 QRegister left = locs->in(0).fpu_reg(); 612 const QRegister left = locs->in(0).fpu_reg();
613 QRegister right = locs->in(1).fpu_reg(); 613 const QRegister right = locs->in(1).fpu_reg();
614 DRegister dleft = EvenDRegisterOf(left); 614 const DRegister dleft = EvenDRegisterOf(left);
615 DRegister dright = EvenDRegisterOf(right); 615 const DRegister dright = EvenDRegisterOf(right);
616 __ vcmpd(dleft, dright); 616 __ vcmpd(dleft, dright);
617 __ vmstat(); 617 __ vmstat();
618 Condition true_condition = TokenKindToDoubleCondition(kind); 618 Condition true_condition = TokenKindToDoubleCondition(kind);
619 return true_condition; 619 return true_condition;
620 } 620 }
621 621
622 622
623 Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler, 623 Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
624 BranchLabels labels) { 624 BranchLabels labels) {
625 if (operation_cid() == kSmiCid) { 625 if (operation_cid() == kSmiCid) {
626 return EmitSmiComparisonOp(compiler, locs(), kind()); 626 return EmitSmiComparisonOp(compiler, locs(), kind());
627 } else if (operation_cid() == kMintCid) { 627 } else if (operation_cid() == kMintCid) {
628 return EmitUnboxedMintEqualityOp(compiler, locs(), kind()); 628 return EmitUnboxedMintEqualityOp(compiler, locs(), kind());
629 } else { 629 } else {
630 ASSERT(operation_cid() == kDoubleCid); 630 ASSERT(operation_cid() == kDoubleCid);
631 return EmitDoubleComparisonOp(compiler, locs(), kind()); 631 return EmitDoubleComparisonOp(compiler, locs(), kind());
632 } 632 }
633 } 633 }
634 634
635 635
636 void EqualityCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 636 void EqualityCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
637 ASSERT((kind() == Token::kNE) || (kind() == Token::kEQ)); 637 ASSERT((kind() == Token::kNE) || (kind() == Token::kEQ));
638 638
639 // The ARM code does not use true- and false-labels here. 639 // The ARM code does not use true- and false-labels here.
640 BranchLabels labels = { NULL, NULL, NULL }; 640 BranchLabels labels = { NULL, NULL, NULL };
641 Condition true_condition = EmitComparisonCode(compiler, labels); 641 Condition true_condition = EmitComparisonCode(compiler, labels);
642 642
643 Register result = locs()->out(0).reg(); 643 const Register result = locs()->out(0).reg();
644 if ((operation_cid() == kSmiCid) || (operation_cid() == kMintCid)) { 644 if ((operation_cid() == kSmiCid) || (operation_cid() == kMintCid)) {
645 __ LoadObject(result, Bool::True(), true_condition); 645 __ LoadObject(result, Bool::True(), true_condition);
646 __ LoadObject(result, Bool::False(), NegateCondition(true_condition)); 646 __ LoadObject(result, Bool::False(), NegateCondition(true_condition));
647 } else { 647 } else {
648 ASSERT(operation_cid() == kDoubleCid); 648 ASSERT(operation_cid() == kDoubleCid);
649 Label done; 649 Label done;
650 __ LoadObject(result, Bool::False()); 650 __ LoadObject(result, Bool::False());
651 if (true_condition != NE) { 651 if (true_condition != NE) {
652 __ b(&done, VS); // x == NaN -> false, x != NaN -> true. 652 __ b(&done, VS); // x == NaN -> false, x != NaN -> true.
653 } 653 }
(...skipping 27 matching lines...) Expand all
681 locs->set_in(0, Location::RequiresRegister()); 681 locs->set_in(0, Location::RequiresRegister());
682 // Only one input can be a constant operand. The case of two constant 682 // Only one input can be a constant operand. The case of two constant
683 // operands should be handled by constant propagation. 683 // operands should be handled by constant propagation.
684 locs->set_in(1, Location::RegisterOrConstant(right())); 684 locs->set_in(1, Location::RegisterOrConstant(right()));
685 return locs; 685 return locs;
686 } 686 }
687 687
688 688
689 Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler, 689 Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
690 BranchLabels labels) { 690 BranchLabels labels) {
691 Register left = locs()->in(0).reg(); 691 const Register left = locs()->in(0).reg();
692 Location right = locs()->in(1); 692 Location right = locs()->in(1);
693 if (right.IsConstant()) { 693 if (right.IsConstant()) {
694 ASSERT(right.constant().IsSmi()); 694 ASSERT(right.constant().IsSmi());
695 const int32_t imm = 695 const int32_t imm =
696 reinterpret_cast<int32_t>(right.constant().raw()); 696 reinterpret_cast<int32_t>(right.constant().raw());
697 __ TestImmediate(left, imm); 697 __ TestImmediate(left, imm);
698 } else { 698 } else {
699 __ tst(left, ShifterOperand(right.reg())); 699 __ tst(left, ShifterOperand(right.reg()));
700 } 700 }
701 Condition true_condition = (kind() == Token::kNE) ? NE : EQ; 701 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
(...skipping 23 matching lines...) Expand all
725 locs->set_in(0, Location::RequiresRegister()); 725 locs->set_in(0, Location::RequiresRegister());
726 locs->set_temp(0, Location::RequiresRegister()); 726 locs->set_temp(0, Location::RequiresRegister());
727 locs->set_out(0, Location::RequiresRegister()); 727 locs->set_out(0, Location::RequiresRegister());
728 return locs; 728 return locs;
729 } 729 }
730 730
731 731
732 Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler, 732 Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
733 BranchLabels labels) { 733 BranchLabels labels) {
734 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT)); 734 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
735 Register val_reg = locs()->in(0).reg(); 735 const Register val_reg = locs()->in(0).reg();
736 Register cid_reg = locs()->temp(0).reg(); 736 const Register cid_reg = locs()->temp(0).reg();
737 737
738 Label* deopt = CanDeoptimize() ? 738 Label* deopt = CanDeoptimize() ?
739 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL; 739 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL;
740 740
741 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0; 741 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
742 const ZoneGrowableArray<intptr_t>& data = cid_results(); 742 const ZoneGrowableArray<intptr_t>& data = cid_results();
743 ASSERT(data[0] == kSmiCid); 743 ASSERT(data[0] == kSmiCid);
744 bool result = data[1] == true_result; 744 bool result = data[1] == true_result;
745 __ tst(val_reg, ShifterOperand(kSmiTagMask)); 745 __ tst(val_reg, ShifterOperand(kSmiTagMask));
746 __ b(result ? labels.true_label : labels.false_label, EQ); 746 __ b(result ? labels.true_label : labels.false_label, EQ);
(...skipping 22 matching lines...) Expand all
769 769
770 770
771 void TestCidsInstr::EmitBranchCode(FlowGraphCompiler* compiler, 771 void TestCidsInstr::EmitBranchCode(FlowGraphCompiler* compiler,
772 BranchInstr* branch) { 772 BranchInstr* branch) {
773 BranchLabels labels = compiler->CreateBranchLabels(branch); 773 BranchLabels labels = compiler->CreateBranchLabels(branch);
774 EmitComparisonCode(compiler, labels); 774 EmitComparisonCode(compiler, labels);
775 } 775 }
776 776
777 777
778 void TestCidsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 778 void TestCidsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
779 Register result_reg = locs()->out(0).reg(); 779 const Register result_reg = locs()->out(0).reg();
780 Label is_true, is_false, done; 780 Label is_true, is_false, done;
781 BranchLabels labels = { &is_true, &is_false, &is_false }; 781 BranchLabels labels = { &is_true, &is_false, &is_false };
782 EmitComparisonCode(compiler, labels); 782 EmitComparisonCode(compiler, labels);
783 __ Bind(&is_false); 783 __ Bind(&is_false);
784 __ LoadObject(result_reg, Bool::False()); 784 __ LoadObject(result_reg, Bool::False());
785 __ b(&done); 785 __ b(&done);
786 __ Bind(&is_true); 786 __ Bind(&is_true);
787 __ LoadObject(result_reg, Bool::True()); 787 __ LoadObject(result_reg, Bool::True());
788 __ Bind(&done); 788 __ Bind(&done);
789 } 789 }
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
836 return EmitDoubleComparisonOp(compiler, locs(), kind()); 836 return EmitDoubleComparisonOp(compiler, locs(), kind());
837 } 837 }
838 } 838 }
839 839
840 840
841 void RelationalOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 841 void RelationalOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
842 // The ARM code does not use true- and false-labels here. 842 // The ARM code does not use true- and false-labels here.
843 BranchLabels labels = { NULL, NULL, NULL }; 843 BranchLabels labels = { NULL, NULL, NULL };
844 Condition true_condition = EmitComparisonCode(compiler, labels); 844 Condition true_condition = EmitComparisonCode(compiler, labels);
845 845
846 Register result = locs()->out(0).reg(); 846 const Register result = locs()->out(0).reg();
847 if (operation_cid() == kSmiCid) { 847 if (operation_cid() == kSmiCid) {
848 __ LoadObject(result, Bool::True(), true_condition); 848 __ LoadObject(result, Bool::True(), true_condition);
849 __ LoadObject(result, Bool::False(), NegateCondition(true_condition)); 849 __ LoadObject(result, Bool::False(), NegateCondition(true_condition));
850 } else if (operation_cid() == kMintCid) { 850 } else if (operation_cid() == kMintCid) {
851 Register cr = locs()->temp(0).reg(); 851 const Register cr = locs()->temp(0).reg();
852 __ LoadObject(result, Bool::True()); 852 __ LoadObject(result, Bool::True());
853 __ CompareImmediate(cr, 1); 853 __ CompareImmediate(cr, 1);
854 __ LoadObject(result, Bool::False(), NE); 854 __ LoadObject(result, Bool::False(), NE);
855 } else { 855 } else {
856 ASSERT(operation_cid() == kDoubleCid); 856 ASSERT(operation_cid() == kDoubleCid);
857 Label done; 857 Label done;
858 __ LoadObject(result, Bool::False()); 858 __ LoadObject(result, Bool::False());
859 if (true_condition != NE) { 859 if (true_condition != NE) {
860 __ b(&done, VS); // x == NaN -> false, x != NaN -> true. 860 __ b(&done, VS); // x == NaN -> false, x != NaN -> true.
861 } 861 }
862 __ LoadObject(result, Bool::True(), true_condition); 862 __ LoadObject(result, Bool::True(), true_condition);
863 __ Bind(&done); 863 __ Bind(&done);
864 } 864 }
865 } 865 }
866 866
867 867
868 void RelationalOpInstr::EmitBranchCode(FlowGraphCompiler* compiler, 868 void RelationalOpInstr::EmitBranchCode(FlowGraphCompiler* compiler,
869 BranchInstr* branch) { 869 BranchInstr* branch) {
870 BranchLabels labels = compiler->CreateBranchLabels(branch); 870 BranchLabels labels = compiler->CreateBranchLabels(branch);
871 Condition true_condition = EmitComparisonCode(compiler, labels); 871 Condition true_condition = EmitComparisonCode(compiler, labels);
872 872
873 if (operation_cid() == kSmiCid) { 873 if (operation_cid() == kSmiCid) {
874 EmitBranchOnCondition(compiler, true_condition, labels); 874 EmitBranchOnCondition(compiler, true_condition, labels);
875 } else if (operation_cid() == kMintCid) { 875 } else if (operation_cid() == kMintCid) {
876 Register result = locs()->temp(0).reg(); 876 const Register result = locs()->temp(0).reg();
877 __ CompareImmediate(result, 1); 877 __ CompareImmediate(result, 1);
878 __ b(labels.true_label, EQ); 878 __ b(labels.true_label, EQ);
879 __ b(labels.false_label, NE); 879 __ b(labels.false_label, NE);
880 } else if (operation_cid() == kDoubleCid) { 880 } else if (operation_cid() == kDoubleCid) {
881 Label* nan_result = (true_condition == NE) ? 881 Label* nan_result = (true_condition == NE) ?
882 labels.true_label : labels.false_label; 882 labels.true_label : labels.false_label;
883 __ b(nan_result, VS); 883 __ b(nan_result, VS);
884 EmitBranchOnCondition(compiler, true_condition, labels); 884 EmitBranchOnCondition(compiler, true_condition, labels);
885 } 885 }
886 } 886 }
887 887
888 888
889 LocationSummary* NativeCallInstr::MakeLocationSummary(bool opt) const { 889 LocationSummary* NativeCallInstr::MakeLocationSummary(bool opt) const {
890 const intptr_t kNumInputs = 0; 890 const intptr_t kNumInputs = 0;
891 const intptr_t kNumTemps = 3; 891 const intptr_t kNumTemps = 3;
892 LocationSummary* locs = 892 LocationSummary* locs =
893 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 893 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
894 locs->set_temp(0, Location::RegisterLocation(R1)); 894 locs->set_temp(0, Location::RegisterLocation(R1));
895 locs->set_temp(1, Location::RegisterLocation(R2)); 895 locs->set_temp(1, Location::RegisterLocation(R2));
896 locs->set_temp(2, Location::RegisterLocation(R5)); 896 locs->set_temp(2, Location::RegisterLocation(R5));
897 locs->set_out(0, Location::RegisterLocation(R0)); 897 locs->set_out(0, Location::RegisterLocation(R0));
898 return locs; 898 return locs;
899 } 899 }
900 900
901 901
902 void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 902 void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
903 ASSERT(locs()->temp(0).reg() == R1); 903 ASSERT(locs()->temp(0).reg() == R1);
904 ASSERT(locs()->temp(1).reg() == R2); 904 ASSERT(locs()->temp(1).reg() == R2);
905 ASSERT(locs()->temp(2).reg() == R5); 905 ASSERT(locs()->temp(2).reg() == R5);
906 Register result = locs()->out(0).reg(); 906 const Register result = locs()->out(0).reg();
907 907
908 // Push the result place holder initialized to NULL. 908 // Push the result place holder initialized to NULL.
909 __ PushObject(Object::ZoneHandle()); 909 __ PushObject(Object::ZoneHandle());
910 // Pass a pointer to the first argument in R2. 910 // Pass a pointer to the first argument in R2.
911 if (!function().HasOptionalParameters()) { 911 if (!function().HasOptionalParameters()) {
912 __ AddImmediate(R2, FP, (kParamEndSlotFromFp + 912 __ AddImmediate(R2, FP, (kParamEndSlotFromFp +
913 function().NumParameters()) * kWordSize); 913 function().NumParameters()) * kWordSize);
914 } else { 914 } else {
915 __ AddImmediate(R2, FP, kFirstLocalSlotFromFp * kWordSize); 915 __ AddImmediate(R2, FP, kFirstLocalSlotFromFp * kWordSize);
916 } 916 }
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
950 LocationSummary* StringFromCharCodeInstr::MakeLocationSummary(bool opt) const { 950 LocationSummary* StringFromCharCodeInstr::MakeLocationSummary(bool opt) const {
951 const intptr_t kNumInputs = 1; 951 const intptr_t kNumInputs = 1;
952 // TODO(fschneider): Allow immediate operands for the char code. 952 // TODO(fschneider): Allow immediate operands for the char code.
953 return LocationSummary::Make(kNumInputs, 953 return LocationSummary::Make(kNumInputs,
954 Location::RequiresRegister(), 954 Location::RequiresRegister(),
955 LocationSummary::kNoCall); 955 LocationSummary::kNoCall);
956 } 956 }
957 957
958 958
959 void StringFromCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 959 void StringFromCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
960 Register char_code = locs()->in(0).reg(); 960 const Register char_code = locs()->in(0).reg();
961 Register result = locs()->out(0).reg(); 961 const Register result = locs()->out(0).reg();
962 __ LoadImmediate(result, 962 __ LoadImmediate(result,
963 reinterpret_cast<uword>(Symbols::PredefinedAddress())); 963 reinterpret_cast<uword>(Symbols::PredefinedAddress()));
964 __ AddImmediate(result, Symbols::kNullCharCodeSymbolOffset * kWordSize); 964 __ AddImmediate(result, Symbols::kNullCharCodeSymbolOffset * kWordSize);
965 __ ldr(result, Address(result, char_code, LSL, 1)); // Char code is a smi. 965 __ ldr(result, Address(result, char_code, LSL, 1)); // Char code is a smi.
966 } 966 }
967 967
968 968
969 LocationSummary* StringToCharCodeInstr::MakeLocationSummary(bool opt) const { 969 LocationSummary* StringToCharCodeInstr::MakeLocationSummary(bool opt) const {
970 const intptr_t kNumInputs = 1; 970 const intptr_t kNumInputs = 1;
971 return LocationSummary::Make(kNumInputs, 971 return LocationSummary::Make(kNumInputs,
972 Location::RequiresRegister(), 972 Location::RequiresRegister(),
973 LocationSummary::kNoCall); 973 LocationSummary::kNoCall);
974 } 974 }
975 975
976 976
977 void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 977 void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
978 ASSERT(cid_ == kOneByteStringCid); 978 ASSERT(cid_ == kOneByteStringCid);
979 Register str = locs()->in(0).reg(); 979 const Register str = locs()->in(0).reg();
980 Register result = locs()->out(0).reg(); 980 const Register result = locs()->out(0).reg();
981 __ ldr(result, FieldAddress(str, String::length_offset())); 981 __ ldr(result, FieldAddress(str, String::length_offset()));
982 __ cmp(result, ShifterOperand(Smi::RawValue(1))); 982 __ cmp(result, ShifterOperand(Smi::RawValue(1)));
983 __ LoadImmediate(result, -1, NE); 983 __ LoadImmediate(result, -1, NE);
984 __ ldrb(result, FieldAddress(str, OneByteString::data_offset()), EQ); 984 __ ldrb(result, FieldAddress(str, OneByteString::data_offset()), EQ);
985 __ SmiTag(result); 985 __ SmiTag(result);
986 } 986 }
987 987
988 988
989 LocationSummary* StringInterpolateInstr::MakeLocationSummary(bool opt) const { 989 LocationSummary* StringInterpolateInstr::MakeLocationSummary(bool opt) const {
990 const intptr_t kNumInputs = 1; 990 const intptr_t kNumInputs = 1;
991 const intptr_t kNumTemps = 0; 991 const intptr_t kNumTemps = 0;
992 LocationSummary* summary = 992 LocationSummary* summary =
993 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 993 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
994 summary->set_in(0, Location::RegisterLocation(R0)); 994 summary->set_in(0, Location::RegisterLocation(R0));
995 summary->set_out(0, Location::RegisterLocation(R0)); 995 summary->set_out(0, Location::RegisterLocation(R0));
996 return summary; 996 return summary;
997 } 997 }
998 998
999 999
1000 void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1000 void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1001 Register array = locs()->in(0).reg(); 1001 const Register array = locs()->in(0).reg();
1002 __ Push(array); 1002 __ Push(array);
1003 const int kNumberOfArguments = 1; 1003 const int kNumberOfArguments = 1;
1004 const Array& kNoArgumentNames = Object::null_array(); 1004 const Array& kNoArgumentNames = Object::null_array();
1005 compiler->GenerateStaticCall(deopt_id(), 1005 compiler->GenerateStaticCall(deopt_id(),
1006 token_pos(), 1006 token_pos(),
1007 CallFunction(), 1007 CallFunction(),
1008 kNumberOfArguments, 1008 kNumberOfArguments,
1009 kNoArgumentNames, 1009 kNoArgumentNames,
1010 locs()); 1010 locs());
1011 ASSERT(locs()->out(0).reg() == R0); 1011 ASSERT(locs()->out(0).reg() == R0);
1012 } 1012 }
1013 1013
1014 1014
1015 LocationSummary* LoadUntaggedInstr::MakeLocationSummary(bool opt) const { 1015 LocationSummary* LoadUntaggedInstr::MakeLocationSummary(bool opt) const {
1016 const intptr_t kNumInputs = 1; 1016 const intptr_t kNumInputs = 1;
1017 return LocationSummary::Make(kNumInputs, 1017 return LocationSummary::Make(kNumInputs,
1018 Location::RequiresRegister(), 1018 Location::RequiresRegister(),
1019 LocationSummary::kNoCall); 1019 LocationSummary::kNoCall);
1020 } 1020 }
1021 1021
1022 1022
1023 void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1023 void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1024 Register object = locs()->in(0).reg(); 1024 const Register object = locs()->in(0).reg();
1025 Register result = locs()->out(0).reg(); 1025 const Register result = locs()->out(0).reg();
1026 __ LoadFromOffset(kWord, result, object, offset() - kHeapObjectTag); 1026 __ LoadFromOffset(kWord, result, object, offset() - kHeapObjectTag);
1027 } 1027 }
1028 1028
1029 1029
1030 LocationSummary* LoadClassIdInstr::MakeLocationSummary(bool opt) const { 1030 LocationSummary* LoadClassIdInstr::MakeLocationSummary(bool opt) const {
1031 const intptr_t kNumInputs = 1; 1031 const intptr_t kNumInputs = 1;
1032 return LocationSummary::Make(kNumInputs, 1032 return LocationSummary::Make(kNumInputs,
1033 Location::RequiresRegister(), 1033 Location::RequiresRegister(),
1034 LocationSummary::kNoCall); 1034 LocationSummary::kNoCall);
1035 } 1035 }
1036 1036
1037 1037
1038 void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1038 void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1039 Register object = locs()->in(0).reg(); 1039 const Register object = locs()->in(0).reg();
1040 Register result = locs()->out(0).reg(); 1040 const Register result = locs()->out(0).reg();
1041 Label load, done; 1041 Label load, done;
1042 __ tst(object, ShifterOperand(kSmiTagMask)); 1042 __ tst(object, ShifterOperand(kSmiTagMask));
1043 __ b(&load, NE); 1043 __ b(&load, NE);
1044 __ LoadImmediate(result, Smi::RawValue(kSmiCid)); 1044 __ LoadImmediate(result, Smi::RawValue(kSmiCid));
1045 __ b(&done); 1045 __ b(&done);
1046 __ Bind(&load); 1046 __ Bind(&load);
1047 __ LoadClassId(result, object); 1047 __ LoadClassId(result, object);
1048 __ SmiTag(result); 1048 __ SmiTag(result);
1049 __ Bind(&done); 1049 __ Bind(&done);
1050 } 1050 }
(...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after
1159 return locs; 1159 return locs;
1160 } 1160 }
1161 1161
1162 1162
1163 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1163 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1164 if ((representation() == kUnboxedDouble) || 1164 if ((representation() == kUnboxedDouble) ||
1165 (representation() == kUnboxedMint) || 1165 (representation() == kUnboxedMint) ||
1166 (representation() == kUnboxedFloat32x4) || 1166 (representation() == kUnboxedFloat32x4) ||
1167 (representation() == kUnboxedInt32x4) || 1167 (representation() == kUnboxedInt32x4) ||
1168 (representation() == kUnboxedFloat64x2)) { 1168 (representation() == kUnboxedFloat64x2)) {
1169 Register array = locs()->in(0).reg(); 1169 const Register array = locs()->in(0).reg();
1170 Register idx = locs()->in(1).reg(); 1170 const Register idx = locs()->in(1).reg();
1171 switch (index_scale()) { 1171 switch (index_scale()) {
1172 case 1: 1172 case 1:
1173 __ add(idx, array, ShifterOperand(idx, ASR, kSmiTagSize)); 1173 __ add(idx, array, ShifterOperand(idx, ASR, kSmiTagSize));
1174 break; 1174 break;
1175 case 4: 1175 case 4:
1176 __ add(idx, array, ShifterOperand(idx, LSL, 1)); 1176 __ add(idx, array, ShifterOperand(idx, LSL, 1));
1177 break; 1177 break;
1178 case 8: 1178 case 8:
1179 __ add(idx, array, ShifterOperand(idx, LSL, 2)); 1179 __ add(idx, array, ShifterOperand(idx, LSL, 2));
1180 break; 1180 break;
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
1222 break; 1222 break;
1223 case kTypedDataFloat64x2ArrayCid: 1223 case kTypedDataFloat64x2ArrayCid:
1224 case kTypedDataInt32x4ArrayCid: 1224 case kTypedDataInt32x4ArrayCid:
1225 case kTypedDataFloat32x4ArrayCid: 1225 case kTypedDataFloat32x4ArrayCid:
1226 __ vldmd(IA, idx, dresult0, 2); 1226 __ vldmd(IA, idx, dresult0, 2);
1227 break; 1227 break;
1228 } 1228 }
1229 return; 1229 return;
1230 } 1230 }
1231 1231
1232 Register array = locs()->in(0).reg(); 1232 const Register array = locs()->in(0).reg();
1233 Location index = locs()->in(1); 1233 Location index = locs()->in(1);
1234 ASSERT(index.IsRegister()); // TODO(regis): Revisit. 1234 ASSERT(index.IsRegister()); // TODO(regis): Revisit.
1235 Address element_address(kNoRegister, 0); 1235 Address element_address(kNoRegister, 0);
1236 // Note that index is expected smi-tagged, (i.e, times 2) for all arrays 1236 // Note that index is expected smi-tagged, (i.e, times 2) for all arrays
1237 // with index scale factor > 1. E.g., for Uint8Array and OneByteString the 1237 // with index scale factor > 1. E.g., for Uint8Array and OneByteString the
1238 // index is expected to be untagged before accessing. 1238 // index is expected to be untagged before accessing.
1239 ASSERT(kSmiTagShift == 1); 1239 ASSERT(kSmiTagShift == 1);
1240 const intptr_t offset = IsExternal() 1240 const intptr_t offset = IsExternal()
1241 ? 0 1241 ? 0
1242 : FlowGraphCompiler::DataOffsetFor(class_id()) - kHeapObjectTag; 1242 : FlowGraphCompiler::DataOffsetFor(class_id()) - kHeapObjectTag;
(...skipping 17 matching lines...) Expand all
1260 case 4: { 1260 case 4: {
1261 __ add(index.reg(), array, ShifterOperand(index.reg(), LSL, 1)); 1261 __ add(index.reg(), array, ShifterOperand(index.reg(), LSL, 1));
1262 element_address = Address(index.reg(), offset); 1262 element_address = Address(index.reg(), offset);
1263 break; 1263 break;
1264 } 1264 }
1265 // Cases 8 and 16 are only for unboxed values and are handled above. 1265 // Cases 8 and 16 are only for unboxed values and are handled above.
1266 default: 1266 default:
1267 UNREACHABLE(); 1267 UNREACHABLE();
1268 } 1268 }
1269 1269
1270 Register result = locs()->out(0).reg(); 1270 const Register result = locs()->out(0).reg();
1271 switch (class_id()) { 1271 switch (class_id()) {
1272 case kTypedDataInt8ArrayCid: 1272 case kTypedDataInt8ArrayCid:
1273 ASSERT(index_scale() == 1); 1273 ASSERT(index_scale() == 1);
1274 __ ldrsb(result, element_address); 1274 __ ldrsb(result, element_address);
1275 __ SmiTag(result); 1275 __ SmiTag(result);
1276 break; 1276 break;
1277 case kTypedDataUint8ArrayCid: 1277 case kTypedDataUint8ArrayCid:
1278 case kTypedDataUint8ClampedArrayCid: 1278 case kTypedDataUint8ClampedArrayCid:
1279 case kExternalTypedDataUint8ArrayCid: 1279 case kExternalTypedDataUint8ArrayCid:
1280 case kExternalTypedDataUint8ClampedArrayCid: 1280 case kExternalTypedDataUint8ClampedArrayCid:
(...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after
1407 return locs; 1407 return locs;
1408 } 1408 }
1409 1409
1410 1410
1411 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1411 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1412 if ((class_id() == kTypedDataFloat32ArrayCid) || 1412 if ((class_id() == kTypedDataFloat32ArrayCid) ||
1413 (class_id() == kTypedDataFloat64ArrayCid) || 1413 (class_id() == kTypedDataFloat64ArrayCid) ||
1414 (class_id() == kTypedDataFloat32x4ArrayCid) || 1414 (class_id() == kTypedDataFloat32x4ArrayCid) ||
1415 (class_id() == kTypedDataFloat64x2ArrayCid) || 1415 (class_id() == kTypedDataFloat64x2ArrayCid) ||
1416 (class_id() == kTypedDataInt32x4ArrayCid)) { 1416 (class_id() == kTypedDataInt32x4ArrayCid)) {
1417 Register array = locs()->in(0).reg(); 1417 const Register array = locs()->in(0).reg();
1418 Register idx = locs()->in(1).reg(); 1418 const Register idx = locs()->in(1).reg();
1419 Location value = locs()->in(2); 1419 Location value = locs()->in(2);
1420 switch (index_scale()) { 1420 switch (index_scale()) {
1421 case 1: 1421 case 1:
1422 __ add(idx, array, ShifterOperand(idx, ASR, kSmiTagSize)); 1422 __ add(idx, array, ShifterOperand(idx, ASR, kSmiTagSize));
1423 break; 1423 break;
1424 case 4: 1424 case 4:
1425 __ add(idx, array, ShifterOperand(idx, LSL, 1)); 1425 __ add(idx, array, ShifterOperand(idx, LSL, 1));
1426 break; 1426 break;
1427 case 8: 1427 case 8:
1428 __ add(idx, array, ShifterOperand(idx, LSL, 2)); 1428 __ add(idx, array, ShifterOperand(idx, LSL, 2));
1429 break; 1429 break;
1430 case 16: 1430 case 16:
1431 __ add(idx, array, ShifterOperand(idx, LSL, 3)); 1431 __ add(idx, array, ShifterOperand(idx, LSL, 3));
1432 break; 1432 break;
1433 default: 1433 default:
1434 // Case 2 is not reachable: We don't have unboxed 16-bit sized loads. 1434 // Case 2 is not reachable: We don't have unboxed 16-bit sized loads.
1435 UNREACHABLE(); 1435 UNREACHABLE();
1436 } 1436 }
1437 if (!IsExternal()) { 1437 if (!IsExternal()) {
1438 ASSERT(this->array()->definition()->representation() == kTagged); 1438 ASSERT(this->array()->definition()->representation() == kTagged);
1439 __ AddImmediate(idx, 1439 __ AddImmediate(idx,
1440 FlowGraphCompiler::DataOffsetFor(class_id()) - kHeapObjectTag); 1440 FlowGraphCompiler::DataOffsetFor(class_id()) - kHeapObjectTag);
1441 } 1441 }
1442 switch (class_id()) { 1442 switch (class_id()) {
1443 case kTypedDataFloat32ArrayCid: { 1443 case kTypedDataFloat32ArrayCid: {
1444 SRegister value_reg = 1444 const SRegister value_reg =
1445 EvenSRegisterOf(EvenDRegisterOf(value.fpu_reg())); 1445 EvenSRegisterOf(EvenDRegisterOf(value.fpu_reg()));
1446 __ StoreSToOffset(value_reg, idx, 0); 1446 __ StoreSToOffset(value_reg, idx, 0);
1447 break; 1447 break;
1448 } 1448 }
1449 case kTypedDataFloat64ArrayCid: { 1449 case kTypedDataFloat64ArrayCid: {
1450 DRegister value_reg = EvenDRegisterOf(value.fpu_reg()); 1450 const DRegister value_reg = EvenDRegisterOf(value.fpu_reg());
1451 __ StoreDToOffset(value_reg, idx, 0); 1451 __ StoreDToOffset(value_reg, idx, 0);
1452 break; 1452 break;
1453 } 1453 }
1454 case kTypedDataFloat64x2ArrayCid: 1454 case kTypedDataFloat64x2ArrayCid:
1455 case kTypedDataInt32x4ArrayCid: 1455 case kTypedDataInt32x4ArrayCid:
1456 case kTypedDataFloat32x4ArrayCid: { 1456 case kTypedDataFloat32x4ArrayCid: {
1457 const DRegister value_reg = EvenDRegisterOf(value.fpu_reg()); 1457 const DRegister value_reg = EvenDRegisterOf(value.fpu_reg());
1458 __ vstmd(IA, idx, value_reg, 2); 1458 __ vstmd(IA, idx, value_reg, 2);
1459 break; 1459 break;
1460 } 1460 }
1461 default: 1461 default:
1462 UNREACHABLE(); 1462 UNREACHABLE();
1463 } 1463 }
1464 return; 1464 return;
1465 } 1465 }
1466 1466
1467 Register array = locs()->in(0).reg(); 1467 const Register array = locs()->in(0).reg();
1468 Location index = locs()->in(1); 1468 Location index = locs()->in(1);
1469 1469
1470 Address element_address(kNoRegister, 0); 1470 Address element_address(kNoRegister, 0);
1471 ASSERT(index.IsRegister()); // TODO(regis): Revisit. 1471 ASSERT(index.IsRegister()); // TODO(regis): Revisit.
1472 // Note that index is expected smi-tagged, (i.e, times 2) for all arrays 1472 // Note that index is expected smi-tagged, (i.e, times 2) for all arrays
1473 // with index scale factor > 1. E.g., for Uint8Array and OneByteString the 1473 // with index scale factor > 1. E.g., for Uint8Array and OneByteString the
1474 // index is expected to be untagged before accessing. 1474 // index is expected to be untagged before accessing.
1475 ASSERT(kSmiTagShift == 1); 1475 ASSERT(kSmiTagShift == 1);
1476 const intptr_t offset = IsExternal() 1476 const intptr_t offset = IsExternal()
1477 ? 0 1477 ? 0
(...skipping 20 matching lines...) Expand all
1498 break; 1498 break;
1499 } 1499 }
1500 // Cases 8 and 16 are only for unboxed values and are handled above. 1500 // Cases 8 and 16 are only for unboxed values and are handled above.
1501 default: 1501 default:
1502 UNREACHABLE(); 1502 UNREACHABLE();
1503 } 1503 }
1504 1504
1505 switch (class_id()) { 1505 switch (class_id()) {
1506 case kArrayCid: 1506 case kArrayCid:
1507 if (ShouldEmitStoreBarrier()) { 1507 if (ShouldEmitStoreBarrier()) {
1508 Register value = locs()->in(2).reg(); 1508 const Register value = locs()->in(2).reg();
1509 __ StoreIntoObject(array, element_address, value); 1509 __ StoreIntoObject(array, element_address, value);
1510 } else if (locs()->in(2).IsConstant()) { 1510 } else if (locs()->in(2).IsConstant()) {
1511 const Object& constant = locs()->in(2).constant(); 1511 const Object& constant = locs()->in(2).constant();
1512 __ StoreIntoObjectNoBarrier(array, element_address, constant); 1512 __ StoreIntoObjectNoBarrier(array, element_address, constant);
1513 } else { 1513 } else {
1514 Register value = locs()->in(2).reg(); 1514 const Register value = locs()->in(2).reg();
1515 __ StoreIntoObjectNoBarrier(array, element_address, value); 1515 __ StoreIntoObjectNoBarrier(array, element_address, value);
1516 } 1516 }
1517 break; 1517 break;
1518 case kTypedDataInt8ArrayCid: 1518 case kTypedDataInt8ArrayCid:
1519 case kTypedDataUint8ArrayCid: 1519 case kTypedDataUint8ArrayCid:
1520 case kExternalTypedDataUint8ArrayCid: 1520 case kExternalTypedDataUint8ArrayCid:
1521 case kOneByteStringCid: { 1521 case kOneByteStringCid: {
1522 if (locs()->in(2).IsConstant()) { 1522 if (locs()->in(2).IsConstant()) {
1523 const Smi& constant = Smi::Cast(locs()->in(2).constant()); 1523 const Smi& constant = Smi::Cast(locs()->in(2).constant());
1524 __ LoadImmediate(IP, static_cast<int8_t>(constant.Value())); 1524 __ LoadImmediate(IP, static_cast<int8_t>(constant.Value()));
1525 __ strb(IP, element_address); 1525 __ strb(IP, element_address);
1526 } else { 1526 } else {
1527 Register value = locs()->in(2).reg(); 1527 const Register value = locs()->in(2).reg();
1528 __ SmiUntag(value); 1528 __ SmiUntag(value);
1529 __ strb(value, element_address); 1529 __ strb(value, element_address);
1530 } 1530 }
1531 break; 1531 break;
1532 } 1532 }
1533 case kTypedDataUint8ClampedArrayCid: 1533 case kTypedDataUint8ClampedArrayCid:
1534 case kExternalTypedDataUint8ClampedArrayCid: { 1534 case kExternalTypedDataUint8ClampedArrayCid: {
1535 if (locs()->in(2).IsConstant()) { 1535 if (locs()->in(2).IsConstant()) {
1536 const Smi& constant = Smi::Cast(locs()->in(2).constant()); 1536 const Smi& constant = Smi::Cast(locs()->in(2).constant());
1537 intptr_t value = constant.Value(); 1537 intptr_t value = constant.Value();
1538 // Clamp to 0x0 or 0xFF respectively. 1538 // Clamp to 0x0 or 0xFF respectively.
1539 if (value > 0xFF) { 1539 if (value > 0xFF) {
1540 value = 0xFF; 1540 value = 0xFF;
1541 } else if (value < 0) { 1541 } else if (value < 0) {
1542 value = 0; 1542 value = 0;
1543 } 1543 }
1544 __ LoadImmediate(IP, static_cast<int8_t>(value)); 1544 __ LoadImmediate(IP, static_cast<int8_t>(value));
1545 __ strb(IP, element_address); 1545 __ strb(IP, element_address);
1546 } else { 1546 } else {
1547 Register value = locs()->in(2).reg(); 1547 const Register value = locs()->in(2).reg();
1548 Label store_value; 1548 Label store_value;
1549 __ SmiUntag(value); 1549 __ SmiUntag(value);
1550 __ cmp(value, ShifterOperand(0xFF)); 1550 __ cmp(value, ShifterOperand(0xFF));
1551 // Clamp to 0x00 or 0xFF respectively. 1551 // Clamp to 0x00 or 0xFF respectively.
1552 __ b(&store_value, LS); 1552 __ b(&store_value, LS);
1553 __ mov(value, ShifterOperand(0x00), LE); 1553 __ mov(value, ShifterOperand(0x00), LE);
1554 __ mov(value, ShifterOperand(0xFF), GT); 1554 __ mov(value, ShifterOperand(0xFF), GT);
1555 __ Bind(&store_value); 1555 __ Bind(&store_value);
1556 __ strb(value, element_address); 1556 __ strb(value, element_address);
1557 } 1557 }
1558 break; 1558 break;
1559 } 1559 }
1560 case kTypedDataInt16ArrayCid: 1560 case kTypedDataInt16ArrayCid:
1561 case kTypedDataUint16ArrayCid: { 1561 case kTypedDataUint16ArrayCid: {
1562 Register value = locs()->in(2).reg(); 1562 const Register value = locs()->in(2).reg();
1563 __ SmiUntag(value); 1563 __ SmiUntag(value);
1564 __ strh(value, element_address); 1564 __ strh(value, element_address);
1565 break; 1565 break;
1566 } 1566 }
1567 case kTypedDataInt32ArrayCid: 1567 case kTypedDataInt32ArrayCid:
1568 case kTypedDataUint32ArrayCid: { 1568 case kTypedDataUint32ArrayCid: {
1569 if (value()->IsSmiValue()) { 1569 if (value()->IsSmiValue()) {
1570 ASSERT(RequiredInputRepresentation(2) == kTagged); 1570 ASSERT(RequiredInputRepresentation(2) == kTagged);
1571 Register value = locs()->in(2).reg(); 1571 const Register value = locs()->in(2).reg();
1572 __ SmiUntag(value); 1572 __ SmiUntag(value);
1573 __ str(value, element_address); 1573 __ str(value, element_address);
1574 } else { 1574 } else {
1575 ASSERT(RequiredInputRepresentation(2) == kUnboxedMint); 1575 ASSERT(RequiredInputRepresentation(2) == kUnboxedMint);
1576 QRegister value = locs()->in(2).fpu_reg(); 1576 const QRegister value = locs()->in(2).fpu_reg();
1577 ASSERT(value == Q7); 1577 ASSERT(value == Q7);
1578 __ vmovrs(TMP, EvenSRegisterOf(EvenDRegisterOf(value))); 1578 __ vmovrs(TMP, EvenSRegisterOf(EvenDRegisterOf(value)));
1579 __ str(TMP, element_address); 1579 __ str(TMP, element_address);
1580 } 1580 }
1581 break; 1581 break;
1582 } 1582 }
1583 default: 1583 default:
1584 UNREACHABLE(); 1584 UNREACHABLE();
1585 } 1585 }
1586 } 1586 }
(...skipping 28 matching lines...) Expand all
1615 ASSERT(field().is_final()); 1615 ASSERT(field().is_final());
1616 } 1616 }
1617 1617
1618 if (field_cid == kDynamicCid) { 1618 if (field_cid == kDynamicCid) {
1619 ASSERT(!compiler->is_optimizing()); 1619 ASSERT(!compiler->is_optimizing());
1620 return; // Nothing to emit. 1620 return; // Nothing to emit.
1621 } 1621 }
1622 1622
1623 const intptr_t value_cid = value()->Type()->ToCid(); 1623 const intptr_t value_cid = value()->Type()->ToCid();
1624 1624
1625 Register value_reg = locs()->in(0).reg(); 1625 const Register value_reg = locs()->in(0).reg();
1626 1626
1627 Register value_cid_reg = locs()->temp(0).reg(); 1627 const Register value_cid_reg = locs()->temp(0).reg();
1628 1628
1629 Register temp_reg = locs()->temp(1).reg(); 1629 const Register temp_reg = locs()->temp(1).reg();
1630 1630
1631 Register field_reg = needs_field_temp_reg ? 1631 Register field_reg = needs_field_temp_reg ?
1632 locs()->temp(locs()->temp_count() - 1).reg() : kNoRegister; 1632 locs()->temp(locs()->temp_count() - 1).reg() : kNoRegister;
1633 1633
1634 Label ok, fail_label; 1634 Label ok, fail_label;
1635 1635
1636 Label* deopt = compiler->is_optimizing() ? 1636 Label* deopt = compiler->is_optimizing() ?
1637 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) : NULL; 1637 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) : NULL;
1638 1638
1639 Label* fail = (deopt != NULL) ? deopt : &fail_label; 1639 Label* fail = (deopt != NULL) ? deopt : &fail_label;
(...skipping 328 matching lines...) Expand 10 before | Expand all | Expand 10 after
1968 ? Location::WritableRegister() 1968 ? Location::WritableRegister()
1969 : Location::RegisterOrConstant(value())); 1969 : Location::RegisterOrConstant(value()));
1970 } 1970 }
1971 return summary; 1971 return summary;
1972 } 1972 }
1973 1973
1974 1974
1975 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1975 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1976 Label skip_store; 1976 Label skip_store;
1977 1977
1978 Register instance_reg = locs()->in(0).reg(); 1978 const Register instance_reg = locs()->in(0).reg();
1979 1979
1980 if (IsUnboxedStore() && compiler->is_optimizing()) { 1980 if (IsUnboxedStore() && compiler->is_optimizing()) {
1981 const DRegister value = EvenDRegisterOf(locs()->in(1).fpu_reg()); 1981 const DRegister value = EvenDRegisterOf(locs()->in(1).fpu_reg());
1982 const Register temp = locs()->temp(0).reg(); 1982 const Register temp = locs()->temp(0).reg();
1983 const Register temp2 = locs()->temp(1).reg(); 1983 const Register temp2 = locs()->temp(1).reg();
1984 const intptr_t cid = field().UnboxedFieldCid(); 1984 const intptr_t cid = field().UnboxedFieldCid();
1985 1985
1986 if (is_initialization_) { 1986 if (is_initialization_) {
1987 const Class* cls = NULL; 1987 const Class* cls = NULL;
1988 switch (cid) { 1988 switch (cid) {
(...skipping 164 matching lines...) Expand 10 before | Expand all | Expand 10 after
2153 temp2); 2153 temp2);
2154 __ Bind(&copy_float64x2); 2154 __ Bind(&copy_float64x2);
2155 __ CopyFloat64x2Field(temp, value_reg, TMP, temp2, fpu_temp); 2155 __ CopyFloat64x2Field(temp, value_reg, TMP, temp2, fpu_temp);
2156 __ b(&skip_store); 2156 __ b(&skip_store);
2157 } 2157 }
2158 2158
2159 __ Bind(&store_pointer); 2159 __ Bind(&store_pointer);
2160 } 2160 }
2161 2161
2162 if (ShouldEmitStoreBarrier()) { 2162 if (ShouldEmitStoreBarrier()) {
2163 Register value_reg = locs()->in(1).reg(); 2163 const Register value_reg = locs()->in(1).reg();
2164 __ StoreIntoObject(instance_reg, 2164 __ StoreIntoObject(instance_reg,
2165 FieldAddress(instance_reg, offset_in_bytes_), 2165 FieldAddress(instance_reg, offset_in_bytes_),
2166 value_reg, 2166 value_reg,
2167 CanValueBeSmi()); 2167 CanValueBeSmi());
2168 } else { 2168 } else {
2169 if (locs()->in(1).IsConstant()) { 2169 if (locs()->in(1).IsConstant()) {
2170 __ StoreIntoObjectNoBarrier( 2170 __ StoreIntoObjectNoBarrier(
2171 instance_reg, 2171 instance_reg,
2172 FieldAddress(instance_reg, offset_in_bytes_), 2172 FieldAddress(instance_reg, offset_in_bytes_),
2173 locs()->in(1).constant()); 2173 locs()->in(1).constant());
2174 } else { 2174 } else {
2175 Register value_reg = locs()->in(1).reg(); 2175 const Register value_reg = locs()->in(1).reg();
2176 __ StoreIntoObjectNoBarrier(instance_reg, 2176 __ StoreIntoObjectNoBarrier(instance_reg,
2177 FieldAddress(instance_reg, offset_in_bytes_), value_reg); 2177 FieldAddress(instance_reg, offset_in_bytes_), value_reg);
2178 } 2178 }
2179 } 2179 }
2180 __ Bind(&skip_store); 2180 __ Bind(&skip_store);
2181 } 2181 }
2182 2182
2183 2183
2184 LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(bool opt) const { 2184 LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(bool opt) const {
2185 const intptr_t kNumInputs = 1; 2185 const intptr_t kNumInputs = 1;
2186 const intptr_t kNumTemps = 0; 2186 const intptr_t kNumTemps = 0;
2187 LocationSummary* summary = 2187 LocationSummary* summary =
2188 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 2188 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
2189 summary->set_in(0, Location::RequiresRegister()); 2189 summary->set_in(0, Location::RequiresRegister());
2190 summary->set_out(0, Location::RequiresRegister()); 2190 summary->set_out(0, Location::RequiresRegister());
2191 return summary; 2191 return summary;
2192 } 2192 }
2193 2193
2194 2194
2195 // When the parser is building an implicit static getter for optimization, 2195 // When the parser is building an implicit static getter for optimization,
2196 // it can generate a function body where deoptimization ids do not line up 2196 // it can generate a function body where deoptimization ids do not line up
2197 // with the unoptimized code. 2197 // with the unoptimized code.
2198 // 2198 //
2199 // This is safe only so long as LoadStaticFieldInstr cannot deoptimize. 2199 // This is safe only so long as LoadStaticFieldInstr cannot deoptimize.
2200 void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2200 void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2201 Register field = locs()->in(0).reg(); 2201 const Register field = locs()->in(0).reg();
2202 Register result = locs()->out(0).reg(); 2202 const Register result = locs()->out(0).reg();
2203 __ LoadFromOffset(kWord, result, 2203 __ LoadFromOffset(kWord, result,
2204 field, Field::value_offset() - kHeapObjectTag); 2204 field, Field::value_offset() - kHeapObjectTag);
2205 } 2205 }
2206 2206
2207 2207
2208 LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(bool opt) const { 2208 LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(bool opt) const {
2209 LocationSummary* locs = new LocationSummary(1, 1, LocationSummary::kNoCall); 2209 LocationSummary* locs = new LocationSummary(1, 1, LocationSummary::kNoCall);
2210 locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister() 2210 locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister()
2211 : Location::RequiresRegister()); 2211 : Location::RequiresRegister());
2212 locs->set_temp(0, Location::RequiresRegister()); 2212 locs->set_temp(0, Location::RequiresRegister());
2213 return locs; 2213 return locs;
2214 } 2214 }
2215 2215
2216 2216
2217 void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2217 void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2218 Register value = locs()->in(0).reg(); 2218 const Register value = locs()->in(0).reg();
2219 Register temp = locs()->temp(0).reg(); 2219 const Register temp = locs()->temp(0).reg();
2220 2220
2221 __ LoadObject(temp, field()); 2221 __ LoadObject(temp, field());
2222 if (this->value()->NeedsStoreBuffer()) { 2222 if (this->value()->NeedsStoreBuffer()) {
2223 __ StoreIntoObject(temp, 2223 __ StoreIntoObject(temp,
2224 FieldAddress(temp, Field::value_offset()), value, CanValueBeSmi()); 2224 FieldAddress(temp, Field::value_offset()), value, CanValueBeSmi());
2225 } else { 2225 } else {
2226 __ StoreIntoObjectNoBarrier( 2226 __ StoreIntoObjectNoBarrier(
2227 temp, FieldAddress(temp, Field::value_offset()), value); 2227 temp, FieldAddress(temp, Field::value_offset()), value);
2228 } 2228 }
2229 } 2229 }
(...skipping 294 matching lines...) Expand 10 before | Expand all | Expand 10 after
2524 __ LoadMultipleDFromOffset(result, 2, temp, 2524 __ LoadMultipleDFromOffset(result, 2, temp,
2525 Float64x2::value_offset() - kHeapObjectTag); 2525 Float64x2::value_offset() - kHeapObjectTag);
2526 break; 2526 break;
2527 default: 2527 default:
2528 UNREACHABLE(); 2528 UNREACHABLE();
2529 } 2529 }
2530 return; 2530 return;
2531 } 2531 }
2532 2532
2533 Label done; 2533 Label done;
2534 Register result_reg = locs()->out(0).reg(); 2534 const Register result_reg = locs()->out(0).reg();
2535 if (IsPotentialUnboxedLoad()) { 2535 if (IsPotentialUnboxedLoad()) {
2536 const DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg()); 2536 const DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg());
2537 const Register temp = locs()->temp(1).reg(); 2537 const Register temp = locs()->temp(1).reg();
2538 const Register temp2 = locs()->temp(2).reg(); 2538 const Register temp2 = locs()->temp(2).reg();
2539 2539
2540 Label load_pointer; 2540 Label load_pointer;
2541 Label load_double; 2541 Label load_double;
2542 Label load_float32x4; 2542 Label load_float32x4;
2543 Label load_float64x2; 2543 Label load_float64x2;
2544 2544
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after
2629 const intptr_t kNumTemps = 0; 2629 const intptr_t kNumTemps = 0;
2630 LocationSummary* locs = 2630 LocationSummary* locs =
2631 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 2631 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
2632 locs->set_in(0, Location::RegisterLocation(R0)); 2632 locs->set_in(0, Location::RegisterLocation(R0));
2633 locs->set_out(0, Location::RegisterLocation(R0)); 2633 locs->set_out(0, Location::RegisterLocation(R0));
2634 return locs; 2634 return locs;
2635 } 2635 }
2636 2636
2637 2637
2638 void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2638 void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2639 Register instantiator_reg = locs()->in(0).reg(); 2639 const Register instantiator_reg = locs()->in(0).reg();
2640 Register result_reg = locs()->out(0).reg(); 2640 const Register result_reg = locs()->out(0).reg();
2641 2641
2642 // 'instantiator_reg' is the instantiator TypeArguments object (or null). 2642 // 'instantiator_reg' is the instantiator TypeArguments object (or null).
2643 // A runtime call to instantiate the type is required. 2643 // A runtime call to instantiate the type is required.
2644 __ PushObject(Object::ZoneHandle()); // Make room for the result. 2644 __ PushObject(Object::ZoneHandle()); // Make room for the result.
2645 __ PushObject(type()); 2645 __ PushObject(type());
2646 __ Push(instantiator_reg); // Push instantiator type arguments. 2646 __ Push(instantiator_reg); // Push instantiator type arguments.
2647 compiler->GenerateRuntimeCall(token_pos(), 2647 compiler->GenerateRuntimeCall(token_pos(),
2648 deopt_id(), 2648 deopt_id(),
2649 kInstantiateTypeRuntimeEntry, 2649 kInstantiateTypeRuntimeEntry,
2650 2, 2650 2,
(...skipping 11 matching lines...) Expand all
2662 LocationSummary* locs = 2662 LocationSummary* locs =
2663 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 2663 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
2664 locs->set_in(0, Location::RegisterLocation(R0)); 2664 locs->set_in(0, Location::RegisterLocation(R0));
2665 locs->set_out(0, Location::RegisterLocation(R0)); 2665 locs->set_out(0, Location::RegisterLocation(R0));
2666 return locs; 2666 return locs;
2667 } 2667 }
2668 2668
2669 2669
2670 void InstantiateTypeArgumentsInstr::EmitNativeCode( 2670 void InstantiateTypeArgumentsInstr::EmitNativeCode(
2671 FlowGraphCompiler* compiler) { 2671 FlowGraphCompiler* compiler) {
2672 Register instantiator_reg = locs()->in(0).reg(); 2672 const Register instantiator_reg = locs()->in(0).reg();
2673 Register result_reg = locs()->out(0).reg(); 2673 const Register result_reg = locs()->out(0).reg();
2674 ASSERT(instantiator_reg == R0); 2674 ASSERT(instantiator_reg == R0);
2675 ASSERT(instantiator_reg == result_reg); 2675 ASSERT(instantiator_reg == result_reg);
2676 2676
2677 // 'instantiator_reg' is the instantiator TypeArguments object (or null). 2677 // 'instantiator_reg' is the instantiator TypeArguments object (or null).
2678 ASSERT(!type_arguments().IsUninstantiatedIdentity() && 2678 ASSERT(!type_arguments().IsUninstantiatedIdentity() &&
2679 !type_arguments().CanShareInstantiatorTypeArguments( 2679 !type_arguments().CanShareInstantiatorTypeArguments(
2680 instantiator_class())); 2680 instantiator_class()));
2681 // If the instantiator is null and if the type argument vector 2681 // If the instantiator is null and if the type argument vector
2682 // instantiated from null becomes a vector of dynamic, then use null as 2682 // instantiated from null becomes a vector of dynamic, then use null as
2683 // the type arguments. 2683 // the type arguments.
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
2754 const intptr_t kNumTemps = 0; 2754 const intptr_t kNumTemps = 0;
2755 LocationSummary* locs = 2755 LocationSummary* locs =
2756 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 2756 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
2757 locs->set_in(0, Location::RegisterLocation(R0)); 2757 locs->set_in(0, Location::RegisterLocation(R0));
2758 locs->set_out(0, Location::RegisterLocation(R0)); 2758 locs->set_out(0, Location::RegisterLocation(R0));
2759 return locs; 2759 return locs;
2760 } 2760 }
2761 2761
2762 2762
2763 void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2763 void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2764 Register context_value = locs()->in(0).reg(); 2764 const Register context_value = locs()->in(0).reg();
2765 Register result = locs()->out(0).reg(); 2765 const Register result = locs()->out(0).reg();
2766 2766
2767 __ PushObject(Object::ZoneHandle()); // Make room for the result. 2767 __ PushObject(Object::ZoneHandle()); // Make room for the result.
2768 __ Push(context_value); 2768 __ Push(context_value);
2769 compiler->GenerateRuntimeCall(token_pos(), 2769 compiler->GenerateRuntimeCall(token_pos(),
2770 deopt_id(), 2770 deopt_id(),
2771 kCloneContextRuntimeEntry, 2771 kCloneContextRuntimeEntry,
2772 1, 2772 1,
2773 locs()); 2773 locs());
2774 __ Drop(1); // Remove argument. 2774 __ Drop(1); // Remove argument.
2775 __ Pop(result); // Get result (cloned context). 2775 __ Pop(result); // Get result (cloned context).
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
2826 2826
2827 2827
2828 class CheckStackOverflowSlowPath : public SlowPathCode { 2828 class CheckStackOverflowSlowPath : public SlowPathCode {
2829 public: 2829 public:
2830 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction) 2830 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
2831 : instruction_(instruction) { } 2831 : instruction_(instruction) { }
2832 2832
2833 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { 2833 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2834 if (FLAG_use_osr) { 2834 if (FLAG_use_osr) {
2835 uword flags_address = Isolate::Current()->stack_overflow_flags_address(); 2835 uword flags_address = Isolate::Current()->stack_overflow_flags_address();
2836 Register value = instruction_->locs()->temp(0).reg(); 2836 const Register value = instruction_->locs()->temp(0).reg();
2837 __ Comment("CheckStackOverflowSlowPathOsr"); 2837 __ Comment("CheckStackOverflowSlowPathOsr");
2838 __ Bind(osr_entry_label()); 2838 __ Bind(osr_entry_label());
2839 __ LoadImmediate(IP, flags_address); 2839 __ LoadImmediate(IP, flags_address);
2840 __ LoadImmediate(value, Isolate::kOsrRequest); 2840 __ LoadImmediate(value, Isolate::kOsrRequest);
2841 __ str(value, Address(IP)); 2841 __ str(value, Address(IP));
2842 } 2842 }
2843 __ Comment("CheckStackOverflowSlowPath"); 2843 __ Comment("CheckStackOverflowSlowPath");
2844 __ Bind(entry_label()); 2844 __ Bind(entry_label());
2845 compiler->SaveLiveRegisters(instruction_->locs()); 2845 compiler->SaveLiveRegisters(instruction_->locs());
2846 // pending_deoptimization_env_ is needed to generate a runtime call that 2846 // pending_deoptimization_env_ is needed to generate a runtime call that
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
2878 2878
2879 void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2879 void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2880 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this); 2880 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
2881 compiler->AddSlowPathCode(slow_path); 2881 compiler->AddSlowPathCode(slow_path);
2882 2882
2883 __ LoadImmediate(IP, Isolate::Current()->stack_limit_address()); 2883 __ LoadImmediate(IP, Isolate::Current()->stack_limit_address());
2884 __ ldr(IP, Address(IP)); 2884 __ ldr(IP, Address(IP));
2885 __ cmp(SP, ShifterOperand(IP)); 2885 __ cmp(SP, ShifterOperand(IP));
2886 __ b(slow_path->entry_label(), LS); 2886 __ b(slow_path->entry_label(), LS);
2887 if (compiler->CanOSRFunction() && in_loop()) { 2887 if (compiler->CanOSRFunction() && in_loop()) {
2888 Register temp = locs()->temp(0).reg(); 2888 const Register temp = locs()->temp(0).reg();
2889 // In unoptimized code check the usage counter to trigger OSR at loop 2889 // In unoptimized code check the usage counter to trigger OSR at loop
2890 // stack checks. Use progressively higher thresholds for more deeply 2890 // stack checks. Use progressively higher thresholds for more deeply
2891 // nested loops to attempt to hit outer loops with OSR when possible. 2891 // nested loops to attempt to hit outer loops with OSR when possible.
2892 __ LoadObject(temp, compiler->parsed_function().function()); 2892 __ LoadObject(temp, compiler->parsed_function().function());
2893 intptr_t threshold = 2893 intptr_t threshold =
2894 FLAG_optimization_counter_threshold * (loop_depth() + 1); 2894 FLAG_optimization_counter_threshold * (loop_depth() + 1);
2895 __ ldr(temp, FieldAddress(temp, Function::usage_counter_offset())); 2895 __ ldr(temp, FieldAddress(temp, Function::usage_counter_offset()));
2896 __ CompareImmediate(temp, threshold); 2896 __ CompareImmediate(temp, threshold);
2897 __ b(slow_path->osr_entry_label(), GE); 2897 __ b(slow_path->osr_entry_label(), GE);
2898 } 2898 }
2899 if (compiler->ForceSlowPathForStackOverflow()) { 2899 if (compiler->ForceSlowPathForStackOverflow()) {
2900 __ b(slow_path->entry_label()); 2900 __ b(slow_path->entry_label());
2901 } 2901 }
2902 __ Bind(slow_path->exit_label()); 2902 __ Bind(slow_path->exit_label());
2903 } 2903 }
2904 2904
2905 2905
2906 static void EmitSmiShiftLeft(FlowGraphCompiler* compiler, 2906 static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
2907 BinarySmiOpInstr* shift_left) { 2907 BinarySmiOpInstr* shift_left) {
2908 const bool is_truncating = shift_left->is_truncating(); 2908 const bool is_truncating = shift_left->is_truncating();
2909 const LocationSummary& locs = *shift_left->locs(); 2909 const LocationSummary& locs = *shift_left->locs();
2910 Register left = locs.in(0).reg(); 2910 const Register left = locs.in(0).reg();
2911 Register result = locs.out(0).reg(); 2911 const Register result = locs.out(0).reg();
2912 Label* deopt = shift_left->CanDeoptimize() ? 2912 Label* deopt = shift_left->CanDeoptimize() ?
2913 compiler->AddDeoptStub(shift_left->deopt_id(), ICData::kDeoptBinarySmiOp) 2913 compiler->AddDeoptStub(shift_left->deopt_id(), ICData::kDeoptBinarySmiOp)
2914 : NULL; 2914 : NULL;
2915 if (locs.in(1).IsConstant()) { 2915 if (locs.in(1).IsConstant()) {
2916 const Object& constant = locs.in(1).constant(); 2916 const Object& constant = locs.in(1).constant();
2917 ASSERT(constant.IsSmi()); 2917 ASSERT(constant.IsSmi());
2918 // Immediate shift operation takes 5 bits for the count. 2918 // Immediate shift operation takes 5 bits for the count.
2919 const intptr_t kCountLimit = 0x1F; 2919 const intptr_t kCountLimit = 0x1F;
2920 const intptr_t value = Smi::Cast(constant).Value(); 2920 const intptr_t value = Smi::Cast(constant).Value();
2921 if (value == 0) { 2921 if (value == 0) {
(...skipping 14 matching lines...) Expand all
2936 __ cmp(left, ShifterOperand(IP, ASR, value)); 2936 __ cmp(left, ShifterOperand(IP, ASR, value));
2937 __ b(deopt, NE); // Overflow. 2937 __ b(deopt, NE); // Overflow.
2938 } 2938 }
2939 // Shift for result now we know there is no overflow. 2939 // Shift for result now we know there is no overflow.
2940 __ Lsl(result, left, value); 2940 __ Lsl(result, left, value);
2941 } 2941 }
2942 return; 2942 return;
2943 } 2943 }
2944 2944
2945 // Right (locs.in(1)) is not constant. 2945 // Right (locs.in(1)) is not constant.
2946 Register right = locs.in(1).reg(); 2946 const Register right = locs.in(1).reg();
2947 Range* right_range = shift_left->right()->definition()->range(); 2947 Range* right_range = shift_left->right()->definition()->range();
2948 if (shift_left->left()->BindsToConstant() && !is_truncating) { 2948 if (shift_left->left()->BindsToConstant() && !is_truncating) {
2949 // TODO(srdjan): Implement code below for is_truncating(). 2949 // TODO(srdjan): Implement code below for is_truncating().
2950 // If left is constant, we know the maximal allowed size for right. 2950 // If left is constant, we know the maximal allowed size for right.
2951 const Object& obj = shift_left->left()->BoundConstant(); 2951 const Object& obj = shift_left->left()->BoundConstant();
2952 if (obj.IsSmi()) { 2952 if (obj.IsSmi()) {
2953 const intptr_t left_int = Smi::Cast(obj).Value(); 2953 const intptr_t left_int = Smi::Cast(obj).Value();
2954 if (left_int == 0) { 2954 if (left_int == 0) {
2955 __ cmp(right, ShifterOperand(0)); 2955 __ cmp(right, ShifterOperand(0));
2956 __ b(deopt, MI); 2956 __ b(deopt, MI);
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
2998 if (right_needs_check) { 2998 if (right_needs_check) {
2999 ASSERT(shift_left->CanDeoptimize()); 2999 ASSERT(shift_left->CanDeoptimize());
3000 __ cmp(right, 3000 __ cmp(right,
3001 ShifterOperand(reinterpret_cast<int32_t>(Smi::New(Smi::kBits)))); 3001 ShifterOperand(reinterpret_cast<int32_t>(Smi::New(Smi::kBits))));
3002 __ b(deopt, CS); 3002 __ b(deopt, CS);
3003 } 3003 }
3004 // Left is not a constant. 3004 // Left is not a constant.
3005 // Check if count too large for handling it inlined. 3005 // Check if count too large for handling it inlined.
3006 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3006 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3007 // Overflow test (preserve left, right, and IP); 3007 // Overflow test (preserve left, right, and IP);
3008 Register temp = locs.temp(0).reg(); 3008 const Register temp = locs.temp(0).reg();
3009 __ Lsl(temp, left, IP); 3009 __ Lsl(temp, left, IP);
3010 __ cmp(left, ShifterOperand(temp, ASR, IP)); 3010 __ cmp(left, ShifterOperand(temp, ASR, IP));
3011 __ b(deopt, NE); // Overflow. 3011 __ b(deopt, NE); // Overflow.
3012 // Shift for result now we know there is no overflow. 3012 // Shift for result now we know there is no overflow.
3013 __ Lsl(result, left, IP); 3013 __ Lsl(result, left, IP);
3014 } 3014 }
3015 } 3015 }
3016 3016
3017 3017
3018 LocationSummary* BinarySmiOpInstr::MakeLocationSummary(bool opt) const { 3018 LocationSummary* BinarySmiOpInstr::MakeLocationSummary(bool opt) const {
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
3060 } 3060 }
3061 3061
3062 3062
3063 void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3063 void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3064 if (op_kind() == Token::kSHL) { 3064 if (op_kind() == Token::kSHL) {
3065 EmitSmiShiftLeft(compiler, this); 3065 EmitSmiShiftLeft(compiler, this);
3066 return; 3066 return;
3067 } 3067 }
3068 3068
3069 ASSERT(!is_truncating()); 3069 ASSERT(!is_truncating());
3070 Register left = locs()->in(0).reg(); 3070 const Register left = locs()->in(0).reg();
3071 Register result = locs()->out(0).reg(); 3071 const Register result = locs()->out(0).reg();
3072 Label* deopt = NULL; 3072 Label* deopt = NULL;
3073 if (CanDeoptimize()) { 3073 if (CanDeoptimize()) {
3074 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); 3074 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3075 } 3075 }
3076 3076
3077 if (locs()->in(1).IsConstant()) { 3077 if (locs()->in(1).IsConstant()) {
3078 const Object& constant = locs()->in(1).constant(); 3078 const Object& constant = locs()->in(1).constant();
3079 ASSERT(constant.IsSmi()); 3079 ASSERT(constant.IsSmi());
3080 const int32_t imm = reinterpret_cast<int32_t>(constant.raw()); 3080 const int32_t imm = reinterpret_cast<int32_t>(constant.raw());
3081 switch (op_kind()) { 3081 switch (op_kind()) {
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
3147 __ b(deopt, EQ); 3147 __ b(deopt, EQ);
3148 __ rsb(result, left, ShifterOperand(0)); 3148 __ rsb(result, left, ShifterOperand(0));
3149 break; 3149 break;
3150 } 3150 }
3151 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value))); 3151 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
3152 const intptr_t shift_count = 3152 const intptr_t shift_count =
3153 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize; 3153 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
3154 ASSERT(kSmiTagSize == 1); 3154 ASSERT(kSmiTagSize == 1);
3155 __ mov(IP, ShifterOperand(left, ASR, 31)); 3155 __ mov(IP, ShifterOperand(left, ASR, 31));
3156 ASSERT(shift_count > 1); // 1, -1 case handled above. 3156 ASSERT(shift_count > 1); // 1, -1 case handled above.
3157 Register temp = locs()->temp(0).reg(); 3157 const Register temp = locs()->temp(0).reg();
3158 __ add(temp, left, ShifterOperand(IP, LSR, 32 - shift_count)); 3158 __ add(temp, left, ShifterOperand(IP, LSR, 32 - shift_count));
3159 ASSERT(shift_count > 0); 3159 ASSERT(shift_count > 0);
3160 __ mov(result, ShifterOperand(temp, ASR, shift_count)); 3160 __ mov(result, ShifterOperand(temp, ASR, shift_count));
3161 if (value < 0) { 3161 if (value < 0) {
3162 __ rsb(result, result, ShifterOperand(0)); 3162 __ rsb(result, result, ShifterOperand(0));
3163 } 3163 }
3164 __ SmiTag(result); 3164 __ SmiTag(result);
3165 break; 3165 break;
3166 } 3166 }
3167 case Token::kBIT_AND: { 3167 case Token::kBIT_AND: {
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
3224 break; 3224 break;
3225 } 3225 }
3226 3226
3227 default: 3227 default:
3228 UNREACHABLE(); 3228 UNREACHABLE();
3229 break; 3229 break;
3230 } 3230 }
3231 return; 3231 return;
3232 } 3232 }
3233 3233
3234 Register right = locs()->in(1).reg(); 3234 const Register right = locs()->in(1).reg();
3235 Range* right_range = this->right()->definition()->range(); 3235 Range* right_range = this->right()->definition()->range();
3236 switch (op_kind()) { 3236 switch (op_kind()) {
3237 case Token::kADD: { 3237 case Token::kADD: {
3238 if (deopt == NULL) { 3238 if (deopt == NULL) {
3239 __ add(result, left, ShifterOperand(right)); 3239 __ add(result, left, ShifterOperand(right));
3240 } else { 3240 } else {
3241 __ adds(result, left, ShifterOperand(right)); 3241 __ adds(result, left, ShifterOperand(right));
3242 __ b(deopt, VS); 3242 __ b(deopt, VS);
3243 } 3243 }
3244 break; 3244 break;
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
3286 // No overflow check. 3286 // No overflow check.
3287 __ eor(result, left, ShifterOperand(right)); 3287 __ eor(result, left, ShifterOperand(right));
3288 break; 3288 break;
3289 } 3289 }
3290 case Token::kTRUNCDIV: { 3290 case Token::kTRUNCDIV: {
3291 if ((right_range == NULL) || right_range->Overlaps(0, 0)) { 3291 if ((right_range == NULL) || right_range->Overlaps(0, 0)) {
3292 // Handle divide by zero in runtime. 3292 // Handle divide by zero in runtime.
3293 __ cmp(right, ShifterOperand(0)); 3293 __ cmp(right, ShifterOperand(0));
3294 __ b(deopt, EQ); 3294 __ b(deopt, EQ);
3295 } 3295 }
3296 Register temp = locs()->temp(0).reg(); 3296 const Register temp = locs()->temp(0).reg();
3297 DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg()); 3297 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3298 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp. 3298 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
3299 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3299 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3300 3300
3301 __ IntegerDivide(result, temp, IP, dtemp, DTMP); 3301 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3302 3302
3303 // Check the corner case of dividing the 'MIN_SMI' with -1, in which 3303 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
3304 // case we cannot tag the result. 3304 // case we cannot tag the result.
3305 __ CompareImmediate(result, 0x40000000); 3305 __ CompareImmediate(result, 0x40000000);
3306 __ b(deopt, EQ); 3306 __ b(deopt, EQ);
3307 __ SmiTag(result); 3307 __ SmiTag(result);
3308 break; 3308 break;
3309 } 3309 }
3310 case Token::kMOD: { 3310 case Token::kMOD: {
3311 if ((right_range == NULL) || right_range->Overlaps(0, 0)) { 3311 if ((right_range == NULL) || right_range->Overlaps(0, 0)) {
3312 // Handle divide by zero in runtime. 3312 // Handle divide by zero in runtime.
3313 __ cmp(right, ShifterOperand(0)); 3313 __ cmp(right, ShifterOperand(0));
3314 __ b(deopt, EQ); 3314 __ b(deopt, EQ);
3315 } 3315 }
3316 Register temp = locs()->temp(0).reg(); 3316 const Register temp = locs()->temp(0).reg();
3317 DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg()); 3317 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3318 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp. 3318 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
3319 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3319 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3320 3320
3321 __ IntegerDivide(result, temp, IP, dtemp, DTMP); 3321 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3322 3322
3323 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3323 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3324 __ mls(result, IP, result, temp); // result <- left - right * result 3324 __ mls(result, IP, result, temp); // result <- left - right * result
3325 __ SmiTag(result); 3325 __ SmiTag(result);
3326 // res = left % right; 3326 // res = left % right;
3327 // if (res < 0) { 3327 // if (res < 0) {
(...skipping 19 matching lines...) Expand all
3347 __ b(deopt, LT); 3347 __ b(deopt, LT);
3348 } 3348 }
3349 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3349 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3350 // sarl operation masks the count to 5 bits. 3350 // sarl operation masks the count to 5 bits.
3351 const intptr_t kCountLimit = 0x1F; 3351 const intptr_t kCountLimit = 0x1F;
3352 if ((right_range == NULL) || 3352 if ((right_range == NULL) ||
3353 !right_range->IsWithin(RangeBoundary::kMinusInfinity, kCountLimit)) { 3353 !right_range->IsWithin(RangeBoundary::kMinusInfinity, kCountLimit)) {
3354 __ CompareImmediate(IP, kCountLimit); 3354 __ CompareImmediate(IP, kCountLimit);
3355 __ LoadImmediate(IP, kCountLimit, GT); 3355 __ LoadImmediate(IP, kCountLimit, GT);
3356 } 3356 }
3357 Register temp = locs()->temp(0).reg(); 3357 const Register temp = locs()->temp(0).reg();
3358 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp. 3358 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
3359 __ Asr(result, temp, IP); 3359 __ Asr(result, temp, IP);
3360 __ SmiTag(result); 3360 __ SmiTag(result);
3361 break; 3361 break;
3362 } 3362 }
3363 case Token::kDIV: { 3363 case Token::kDIV: {
3364 // Dispatches to 'Double./'. 3364 // Dispatches to 'Double./'.
3365 // TODO(srdjan): Implement as conversion to double and double division. 3365 // TODO(srdjan): Implement as conversion to double and double division.
3366 UNREACHABLE(); 3366 UNREACHABLE();
3367 break; 3367 break;
(...skipping 24 matching lines...) Expand all
3392 summary->set_in(1, Location::RequiresRegister()); 3392 summary->set_in(1, Location::RequiresRegister());
3393 return summary; 3393 return summary;
3394 } 3394 }
3395 3395
3396 3396
3397 void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3397 void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3398 Label* deopt = compiler->AddDeoptStub(deopt_id(), 3398 Label* deopt = compiler->AddDeoptStub(deopt_id(),
3399 ICData::kDeoptBinaryDoubleOp); 3399 ICData::kDeoptBinaryDoubleOp);
3400 intptr_t left_cid = left()->Type()->ToCid(); 3400 intptr_t left_cid = left()->Type()->ToCid();
3401 intptr_t right_cid = right()->Type()->ToCid(); 3401 intptr_t right_cid = right()->Type()->ToCid();
3402 Register left = locs()->in(0).reg(); 3402 const Register left = locs()->in(0).reg();
3403 Register right = locs()->in(1).reg(); 3403 const Register right = locs()->in(1).reg();
3404 if (this->left()->definition() == this->right()->definition()) { 3404 if (this->left()->definition() == this->right()->definition()) {
3405 __ tst(left, ShifterOperand(kSmiTagMask)); 3405 __ tst(left, ShifterOperand(kSmiTagMask));
3406 } else if (left_cid == kSmiCid) { 3406 } else if (left_cid == kSmiCid) {
3407 __ tst(right, ShifterOperand(kSmiTagMask)); 3407 __ tst(right, ShifterOperand(kSmiTagMask));
3408 } else if (right_cid == kSmiCid) { 3408 } else if (right_cid == kSmiCid) {
3409 __ tst(left, ShifterOperand(kSmiTagMask)); 3409 __ tst(left, ShifterOperand(kSmiTagMask));
3410 } else { 3410 } else {
3411 __ orr(IP, left, ShifterOperand(right)); 3411 __ orr(IP, left, ShifterOperand(right));
3412 __ tst(IP, ShifterOperand(kSmiTagMask)); 3412 __ tst(IP, ShifterOperand(kSmiTagMask));
3413 } 3413 }
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
3470 3470
3471 if (value_cid == kDoubleCid) { 3471 if (value_cid == kDoubleCid) {
3472 __ LoadDFromOffset(result, value, Double::value_offset() - kHeapObjectTag); 3472 __ LoadDFromOffset(result, value, Double::value_offset() - kHeapObjectTag);
3473 } else if (value_cid == kSmiCid) { 3473 } else if (value_cid == kSmiCid) {
3474 __ SmiUntag(value); // Untag input before conversion. 3474 __ SmiUntag(value); // Untag input before conversion.
3475 __ vmovsr(STMP, value); 3475 __ vmovsr(STMP, value);
3476 __ vcvtdi(result, STMP); 3476 __ vcvtdi(result, STMP);
3477 } else { 3477 } else {
3478 Label* deopt = compiler->AddDeoptStub(deopt_id_, 3478 Label* deopt = compiler->AddDeoptStub(deopt_id_,
3479 ICData::kDeoptBinaryDoubleOp); 3479 ICData::kDeoptBinaryDoubleOp);
3480 Register temp = locs()->temp(0).reg(); 3480 const Register temp = locs()->temp(0).reg();
3481 if (value_type->is_nullable() && 3481 if (value_type->is_nullable() &&
3482 (value_type->ToNullableCid() == kDoubleCid)) { 3482 (value_type->ToNullableCid() == kDoubleCid)) {
3483 __ CompareImmediate(value, reinterpret_cast<intptr_t>(Object::null())); 3483 __ CompareImmediate(value, reinterpret_cast<intptr_t>(Object::null()));
3484 __ b(deopt, EQ); 3484 __ b(deopt, EQ);
3485 // It must be double now. 3485 // It must be double now.
3486 __ LoadDFromOffset(result, value, 3486 __ LoadDFromOffset(result, value,
3487 Double::value_offset() - kHeapObjectTag); 3487 Double::value_offset() - kHeapObjectTag);
3488 } else { 3488 } else {
3489 Label is_smi, done; 3489 Label is_smi, done;
3490 __ tst(value, ShifterOperand(kSmiTagMask)); 3490 __ tst(value, ShifterOperand(kSmiTagMask));
(...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after
3775 LocationSummary* summary = 3775 LocationSummary* summary =
3776 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3776 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3777 summary->set_in(0, Location::RequiresFpuRegister()); 3777 summary->set_in(0, Location::RequiresFpuRegister());
3778 summary->set_in(1, Location::RequiresFpuRegister()); 3778 summary->set_in(1, Location::RequiresFpuRegister());
3779 summary->set_out(0, Location::RequiresFpuRegister()); 3779 summary->set_out(0, Location::RequiresFpuRegister());
3780 return summary; 3780 return summary;
3781 } 3781 }
3782 3782
3783 3783
3784 void BinaryFloat32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3784 void BinaryFloat32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3785 QRegister left = locs()->in(0).fpu_reg(); 3785 const QRegister left = locs()->in(0).fpu_reg();
3786 QRegister right = locs()->in(1).fpu_reg(); 3786 const QRegister right = locs()->in(1).fpu_reg();
3787 QRegister result = locs()->out(0).fpu_reg(); 3787 const QRegister result = locs()->out(0).fpu_reg();
3788 3788
3789 switch (op_kind()) { 3789 switch (op_kind()) {
3790 case Token::kADD: __ vaddqs(result, left, right); break; 3790 case Token::kADD: __ vaddqs(result, left, right); break;
3791 case Token::kSUB: __ vsubqs(result, left, right); break; 3791 case Token::kSUB: __ vsubqs(result, left, right); break;
3792 case Token::kMUL: __ vmulqs(result, left, right); break; 3792 case Token::kMUL: __ vmulqs(result, left, right); break;
3793 case Token::kDIV: __ Vdivqs(result, left, right); break; 3793 case Token::kDIV: __ Vdivqs(result, left, right); break;
3794 default: UNREACHABLE(); 3794 default: UNREACHABLE();
3795 } 3795 }
3796 } 3796 }
3797 3797
3798 3798
3799 LocationSummary* BinaryFloat64x2OpInstr::MakeLocationSummary(bool opt) const { 3799 LocationSummary* BinaryFloat64x2OpInstr::MakeLocationSummary(bool opt) const {
3800 const intptr_t kNumInputs = 2; 3800 const intptr_t kNumInputs = 2;
3801 const intptr_t kNumTemps = 0; 3801 const intptr_t kNumTemps = 0;
3802 LocationSummary* summary = 3802 LocationSummary* summary =
3803 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3803 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3804 summary->set_in(0, Location::RequiresFpuRegister()); 3804 summary->set_in(0, Location::RequiresFpuRegister());
3805 summary->set_in(1, Location::RequiresFpuRegister()); 3805 summary->set_in(1, Location::RequiresFpuRegister());
3806 summary->set_out(0, Location::RequiresFpuRegister()); 3806 summary->set_out(0, Location::RequiresFpuRegister());
3807 return summary; 3807 return summary;
3808 } 3808 }
3809 3809
3810 3810
3811 void BinaryFloat64x2OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3811 void BinaryFloat64x2OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3812 QRegister left = locs()->in(0).fpu_reg(); 3812 const QRegister left = locs()->in(0).fpu_reg();
3813 QRegister right = locs()->in(1).fpu_reg(); 3813 const QRegister right = locs()->in(1).fpu_reg();
3814 QRegister result = locs()->out(0).fpu_reg(); 3814 const QRegister result = locs()->out(0).fpu_reg();
3815 3815
3816 DRegister left0 = EvenDRegisterOf(left); 3816 const DRegister left0 = EvenDRegisterOf(left);
3817 DRegister left1 = OddDRegisterOf(left); 3817 const DRegister left1 = OddDRegisterOf(left);
3818 3818
3819 DRegister right0 = EvenDRegisterOf(right); 3819 const DRegister right0 = EvenDRegisterOf(right);
3820 DRegister right1 = OddDRegisterOf(right); 3820 const DRegister right1 = OddDRegisterOf(right);
3821 3821
3822 DRegister result0 = EvenDRegisterOf(result); 3822 const DRegister result0 = EvenDRegisterOf(result);
3823 DRegister result1 = OddDRegisterOf(result); 3823 const DRegister result1 = OddDRegisterOf(result);
3824 3824
3825 switch (op_kind()) { 3825 switch (op_kind()) {
3826 case Token::kADD: 3826 case Token::kADD:
3827 __ vaddd(result0, left0, right0); 3827 __ vaddd(result0, left0, right0);
3828 __ vaddd(result1, left1, right1); 3828 __ vaddd(result1, left1, right1);
3829 break; 3829 break;
3830 case Token::kSUB: 3830 case Token::kSUB:
3831 __ vsubd(result0, left0, right0); 3831 __ vsubd(result0, left0, right0);
3832 __ vsubd(result1, left1, right1); 3832 __ vsubd(result1, left1, right1);
3833 break; 3833 break;
(...skipping 16 matching lines...) Expand all
3850 LocationSummary* summary = 3850 LocationSummary* summary =
3851 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3851 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3852 // Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions. 3852 // Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions.
3853 summary->set_in(0, Location::FpuRegisterLocation(Q5)); 3853 summary->set_in(0, Location::FpuRegisterLocation(Q5));
3854 summary->set_out(0, Location::FpuRegisterLocation(Q6)); 3854 summary->set_out(0, Location::FpuRegisterLocation(Q6));
3855 return summary; 3855 return summary;
3856 } 3856 }
3857 3857
3858 3858
3859 void Simd32x4ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3859 void Simd32x4ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3860 QRegister value = locs()->in(0).fpu_reg(); 3860 const QRegister value = locs()->in(0).fpu_reg();
3861 QRegister result = locs()->out(0).fpu_reg(); 3861 const QRegister result = locs()->out(0).fpu_reg();
3862 DRegister dresult0 = EvenDRegisterOf(result); 3862 const DRegister dresult0 = EvenDRegisterOf(result);
3863 DRegister dresult1 = OddDRegisterOf(result); 3863 const DRegister dresult1 = OddDRegisterOf(result);
3864 SRegister sresult0 = EvenSRegisterOf(dresult0); 3864 const SRegister sresult0 = EvenSRegisterOf(dresult0);
3865 SRegister sresult1 = OddSRegisterOf(dresult0); 3865 const SRegister sresult1 = OddSRegisterOf(dresult0);
3866 SRegister sresult2 = EvenSRegisterOf(dresult1); 3866 const SRegister sresult2 = EvenSRegisterOf(dresult1);
3867 SRegister sresult3 = OddSRegisterOf(dresult1); 3867 const SRegister sresult3 = OddSRegisterOf(dresult1);
3868 3868
3869 DRegister dvalue0 = EvenDRegisterOf(value); 3869 const DRegister dvalue0 = EvenDRegisterOf(value);
3870 DRegister dvalue1 = OddDRegisterOf(value); 3870 const DRegister dvalue1 = OddDRegisterOf(value);
3871 3871
3872 DRegister dtemp0 = DTMP; 3872 const DRegister dtemp0 = DTMP;
3873 DRegister dtemp1 = OddDRegisterOf(QTMP); 3873 const DRegister dtemp1 = OddDRegisterOf(QTMP);
3874 3874
3875 // For some cases the vdup instruction requires fewer 3875 // For some cases the vdup instruction requires fewer
3876 // instructions. For arbitrary shuffles, use vtbl. 3876 // instructions. For arbitrary shuffles, use vtbl.
3877 3877
3878 switch (op_kind()) { 3878 switch (op_kind()) {
3879 case MethodRecognizer::kFloat32x4ShuffleX: 3879 case MethodRecognizer::kFloat32x4ShuffleX:
3880 __ vdup(kWord, result, dvalue0, 0); 3880 __ vdup(kWord, result, dvalue0, 0);
3881 __ vcvtds(dresult0, sresult0); 3881 __ vcvtds(dresult0, sresult0);
3882 break; 3882 break;
3883 case MethodRecognizer::kFloat32x4ShuffleY: 3883 case MethodRecognizer::kFloat32x4ShuffleY:
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
3931 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3931 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3932 // Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions. 3932 // Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions.
3933 summary->set_in(0, Location::FpuRegisterLocation(Q4)); 3933 summary->set_in(0, Location::FpuRegisterLocation(Q4));
3934 summary->set_in(1, Location::FpuRegisterLocation(Q5)); 3934 summary->set_in(1, Location::FpuRegisterLocation(Q5));
3935 summary->set_out(0, Location::FpuRegisterLocation(Q6)); 3935 summary->set_out(0, Location::FpuRegisterLocation(Q6));
3936 return summary; 3936 return summary;
3937 } 3937 }
3938 3938
3939 3939
3940 void Simd32x4ShuffleMixInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3940 void Simd32x4ShuffleMixInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3941 QRegister left = locs()->in(0).fpu_reg(); 3941 const QRegister left = locs()->in(0).fpu_reg();
3942 QRegister right = locs()->in(1).fpu_reg(); 3942 const QRegister right = locs()->in(1).fpu_reg();
3943 QRegister result = locs()->out(0).fpu_reg(); 3943 const QRegister result = locs()->out(0).fpu_reg();
3944 3944
3945 DRegister dresult0 = EvenDRegisterOf(result); 3945 const DRegister dresult0 = EvenDRegisterOf(result);
3946 DRegister dresult1 = OddDRegisterOf(result); 3946 const DRegister dresult1 = OddDRegisterOf(result);
3947 SRegister sresult0 = EvenSRegisterOf(dresult0); 3947 const SRegister sresult0 = EvenSRegisterOf(dresult0);
3948 SRegister sresult1 = OddSRegisterOf(dresult0); 3948 const SRegister sresult1 = OddSRegisterOf(dresult0);
3949 SRegister sresult2 = EvenSRegisterOf(dresult1); 3949 const SRegister sresult2 = EvenSRegisterOf(dresult1);
3950 SRegister sresult3 = OddSRegisterOf(dresult1); 3950 const SRegister sresult3 = OddSRegisterOf(dresult1);
3951 3951
3952 DRegister dleft0 = EvenDRegisterOf(left); 3952 const DRegister dleft0 = EvenDRegisterOf(left);
3953 DRegister dleft1 = OddDRegisterOf(left); 3953 const DRegister dleft1 = OddDRegisterOf(left);
3954 DRegister dright0 = EvenDRegisterOf(right); 3954 const DRegister dright0 = EvenDRegisterOf(right);
3955 DRegister dright1 = OddDRegisterOf(right); 3955 const DRegister dright1 = OddDRegisterOf(right);
3956 3956
3957 switch (op_kind()) { 3957 switch (op_kind()) {
3958 case MethodRecognizer::kFloat32x4ShuffleMix: 3958 case MethodRecognizer::kFloat32x4ShuffleMix:
3959 case MethodRecognizer::kInt32x4ShuffleMix: 3959 case MethodRecognizer::kInt32x4ShuffleMix:
3960 // TODO(zra): Investigate better instruction sequences for shuffle masks. 3960 // TODO(zra): Investigate better instruction sequences for shuffle masks.
3961 SRegister left_svalues[4]; 3961 SRegister left_svalues[4];
3962 SRegister right_svalues[4]; 3962 SRegister right_svalues[4];
3963 3963
3964 left_svalues[0] = EvenSRegisterOf(dleft0); 3964 left_svalues[0] = EvenSRegisterOf(dleft0);
3965 left_svalues[1] = OddSRegisterOf(dleft0); 3965 left_svalues[1] = OddSRegisterOf(dleft0);
(...skipping 20 matching lines...) Expand all
3986 LocationSummary* summary = 3986 LocationSummary* summary =
3987 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3987 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3988 summary->set_in(0, Location::FpuRegisterLocation(Q5)); 3988 summary->set_in(0, Location::FpuRegisterLocation(Q5));
3989 summary->set_temp(0, Location::RequiresRegister()); 3989 summary->set_temp(0, Location::RequiresRegister());
3990 summary->set_out(0, Location::RequiresRegister()); 3990 summary->set_out(0, Location::RequiresRegister());
3991 return summary; 3991 return summary;
3992 } 3992 }
3993 3993
3994 3994
3995 void Simd32x4GetSignMaskInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3995 void Simd32x4GetSignMaskInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3996 QRegister value = locs()->in(0).fpu_reg(); 3996 const QRegister value = locs()->in(0).fpu_reg();
3997 DRegister dvalue0 = EvenDRegisterOf(value); 3997 const DRegister dvalue0 = EvenDRegisterOf(value);
3998 DRegister dvalue1 = OddDRegisterOf(value); 3998 const DRegister dvalue1 = OddDRegisterOf(value);
3999 3999
4000 Register out = locs()->out(0).reg(); 4000 const Register out = locs()->out(0).reg();
4001 Register temp = locs()->temp(0).reg(); 4001 const Register temp = locs()->temp(0).reg();
4002 4002
4003 // X lane. 4003 // X lane.
4004 __ vmovrs(out, EvenSRegisterOf(dvalue0)); 4004 __ vmovrs(out, EvenSRegisterOf(dvalue0));
4005 __ Lsr(out, out, 31); 4005 __ Lsr(out, out, 31);
4006 // Y lane. 4006 // Y lane.
4007 __ vmovrs(temp, OddSRegisterOf(dvalue0)); 4007 __ vmovrs(temp, OddSRegisterOf(dvalue0));
4008 __ Lsr(temp, temp, 31); 4008 __ Lsr(temp, temp, 31);
4009 __ orr(out, out, ShifterOperand(temp, LSL, 1)); 4009 __ orr(out, out, ShifterOperand(temp, LSL, 1));
4010 // Z lane. 4010 // Z lane.
4011 __ vmovrs(temp, EvenSRegisterOf(dvalue1)); 4011 __ vmovrs(temp, EvenSRegisterOf(dvalue1));
(...skipping 18 matching lines...) Expand all
4030 summary->set_in(1, Location::RequiresFpuRegister()); 4030 summary->set_in(1, Location::RequiresFpuRegister());
4031 summary->set_in(2, Location::RequiresFpuRegister()); 4031 summary->set_in(2, Location::RequiresFpuRegister());
4032 summary->set_in(3, Location::RequiresFpuRegister()); 4032 summary->set_in(3, Location::RequiresFpuRegister());
4033 // Low (< 7) Q registers are needed for the vcvtsd instruction. 4033 // Low (< 7) Q registers are needed for the vcvtsd instruction.
4034 summary->set_out(0, Location::FpuRegisterLocation(Q6)); 4034 summary->set_out(0, Location::FpuRegisterLocation(Q6));
4035 return summary; 4035 return summary;
4036 } 4036 }
4037 4037
4038 4038
// Builds a Float32x4 from four unboxed doubles: each input is narrowed to a
// single-precision float and written into one lane of the result register.
void Float32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister q0 = locs()->in(0).fpu_reg();
  const QRegister q1 = locs()->in(1).fpu_reg();
  const QRegister q2 = locs()->in(2).fpu_reg();
  const QRegister q3 = locs()->in(3).fpu_reg();
  const QRegister r = locs()->out(0).fpu_reg();

  // The four result lanes are the S-register views of r's two D halves.
  // The output was pinned to a low Q register so these S views exist.
  const DRegister dr0 = EvenDRegisterOf(r);
  const DRegister dr1 = OddDRegisterOf(r);

  // vcvtsd narrows double -> float; each input double sits in the even D
  // half of its Q register.
  __ vcvtsd(EvenSRegisterOf(dr0), EvenDRegisterOf(q0));  // X lane.
  __ vcvtsd(OddSRegisterOf(dr0), EvenDRegisterOf(q1));   // Y lane.
  __ vcvtsd(EvenSRegisterOf(dr1), EvenDRegisterOf(q2));  // Z lane.
  __ vcvtsd(OddSRegisterOf(dr1), EvenDRegisterOf(q3));   // W lane.
}
4054 4054
4055 4055
4056 LocationSummary* Float32x4ZeroInstr::MakeLocationSummary(bool opt) const { 4056 LocationSummary* Float32x4ZeroInstr::MakeLocationSummary(bool opt) const {
4057 const intptr_t kNumInputs = 0; 4057 const intptr_t kNumInputs = 0;
4058 const intptr_t kNumTemps = 0; 4058 const intptr_t kNumTemps = 0;
4059 LocationSummary* summary = 4059 LocationSummary* summary =
4060 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4060 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4061 summary->set_out(0, Location::RequiresFpuRegister()); 4061 summary->set_out(0, Location::RequiresFpuRegister());
4062 return summary; 4062 return summary;
4063 } 4063 }
4064 4064
4065 4065
// Produces an all-zero Float32x4 by xor-ing the output register with itself.
void Float32x4ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister q = locs()->out(0).fpu_reg();
  __ veorq(q, q, q);
}
4070 4070
4071 4071
4072 LocationSummary* Float32x4SplatInstr::MakeLocationSummary(bool opt) const { 4072 LocationSummary* Float32x4SplatInstr::MakeLocationSummary(bool opt) const {
4073 const intptr_t kNumInputs = 1; 4073 const intptr_t kNumInputs = 1;
4074 const intptr_t kNumTemps = 0; 4074 const intptr_t kNumTemps = 0;
4075 LocationSummary* summary = 4075 LocationSummary* summary =
4076 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4076 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4077 summary->set_in(0, Location::RequiresFpuRegister()); 4077 summary->set_in(0, Location::RequiresFpuRegister());
4078 summary->set_out(0, Location::RequiresFpuRegister()); 4078 summary->set_out(0, Location::RequiresFpuRegister());
4079 return summary; 4079 return summary;
4080 } 4080 }
4081 4081
4082 4082
// Broadcasts an unboxed double, narrowed to a float, into all four lanes of
// the result.
void Float32x4SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister value = locs()->in(0).fpu_reg();
  const QRegister result = locs()->out(0).fpu_reg();

  // The input double lives in the even D half of its Q register.
  const DRegister dvalue0 = EvenDRegisterOf(value);

  // Convert to Float32.
  __ vcvtsd(STMP, dvalue0);

  // Splat across all lanes.
  // NOTE(review): relies on STMP overlapping lane 0 of the scratch DTMP --
  // confirm against the assembler's scratch-register definitions.
  __ vdup(kWord, result, DTMP, 0);
}
4095 4095
4096 4096
4097 LocationSummary* Float32x4ComparisonInstr::MakeLocationSummary(bool opt) const { 4097 LocationSummary* Float32x4ComparisonInstr::MakeLocationSummary(bool opt) const {
4098 const intptr_t kNumInputs = 2; 4098 const intptr_t kNumInputs = 2;
4099 const intptr_t kNumTemps = 0; 4099 const intptr_t kNumTemps = 0;
4100 LocationSummary* summary = 4100 LocationSummary* summary =
4101 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4101 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4102 summary->set_in(0, Location::RequiresFpuRegister()); 4102 summary->set_in(0, Location::RequiresFpuRegister());
4103 summary->set_in(1, Location::RequiresFpuRegister()); 4103 summary->set_in(1, Location::RequiresFpuRegister());
4104 summary->set_out(0, Location::RequiresFpuRegister()); 4104 summary->set_out(0, Location::RequiresFpuRegister());
4105 return summary; 4105 return summary;
4106 } 4106 }
4107 4107
4108 4108
4109 void Float32x4ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4109 void Float32x4ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4110 QRegister left = locs()->in(0).fpu_reg(); 4110 const QRegister left = locs()->in(0).fpu_reg();
4111 QRegister right = locs()->in(1).fpu_reg(); 4111 const QRegister right = locs()->in(1).fpu_reg();
4112 QRegister result = locs()->out(0).fpu_reg(); 4112 const QRegister result = locs()->out(0).fpu_reg();
4113 4113
4114 switch (op_kind()) { 4114 switch (op_kind()) {
4115 case MethodRecognizer::kFloat32x4Equal: 4115 case MethodRecognizer::kFloat32x4Equal:
4116 __ vceqqs(result, left, right); 4116 __ vceqqs(result, left, right);
4117 break; 4117 break;
4118 case MethodRecognizer::kFloat32x4NotEqual: 4118 case MethodRecognizer::kFloat32x4NotEqual:
4119 __ vceqqs(result, left, right); 4119 __ vceqqs(result, left, right);
4120 // Invert the result. 4120 // Invert the result.
4121 __ vmvnq(result, result); 4121 __ vmvnq(result, result);
4122 break; 4122 break;
(...skipping 21 matching lines...) Expand all
4144 LocationSummary* summary = 4144 LocationSummary* summary =
4145 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4145 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4146 summary->set_in(0, Location::RequiresFpuRegister()); 4146 summary->set_in(0, Location::RequiresFpuRegister());
4147 summary->set_in(1, Location::RequiresFpuRegister()); 4147 summary->set_in(1, Location::RequiresFpuRegister());
4148 summary->set_out(0, Location::RequiresFpuRegister()); 4148 summary->set_out(0, Location::RequiresFpuRegister());
4149 return summary; 4149 return summary;
4150 } 4150 }
4151 4151
4152 4152
// Lane-wise min or max of two Float32x4 values, selected by op_kind().
void Float32x4MinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister left = locs()->in(0).fpu_reg();
  const QRegister right = locs()->in(1).fpu_reg();
  const QRegister result = locs()->out(0).fpu_reg();

  switch (op_kind()) {
    case MethodRecognizer::kFloat32x4Min:
      // result[i] = min(left[i], right[i]) for each of the four lanes.
      __ vminqs(result, left, right);
      break;
    case MethodRecognizer::kFloat32x4Max:
      // result[i] = max(left[i], right[i]) for each of the four lanes.
      __ vmaxqs(result, left, right);
      break;
    default: UNREACHABLE();
  }
}
4168 4168
4169 4169
4170 LocationSummary* Float32x4SqrtInstr::MakeLocationSummary(bool opt) const { 4170 LocationSummary* Float32x4SqrtInstr::MakeLocationSummary(bool opt) const {
4171 const intptr_t kNumInputs = 1; 4171 const intptr_t kNumInputs = 1;
4172 const intptr_t kNumTemps = 1; 4172 const intptr_t kNumTemps = 1;
4173 LocationSummary* summary = 4173 LocationSummary* summary =
4174 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4174 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4175 summary->set_in(0, Location::RequiresFpuRegister()); 4175 summary->set_in(0, Location::RequiresFpuRegister());
4176 summary->set_out(0, Location::RequiresFpuRegister()); 4176 summary->set_out(0, Location::RequiresFpuRegister());
4177 summary->set_temp(0, Location::RequiresFpuRegister()); 4177 summary->set_temp(0, Location::RequiresFpuRegister());
4178 return summary; 4178 return summary;
4179 } 4179 }
4180 4180
4181 4181
// Lane-wise sqrt, reciprocal, or reciprocal-sqrt of a Float32x4, selected by
// op_kind(). The helpers are assembler macros that expand to NEON
// estimate/refine sequences.
void Float32x4SqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister left = locs()->in(0).fpu_reg();
  const QRegister result = locs()->out(0).fpu_reg();
  const QRegister temp = locs()->temp(0).fpu_reg();

  switch (op_kind()) {
    case MethodRecognizer::kFloat32x4Sqrt:
      // Only the sqrt sequence needs the extra scratch register.
      __ Vsqrtqs(result, left, temp);
      break;
    case MethodRecognizer::kFloat32x4Reciprocal:
      __ Vreciprocalqs(result, left);
      break;
    case MethodRecognizer::kFloat32x4ReciprocalSqrt:
      __ VreciprocalSqrtqs(result, left);
      break;
    default: UNREACHABLE();
  }
}
4200 4200
4201 4201
4202 LocationSummary* Float32x4ScaleInstr::MakeLocationSummary(bool opt) const { 4202 LocationSummary* Float32x4ScaleInstr::MakeLocationSummary(bool opt) const {
4203 const intptr_t kNumInputs = 2; 4203 const intptr_t kNumInputs = 2;
4204 const intptr_t kNumTemps = 0; 4204 const intptr_t kNumTemps = 0;
4205 LocationSummary* summary = 4205 LocationSummary* summary =
4206 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4206 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4207 summary->set_in(0, Location::RequiresFpuRegister()); 4207 summary->set_in(0, Location::RequiresFpuRegister());
4208 summary->set_in(1, Location::RequiresFpuRegister()); 4208 summary->set_in(1, Location::RequiresFpuRegister());
4209 summary->set_out(0, Location::RequiresFpuRegister()); 4209 summary->set_out(0, Location::RequiresFpuRegister());
4210 return summary; 4210 return summary;
4211 } 4211 }
4212 4212
4213 4213
// Multiplies every lane of a Float32x4 by an unboxed-double scale factor:
// narrow the scalar to a float, broadcast it, then lane-wise multiply.
void Float32x4ScaleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister left = locs()->in(0).fpu_reg();
  const QRegister right = locs()->in(1).fpu_reg();
  const QRegister result = locs()->out(0).fpu_reg();

  switch (op_kind()) {
    case MethodRecognizer::kFloat32x4Scale:
      // Narrow the double scale factor to a float in the scratch register.
      __ vcvtsd(STMP, EvenDRegisterOf(left));
      // Broadcast the scalar (lane 0 of the scratch D register) to all lanes.
      __ vdup(kWord, result, DTMP, 0);
      __ vmulqs(result, result, right);
      break;
    default: UNREACHABLE();
  }
}
4228 4228
4229 4229
4230 LocationSummary* Float32x4ZeroArgInstr::MakeLocationSummary(bool opt) const { 4230 LocationSummary* Float32x4ZeroArgInstr::MakeLocationSummary(bool opt) const {
4231 const intptr_t kNumInputs = 1; 4231 const intptr_t kNumInputs = 1;
4232 const intptr_t kNumTemps = 0; 4232 const intptr_t kNumTemps = 0;
4233 LocationSummary* summary = 4233 LocationSummary* summary =
4234 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4234 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4235 summary->set_in(0, Location::RequiresFpuRegister()); 4235 summary->set_in(0, Location::RequiresFpuRegister());
4236 summary->set_out(0, Location::RequiresFpuRegister()); 4236 summary->set_out(0, Location::RequiresFpuRegister());
4237 return summary; 4237 return summary;
4238 } 4238 }
4239 4239
4240 4240
// Lane-wise unary operation (negate or absolute value) on a Float32x4,
// selected by op_kind().
void Float32x4ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister left = locs()->in(0).fpu_reg();
  const QRegister result = locs()->out(0).fpu_reg();

  switch (op_kind()) {
    case MethodRecognizer::kFloat32x4Negate:
      __ vnegqs(result, left);
      break;
    case MethodRecognizer::kFloat32x4Absolute:
      __ vabsqs(result, left);
      break;
    default: UNREACHABLE();
  }
}
4255 4255
4256 4256
4257 LocationSummary* Float32x4ClampInstr::MakeLocationSummary(bool opt) const { 4257 LocationSummary* Float32x4ClampInstr::MakeLocationSummary(bool opt) const {
4258 const intptr_t kNumInputs = 3; 4258 const intptr_t kNumInputs = 3;
4259 const intptr_t kNumTemps = 0; 4259 const intptr_t kNumTemps = 0;
4260 LocationSummary* summary = 4260 LocationSummary* summary =
4261 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4261 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4262 summary->set_in(0, Location::RequiresFpuRegister()); 4262 summary->set_in(0, Location::RequiresFpuRegister());
4263 summary->set_in(1, Location::RequiresFpuRegister()); 4263 summary->set_in(1, Location::RequiresFpuRegister());
4264 summary->set_in(2, Location::RequiresFpuRegister()); 4264 summary->set_in(2, Location::RequiresFpuRegister());
4265 summary->set_out(0, Location::RequiresFpuRegister()); 4265 summary->set_out(0, Location::RequiresFpuRegister());
4266 return summary; 4266 return summary;
4267 } 4267 }
4268 4268
4269 4269
// Lane-wise clamp: result = max(min(left, upper), lower) for each lane.
void Float32x4ClampInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister left = locs()->in(0).fpu_reg();
  const QRegister lower = locs()->in(1).fpu_reg();
  const QRegister upper = locs()->in(2).fpu_reg();
  const QRegister result = locs()->out(0).fpu_reg();
  // First cap at the upper bound, then raise to the lower bound.
  __ vminqs(result, left, upper);
  __ vmaxqs(result, result, lower);
}
4278 4278
4279 4279
4280 LocationSummary* Float32x4WithInstr::MakeLocationSummary(bool opt) const { 4280 LocationSummary* Float32x4WithInstr::MakeLocationSummary(bool opt) const {
4281 const intptr_t kNumInputs = 2; 4281 const intptr_t kNumInputs = 2;
4282 const intptr_t kNumTemps = 0; 4282 const intptr_t kNumTemps = 0;
4283 LocationSummary* summary = 4283 LocationSummary* summary =
4284 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4284 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4285 summary->set_in(0, Location::RequiresFpuRegister()); 4285 summary->set_in(0, Location::RequiresFpuRegister());
4286 summary->set_in(1, Location::RequiresFpuRegister()); 4286 summary->set_in(1, Location::RequiresFpuRegister());
4287 // Low (< 7) Q registers are needed for the vmovs instruction. 4287 // Low (< 7) Q registers are needed for the vmovs instruction.
4288 summary->set_out(0, Location::FpuRegisterLocation(Q6)); 4288 summary->set_out(0, Location::FpuRegisterLocation(Q6));
4289 return summary; 4289 return summary;
4290 } 4290 }
4291 4291
4292 4292
4293 void Float32x4WithInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4293 void Float32x4WithInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4294 QRegister replacement = locs()->in(0).fpu_reg(); 4294 const QRegister replacement = locs()->in(0).fpu_reg();
4295 QRegister value = locs()->in(1).fpu_reg(); 4295 const QRegister value = locs()->in(1).fpu_reg();
4296 QRegister result = locs()->out(0).fpu_reg(); 4296 const QRegister result = locs()->out(0).fpu_reg();
4297 4297
4298 DRegister dresult0 = EvenDRegisterOf(result); 4298 const DRegister dresult0 = EvenDRegisterOf(result);
4299 DRegister dresult1 = OddDRegisterOf(result); 4299 const DRegister dresult1 = OddDRegisterOf(result);
4300 SRegister sresult0 = EvenSRegisterOf(dresult0); 4300 const SRegister sresult0 = EvenSRegisterOf(dresult0);
4301 SRegister sresult1 = OddSRegisterOf(dresult0); 4301 const SRegister sresult1 = OddSRegisterOf(dresult0);
4302 SRegister sresult2 = EvenSRegisterOf(dresult1); 4302 const SRegister sresult2 = EvenSRegisterOf(dresult1);
4303 SRegister sresult3 = OddSRegisterOf(dresult1); 4303 const SRegister sresult3 = OddSRegisterOf(dresult1);
4304 4304
4305 __ vcvtsd(STMP, EvenDRegisterOf(replacement)); 4305 __ vcvtsd(STMP, EvenDRegisterOf(replacement));
4306 if (result != value) { 4306 if (result != value) {
4307 __ vmovq(result, value); 4307 __ vmovq(result, value);
4308 } 4308 }
4309 4309
4310 switch (op_kind()) { 4310 switch (op_kind()) {
4311 case MethodRecognizer::kFloat32x4WithX: 4311 case MethodRecognizer::kFloat32x4WithX:
4312 __ vmovs(sresult0, STMP); 4312 __ vmovs(sresult0, STMP);
4313 break; 4313 break;
(...skipping 16 matching lines...) Expand all
4330 const intptr_t kNumTemps = 0; 4330 const intptr_t kNumTemps = 0;
4331 LocationSummary* summary = 4331 LocationSummary* summary =
4332 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4332 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4333 summary->set_in(0, Location::RequiresFpuRegister()); 4333 summary->set_in(0, Location::RequiresFpuRegister());
4334 summary->set_out(0, Location::RequiresFpuRegister()); 4334 summary->set_out(0, Location::RequiresFpuRegister());
4335 return summary; 4335 return summary;
4336 } 4336 }
4337 4337
4338 4338
// Reinterprets a Float32x4 as an Int32x4: the bit pattern is unchanged, so
// only a register move is needed (and none at all if in and out coincide).
void Float32x4ToInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister value = locs()->in(0).fpu_reg();
  const QRegister result = locs()->out(0).fpu_reg();

  if (value != result) {
    __ vmovq(result, value);
  }
}
4347 4347
4348 4348
4349 LocationSummary* Simd64x2ShuffleInstr::MakeLocationSummary(bool opt) const { 4349 LocationSummary* Simd64x2ShuffleInstr::MakeLocationSummary(bool opt) const {
4350 const intptr_t kNumInputs = 1; 4350 const intptr_t kNumInputs = 1;
4351 const intptr_t kNumTemps = 0; 4351 const intptr_t kNumTemps = 0;
4352 LocationSummary* summary = 4352 LocationSummary* summary =
4353 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4353 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4354 summary->set_in(0, Location::RequiresFpuRegister()); 4354 summary->set_in(0, Location::RequiresFpuRegister());
4355 summary->set_out(0, Location::RequiresFpuRegister()); 4355 summary->set_out(0, Location::RequiresFpuRegister());
4356 return summary; 4356 return summary;
4357 } 4357 }
4358 4358
4359 4359
// Extracts one double lane of a Float64x2 into the low (even) D half of the
// result register, selected by op_kind().
void Simd64x2ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister value = locs()->in(0).fpu_reg();

  // X lane is the even D half, Y lane the odd D half of the input.
  const DRegister dvalue0 = EvenDRegisterOf(value);
  const DRegister dvalue1 = OddDRegisterOf(value);

  const QRegister result = locs()->out(0).fpu_reg();

  // An unboxed double result lives in the even D half of its Q register.
  const DRegister dresult0 = EvenDRegisterOf(result);

  switch (op_kind()) {
    case MethodRecognizer::kFloat64x2GetX:
      __ vmovd(dresult0, dvalue0);
      break;
    case MethodRecognizer::kFloat64x2GetY:
      __ vmovd(dresult0, dvalue1);
      break;
    default: UNREACHABLE();
  }
}
4380 4380
4381 4381
4382 LocationSummary* Float64x2ZeroInstr::MakeLocationSummary(bool opt) const { 4382 LocationSummary* Float64x2ZeroInstr::MakeLocationSummary(bool opt) const {
4383 const intptr_t kNumInputs = 0; 4383 const intptr_t kNumInputs = 0;
4384 const intptr_t kNumTemps = 0; 4384 const intptr_t kNumTemps = 0;
4385 LocationSummary* summary = 4385 LocationSummary* summary =
4386 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4386 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4387 summary->set_out(0, Location::RequiresFpuRegister()); 4387 summary->set_out(0, Location::RequiresFpuRegister());
4388 return summary; 4388 return summary;
4389 } 4389 }
4390 4390
4391 4391
// Produces an all-zero Float64x2 by xor-ing the output register with itself.
void Float64x2ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister q = locs()->out(0).fpu_reg();
  __ veorq(q, q, q);
}
4396 4396
4397 4397
4398 LocationSummary* Float64x2SplatInstr::MakeLocationSummary(bool opt) const { 4398 LocationSummary* Float64x2SplatInstr::MakeLocationSummary(bool opt) const {
4399 const intptr_t kNumInputs = 1; 4399 const intptr_t kNumInputs = 1;
4400 const intptr_t kNumTemps = 0; 4400 const intptr_t kNumTemps = 0;
4401 LocationSummary* summary = 4401 LocationSummary* summary =
4402 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4402 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4403 summary->set_in(0, Location::RequiresFpuRegister()); 4403 summary->set_in(0, Location::RequiresFpuRegister());
4404 summary->set_out(0, Location::RequiresFpuRegister()); 4404 summary->set_out(0, Location::RequiresFpuRegister());
4405 return summary; 4405 return summary;
4406 } 4406 }
4407 4407
4408 4408
// Duplicates an unboxed double into both lanes of a Float64x2.
void Float64x2SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister value = locs()->in(0).fpu_reg();

  // The input double lives in the even D half of its Q register.
  const DRegister dvalue = EvenDRegisterOf(value);

  const QRegister result = locs()->out(0).fpu_reg();

  const DRegister dresult0 = EvenDRegisterOf(result);
  const DRegister dresult1 = OddDRegisterOf(result);

  // Splat across all lanes.
  __ vmovd(dresult0, dvalue);
  __ vmovd(dresult1, dvalue);
}
4423 4423
4424 4424
4425 LocationSummary* Float64x2ConstructorInstr::MakeLocationSummary( 4425 LocationSummary* Float64x2ConstructorInstr::MakeLocationSummary(
4426 bool opt) const { 4426 bool opt) const {
4427 const intptr_t kNumInputs = 2; 4427 const intptr_t kNumInputs = 2;
4428 const intptr_t kNumTemps = 0; 4428 const intptr_t kNumTemps = 0;
4429 LocationSummary* summary = 4429 LocationSummary* summary =
4430 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4430 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4431 summary->set_in(0, Location::RequiresFpuRegister()); 4431 summary->set_in(0, Location::RequiresFpuRegister());
4432 summary->set_in(1, Location::RequiresFpuRegister()); 4432 summary->set_in(1, Location::RequiresFpuRegister());
4433 summary->set_out(0, Location::RequiresFpuRegister()); 4433 summary->set_out(0, Location::RequiresFpuRegister());
4434 return summary; 4434 return summary;
4435 } 4435 }
4436 4436
4437 4437
// Packs two unboxed doubles into the X (even D) and Y (odd D) lanes of a
// Float64x2.
void Float64x2ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister q0 = locs()->in(0).fpu_reg();
  const QRegister q1 = locs()->in(1).fpu_reg();
  const QRegister r = locs()->out(0).fpu_reg();

  // Each input double occupies the even D half of its Q register.
  const DRegister d0 = EvenDRegisterOf(q0);
  const DRegister d1 = EvenDRegisterOf(q1);

  const DRegister dr0 = EvenDRegisterOf(r);
  const DRegister dr1 = OddDRegisterOf(r);

  // NOTE(review): writing dr0 before reading d1 assumes the allocator never
  // assigns r == q1 (otherwise d1 is clobbered first) -- confirm.
  __ vmovd(dr0, d0);
  __ vmovd(dr1, d1);
}
4452 4452
4453 4453
4454 LocationSummary* Float64x2ToFloat32x4Instr::MakeLocationSummary( 4454 LocationSummary* Float64x2ToFloat32x4Instr::MakeLocationSummary(
4455 bool opt) const { 4455 bool opt) const {
4456 const intptr_t kNumInputs = 1; 4456 const intptr_t kNumInputs = 1;
4457 const intptr_t kNumTemps = 0; 4457 const intptr_t kNumTemps = 0;
4458 LocationSummary* summary = 4458 LocationSummary* summary =
4459 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4459 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4460 summary->set_in(0, Location::RequiresFpuRegister()); 4460 summary->set_in(0, Location::RequiresFpuRegister());
4461 // Low (< 7) Q registers are needed for the vcvtsd instruction. 4461 // Low (< 7) Q registers are needed for the vcvtsd instruction.
4462 summary->set_out(0, Location::FpuRegisterLocation(Q6)); 4462 summary->set_out(0, Location::FpuRegisterLocation(Q6));
4463 return summary; 4463 return summary;
4464 } 4464 }
4465 4465
4466 4466
// Narrows a Float64x2 to a Float32x4: X and Y become the low two float
// lanes; the high two lanes are zeroed.
void Float64x2ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister q = locs()->in(0).fpu_reg();
  const QRegister r = locs()->out(0).fpu_reg();

  const DRegister dq0 = EvenDRegisterOf(q);
  const DRegister dq1 = OddDRegisterOf(q);

  const DRegister dr0 = EvenDRegisterOf(r);

  // Zero register.
  __ veorq(r, r, r);
  // Set X lane.
  __ vcvtsd(EvenSRegisterOf(dr0), dq0);
  // Set Y lane.
  __ vcvtsd(OddSRegisterOf(dr0), dq1);
}
4483 4483
4484 4484
4485 LocationSummary* Float32x4ToFloat64x2Instr::MakeLocationSummary( 4485 LocationSummary* Float32x4ToFloat64x2Instr::MakeLocationSummary(
4486 bool opt) const { 4486 bool opt) const {
4487 const intptr_t kNumInputs = 1; 4487 const intptr_t kNumInputs = 1;
4488 const intptr_t kNumTemps = 0; 4488 const intptr_t kNumTemps = 0;
4489 LocationSummary* summary = 4489 LocationSummary* summary =
4490 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4490 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4491 summary->set_in(0, Location::RequiresFpuRegister()); 4491 summary->set_in(0, Location::RequiresFpuRegister());
4492 // Low (< 7) Q registers are needed for the vcvtsd instruction. 4492 // Low (< 7) Q registers are needed for the vcvtsd instruction.
4493 summary->set_out(0, Location::FpuRegisterLocation(Q6)); 4493 summary->set_out(0, Location::FpuRegisterLocation(Q6));
4494 return summary; 4494 return summary;
4495 } 4495 }
4496 4496
4497 4497
// Widens the low two float lanes of a Float32x4 into the two double lanes
// of a Float64x2 (the Z and W input lanes are dropped).
void Float32x4ToFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const QRegister q = locs()->in(0).fpu_reg();
  const QRegister r = locs()->out(0).fpu_reg();

  // X and Y float lanes are the S views of the input's even D half.
  const DRegister dq0 = EvenDRegisterOf(q);

  const DRegister dr0 = EvenDRegisterOf(r);
  const DRegister dr1 = OddDRegisterOf(r);

  // Set X.
  __ vcvtds(dr0, EvenSRegisterOf(dq0));
  // Set Y.
  __ vcvtds(dr1, OddSRegisterOf(dq0));
}
4512 4512
4513 4513
4514 LocationSummary* Float64x2ZeroArgInstr::MakeLocationSummary(bool opt) const { 4514 LocationSummary* Float64x2ZeroArgInstr::MakeLocationSummary(bool opt) const {
4515 const intptr_t kNumInputs = 1; 4515 const intptr_t kNumInputs = 1;
4516 const intptr_t kNumTemps = 0; 4516 const intptr_t kNumTemps = 0;
4517 LocationSummary* summary = 4517 LocationSummary* summary =
4518 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4518 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4519 4519
4520 if (representation() == kTagged) { 4520 if (representation() == kTagged) {
4521 ASSERT(op_kind() == MethodRecognizer::kFloat64x2GetSignMask); 4521 ASSERT(op_kind() == MethodRecognizer::kFloat64x2GetSignMask);
4522 // Grabbing the S components means we need a low (< 7) Q. 4522 // Grabbing the S components means we need a low (< 7) Q.
4523 summary->set_in(0, Location::FpuRegisterLocation(Q6)); 4523 summary->set_in(0, Location::FpuRegisterLocation(Q6));
4524 summary->set_out(0, Location::RequiresRegister()); 4524 summary->set_out(0, Location::RequiresRegister());
4525 summary->AddTemp(Location::RequiresRegister()); 4525 summary->AddTemp(Location::RequiresRegister());
4526 } else { 4526 } else {
4527 summary->set_in(0, Location::RequiresFpuRegister()); 4527 summary->set_in(0, Location::RequiresFpuRegister());
4528 summary->set_out(0, Location::RequiresFpuRegister()); 4528 summary->set_out(0, Location::RequiresFpuRegister());
4529 } 4529 }
4530 return summary; 4530 return summary;
4531 } 4531 }
4532 4532
4533 4533
4534 void Float64x2ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4534 void Float64x2ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4535 QRegister q = locs()->in(0).fpu_reg(); 4535 const QRegister q = locs()->in(0).fpu_reg();
4536 4536
4537 if ((op_kind() == MethodRecognizer::kFloat64x2GetSignMask)) { 4537 if ((op_kind() == MethodRecognizer::kFloat64x2GetSignMask)) {
4538 DRegister dvalue0 = EvenDRegisterOf(q); 4538 const DRegister dvalue0 = EvenDRegisterOf(q);
4539 DRegister dvalue1 = OddDRegisterOf(q); 4539 const DRegister dvalue1 = OddDRegisterOf(q);
4540 4540
4541 Register out = locs()->out(0).reg(); 4541 const Register out = locs()->out(0).reg();
4542 Register temp = locs()->temp(0).reg(); 4542 const Register temp = locs()->temp(0).reg();
4543 4543
4544 // Upper 32-bits of X lane. 4544 // Upper 32-bits of X lane.
4545 __ vmovrs(out, OddSRegisterOf(dvalue0)); 4545 __ vmovrs(out, OddSRegisterOf(dvalue0));
4546 __ Lsr(out, out, 31); 4546 __ Lsr(out, out, 31);
4547 // Upper 32-bits of Y lane. 4547 // Upper 32-bits of Y lane.
4548 __ vmovrs(temp, OddSRegisterOf(dvalue1)); 4548 __ vmovrs(temp, OddSRegisterOf(dvalue1));
4549 __ Lsr(temp, temp, 31); 4549 __ Lsr(temp, temp, 31);
4550 __ orr(out, out, ShifterOperand(temp, LSL, 1)); 4550 __ orr(out, out, ShifterOperand(temp, LSL, 1));
4551 // Tag. 4551 // Tag.
4552 __ SmiTag(out); 4552 __ SmiTag(out);
4553 return; 4553 return;
4554 } 4554 }
4555 ASSERT(representation() == kUnboxedFloat64x2); 4555 ASSERT(representation() == kUnboxedFloat64x2);
4556 QRegister r = locs()->out(0).fpu_reg(); 4556 const QRegister r = locs()->out(0).fpu_reg();
4557 4557
4558 DRegister dvalue0 = EvenDRegisterOf(q); 4558 const DRegister dvalue0 = EvenDRegisterOf(q);
4559 DRegister dvalue1 = OddDRegisterOf(q); 4559 const DRegister dvalue1 = OddDRegisterOf(q);
4560 DRegister dresult0 = EvenDRegisterOf(r); 4560 const DRegister dresult0 = EvenDRegisterOf(r);
4561 DRegister dresult1 = OddDRegisterOf(r); 4561 const DRegister dresult1 = OddDRegisterOf(r);
4562 4562
4563 switch (op_kind()) { 4563 switch (op_kind()) {
4564 case MethodRecognizer::kFloat64x2Negate: 4564 case MethodRecognizer::kFloat64x2Negate:
4565 __ vnegd(dresult0, dvalue0); 4565 __ vnegd(dresult0, dvalue0);
4566 __ vnegd(dresult1, dvalue1); 4566 __ vnegd(dresult1, dvalue1);
4567 break; 4567 break;
4568 case MethodRecognizer::kFloat64x2Abs: 4568 case MethodRecognizer::kFloat64x2Abs:
4569 __ vabsd(dresult0, dvalue0); 4569 __ vabsd(dresult0, dvalue0);
4570 __ vabsd(dresult1, dvalue1); 4570 __ vabsd(dresult1, dvalue1);
4571 break; 4571 break;
(...skipping 12 matching lines...) Expand all
4584 LocationSummary* summary = 4584 LocationSummary* summary =
4585 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4585 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4586 summary->set_in(0, Location::RequiresFpuRegister()); 4586 summary->set_in(0, Location::RequiresFpuRegister());
4587 summary->set_in(1, Location::RequiresFpuRegister()); 4587 summary->set_in(1, Location::RequiresFpuRegister());
4588 summary->set_out(0, Location::SameAsFirstInput()); 4588 summary->set_out(0, Location::SameAsFirstInput());
4589 return summary; 4589 return summary;
4590 } 4590 }
4591 4591
4592 4592
4593 void Float64x2OneArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4593 void Float64x2OneArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4594 QRegister left = locs()->in(0).fpu_reg(); 4594 const QRegister left = locs()->in(0).fpu_reg();
4595 DRegister left0 = EvenDRegisterOf(left); 4595 const DRegister left0 = EvenDRegisterOf(left);
4596 DRegister left1 = OddDRegisterOf(left); 4596 const DRegister left1 = OddDRegisterOf(left);
4597 QRegister right = locs()->in(1).fpu_reg(); 4597 const QRegister right = locs()->in(1).fpu_reg();
4598 DRegister right0 = EvenDRegisterOf(right); 4598 const DRegister right0 = EvenDRegisterOf(right);
4599 DRegister right1 = OddDRegisterOf(right); 4599 const DRegister right1 = OddDRegisterOf(right);
4600 QRegister out = locs()->out(0).fpu_reg(); 4600 const QRegister out = locs()->out(0).fpu_reg();
4601 ASSERT(left == out); 4601 ASSERT(left == out);
4602 4602
4603 switch (op_kind()) { 4603 switch (op_kind()) {
4604 case MethodRecognizer::kFloat64x2Scale: 4604 case MethodRecognizer::kFloat64x2Scale:
4605 __ vmuld(left0, left0, right0); 4605 __ vmuld(left0, left0, right0);
4606 __ vmuld(left1, left1, right0); 4606 __ vmuld(left1, left1, right0);
4607 break; 4607 break;
4608 case MethodRecognizer::kFloat64x2WithX: 4608 case MethodRecognizer::kFloat64x2WithX:
4609 __ vmovd(left0, right0); 4609 __ vmovd(left0, right0);
4610 break; 4610 break;
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
4661 summary->set_in(2, Location::RequiresRegister()); 4661 summary->set_in(2, Location::RequiresRegister());
4662 summary->set_in(3, Location::RequiresRegister()); 4662 summary->set_in(3, Location::RequiresRegister());
4663 summary->set_temp(0, Location::RequiresRegister()); 4663 summary->set_temp(0, Location::RequiresRegister());
4664 // Low (< 7) Q register needed for the vmovsr instruction. 4664 // Low (< 7) Q register needed for the vmovsr instruction.
4665 summary->set_out(0, Location::FpuRegisterLocation(Q6)); 4665 summary->set_out(0, Location::FpuRegisterLocation(Q6));
4666 return summary; 4666 return summary;
4667 } 4667 }
4668 4668
4669 4669
4670 void Int32x4BoolConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4670 void Int32x4BoolConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4671 Register v0 = locs()->in(0).reg(); 4671 const Register v0 = locs()->in(0).reg();
4672 Register v1 = locs()->in(1).reg(); 4672 const Register v1 = locs()->in(1).reg();
4673 Register v2 = locs()->in(2).reg(); 4673 const Register v2 = locs()->in(2).reg();
4674 Register v3 = locs()->in(3).reg(); 4674 const Register v3 = locs()->in(3).reg();
4675 Register temp = locs()->temp(0).reg(); 4675 const Register temp = locs()->temp(0).reg();
4676 QRegister result = locs()->out(0).fpu_reg(); 4676 const QRegister result = locs()->out(0).fpu_reg();
4677 DRegister dresult0 = EvenDRegisterOf(result); 4677 const DRegister dresult0 = EvenDRegisterOf(result);
4678 DRegister dresult1 = OddDRegisterOf(result); 4678 const DRegister dresult1 = OddDRegisterOf(result);
4679 SRegister sresult0 = EvenSRegisterOf(dresult0); 4679 const SRegister sresult0 = EvenSRegisterOf(dresult0);
4680 SRegister sresult1 = OddSRegisterOf(dresult0); 4680 const SRegister sresult1 = OddSRegisterOf(dresult0);
4681 SRegister sresult2 = EvenSRegisterOf(dresult1); 4681 const SRegister sresult2 = EvenSRegisterOf(dresult1);
4682 SRegister sresult3 = OddSRegisterOf(dresult1); 4682 const SRegister sresult3 = OddSRegisterOf(dresult1);
4683 4683
4684 __ veorq(result, result, result); 4684 __ veorq(result, result, result);
4685 __ LoadImmediate(temp, 0xffffffff); 4685 __ LoadImmediate(temp, 0xffffffff);
4686 4686
4687 __ CompareObject(v0, Bool::True()); 4687 __ CompareObject(v0, Bool::True());
4688 __ vmovsr(sresult0, temp, EQ); 4688 __ vmovsr(sresult0, temp, EQ);
4689 4689
4690 __ CompareObject(v1, Bool::True()); 4690 __ CompareObject(v1, Bool::True());
4691 __ vmovsr(sresult1, temp, EQ); 4691 __ vmovsr(sresult1, temp, EQ);
4692 4692
(...skipping 11 matching lines...) Expand all
4704 LocationSummary* summary = 4704 LocationSummary* summary =
4705 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4705 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4706 // Low (< 7) Q registers are needed for the vmovrs instruction. 4706 // Low (< 7) Q registers are needed for the vmovrs instruction.
4707 summary->set_in(0, Location::FpuRegisterLocation(Q6)); 4707 summary->set_in(0, Location::FpuRegisterLocation(Q6));
4708 summary->set_out(0, Location::RequiresRegister()); 4708 summary->set_out(0, Location::RequiresRegister());
4709 return summary; 4709 return summary;
4710 } 4710 }
4711 4711
4712 4712
4713 void Int32x4GetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4713 void Int32x4GetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4714 QRegister value = locs()->in(0).fpu_reg(); 4714 const QRegister value = locs()->in(0).fpu_reg();
4715 Register result = locs()->out(0).reg(); 4715 const Register result = locs()->out(0).reg();
4716 4716
4717 DRegister dvalue0 = EvenDRegisterOf(value); 4717 const DRegister dvalue0 = EvenDRegisterOf(value);
4718 DRegister dvalue1 = OddDRegisterOf(value); 4718 const DRegister dvalue1 = OddDRegisterOf(value);
4719 SRegister svalue0 = EvenSRegisterOf(dvalue0); 4719 const SRegister svalue0 = EvenSRegisterOf(dvalue0);
4720 SRegister svalue1 = OddSRegisterOf(dvalue0); 4720 const SRegister svalue1 = OddSRegisterOf(dvalue0);
4721 SRegister svalue2 = EvenSRegisterOf(dvalue1); 4721 const SRegister svalue2 = EvenSRegisterOf(dvalue1);
4722 SRegister svalue3 = OddSRegisterOf(dvalue1); 4722 const SRegister svalue3 = OddSRegisterOf(dvalue1);
4723 4723
4724 switch (op_kind()) { 4724 switch (op_kind()) {
4725 case MethodRecognizer::kInt32x4GetFlagX: 4725 case MethodRecognizer::kInt32x4GetFlagX:
4726 __ vmovrs(result, svalue0); 4726 __ vmovrs(result, svalue0);
4727 break; 4727 break;
4728 case MethodRecognizer::kInt32x4GetFlagY: 4728 case MethodRecognizer::kInt32x4GetFlagY:
4729 __ vmovrs(result, svalue1); 4729 __ vmovrs(result, svalue1);
4730 break; 4730 break;
4731 case MethodRecognizer::kInt32x4GetFlagZ: 4731 case MethodRecognizer::kInt32x4GetFlagZ:
4732 __ vmovrs(result, svalue2); 4732 __ vmovrs(result, svalue2);
(...skipping 18 matching lines...) Expand all
4751 summary->set_in(0, Location::RequiresFpuRegister()); 4751 summary->set_in(0, Location::RequiresFpuRegister());
4752 summary->set_in(1, Location::RequiresFpuRegister()); 4752 summary->set_in(1, Location::RequiresFpuRegister());
4753 summary->set_in(2, Location::RequiresFpuRegister()); 4753 summary->set_in(2, Location::RequiresFpuRegister());
4754 summary->set_temp(0, Location::RequiresFpuRegister()); 4754 summary->set_temp(0, Location::RequiresFpuRegister());
4755 summary->set_out(0, Location::RequiresFpuRegister()); 4755 summary->set_out(0, Location::RequiresFpuRegister());
4756 return summary; 4756 return summary;
4757 } 4757 }
4758 4758
4759 4759
4760 void Int32x4SelectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4760 void Int32x4SelectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4761 QRegister mask = locs()->in(0).fpu_reg(); 4761 const QRegister mask = locs()->in(0).fpu_reg();
4762 QRegister trueValue = locs()->in(1).fpu_reg(); 4762 const QRegister trueValue = locs()->in(1).fpu_reg();
4763 QRegister falseValue = locs()->in(2).fpu_reg(); 4763 const QRegister falseValue = locs()->in(2).fpu_reg();
4764 QRegister out = locs()->out(0).fpu_reg(); 4764 const QRegister out = locs()->out(0).fpu_reg();
4765 QRegister temp = locs()->temp(0).fpu_reg(); 4765 const QRegister temp = locs()->temp(0).fpu_reg();
4766 4766
4767 // Copy mask. 4767 // Copy mask.
4768 __ vmovq(temp, mask); 4768 __ vmovq(temp, mask);
4769 // Invert it. 4769 // Invert it.
4770 __ vmvnq(temp, temp); 4770 __ vmvnq(temp, temp);
4771 // mask = mask & trueValue. 4771 // mask = mask & trueValue.
4772 __ vandq(mask, mask, trueValue); 4772 __ vandq(mask, mask, trueValue);
4773 // temp = temp & falseValue. 4773 // temp = temp & falseValue.
4774 __ vandq(temp, temp, falseValue); 4774 __ vandq(temp, temp, falseValue);
4775 // out = mask | temp. 4775 // out = mask | temp.
4776 __ vorrq(out, mask, temp); 4776 __ vorrq(out, mask, temp);
4777 } 4777 }
4778 4778
4779 4779
4780 LocationSummary* Int32x4SetFlagInstr::MakeLocationSummary(bool opt) const { 4780 LocationSummary* Int32x4SetFlagInstr::MakeLocationSummary(bool opt) const {
4781 const intptr_t kNumInputs = 2; 4781 const intptr_t kNumInputs = 2;
4782 const intptr_t kNumTemps = 0; 4782 const intptr_t kNumTemps = 0;
4783 LocationSummary* summary = 4783 LocationSummary* summary =
4784 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4784 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4785 summary->set_in(0, Location::RequiresFpuRegister()); 4785 summary->set_in(0, Location::RequiresFpuRegister());
4786 summary->set_in(1, Location::RequiresRegister()); 4786 summary->set_in(1, Location::RequiresRegister());
4787 // Low (< 7) Q register needed for the vmovsr instruction. 4787 // Low (< 7) Q register needed for the vmovsr instruction.
4788 summary->set_out(0, Location::FpuRegisterLocation(Q6)); 4788 summary->set_out(0, Location::FpuRegisterLocation(Q6));
4789 return summary; 4789 return summary;
4790 } 4790 }
4791 4791
4792 4792
4793 void Int32x4SetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4793 void Int32x4SetFlagInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4794 QRegister mask = locs()->in(0).fpu_reg(); 4794 const QRegister mask = locs()->in(0).fpu_reg();
4795 Register flag = locs()->in(1).reg(); 4795 const Register flag = locs()->in(1).reg();
4796 QRegister result = locs()->out(0).fpu_reg(); 4796 const QRegister result = locs()->out(0).fpu_reg();
4797 4797
4798 DRegister dresult0 = EvenDRegisterOf(result); 4798 const DRegister dresult0 = EvenDRegisterOf(result);
4799 DRegister dresult1 = OddDRegisterOf(result); 4799 const DRegister dresult1 = OddDRegisterOf(result);
4800 SRegister sresult0 = EvenSRegisterOf(dresult0); 4800 const SRegister sresult0 = EvenSRegisterOf(dresult0);
4801 SRegister sresult1 = OddSRegisterOf(dresult0); 4801 const SRegister sresult1 = OddSRegisterOf(dresult0);
4802 SRegister sresult2 = EvenSRegisterOf(dresult1); 4802 const SRegister sresult2 = EvenSRegisterOf(dresult1);
4803 SRegister sresult3 = OddSRegisterOf(dresult1); 4803 const SRegister sresult3 = OddSRegisterOf(dresult1);
4804 4804
4805 if (result != mask) { 4805 if (result != mask) {
4806 __ vmovq(result, mask); 4806 __ vmovq(result, mask);
4807 } 4807 }
4808 4808
4809 __ CompareObject(flag, Bool::True()); 4809 __ CompareObject(flag, Bool::True());
4810 __ LoadImmediate(TMP, 0xffffffff, EQ); 4810 __ LoadImmediate(TMP, 0xffffffff, EQ);
4811 __ LoadImmediate(TMP, 0, NE); 4811 __ LoadImmediate(TMP, 0, NE);
4812 switch (op_kind()) { 4812 switch (op_kind()) {
4813 case MethodRecognizer::kInt32x4WithFlagX: 4813 case MethodRecognizer::kInt32x4WithFlagX:
(...skipping 18 matching lines...) Expand all
4832 const intptr_t kNumTemps = 0; 4832 const intptr_t kNumTemps = 0;
4833 LocationSummary* summary = 4833 LocationSummary* summary =
4834 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4834 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4835 summary->set_in(0, Location::RequiresFpuRegister()); 4835 summary->set_in(0, Location::RequiresFpuRegister());
4836 summary->set_out(0, Location::RequiresFpuRegister()); 4836 summary->set_out(0, Location::RequiresFpuRegister());
4837 return summary; 4837 return summary;
4838 } 4838 }
4839 4839
4840 4840
4841 void Int32x4ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 4841 void Int32x4ToFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4842 QRegister value = locs()->in(0).fpu_reg(); 4842 const QRegister value = locs()->in(0).fpu_reg();
4843 QRegister result = locs()->out(0).fpu_reg(); 4843 const QRegister result = locs()->out(0).fpu_reg();
4844 4844
4845 if (value != result) { 4845 if (value != result) {
4846 __ vmovq(result, value); 4846 __ vmovq(result, value);
4847 } 4847 }
4848 } 4848 }
4849 4849
4850 4850
4851 LocationSummary* BinaryInt32x4OpInstr::MakeLocationSummary(bool opt) const { 4851 LocationSummary* BinaryInt32x4OpInstr::MakeLocationSummary(bool opt) const {
4852 const intptr_t kNumInputs = 2; 4852 const intptr_t kNumInputs = 2;
4853 const intptr_t kNumTemps = 0; 4853 const intptr_t kNumTemps = 0;
4854 LocationSummary* summary = 4854 LocationSummary* summary =
4855 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4855 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4856 summary->set_in(0, Location::RequiresFpuRegister()); 4856 summary->set_in(0, Location::RequiresFpuRegister());
4857 summary->set_in(1, Location::RequiresFpuRegister()); 4857 summary->set_in(1, Location::RequiresFpuRegister());
4858 summary->set_out(0, Location::RequiresFpuRegister()); 4858 summary->set_out(0, Location::RequiresFpuRegister());
4859 return summary; 4859 return summary;
4860 } 4860 }
4861 4861
4862 4862
4863 void BinaryInt32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4863 void BinaryInt32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4864 QRegister left = locs()->in(0).fpu_reg(); 4864 const QRegister left = locs()->in(0).fpu_reg();
4865 QRegister right = locs()->in(1).fpu_reg(); 4865 const QRegister right = locs()->in(1).fpu_reg();
4866 QRegister result = locs()->out(0).fpu_reg(); 4866 const QRegister result = locs()->out(0).fpu_reg();
4867 switch (op_kind()) { 4867 switch (op_kind()) {
4868 case Token::kBIT_AND: { 4868 case Token::kBIT_AND: {
4869 __ vandq(result, left, right); 4869 __ vandq(result, left, right);
4870 break; 4870 break;
4871 } 4871 }
4872 case Token::kBIT_OR: { 4872 case Token::kBIT_OR: {
4873 __ vorrq(result, left, right); 4873 __ vorrq(result, left, right);
4874 break; 4874 break;
4875 } 4875 }
4876 case Token::kBIT_XOR: { 4876 case Token::kBIT_XOR: {
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
4911 LocationSummary* summary = 4911 LocationSummary* summary =
4912 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4912 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4913 summary->set_in(0, Location::RequiresFpuRegister()); 4913 summary->set_in(0, Location::RequiresFpuRegister());
4914 summary->set_out(0, Location::RequiresFpuRegister()); 4914 summary->set_out(0, Location::RequiresFpuRegister());
4915 return summary; 4915 return summary;
4916 } 4916 }
4917 4917
4918 4918
4919 void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4919 void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4920 if (kind() == MathUnaryInstr::kSqrt) { 4920 if (kind() == MathUnaryInstr::kSqrt) {
4921 DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg()); 4921 const DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
4922 DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg()); 4922 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4923 __ vsqrtd(result, val); 4923 __ vsqrtd(result, val);
4924 } else if (kind() == MathUnaryInstr::kDoubleSquare) { 4924 } else if (kind() == MathUnaryInstr::kDoubleSquare) {
4925 DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg()); 4925 const DRegister val = EvenDRegisterOf(locs()->in(0).fpu_reg());
4926 DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg()); 4926 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4927 __ vmuld(result, val, val); 4927 __ vmuld(result, val, val);
4928 } else { 4928 } else {
4929 ASSERT((kind() == MathUnaryInstr::kSin) || 4929 ASSERT((kind() == MathUnaryInstr::kSin) ||
4930 (kind() == MathUnaryInstr::kCos)); 4930 (kind() == MathUnaryInstr::kCos));
4931 if (TargetCPUFeatures::hardfp_supported()) { 4931 if (TargetCPUFeatures::hardfp_supported()) {
4932 __ CallRuntime(TargetFunction(), InputCount()); 4932 __ CallRuntime(TargetFunction(), InputCount());
4933 } else { 4933 } else {
4934 // If we aren't doing "hardfp", then we have to move the double arguments 4934 // If we aren't doing "hardfp", then we have to move the double arguments
4935 // to the integer registers, and take the results from the integer 4935 // to the integer registers, and take the results from the integer
4936 // registers. 4936 // registers.
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
4969 return summary; 4969 return summary;
4970 } 4970 }
4971 4971
4972 4972
4973 void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4973 void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4974 ASSERT((op_kind() == MethodRecognizer::kMathMin) || 4974 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
4975 (op_kind() == MethodRecognizer::kMathMax)); 4975 (op_kind() == MethodRecognizer::kMathMax));
4976 const intptr_t is_min = (op_kind() == MethodRecognizer::kMathMin); 4976 const intptr_t is_min = (op_kind() == MethodRecognizer::kMathMin);
4977 if (result_cid() == kDoubleCid) { 4977 if (result_cid() == kDoubleCid) {
4978 Label done, returns_nan, are_equal; 4978 Label done, returns_nan, are_equal;
4979 DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg()); 4979 const DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg());
4980 DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg()); 4980 const DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg());
4981 DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg()); 4981 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4982 Register temp = locs()->temp(0).reg(); 4982 const Register temp = locs()->temp(0).reg();
4983 __ vcmpd(left, right); 4983 __ vcmpd(left, right);
4984 __ vmstat(); 4984 __ vmstat();
4985 __ b(&returns_nan, VS); 4985 __ b(&returns_nan, VS);
4986 __ b(&are_equal, EQ); 4986 __ b(&are_equal, EQ);
4987 const Condition neg_double_condition = 4987 const Condition neg_double_condition =
4988 is_min ? TokenKindToDoubleCondition(Token::kGTE) 4988 is_min ? TokenKindToDoubleCondition(Token::kGTE)
4989 : TokenKindToDoubleCondition(Token::kLTE); 4989 : TokenKindToDoubleCondition(Token::kLTE);
4990 ASSERT(left == result); 4990 ASSERT(left == result);
4991 __ vmovd(result, right, neg_double_condition); 4991 __ vmovd(result, right, neg_double_condition);
4992 __ b(&done); 4992 __ b(&done);
(...skipping 16 matching lines...) Expand all
5009 __ vmovd(result, right, GE); 5009 __ vmovd(result, right, GE);
5010 } else { 5010 } else {
5011 __ vmovd(result, right, LT); 5011 __ vmovd(result, right, LT);
5012 ASSERT(left == result); 5012 ASSERT(left == result);
5013 } 5013 }
5014 __ Bind(&done); 5014 __ Bind(&done);
5015 return; 5015 return;
5016 } 5016 }
5017 5017
5018 ASSERT(result_cid() == kSmiCid); 5018 ASSERT(result_cid() == kSmiCid);
5019 Register left = locs()->in(0).reg(); 5019 const Register left = locs()->in(0).reg();
5020 Register right = locs()->in(1).reg(); 5020 const Register right = locs()->in(1).reg();
5021 Register result = locs()->out(0).reg(); 5021 const Register result = locs()->out(0).reg();
5022 __ cmp(left, ShifterOperand(right)); 5022 __ cmp(left, ShifterOperand(right));
5023 ASSERT(result == left); 5023 ASSERT(result == left);
5024 if (is_min) { 5024 if (is_min) {
5025 __ mov(result, ShifterOperand(right), GT); 5025 __ mov(result, ShifterOperand(right), GT);
5026 } else { 5026 } else {
5027 __ mov(result, ShifterOperand(right), LT); 5027 __ mov(result, ShifterOperand(right), LT);
5028 } 5028 }
5029 } 5029 }
5030 5030
5031 5031
5032 LocationSummary* UnarySmiOpInstr::MakeLocationSummary(bool opt) const { 5032 LocationSummary* UnarySmiOpInstr::MakeLocationSummary(bool opt) const {
5033 const intptr_t kNumInputs = 1; 5033 const intptr_t kNumInputs = 1;
5034 const intptr_t kNumTemps = 0; 5034 const intptr_t kNumTemps = 0;
5035 LocationSummary* summary = 5035 LocationSummary* summary =
5036 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 5036 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
5037 summary->set_in(0, Location::RequiresRegister()); 5037 summary->set_in(0, Location::RequiresRegister());
5038 // We make use of 3-operand instructions by not requiring result register 5038 // We make use of 3-operand instructions by not requiring result register
5039 // to be identical to first input register as on Intel. 5039 // to be identical to first input register as on Intel.
5040 summary->set_out(0, Location::RequiresRegister()); 5040 summary->set_out(0, Location::RequiresRegister());
5041 return summary; 5041 return summary;
5042 } 5042 }
5043 5043
5044 5044
5045 void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5045 void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5046 Register value = locs()->in(0).reg(); 5046 const Register value = locs()->in(0).reg();
5047 Register result = locs()->out(0).reg(); 5047 const Register result = locs()->out(0).reg();
5048 switch (op_kind()) { 5048 switch (op_kind()) {
5049 case Token::kNEGATE: { 5049 case Token::kNEGATE: {
5050 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp); 5050 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
5051 __ rsbs(result, value, ShifterOperand(0)); 5051 __ rsbs(result, value, ShifterOperand(0));
5052 __ b(deopt, VS); 5052 __ b(deopt, VS);
5053 break; 5053 break;
5054 } 5054 }
5055 case Token::kBIT_NOT: 5055 case Token::kBIT_NOT:
5056 __ mvn(result, ShifterOperand(value)); 5056 __ mvn(result, ShifterOperand(value));
5057 // Remove inverted smi-tag. 5057 // Remove inverted smi-tag.
(...skipping 10 matching lines...) Expand all
5068 const intptr_t kNumTemps = 0; 5068 const intptr_t kNumTemps = 0;
5069 LocationSummary* summary = 5069 LocationSummary* summary =
5070 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 5070 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
5071 summary->set_in(0, Location::RequiresFpuRegister()); 5071 summary->set_in(0, Location::RequiresFpuRegister());
5072 summary->set_out(0, Location::RequiresFpuRegister()); 5072 summary->set_out(0, Location::RequiresFpuRegister());
5073 return summary; 5073 return summary;
5074 } 5074 }
5075 5075
5076 5076
5077 void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5077 void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5078 DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg()); 5078 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5079 DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg()); 5079 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5080 __ vnegd(result, value); 5080 __ vnegd(result, value);
5081 } 5081 }
5082 5082
5083 5083
5084 LocationSummary* SmiToDoubleInstr::MakeLocationSummary(bool opt) const { 5084 LocationSummary* SmiToDoubleInstr::MakeLocationSummary(bool opt) const {
5085 const intptr_t kNumInputs = 1; 5085 const intptr_t kNumInputs = 1;
5086 const intptr_t kNumTemps = 0; 5086 const intptr_t kNumTemps = 0;
5087 LocationSummary* result = 5087 LocationSummary* result =
5088 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 5088 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
5089 result->set_in(0, Location::WritableRegister()); 5089 result->set_in(0, Location::WritableRegister());
5090 result->set_out(0, Location::RequiresFpuRegister()); 5090 result->set_out(0, Location::RequiresFpuRegister());
5091 return result; 5091 return result;
5092 } 5092 }
5093 5093
5094 5094
5095 void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5095 void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5096 Register value = locs()->in(0).reg(); 5096 const Register value = locs()->in(0).reg();
5097 DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg()); 5097 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5098 __ SmiUntag(value); 5098 __ SmiUntag(value);
5099 __ vmovsr(STMP, value); 5099 __ vmovsr(STMP, value);
5100 __ vcvtdi(result, STMP); 5100 __ vcvtdi(result, STMP);
5101 } 5101 }
5102 5102
5103 5103
5104 LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(bool opt) const { 5104 LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(bool opt) const {
5105 const intptr_t kNumInputs = 1; 5105 const intptr_t kNumInputs = 1;
5106 const intptr_t kNumTemps = 0; 5106 const intptr_t kNumTemps = 0;
5107 LocationSummary* result = 5107 LocationSummary* result =
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after
5200 LocationSummary* result = 5200 LocationSummary* result =
5201 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 5201 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
5202 // Low (<= Q7) Q registers are needed for the conversion instructions. 5202 // Low (<= Q7) Q registers are needed for the conversion instructions.
5203 result->set_in(0, Location::RequiresFpuRegister()); 5203 result->set_in(0, Location::RequiresFpuRegister());
5204 result->set_out(0, Location::FpuRegisterLocation(Q7)); 5204 result->set_out(0, Location::FpuRegisterLocation(Q7));
5205 return result; 5205 return result;
5206 } 5206 }
5207 5207
5208 5208
5209 void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5209 void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5210 DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg()); 5210 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5211 SRegister result = EvenSRegisterOf(EvenDRegisterOf(locs()->out(0).fpu_reg())); 5211 const SRegister result =
5212 EvenSRegisterOf(EvenDRegisterOf(locs()->out(0).fpu_reg()));
5212 __ vcvtsd(result, value); 5213 __ vcvtsd(result, value);
5213 } 5214 }
5214 5215
5215 5216
5216 LocationSummary* FloatToDoubleInstr::MakeLocationSummary(bool opt) const { 5217 LocationSummary* FloatToDoubleInstr::MakeLocationSummary(bool opt) const {
5217 const intptr_t kNumInputs = 1; 5218 const intptr_t kNumInputs = 1;
5218 const intptr_t kNumTemps = 0; 5219 const intptr_t kNumTemps = 0;
5219 LocationSummary* result = 5220 LocationSummary* result =
5220 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 5221 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
5221 // Low (<= Q7) Q registers are needed for the conversion instructions. 5222 // Low (<= Q7) Q registers are needed for the conversion instructions.
5222 result->set_in(0, Location::FpuRegisterLocation(Q7)); 5223 result->set_in(0, Location::FpuRegisterLocation(Q7));
5223 result->set_out(0, Location::RequiresFpuRegister()); 5224 result->set_out(0, Location::RequiresFpuRegister());
5224 return result; 5225 return result;
5225 } 5226 }
5226 5227
5227 5228
5228 void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5229 void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5229 SRegister value = EvenSRegisterOf(EvenDRegisterOf(locs()->in(0).fpu_reg())); 5230 const SRegister value =
5230 DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg()); 5231 EvenSRegisterOf(EvenDRegisterOf(locs()->in(0).fpu_reg()));
5232 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5231 __ vcvtds(result, value); 5233 __ vcvtds(result, value);
5232 } 5234 }
5233 5235
5234 5236
5235 LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(bool opt) const { 5237 LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(bool opt) const {
5236 ASSERT((InputCount() == 1) || (InputCount() == 2)); 5238 ASSERT((InputCount() == 1) || (InputCount() == 2));
5237 const intptr_t kNumTemps = 0; 5239 const intptr_t kNumTemps = 0;
5238 LocationSummary* result = 5240 LocationSummary* result =
5239 new LocationSummary(InputCount(), kNumTemps, LocationSummary::kCall); 5241 new LocationSummary(InputCount(), kNumTemps, LocationSummary::kCall);
5240 result->set_in(0, Location::FpuRegisterLocation(Q0)); 5242 result->set_in(0, Location::FpuRegisterLocation(Q0));
(...skipping 204 matching lines...) Expand 10 before | Expand all | Expand 10 after
5445 } 5447 }
5446 return summary; 5448 return summary;
5447 } 5449 }
5448 5450
5449 5451
5450 void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5452 void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5451 ASSERT(locs()->in(0).IsPairLocation()); 5453 ASSERT(locs()->in(0).IsPairLocation());
5452 PairLocation* pair = locs()->in(0).AsPairLocation(); 5454 PairLocation* pair = locs()->in(0).AsPairLocation();
5453 Location in_loc = pair->At(index()); 5455 Location in_loc = pair->At(index());
5454 if (representation() == kUnboxedDouble) { 5456 if (representation() == kUnboxedDouble) {
5455 QRegister out = locs()->out(0).fpu_reg(); 5457 const QRegister out = locs()->out(0).fpu_reg();
5456 QRegister in = in_loc.fpu_reg(); 5458 const QRegister in = in_loc.fpu_reg();
5457 __ vmovq(out, in); 5459 __ vmovq(out, in);
5458 } else { 5460 } else {
5459 ASSERT(representation() == kTagged); 5461 ASSERT(representation() == kTagged);
5460 Register out = locs()->out(0).reg(); 5462 const Register out = locs()->out(0).reg();
5461 Register in = in_loc.reg(); 5463 const Register in = in_loc.reg();
5462 __ mov(out, ShifterOperand(in)); 5464 __ mov(out, ShifterOperand(in));
5463 } 5465 }
5464 } 5466 }
5465 5467
5466 5468
5467 LocationSummary* MergedMathInstr::MakeLocationSummary(bool opt) const { 5469 LocationSummary* MergedMathInstr::MakeLocationSummary(bool opt) const {
5468 if (kind() == MergedMathInstr::kTruncDivMod) { 5470 if (kind() == MergedMathInstr::kTruncDivMod) {
5469 const intptr_t kNumInputs = 2; 5471 const intptr_t kNumInputs = 2;
5470 const intptr_t kNumTemps = 2; 5472 const intptr_t kNumTemps = 2;
5471 LocationSummary* summary = 5473 LocationSummary* summary =
(...skipping 11 matching lines...) Expand all
5483 return NULL; 5485 return NULL;
5484 } 5486 }
5485 5487
5486 5488
5487 void MergedMathInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5489 void MergedMathInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5488 Label* deopt = NULL; 5490 Label* deopt = NULL;
5489 if (CanDeoptimize()) { 5491 if (CanDeoptimize()) {
5490 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp); 5492 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
5491 } 5493 }
5492 if (kind() == MergedMathInstr::kTruncDivMod) { 5494 if (kind() == MergedMathInstr::kTruncDivMod) {
5493 Register left = locs()->in(0).reg(); 5495 const Register left = locs()->in(0).reg();
5494 Register right = locs()->in(1).reg(); 5496 const Register right = locs()->in(1).reg();
5495 ASSERT(locs()->out(0).IsPairLocation()); 5497 ASSERT(locs()->out(0).IsPairLocation());
5496 PairLocation* pair = locs()->out(0).AsPairLocation(); 5498 PairLocation* pair = locs()->out(0).AsPairLocation();
5497 Register result_div = pair->At(0).reg(); 5499 const Register result_div = pair->At(0).reg();
5498 Register result_mod = pair->At(1).reg(); 5500 const Register result_mod = pair->At(1).reg();
5499 Range* right_range = InputAt(1)->definition()->range(); 5501 Range* right_range = InputAt(1)->definition()->range();
5500 if ((right_range == NULL) || right_range->Overlaps(0, 0)) { 5502 if ((right_range == NULL) || right_range->Overlaps(0, 0)) {
5501 // Handle divide by zero in runtime. 5503 // Handle divide by zero in runtime.
5502 __ cmp(right, ShifterOperand(0)); 5504 __ cmp(right, ShifterOperand(0));
5503 __ b(deopt, EQ); 5505 __ b(deopt, EQ);
5504 } 5506 }
5505 Register temp = locs()->temp(0).reg(); 5507 const Register temp = locs()->temp(0).reg();
5506 DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg()); 5508 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
5507 5509
5508 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp. 5510 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
5509 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 5511 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
5510 5512
5511 __ IntegerDivide(result_div, temp, IP, dtemp, DTMP); 5513 __ IntegerDivide(result_div, temp, IP, dtemp, DTMP);
5512 5514
5513 // Check the corner case of dividing the 'MIN_SMI' with -1, in which 5515 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
5514 // case we cannot tag the result. 5516 // case we cannot tag the result.
5515 __ CompareImmediate(result_div, 0x40000000); 5517 __ CompareImmediate(result_div, 0x40000000);
5516 __ b(deopt, EQ); 5518 __ b(deopt, EQ);
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after
5622 if (IsNullCheck()) { 5624 if (IsNullCheck()) {
5623 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason); 5625 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason);
5624 __ CompareImmediate(locs()->in(0).reg(), 5626 __ CompareImmediate(locs()->in(0).reg(),
5625 reinterpret_cast<intptr_t>(Object::null())); 5627 reinterpret_cast<intptr_t>(Object::null()));
5626 __ b(deopt, EQ); 5628 __ b(deopt, EQ);
5627 return; 5629 return;
5628 } 5630 }
5629 5631
5630 ASSERT((unary_checks().GetReceiverClassIdAt(0) != kSmiCid) || 5632 ASSERT((unary_checks().GetReceiverClassIdAt(0) != kSmiCid) ||
5631 (unary_checks().NumberOfChecks() > 1)); 5633 (unary_checks().NumberOfChecks() > 1));
5632 Register value = locs()->in(0).reg(); 5634 const Register value = locs()->in(0).reg();
5633 Register temp = locs()->temp(0).reg(); 5635 const Register temp = locs()->temp(0).reg();
5634 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason); 5636 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason);
5635 Label is_ok; 5637 Label is_ok;
5636 intptr_t cix = 0; 5638 intptr_t cix = 0;
5637 if (unary_checks().GetReceiverClassIdAt(cix) == kSmiCid) { 5639 if (unary_checks().GetReceiverClassIdAt(cix) == kSmiCid) {
5638 __ tst(value, ShifterOperand(kSmiTagMask)); 5640 __ tst(value, ShifterOperand(kSmiTagMask));
5639 __ b(&is_ok, EQ); 5641 __ b(&is_ok, EQ);
5640 cix++; // Skip first check. 5642 cix++; // Skip first check.
5641 } else { 5643 } else {
5642 __ tst(value, ShifterOperand(kSmiTagMask)); 5644 __ tst(value, ShifterOperand(kSmiTagMask));
5643 __ b(deopt, EQ); 5645 __ b(deopt, EQ);
(...skipping 17 matching lines...) Expand all
5661 const intptr_t kNumInputs = 1; 5663 const intptr_t kNumInputs = 1;
5662 const intptr_t kNumTemps = 0; 5664 const intptr_t kNumTemps = 0;
5663 LocationSummary* summary = 5665 LocationSummary* summary =
5664 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 5666 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
5665 summary->set_in(0, Location::RequiresRegister()); 5667 summary->set_in(0, Location::RequiresRegister());
5666 return summary; 5668 return summary;
5667 } 5669 }
5668 5670
5669 5671
5670 void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5672 void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5671 Register value = locs()->in(0).reg(); 5673 const Register value = locs()->in(0).reg();
5672 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi); 5674 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
5673 __ tst(value, ShifterOperand(kSmiTagMask)); 5675 __ tst(value, ShifterOperand(kSmiTagMask));
5674 __ b(deopt, NE); 5676 __ b(deopt, NE);
5675 } 5677 }
5676 5678
5677 5679
5678 LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(bool opt) const { 5680 LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(bool opt) const {
5679 const intptr_t kNumInputs = 2; 5681 const intptr_t kNumInputs = 2;
5680 const intptr_t kNumTemps = 0; 5682 const intptr_t kNumTemps = 0;
5681 LocationSummary* locs = 5683 LocationSummary* locs =
(...skipping 15 matching lines...) Expand all
5697 ASSERT((Smi::Cast(length_loc.constant()).Value() <= 5699 ASSERT((Smi::Cast(length_loc.constant()).Value() <=
5698 Smi::Cast(index_loc.constant()).Value()) || 5700 Smi::Cast(index_loc.constant()).Value()) ||
5699 (Smi::Cast(index_loc.constant()).Value() < 0)); 5701 (Smi::Cast(index_loc.constant()).Value() < 0));
5700 // Unconditionally deoptimize for constant bounds checks because they 5702 // Unconditionally deoptimize for constant bounds checks because they
5701 // only occur only when index is out-of-bounds. 5703 // only occur only when index is out-of-bounds.
5702 __ b(deopt); 5704 __ b(deopt);
5703 return; 5705 return;
5704 } 5706 }
5705 5707
5706 if (index_loc.IsConstant()) { 5708 if (index_loc.IsConstant()) {
5707 Register length = length_loc.reg(); 5709 const Register length = length_loc.reg();
5708 const Smi& index = Smi::Cast(index_loc.constant()); 5710 const Smi& index = Smi::Cast(index_loc.constant());
5709 __ CompareImmediate(length, reinterpret_cast<int32_t>(index.raw())); 5711 __ CompareImmediate(length, reinterpret_cast<int32_t>(index.raw()));
5710 __ b(deopt, LS); 5712 __ b(deopt, LS);
5711 } else if (length_loc.IsConstant()) { 5713 } else if (length_loc.IsConstant()) {
5712 const Smi& length = Smi::Cast(length_loc.constant()); 5714 const Smi& length = Smi::Cast(length_loc.constant());
5713 Register index = index_loc.reg(); 5715 const Register index = index_loc.reg();
5714 __ CompareImmediate(index, reinterpret_cast<int32_t>(length.raw())); 5716 __ CompareImmediate(index, reinterpret_cast<int32_t>(length.raw()));
5715 __ b(deopt, CS); 5717 __ b(deopt, CS);
5716 } else { 5718 } else {
5717 Register length = length_loc.reg(); 5719 const Register length = length_loc.reg();
5718 Register index = index_loc.reg(); 5720 const Register index = index_loc.reg();
5719 __ cmp(index, ShifterOperand(length)); 5721 __ cmp(index, ShifterOperand(length));
5720 __ b(deopt, CS); 5722 __ b(deopt, CS);
5721 } 5723 }
5722 } 5724 }
5723 5725
5724 5726
5725 static void EmitJavascriptIntOverflowCheck(FlowGraphCompiler* compiler, 5727 static void EmitJavascriptIntOverflowCheck(FlowGraphCompiler* compiler,
5726 Label* overflow, 5728 Label* overflow,
5727 QRegister result, 5729 QRegister result,
5728 Register tmp_hi, Register tmp_lo) { 5730 Register tmp_hi, Register tmp_lo) {
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
5767 const intptr_t value_cid = value()->Type()->ToCid(); 5769 const intptr_t value_cid = value()->Type()->ToCid();
5768 const Register value = locs()->in(0).reg(); 5770 const Register value = locs()->in(0).reg();
5769 const QRegister result = locs()->out(0).fpu_reg(); 5771 const QRegister result = locs()->out(0).fpu_reg();
5770 5772
5771 __ Comment("UnboxIntegerInstr"); 5773 __ Comment("UnboxIntegerInstr");
5772 __ veorq(result, result, result); 5774 __ veorq(result, result, result);
5773 if (value_cid == kMintCid) { 5775 if (value_cid == kMintCid) {
5774 __ LoadDFromOffset(EvenDRegisterOf(result), value, 5776 __ LoadDFromOffset(EvenDRegisterOf(result), value,
5775 Mint::value_offset() - kHeapObjectTag); 5777 Mint::value_offset() - kHeapObjectTag);
5776 } else if (value_cid == kSmiCid) { 5778 } else if (value_cid == kSmiCid) {
5777 Register temp = locs()->temp(0).reg(); 5779 const Register temp = locs()->temp(0).reg();
5778 __ SmiUntag(value); 5780 __ SmiUntag(value);
5779 // Sign extend value into temp. 5781 // Sign extend value into temp.
5780 __ Asr(temp, value, 31); 5782 __ Asr(temp, value, 31);
5781 __ vmovdrr(EvenDRegisterOf(result), value, temp); 5783 __ vmovdrr(EvenDRegisterOf(result), value, temp);
5782 } else { 5784 } else {
5783 Register temp = locs()->temp(0).reg(); 5785 const Register temp = locs()->temp(0).reg();
5784 Label* deopt = compiler->AddDeoptStub(deopt_id_, 5786 Label* deopt = compiler->AddDeoptStub(deopt_id_,
5785 ICData::kDeoptUnboxInteger); 5787 ICData::kDeoptUnboxInteger);
5786 Label is_smi, done; 5788 Label is_smi, done;
5787 __ tst(value, ShifterOperand(kSmiTagMask)); 5789 __ tst(value, ShifterOperand(kSmiTagMask));
5788 __ b(&is_smi, EQ); 5790 __ b(&is_smi, EQ);
5789 __ CompareClassId(value, kMintCid, temp); 5791 __ CompareClassId(value, kMintCid, temp);
5790 __ b(deopt, NE); 5792 __ b(deopt, NE);
5791 5793
5792 // It's a Mint. 5794 // It's a Mint.
5793 __ LoadDFromOffset(EvenDRegisterOf(result), value, 5795 __ LoadDFromOffset(EvenDRegisterOf(result), value,
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
5850 5852
5851 private: 5853 private:
5852 BoxIntegerInstr* instruction_; 5854 BoxIntegerInstr* instruction_;
5853 }; 5855 };
5854 5856
5855 5857
5856 void BoxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5858 void BoxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5857 BoxIntegerSlowPath* slow_path = new BoxIntegerSlowPath(this); 5859 BoxIntegerSlowPath* slow_path = new BoxIntegerSlowPath(this);
5858 compiler->AddSlowPathCode(slow_path); 5860 compiler->AddSlowPathCode(slow_path);
5859 5861
5860 Register out_reg = locs()->out(0).reg(); 5862 const Register out_reg = locs()->out(0).reg();
5861 QRegister value = locs()->in(0).fpu_reg(); 5863 const QRegister value = locs()->in(0).fpu_reg();
5862 DRegister dvalue0 = EvenDRegisterOf(value); 5864 const DRegister dvalue0 = EvenDRegisterOf(value);
5863 Register lo = locs()->temp(0).reg(); 5865 const Register lo = locs()->temp(0).reg();
5864 Register hi = locs()->temp(1).reg(); 5866 const Register hi = locs()->temp(1).reg();
5865 5867
5866 // Unboxed operations produce smis or mint-sized values. 5868 // Unboxed operations produce smis or mint-sized values.
5867 // Check if value fits into a smi. 5869 // Check if value fits into a smi.
5868 __ Comment("BoxIntegerInstr"); 5870 __ Comment("BoxIntegerInstr");
5869 Label not_smi, done, maybe_pos_smi, maybe_neg_smi, is_smi; 5871 Label not_smi, done, maybe_pos_smi, maybe_neg_smi, is_smi;
5870 __ vmovrrd(lo, hi, dvalue0); 5872 __ vmovrrd(lo, hi, dvalue0);
5871 __ CompareImmediate(hi, 0); 5873 __ CompareImmediate(hi, 0);
5872 __ b(&maybe_pos_smi, EQ); 5874 __ b(&maybe_pos_smi, EQ);
5873 5875
5874 __ CompareImmediate(hi, -1); 5876 __ CompareImmediate(hi, -1);
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
5921 // Need another temp for checking for overflow. 5923 // Need another temp for checking for overflow.
5922 summary->AddTemp(Location::RequiresFpuRegister()); 5924 summary->AddTemp(Location::RequiresFpuRegister());
5923 summary->AddTemp(Location::FpuRegisterLocation(Q7)); 5925 summary->AddTemp(Location::FpuRegisterLocation(Q7));
5924 } 5926 }
5925 summary->set_out(0, Location::RequiresFpuRegister()); 5927 summary->set_out(0, Location::RequiresFpuRegister());
5926 return summary; 5928 return summary;
5927 } 5929 }
5928 5930
5929 5931
5930 void BinaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5932 void BinaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5931 QRegister left = locs()->in(0).fpu_reg(); 5933 const QRegister left = locs()->in(0).fpu_reg();
5932 QRegister right = locs()->in(1).fpu_reg(); 5934 const QRegister right = locs()->in(1).fpu_reg();
5933 QRegister out = locs()->out(0).fpu_reg(); 5935 const QRegister out = locs()->out(0).fpu_reg();
5934 5936
5935 Label* deopt = NULL; 5937 Label* deopt = NULL;
5936 if (FLAG_throw_on_javascript_int_overflow) { 5938 if (FLAG_throw_on_javascript_int_overflow) {
5937 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); 5939 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
5938 } 5940 }
5939 switch (op_kind()) { 5941 switch (op_kind()) {
5940 case Token::kBIT_AND: __ vandq(out, left, right); break; 5942 case Token::kBIT_AND: __ vandq(out, left, right); break;
5941 case Token::kBIT_OR: __ vorrq(out, left, right); break; 5943 case Token::kBIT_OR: __ vorrq(out, left, right); break;
5942 case Token::kBIT_XOR: __ veorq(out, left, right); break; 5944 case Token::kBIT_XOR: __ veorq(out, left, right); break;
5943 case Token::kADD: 5945 case Token::kADD:
5944 case Token::kSUB: { 5946 case Token::kSUB: {
5945 const intptr_t tmpidx = FLAG_throw_on_javascript_int_overflow ? 2 : 0; 5947 const intptr_t tmpidx = FLAG_throw_on_javascript_int_overflow ? 2 : 0;
5946 QRegister tmp = locs()->temp(tmpidx).fpu_reg(); 5948 const QRegister tmp = locs()->temp(tmpidx).fpu_reg();
5947 QRegister ro = locs()->temp(tmpidx + 1).fpu_reg(); 5949 const QRegister ro = locs()->temp(tmpidx + 1).fpu_reg();
5948 ASSERT(ro == Q7); 5950 ASSERT(ro == Q7);
5949 if (!FLAG_throw_on_javascript_int_overflow) { 5951 if (!FLAG_throw_on_javascript_int_overflow) {
5950 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); 5952 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
5951 } 5953 }
5952 if (op_kind() == Token::kADD) { 5954 if (op_kind() == Token::kADD) {
5953 __ vaddqi(kWordPair, out, left, right); 5955 __ vaddqi(kWordPair, out, left, right);
5954 } else { 5956 } else {
5955 ASSERT(op_kind() == Token::kSUB); 5957 ASSERT(op_kind() == Token::kSUB);
5956 __ vsubqi(kWordPair, out, left, right); 5958 __ vsubqi(kWordPair, out, left, right);
5957 } 5959 }
5958 __ veorq(ro, out, left); 5960 __ veorq(ro, out, left);
5959 __ veorq(tmp, left, right); 5961 __ veorq(tmp, left, right);
5960 __ vandq(ro, tmp, ro); 5962 __ vandq(ro, tmp, ro);
5961 __ vmovrs(TMP, OddSRegisterOf(EvenDRegisterOf(ro))); 5963 __ vmovrs(TMP, OddSRegisterOf(EvenDRegisterOf(ro)));
5962 // If TMP < 0, there was overflow. 5964 // If TMP < 0, there was overflow.
5963 __ cmp(TMP, ShifterOperand(0)); 5965 __ cmp(TMP, ShifterOperand(0));
5964 __ b(deopt, LT); 5966 __ b(deopt, LT);
5965 break; 5967 break;
5966 } 5968 }
5967 default: UNREACHABLE(); break; 5969 default: UNREACHABLE(); break;
5968 } 5970 }
5969 if (FLAG_throw_on_javascript_int_overflow) { 5971 if (FLAG_throw_on_javascript_int_overflow) {
5970 Register tmp1 = locs()->temp(0).reg(); 5972 const Register tmp1 = locs()->temp(0).reg();
5971 Register tmp2 = locs()->temp(1).reg(); 5973 const Register tmp2 = locs()->temp(1).reg();
5972 EmitJavascriptIntOverflowCheck(compiler, deopt, out, tmp1, tmp2); 5974 EmitJavascriptIntOverflowCheck(compiler, deopt, out, tmp1, tmp2);
5973 } 5975 }
5974 } 5976 }
5975 5977
5976 5978
5977 LocationSummary* ShiftMintOpInstr::MakeLocationSummary(bool opt) const { 5979 LocationSummary* ShiftMintOpInstr::MakeLocationSummary(bool opt) const {
5978 const intptr_t kNumInputs = 2; 5980 const intptr_t kNumInputs = 2;
5979 const intptr_t kNumTemps = 5981 const intptr_t kNumTemps =
5980 FLAG_throw_on_javascript_int_overflow ? 2 : 1; 5982 FLAG_throw_on_javascript_int_overflow ? 2 : 1;
5981 LocationSummary* summary = 5983 LocationSummary* summary =
5982 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 5984 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
5983 summary->set_in(0, Location::RequiresFpuRegister()); 5985 summary->set_in(0, Location::RequiresFpuRegister());
5984 summary->set_in(1, Location::WritableRegister()); 5986 summary->set_in(1, Location::WritableRegister());
5985 summary->set_temp(0, Location::FpuRegisterLocation(Q7)); 5987 summary->set_temp(0, Location::FpuRegisterLocation(Q7));
5986 if (FLAG_throw_on_javascript_int_overflow) { 5988 if (FLAG_throw_on_javascript_int_overflow) {
5987 summary->set_temp(1, Location::RequiresRegister()); 5989 summary->set_temp(1, Location::RequiresRegister());
5988 } 5990 }
5989 summary->set_out(0, Location::RequiresFpuRegister()); 5991 summary->set_out(0, Location::RequiresFpuRegister());
5990 return summary; 5992 return summary;
5991 } 5993 }
5992 5994
5993 5995
5994 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5996 void ShiftMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5995 QRegister value = locs()->in(0).fpu_reg(); 5997 const QRegister value = locs()->in(0).fpu_reg();
5996 Register shift = locs()->in(1).reg(); 5998 const Register shift = locs()->in(1).reg();
5997 QRegister temp = locs()->temp(0).fpu_reg(); 5999 const QRegister temp = locs()->temp(0).fpu_reg();
5998 ASSERT(temp == Q7); 6000 ASSERT(temp == Q7);
5999 QRegister out = locs()->out(0).fpu_reg(); 6001 const QRegister out = locs()->out(0).fpu_reg();
6000 DRegister dtemp0 = EvenDRegisterOf(temp); 6002 const DRegister dtemp0 = EvenDRegisterOf(temp);
6001 SRegister stemp0 = EvenSRegisterOf(dtemp0); 6003 const SRegister stemp0 = EvenSRegisterOf(dtemp0);
6002 SRegister stemp1 = OddSRegisterOf(dtemp0); 6004 const SRegister stemp1 = OddSRegisterOf(dtemp0);
6003 6005
6004 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptShiftMintOp); 6006 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptShiftMintOp);
6005 Label done; 6007 Label done;
6006 6008
6007 __ CompareImmediate(shift, 0); 6009 __ CompareImmediate(shift, 0);
6008 __ vmovq(out, value); 6010 __ vmovq(out, value);
6009 __ b(&done, EQ); 6011 __ b(&done, EQ);
6010 __ SmiUntag(shift); 6012 __ SmiUntag(shift);
6011 6013
6012 // vshlq takes the shift value from low byte. Deopt if shift is 6014 // vshlq takes the shift value from low byte. Deopt if shift is
(...skipping 30 matching lines...) Expand all
6043 __ b(deopt, NE); 6045 __ b(deopt, NE);
6044 break; 6046 break;
6045 } 6047 }
6046 default: 6048 default:
6047 UNREACHABLE(); 6049 UNREACHABLE();
6048 break; 6050 break;
6049 } 6051 }
6050 6052
6051 __ Bind(&done); 6053 __ Bind(&done);
6052 if (FLAG_throw_on_javascript_int_overflow) { 6054 if (FLAG_throw_on_javascript_int_overflow) {
6053 Register tmp1 = locs()->in(1).reg(); 6055 const Register tmp1 = locs()->in(1).reg();
6054 Register tmp2 = locs()->temp(1).reg(); 6056 const Register tmp2 = locs()->temp(1).reg();
6055 EmitJavascriptIntOverflowCheck(compiler, deopt, out, tmp1, tmp2); 6057 EmitJavascriptIntOverflowCheck(compiler, deopt, out, tmp1, tmp2);
6056 } 6058 }
6057 } 6059 }
6058 6060
6059 6061
6060 LocationSummary* UnaryMintOpInstr::MakeLocationSummary(bool opt) const { 6062 LocationSummary* UnaryMintOpInstr::MakeLocationSummary(bool opt) const {
6061 const intptr_t kNumInputs = 1; 6063 const intptr_t kNumInputs = 1;
6062 const intptr_t kNumTemps = 6064 const intptr_t kNumTemps =
6063 FLAG_throw_on_javascript_int_overflow ? 2 : 0; 6065 FLAG_throw_on_javascript_int_overflow ? 2 : 0;
6064 LocationSummary* summary = 6066 LocationSummary* summary =
6065 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 6067 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
6066 summary->set_in(0, Location::RequiresFpuRegister()); 6068 summary->set_in(0, Location::RequiresFpuRegister());
6067 summary->set_out(0, Location::RequiresFpuRegister()); 6069 summary->set_out(0, Location::RequiresFpuRegister());
6068 if (FLAG_throw_on_javascript_int_overflow) { 6070 if (FLAG_throw_on_javascript_int_overflow) {
6069 summary->set_temp(0, Location::RequiresRegister()); 6071 summary->set_temp(0, Location::RequiresRegister());
6070 summary->set_temp(1, Location::RequiresRegister()); 6072 summary->set_temp(1, Location::RequiresRegister());
6071 } 6073 }
6072 return summary; 6074 return summary;
6073 } 6075 }
6074 6076
6075 6077
6076 void UnaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 6078 void UnaryMintOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6077 ASSERT(op_kind() == Token::kBIT_NOT); 6079 ASSERT(op_kind() == Token::kBIT_NOT);
6078 QRegister value = locs()->in(0).fpu_reg(); 6080 const QRegister value = locs()->in(0).fpu_reg();
6079 QRegister out = locs()->out(0).fpu_reg(); 6081 const QRegister out = locs()->out(0).fpu_reg();
6080 Label* deopt = NULL; 6082 Label* deopt = NULL;
6081 if (FLAG_throw_on_javascript_int_overflow) { 6083 if (FLAG_throw_on_javascript_int_overflow) {
6082 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryMintOp); 6084 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryMintOp);
6083 } 6085 }
6084 __ vmvnq(out, value); 6086 __ vmvnq(out, value);
6085 if (FLAG_throw_on_javascript_int_overflow) { 6087 if (FLAG_throw_on_javascript_int_overflow) {
6086 Register tmp1 = locs()->temp(0).reg(); 6088 const Register tmp1 = locs()->temp(0).reg();
6087 Register tmp2 = locs()->temp(1).reg(); 6089 const Register tmp2 = locs()->temp(1).reg();
6088 EmitJavascriptIntOverflowCheck(compiler, deopt, out, tmp1, tmp2); 6090 EmitJavascriptIntOverflowCheck(compiler, deopt, out, tmp1, tmp2);
6089 } 6091 }
6090 } 6092 }
6091 6093
6092 6094
6093 LocationSummary* ThrowInstr::MakeLocationSummary(bool opt) const { 6095 LocationSummary* ThrowInstr::MakeLocationSummary(bool opt) const {
6094 return new LocationSummary(0, 0, LocationSummary::kCall); 6096 return new LocationSummary(0, 0, LocationSummary::kCall);
6095 } 6097 }
6096 6098
6097 6099
(...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after
6237 } 6239 }
6238 6240
6239 6241
6240 void StrictCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 6242 void StrictCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6241 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT); 6243 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
6242 6244
6243 // The ARM code does not use true- and false-labels here. 6245 // The ARM code does not use true- and false-labels here.
6244 BranchLabels labels = { NULL, NULL, NULL }; 6246 BranchLabels labels = { NULL, NULL, NULL };
6245 Condition true_condition = EmitComparisonCode(compiler, labels); 6247 Condition true_condition = EmitComparisonCode(compiler, labels);
6246 6248
6247 Register result = locs()->out(0).reg(); 6249 const Register result = locs()->out(0).reg();
6248 __ LoadObject(result, Bool::True(), true_condition); 6250 __ LoadObject(result, Bool::True(), true_condition);
6249 __ LoadObject(result, Bool::False(), NegateCondition(true_condition)); 6251 __ LoadObject(result, Bool::False(), NegateCondition(true_condition));
6250 } 6252 }
6251 6253
6252 6254
6253 void StrictCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler, 6255 void StrictCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler,
6254 BranchInstr* branch) { 6256 BranchInstr* branch) {
6255 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT); 6257 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
6256 6258
6257 BranchLabels labels = compiler->CreateBranchLabels(branch); 6259 BranchLabels labels = compiler->CreateBranchLabels(branch);
6258 Condition true_condition = EmitComparisonCode(compiler, labels); 6260 Condition true_condition = EmitComparisonCode(compiler, labels);
6259 EmitBranchOnCondition(compiler, true_condition, labels); 6261 EmitBranchOnCondition(compiler, true_condition, labels);
6260 } 6262 }
6261 6263
6262 6264
6263 LocationSummary* BooleanNegateInstr::MakeLocationSummary(bool opt) const { 6265 LocationSummary* BooleanNegateInstr::MakeLocationSummary(bool opt) const {
6264 return LocationSummary::Make(1, 6266 return LocationSummary::Make(1,
6265 Location::RequiresRegister(), 6267 Location::RequiresRegister(),
6266 LocationSummary::kNoCall); 6268 LocationSummary::kNoCall);
6267 } 6269 }
6268 6270
6269 6271
6270 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 6272 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6271 Register value = locs()->in(0).reg(); 6273 const Register value = locs()->in(0).reg();
6272 Register result = locs()->out(0).reg(); 6274 const Register result = locs()->out(0).reg();
6273 6275
6274 __ LoadObject(result, Bool::True()); 6276 __ LoadObject(result, Bool::True());
6275 __ cmp(result, ShifterOperand(value)); 6277 __ cmp(result, ShifterOperand(value));
6276 __ LoadObject(result, Bool::False(), EQ); 6278 __ LoadObject(result, Bool::False(), EQ);
6277 } 6279 }
6278 6280
6279 6281
6280 LocationSummary* AllocateObjectInstr::MakeLocationSummary(bool opt) const { 6282 LocationSummary* AllocateObjectInstr::MakeLocationSummary(bool opt) const {
6281 return MakeCallSummary(); 6283 return MakeCallSummary();
6282 } 6284 }
6283 6285
6284 6286
6285 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 6287 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6286 const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls())); 6288 const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls()));
6287 const ExternalLabel label(cls().ToCString(), stub.EntryPoint()); 6289 const ExternalLabel label(cls().ToCString(), stub.EntryPoint());
6288 compiler->GenerateCall(token_pos(), 6290 compiler->GenerateCall(token_pos(),
6289 &label, 6291 &label,
6290 PcDescriptors::kOther, 6292 PcDescriptors::kOther,
6291 locs()); 6293 locs());
6292 __ Drop(ArgumentCount()); // Discard arguments. 6294 __ Drop(ArgumentCount()); // Discard arguments.
6293 } 6295 }
6294 6296
6295 } // namespace dart 6297 } // namespace dart
6296 6298
6297 #endif // defined TARGET_ARCH_ARM 6299 #endif // defined TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « runtime/vm/flow_graph_compiler_arm64.cc ('k') | runtime/vm/intermediate_language_arm64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698