Chromium Code Reviews

Side by Side Diff: runtime/vm/intermediate_language_arm64.cc

Issue 293993013: Begins adding SIMD support to arm64. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 7 months ago
OLD | NEW
1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64.
6 #if defined(TARGET_ARCH_ARM64) 6 #if defined(TARGET_ARCH_ARM64)
7 7
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 9
10 #include "vm/dart_entry.h" 10 #include "vm/dart_entry.h"
(...skipping 58 matching lines...)
69 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 69 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
70 locs->set_in(0, Location::RegisterLocation(R0)); 70 locs->set_in(0, Location::RegisterLocation(R0));
71 return locs; 71 return locs;
72 } 72 }
73 73
74 74
75 // Attempt optimized compilation at return instruction instead of at the entry. 75 // Attempt optimized compilation at return instruction instead of at the entry.
76 // The entry needs to be patchable, no inlined objects are allowed in the area 76 // The entry needs to be patchable, no inlined objects are allowed in the area
77 // that will be overwritten by the patch instructions: a branch macro sequence. 77 // that will be overwritten by the patch instructions: a branch macro sequence.
78 void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 78 void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
79 Register result = locs()->in(0).reg(); 79 const Register result = locs()->in(0).reg();
80 ASSERT(result == R0); 80 ASSERT(result == R0);
81 #if defined(DEBUG) 81 #if defined(DEBUG)
82 Label stack_ok; 82 Label stack_ok;
83 __ Comment("Stack Check"); 83 __ Comment("Stack Check");
84 const intptr_t fp_sp_dist = 84 const intptr_t fp_sp_dist =
85 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; 85 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize;
86 ASSERT(fp_sp_dist <= 0); 86 ASSERT(fp_sp_dist <= 0);
87 // UXTX 0 on a 64-bit register (FP) is a nop, but forces R31 to be 87 // UXTX 0 on a 64-bit register (FP) is a nop, but forces R31 to be
88 // interpreted as SP. 88 // interpreted as SP.
89 __ sub(R2, SP, Operand(FP, UXTX, 0)); 89 __ sub(R2, SP, Operand(FP, UXTX, 0));
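A short gloss on the debug stack check above, reading only the visible lines (kFirstLocalSlotFromFp is the FP-relative slot index of the first local):

    // After the prologue allocates StackSize() slots, SP should sit
    // exactly fp_sp_dist bytes from FP, with fp_sp_dist <= 0.
    // R2 = SP - FP is the actual distance; the elided lines presumably
    // compare it against fp_sp_dist before falling through to stack_ok.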
(...skipping 140 matching lines...)
230 230
231 231
232 LocationSummary* LoadLocalInstr::MakeLocationSummary(bool opt) const { 232 LocationSummary* LoadLocalInstr::MakeLocationSummary(bool opt) const {
233 return LocationSummary::Make(0, 233 return LocationSummary::Make(0,
234 Location::RequiresRegister(), 234 Location::RequiresRegister(),
235 LocationSummary::kNoCall); 235 LocationSummary::kNoCall);
236 } 236 }
237 237
238 238
239 void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 239 void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
240 Register result = locs()->out(0).reg(); 240 const Register result = locs()->out(0).reg();
241 __ LoadFromOffset(result, FP, local().index() * kWordSize, PP); 241 __ LoadFromOffset(result, FP, local().index() * kWordSize, PP);
242 } 242 }
243 243
244 244
245 LocationSummary* StoreLocalInstr::MakeLocationSummary(bool opt) const { 245 LocationSummary* StoreLocalInstr::MakeLocationSummary(bool opt) const {
246 return LocationSummary::Make(1, 246 return LocationSummary::Make(1,
247 Location::SameAsFirstInput(), 247 Location::SameAsFirstInput(),
248 LocationSummary::kNoCall); 248 LocationSummary::kNoCall);
249 } 249 }
250 250
251 251
252 void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 252 void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
253 Register value = locs()->in(0).reg(); 253 const Register value = locs()->in(0).reg();
254 Register result = locs()->out(0).reg(); 254 const Register result = locs()->out(0).reg();
255 ASSERT(result == value); // Assert that register assignment is correct. 255 ASSERT(result == value); // Assert that register assignment is correct.
256 __ StoreToOffset(value, FP, local().index() * kWordSize, PP); 256 __ StoreToOffset(value, FP, local().index() * kWordSize, PP);
257 } 257 }
258 258
259 259
260 LocationSummary* ConstantInstr::MakeLocationSummary(bool opt) const { 260 LocationSummary* ConstantInstr::MakeLocationSummary(bool opt) const {
261 return LocationSummary::Make(0, 261 return LocationSummary::Make(0,
262 Location::RequiresRegister(), 262 Location::RequiresRegister(),
263 LocationSummary::kNoCall); 263 LocationSummary::kNoCall);
264 } 264 }
265 265
266 266
267 void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 267 void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
268 // The register allocator drops constant definitions that have no uses. 268 // The register allocator drops constant definitions that have no uses.
269 if (!locs()->out(0).IsInvalid()) { 269 if (!locs()->out(0).IsInvalid()) {
270 Register result = locs()->out(0).reg(); 270 const Register result = locs()->out(0).reg();
271 __ LoadObject(result, value(), PP); 271 __ LoadObject(result, value(), PP);
272 } 272 }
273 } 273 }
274 274
275 275
276 LocationSummary* UnboxedConstantInstr::MakeLocationSummary(bool opt) const { 276 LocationSummary* UnboxedConstantInstr::MakeLocationSummary(bool opt) const {
277 const intptr_t kNumInputs = 0; 277 const intptr_t kNumInputs = 0;
278 return LocationSummary::Make(kNumInputs, 278 return LocationSummary::Make(kNumInputs,
279 Location::RequiresFpuRegister(), 279 Location::RequiresFpuRegister(),
280 LocationSummary::kNoCall); 280 LocationSummary::kNoCall);
(...skipping 52 matching lines...)
333 kNonBoolTypeErrorRuntimeEntry, 333 kNonBoolTypeErrorRuntimeEntry,
334 1, 334 1,
335 locs); 335 locs);
336 // We should never return here. 336 // We should never return here.
337 __ hlt(0); 337 __ hlt(0);
338 __ Bind(&done); 338 __ Bind(&done);
339 } 339 }
340 340
341 341
342 void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 342 void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
343 Register obj = locs()->in(0).reg(); 343 const Register obj = locs()->in(0).reg();
344 Register result = locs()->out(0).reg(); 344 const Register result = locs()->out(0).reg();
345 345
346 EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler); 346 EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
347 ASSERT(obj == result); 347 ASSERT(obj == result);
348 } 348 }
349 349
350 350
351 static Condition TokenKindToSmiCondition(Token::Kind kind) { 351 static Condition TokenKindToSmiCondition(Token::Kind kind) {
352 switch (kind) { 352 switch (kind) {
353 case Token::kEQ: return EQ; 353 case Token::kEQ: return EQ;
354 case Token::kNE: return NE; 354 case Token::kNE: return NE;
(...skipping 140 matching lines...)
495 BranchLabels labels = { &is_true, &is_false, &is_false }; 495 BranchLabels labels = { &is_true, &is_false, &is_false };
496 Condition true_condition = EmitComparisonCode(compiler, labels); 496 Condition true_condition = EmitComparisonCode(compiler, labels);
497 if ((operation_cid() == kDoubleCid) && (true_condition != NE)) { 497 if ((operation_cid() == kDoubleCid) && (true_condition != NE)) {
498 // Special case for NaN comparison. Result is always false unless 498 // Special case for NaN comparison. Result is always false unless
499 // relational operator is !=. 499 // relational operator is !=.
500 __ b(&is_false, VS); 500 __ b(&is_false, VS);
501 } 501 }
502 EmitBranchOnCondition(compiler, true_condition, labels); 502 EmitBranchOnCondition(compiler, true_condition, labels);
503 // TODO(zra): instead of branching, use the csel instruction to get 503 // TODO(zra): instead of branching, use the csel instruction to get
504 // True or False into result. 504 // True or False into result.
505 Register result = locs()->out(0).reg(); 505 const Register result = locs()->out(0).reg();
506 Label done; 506 Label done;
507 __ Bind(&is_false); 507 __ Bind(&is_false);
508 __ LoadObject(result, Bool::False(), PP); 508 __ LoadObject(result, Bool::False(), PP);
509 __ b(&done); 509 __ b(&done);
510 __ Bind(&is_true); 510 __ Bind(&is_true);
511 __ LoadObject(result, Bool::True(), PP); 511 __ LoadObject(result, Bool::True(), PP);
512 __ Bind(&done); 512 __ Bind(&done);
513 } 513 }
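Two notes on the block above. The b(&is_false, VS) catches NaN because an ARM64 fcmp of unordered operands sets the V (overflow) flag, so VS means "either operand was NaN". For the csel TODO, a minimal branch-free sketch, assuming the assembler exposes csel with a (dst, if_true, if_false, cond) operand order and leaving the NaN case aside:

    __ LoadObject(TMP, Bool::True(), PP);
    __ LoadObject(result, Bool::False(), PP);
    __ csel(result, TMP, result, true_condition);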
514 514
515 515
(...skipping 20 matching lines...)
536 locs->set_in(0, Location::RequiresRegister()); 536 locs->set_in(0, Location::RequiresRegister());
537 // Only one input can be a constant operand. The case of two constant 537 // Only one input can be a constant operand. The case of two constant
538 // operands should be handled by constant propagation. 538 // operands should be handled by constant propagation.
539 locs->set_in(1, Location::RegisterOrConstant(right())); 539 locs->set_in(1, Location::RegisterOrConstant(right()));
540 return locs; 540 return locs;
541 } 541 }
542 542
543 543
544 Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler, 544 Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
545 BranchLabels labels) { 545 BranchLabels labels) {
546 Register left = locs()->in(0).reg(); 546 const Register left = locs()->in(0).reg();
547 Location right = locs()->in(1); 547 Location right = locs()->in(1);
548 if (right.IsConstant()) { 548 if (right.IsConstant()) {
549 ASSERT(right.constant().IsSmi()); 549 ASSERT(right.constant().IsSmi());
550 const int64_t imm = 550 const int64_t imm =
551 reinterpret_cast<int64_t>(right.constant().raw()); 551 reinterpret_cast<int64_t>(right.constant().raw());
552 __ TestImmediate(left, imm, PP); 552 __ TestImmediate(left, imm, PP);
553 } else { 553 } else {
554 __ tst(left, Operand(right.reg())); 554 __ tst(left, Operand(right.reg()));
555 } 555 }
556 Condition true_condition = (kind() == Token::kNE) ? NE : EQ; 556 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
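A note on the constant case above: the reinterpret_cast is correct because a Smi's RawObject* is not a heap pointer but the integer value in tagged form, so the immediate is already comparable against the tagged left operand:

    // intptr_t raw = value << kSmiTagShift;  // kSmiTagShift == 1
    // (raw & kSmiTagMask) == 0               // kSmiTag == 0 marks a Smi
    // tst left, #raw therefore tests the Smi bits directly.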
(...skipping 23 matching lines...)
580 locs->set_in(0, Location::RequiresRegister()); 580 locs->set_in(0, Location::RequiresRegister());
581 locs->set_temp(0, Location::RequiresRegister()); 581 locs->set_temp(0, Location::RequiresRegister());
582 locs->set_out(0, Location::RequiresRegister()); 582 locs->set_out(0, Location::RequiresRegister());
583 return locs; 583 return locs;
584 } 584 }
585 585
586 586
587 Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler, 587 Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
588 BranchLabels labels) { 588 BranchLabels labels) {
589 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT)); 589 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
590 Register val_reg = locs()->in(0).reg(); 590 const Register val_reg = locs()->in(0).reg();
591 Register cid_reg = locs()->temp(0).reg(); 591 const Register cid_reg = locs()->temp(0).reg();
592 592
593 Label* deopt = CanDeoptimize() ? 593 Label* deopt = CanDeoptimize() ?
594 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL; 594 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL;
595 595
596 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0; 596 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
597 const ZoneGrowableArray<intptr_t>& data = cid_results(); 597 const ZoneGrowableArray<intptr_t>& data = cid_results();
598 ASSERT(data[0] == kSmiCid); 598 ASSERT(data[0] == kSmiCid);
599 bool result = data[1] == true_result; 599 bool result = data[1] == true_result;
600 __ tsti(val_reg, kSmiTagMask); 600 __ tsti(val_reg, kSmiTagMask);
601 __ b(result ? labels.true_label : labels.false_label, EQ); 601 __ b(result ? labels.true_label : labels.false_label, EQ);
(...skipping 22 matching lines...)
624 624
625 625
626 void TestCidsInstr::EmitBranchCode(FlowGraphCompiler* compiler, 626 void TestCidsInstr::EmitBranchCode(FlowGraphCompiler* compiler,
627 BranchInstr* branch) { 627 BranchInstr* branch) {
628 BranchLabels labels = compiler->CreateBranchLabels(branch); 628 BranchLabels labels = compiler->CreateBranchLabels(branch);
629 EmitComparisonCode(compiler, labels); 629 EmitComparisonCode(compiler, labels);
630 } 630 }
631 631
632 632
633 void TestCidsInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 633 void TestCidsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
634 Register result_reg = locs()->out(0).reg(); 634 const Register result_reg = locs()->out(0).reg();
635 Label is_true, is_false, done; 635 Label is_true, is_false, done;
636 BranchLabels labels = { &is_true, &is_false, &is_false }; 636 BranchLabels labels = { &is_true, &is_false, &is_false };
637 EmitComparisonCode(compiler, labels); 637 EmitComparisonCode(compiler, labels);
638 // TODO(zra): instead of branching, use the csel instruction to get 638 // TODO(zra): instead of branching, use the csel instruction to get
639 // True or False into result. 639 // True or False into result.
640 __ Bind(&is_false); 640 __ Bind(&is_false);
641 __ LoadObject(result_reg, Bool::False(), PP); 641 __ LoadObject(result_reg, Bool::False(), PP);
642 __ b(&done); 642 __ b(&done);
643 __ Bind(&is_true); 643 __ Bind(&is_true);
644 __ LoadObject(result_reg, Bool::True(), PP); 644 __ LoadObject(result_reg, Bool::True(), PP);
(...skipping 42 matching lines...)
687 BranchLabels labels = { &is_true, &is_false, &is_false }; 687 BranchLabels labels = { &is_true, &is_false, &is_false };
688 Condition true_condition = EmitComparisonCode(compiler, labels); 688 Condition true_condition = EmitComparisonCode(compiler, labels);
689 if ((operation_cid() == kDoubleCid) && (true_condition != NE)) { 689 if ((operation_cid() == kDoubleCid) && (true_condition != NE)) {
690 // Special case for NaN comparison. Result is always false unless 690 // Special case for NaN comparison. Result is always false unless
691 // relational operator is !=. 691 // relational operator is !=.
692 __ b(&is_false, VS); 692 __ b(&is_false, VS);
693 } 693 }
694 EmitBranchOnCondition(compiler, true_condition, labels); 694 EmitBranchOnCondition(compiler, true_condition, labels);
695 // TODO(zra): instead of branching, use the csel instruction to get 695 // TODO(zra): instead of branching, use the csel instruction to get
696 // True or False into result. 696 // True or False into result.
697 Register result = locs()->out(0).reg(); 697 const Register result = locs()->out(0).reg();
698 Label done; 698 Label done;
699 __ Bind(&is_false); 699 __ Bind(&is_false);
700 __ LoadObject(result, Bool::False(), PP); 700 __ LoadObject(result, Bool::False(), PP);
701 __ b(&done); 701 __ b(&done);
702 __ Bind(&is_true); 702 __ Bind(&is_true);
703 __ LoadObject(result, Bool::True(), PP); 703 __ LoadObject(result, Bool::True(), PP);
704 __ Bind(&done); 704 __ Bind(&done);
705 } 705 }
706 706
707 707
(...skipping 20 matching lines...)
728 locs->set_temp(2, Location::RegisterLocation(R5)); 728 locs->set_temp(2, Location::RegisterLocation(R5));
729 locs->set_out(0, Location::RegisterLocation(R0)); 729 locs->set_out(0, Location::RegisterLocation(R0));
730 return locs; 730 return locs;
731 } 731 }
732 732
733 733
734 void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 734 void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
735 ASSERT(locs()->temp(0).reg() == R1); 735 ASSERT(locs()->temp(0).reg() == R1);
736 ASSERT(locs()->temp(1).reg() == R2); 736 ASSERT(locs()->temp(1).reg() == R2);
737 ASSERT(locs()->temp(2).reg() == R5); 737 ASSERT(locs()->temp(2).reg() == R5);
738 Register result = locs()->out(0).reg(); 738 const Register result = locs()->out(0).reg();
739 739
740 // Push the result place holder initialized to NULL. 740 // Push the result place holder initialized to NULL.
741 __ PushObject(Object::ZoneHandle(), PP); 741 __ PushObject(Object::ZoneHandle(), PP);
742 // Pass a pointer to the first argument in R2. 742 // Pass a pointer to the first argument in R2.
743 if (!function().HasOptionalParameters()) { 743 if (!function().HasOptionalParameters()) {
744 __ AddImmediate(R2, FP, (kParamEndSlotFromFp + 744 __ AddImmediate(R2, FP, (kParamEndSlotFromFp +
745 function().NumParameters()) * kWordSize, PP); 745 function().NumParameters()) * kWordSize, PP);
746 } else { 746 } else {
747 __ AddImmediate(R2, FP, kFirstLocalSlotFromFp * kWordSize, PP); 747 __ AddImmediate(R2, FP, kFirstLocalSlotFromFp * kWordSize, PP);
748 } 748 }
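On the R2 computation above: with only fixed parameters, the arguments still live in the caller's frame above FP, so the first argument occupies the highest parameter slot; with optional parameters, the prologue has already copied the arguments into local slots. In outline:

    // Fixed parameters:
    //   R2 = FP + (kParamEndSlotFromFp + NumParameters()) * kWordSize
    // Optional parameters (arguments copied into locals by the prologue):
    //   R2 = FP + kFirstLocalSlotFromFp * kWordSize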
(...skipping 77 matching lines...)
826 const intptr_t kNumTemps = 0; 826 const intptr_t kNumTemps = 0;
827 LocationSummary* summary = 827 LocationSummary* summary =
828 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 828 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
829 summary->set_in(0, Location::RegisterLocation(R0)); 829 summary->set_in(0, Location::RegisterLocation(R0));
830 summary->set_out(0, Location::RegisterLocation(R0)); 830 summary->set_out(0, Location::RegisterLocation(R0));
831 return summary; 831 return summary;
832 } 832 }
833 833
834 834
835 void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 835 void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
836 Register array = locs()->in(0).reg(); 836 const Register array = locs()->in(0).reg();
837 __ Push(array); 837 __ Push(array);
838 const int kNumberOfArguments = 1; 838 const int kNumberOfArguments = 1;
839 const Array& kNoArgumentNames = Object::null_array(); 839 const Array& kNoArgumentNames = Object::null_array();
840 compiler->GenerateStaticCall(deopt_id(), 840 compiler->GenerateStaticCall(deopt_id(),
841 token_pos(), 841 token_pos(),
842 CallFunction(), 842 CallFunction(),
843 kNumberOfArguments, 843 kNumberOfArguments,
844 kNoArgumentNames, 844 kNoArgumentNames,
845 locs()); 845 locs());
846 ASSERT(locs()->out(0).reg() == R0); 846 ASSERT(locs()->out(0).reg() == R0);
847 } 847 }
848 848
849 849
850 LocationSummary* LoadUntaggedInstr::MakeLocationSummary(bool opt) const { 850 LocationSummary* LoadUntaggedInstr::MakeLocationSummary(bool opt) const {
851 const intptr_t kNumInputs = 1; 851 const intptr_t kNumInputs = 1;
852 return LocationSummary::Make(kNumInputs, 852 return LocationSummary::Make(kNumInputs,
853 Location::RequiresRegister(), 853 Location::RequiresRegister(),
854 LocationSummary::kNoCall); 854 LocationSummary::kNoCall);
855 } 855 }
856 856
857 857
858 void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 858 void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
859 Register object = locs()->in(0).reg(); 859 const Register object = locs()->in(0).reg();
860 Register result = locs()->out(0).reg(); 860 const Register result = locs()->out(0).reg();
861 __ LoadFieldFromOffset(result, object, offset(), PP); 861 __ LoadFieldFromOffset(result, object, offset(), PP);
862 } 862 }
863 863
864 864
865 LocationSummary* LoadClassIdInstr::MakeLocationSummary(bool opt) const { 865 LocationSummary* LoadClassIdInstr::MakeLocationSummary(bool opt) const {
866 const intptr_t kNumInputs = 1; 866 const intptr_t kNumInputs = 1;
867 return LocationSummary::Make(kNumInputs, 867 return LocationSummary::Make(kNumInputs,
868 Location::RequiresRegister(), 868 Location::RequiresRegister(),
869 LocationSummary::kNoCall); 869 LocationSummary::kNoCall);
870 } 870 }
871 871
872 872
873 void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 873 void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
874 Register object = locs()->in(0).reg(); 874 const Register object = locs()->in(0).reg();
875 Register result = locs()->out(0).reg(); 875 const Register result = locs()->out(0).reg();
876 Label load, done; 876 Label load, done;
877 __ tsti(object, kSmiTagMask); 877 __ tsti(object, kSmiTagMask);
878 __ b(&load, NE); 878 __ b(&load, NE);
879 __ LoadImmediate(result, Smi::RawValue(kSmiCid), PP); 879 __ LoadImmediate(result, Smi::RawValue(kSmiCid), PP);
880 __ b(&done); 880 __ b(&done);
881 __ Bind(&load); 881 __ Bind(&load);
882 __ LoadClassId(result, object, PP); 882 __ LoadClassId(result, object, PP);
883 __ SmiTag(result); 883 __ SmiTag(result);
884 __ Bind(&done); 884 __ Bind(&done);
885 } 885 }
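The special case above exists because a Smi carries no object header to read a class id from; the tag bit alone identifies it:

    // tsti object, #kSmiTagMask sets Z when the low bit is clear,
    // i.e. when object is a Smi (kSmiTag == 0). Heap objects take the
    // load path, which reads the cid from the object's header word.
    // Either way the result is Smi-tagged before reaching the caller.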
(...skipping 83 matching lines...)
969 (representation() == kUnboxedFloat64x2)) { 969 (representation() == kUnboxedFloat64x2)) {
970 locs->set_out(0, Location::RequiresFpuRegister()); 970 locs->set_out(0, Location::RequiresFpuRegister());
971 } else { 971 } else {
972 locs->set_out(0, Location::RequiresRegister()); 972 locs->set_out(0, Location::RequiresRegister());
973 } 973 }
974 return locs; 974 return locs;
975 } 975 }
976 976
977 977
978 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 978 void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
979 Register array = locs()->in(0).reg(); 979 const Register array = locs()->in(0).reg();
980 ASSERT(locs()->in(1).IsRegister()); // TODO(regis): Revisit. 980 ASSERT(locs()->in(1).IsRegister()); // TODO(regis): Revisit.
981 Register index = locs()->in(1).reg(); 981 const Register index = locs()->in(1).reg();
982 982
983 Address element_address(kNoRegister, 0); 983 Address element_address(kNoRegister, 0);
984 984
985 // The array register points to the backing store for external arrays. 985 // The array register points to the backing store for external arrays.
986 intptr_t offset = 0; 986 intptr_t offset = 0;
987 if (!IsExternal()) { 987 if (!IsExternal()) {
988 ASSERT(this->array()->definition()->representation() == kTagged); 988 ASSERT(this->array()->definition()->representation() == kTagged);
989 offset = FlowGraphCompiler::DataOffsetFor(class_id()) - kHeapObjectTag; 989 offset = FlowGraphCompiler::DataOffsetFor(class_id()) - kHeapObjectTag;
990 } 990 }
991 991
(...skipping 46 matching lines...)
1038 break; 1038 break;
1039 case kTypedDataFloat64x2ArrayCid: 1039 case kTypedDataFloat64x2ArrayCid:
1040 case kTypedDataInt32x4ArrayCid: 1040 case kTypedDataInt32x4ArrayCid:
1041 case kTypedDataFloat32x4ArrayCid: 1041 case kTypedDataFloat32x4ArrayCid:
1042 __ fldrq(result, element_address); 1042 __ fldrq(result, element_address);
1043 break; 1043 break;
1044 } 1044 }
1045 return; 1045 return;
1046 } 1046 }
1047 1047
1048 Register result = locs()->out(0).reg(); 1048 const Register result = locs()->out(0).reg();
1049 switch (class_id()) { 1049 switch (class_id()) {
1050 case kTypedDataInt8ArrayCid: 1050 case kTypedDataInt8ArrayCid:
1051 ASSERT(index_scale() == 1); 1051 ASSERT(index_scale() == 1);
1052 __ ldr(result, element_address, kByte); 1052 __ ldr(result, element_address, kByte);
1053 __ SmiTag(result); 1053 __ SmiTag(result);
1054 break; 1054 break;
1055 case kTypedDataUint8ArrayCid: 1055 case kTypedDataUint8ArrayCid:
1056 case kTypedDataUint8ClampedArrayCid: 1056 case kTypedDataUint8ClampedArrayCid:
1057 case kExternalTypedDataUint8ArrayCid: 1057 case kExternalTypedDataUint8ArrayCid:
1058 case kExternalTypedDataUint8ClampedArrayCid: 1058 case kExternalTypedDataUint8ClampedArrayCid:
(...skipping 105 matching lines...)
1164 UNREACHABLE(); 1164 UNREACHABLE();
1165 return NULL; 1165 return NULL;
1166 } 1166 }
1167 return locs; 1167 return locs;
1168 } 1168 }
1169 1169
1170 1170
1171 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1171 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1172 const Register array = locs()->in(0).reg(); 1172 const Register array = locs()->in(0).reg();
1173 ASSERT(locs()->in(1).IsRegister()); // TODO(regis): Revisit. 1173 ASSERT(locs()->in(1).IsRegister()); // TODO(regis): Revisit.
1174 Register index = locs()->in(1).reg(); 1174 const Register index = locs()->in(1).reg();
1175 1175
1176 Address element_address(kNoRegister, 0); 1176 Address element_address(kNoRegister, 0);
1177 1177
1178 // The array register points to the backing store for external arrays. 1178 // The array register points to the backing store for external arrays.
1179 intptr_t offset = 0; 1179 intptr_t offset = 0;
1180 if (!IsExternal()) { 1180 if (!IsExternal()) {
1181 ASSERT(this->array()->definition()->representation() == kTagged); 1181 ASSERT(this->array()->definition()->representation() == kTagged);
1182 offset = FlowGraphCompiler::DataOffsetFor(class_id()) - kHeapObjectTag; 1182 offset = FlowGraphCompiler::DataOffsetFor(class_id()) - kHeapObjectTag;
1183 } 1183 }
1184 1184
(...skipping 177 matching lines...)
1362 ASSERT(field().is_final()); 1362 ASSERT(field().is_final());
1363 } 1363 }
1364 1364
1365 if (field_cid == kDynamicCid) { 1365 if (field_cid == kDynamicCid) {
1366 ASSERT(!compiler->is_optimizing()); 1366 ASSERT(!compiler->is_optimizing());
1367 return; // Nothing to emit. 1367 return; // Nothing to emit.
1368 } 1368 }
1369 1369
1370 const intptr_t value_cid = value()->Type()->ToCid(); 1370 const intptr_t value_cid = value()->Type()->ToCid();
1371 1371
1372 Register value_reg = locs()->in(0).reg(); 1372 const Register value_reg = locs()->in(0).reg();
1373 1373
1374 Register value_cid_reg = locs()->temp(0).reg(); 1374 const Register value_cid_reg = locs()->temp(0).reg();
1375 1375
1376 Register temp_reg = locs()->temp(1).reg(); 1376 const Register temp_reg = locs()->temp(1).reg();
1377 1377
1378 Register field_reg = needs_field_temp_reg ? 1378 Register field_reg = needs_field_temp_reg ?
1379 locs()->temp(locs()->temp_count() - 1).reg() : kNoRegister; 1379 locs()->temp(locs()->temp_count() - 1).reg() : kNoRegister;
1380 1380
1381 Label ok, fail_label; 1381 Label ok, fail_label;
1382 1382
1383 Label* deopt = compiler->is_optimizing() ? 1383 Label* deopt = compiler->is_optimizing() ?
1384 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) : NULL; 1384 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) : NULL;
1385 1385
1386 Label* fail = (deopt != NULL) ? deopt : &fail_label; 1386 Label* fail = (deopt != NULL) ? deopt : &fail_label;
(...skipping 313 matching lines...)
1700 if (IsUnboxedStore() && opt) { 1700 if (IsUnboxedStore() && opt) {
1701 summary->set_in(1, Location::RequiresFpuRegister()); 1701 summary->set_in(1, Location::RequiresFpuRegister());
1702 summary->AddTemp(Location::RequiresRegister()); 1702 summary->AddTemp(Location::RequiresRegister());
1703 summary->AddTemp(Location::RequiresRegister()); 1703 summary->AddTemp(Location::RequiresRegister());
1704 } else if (IsPotentialUnboxedStore()) { 1704 } else if (IsPotentialUnboxedStore()) {
1705 summary->set_in(1, ShouldEmitStoreBarrier() 1705 summary->set_in(1, ShouldEmitStoreBarrier()
1706 ? Location::WritableRegister() 1706 ? Location::WritableRegister()
1707 : Location::RequiresRegister()); 1707 : Location::RequiresRegister());
1708 summary->AddTemp(Location::RequiresRegister()); 1708 summary->AddTemp(Location::RequiresRegister());
1709 summary->AddTemp(Location::RequiresRegister()); 1709 summary->AddTemp(Location::RequiresRegister());
1710 summary->AddTemp(opt ? Location::RequiresFpuRegister()
1711 : Location::FpuRegisterLocation(V1));
1712 } else { 1710 } else {
1713 summary->set_in(1, ShouldEmitStoreBarrier() 1711 summary->set_in(1, ShouldEmitStoreBarrier()
1714 ? Location::WritableRegister() 1712 ? Location::WritableRegister()
1715 : Location::RegisterOrConstant(value())); 1713 : Location::RegisterOrConstant(value()));
1716 } 1714 }
1717 return summary; 1715 return summary;
1718 } 1716 }
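Note the change in this summary: the FPU temp previously reserved for the potential-unboxed-store path (opt ? Location::RequiresFpuRegister() : Location::FpuRegisterLocation(V1)) is dropped, and the emitter below routes the payload copies through the assembler scratch register VTMP instead:

    // Before: const VRegister fpu_temp = locs()->temp(2).fpu_reg();
    //   __ LoadDFieldFromOffset(fpu_temp, value_reg, Double::value_offset(), PP);
    // After:
    //   __ LoadDFieldFromOffset(VTMP, value_reg, Double::value_offset(), PP);
    //   __ StoreDFieldToOffset(VTMP, temp, Double::value_offset(), PP);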
1719 1717
1720 1718
1721 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1719 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1722 Label skip_store; 1720 Label skip_store;
1723 1721
1724 Register instance_reg = locs()->in(0).reg(); 1722 const Register instance_reg = locs()->in(0).reg();
1725 1723
1726 if (IsUnboxedStore() && compiler->is_optimizing()) { 1724 if (IsUnboxedStore() && compiler->is_optimizing()) {
1727 const VRegister value = locs()->in(1).fpu_reg(); 1725 const VRegister value = locs()->in(1).fpu_reg();
1728 const Register temp = locs()->temp(0).reg(); 1726 const Register temp = locs()->temp(0).reg();
1729 const Register temp2 = locs()->temp(1).reg(); 1727 const Register temp2 = locs()->temp(1).reg();
1730 const intptr_t cid = field().UnboxedFieldCid(); 1728 const intptr_t cid = field().UnboxedFieldCid();
1731 1729
1732 if (is_initialization_) { 1730 if (is_initialization_) {
1733 const Class* cls = NULL; 1731 const Class* cls = NULL;
1734 switch (cid) { 1732 switch (cid) {
(...skipping 10 matching lines...)
1745 UNREACHABLE(); 1743 UNREACHABLE();
1746 } 1744 }
1747 1745
1748 StoreInstanceFieldSlowPath* slow_path = 1746 StoreInstanceFieldSlowPath* slow_path =
1749 new StoreInstanceFieldSlowPath(this, *cls); 1747 new StoreInstanceFieldSlowPath(this, *cls);
1750 compiler->AddSlowPathCode(slow_path); 1748 compiler->AddSlowPathCode(slow_path);
1751 1749
1752 __ TryAllocate(*cls, 1750 __ TryAllocate(*cls,
1753 slow_path->entry_label(), 1751 slow_path->entry_label(),
1754 temp, 1752 temp,
1755 temp2,
1756 PP); 1753 PP);
1757 __ Bind(slow_path->exit_label()); 1754 __ Bind(slow_path->exit_label());
1758 __ mov(temp2, temp); 1755 __ mov(temp2, temp);
1759 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP); 1756 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP);
1760 } else { 1757 } else {
1761 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_, PP); 1758 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_, PP);
1762 } 1759 }
1763 switch (cid) { 1760 switch (cid) {
1764 case kDoubleCid: 1761 case kDoubleCid:
1765 __ Comment("UnboxedDoubleStoreInstanceFieldInstr"); 1762 __ Comment("UnboxedDoubleStoreInstanceFieldInstr");
(...skipping 11 matching lines...)
1777 UNREACHABLE(); 1774 UNREACHABLE();
1778 } 1775 }
1779 1776
1780 return; 1777 return;
1781 } 1778 }
1782 1779
1783 if (IsPotentialUnboxedStore()) { 1780 if (IsPotentialUnboxedStore()) {
1784 const Register value_reg = locs()->in(1).reg(); 1781 const Register value_reg = locs()->in(1).reg();
1785 const Register temp = locs()->temp(0).reg(); 1782 const Register temp = locs()->temp(0).reg();
1786 const Register temp2 = locs()->temp(1).reg(); 1783 const Register temp2 = locs()->temp(1).reg();
1787 const VRegister fpu_temp = locs()->temp(2).fpu_reg();
1788 1784
1789 Label store_pointer; 1785 Label store_pointer;
1790 Label store_double; 1786 Label store_double;
1791 Label store_float32x4; 1787 Label store_float32x4;
1792 Label store_float64x2; 1788 Label store_float64x2;
1793 1789
1794 __ LoadObject(temp, Field::ZoneHandle(field().raw()), PP); 1790 __ LoadObject(temp, Field::ZoneHandle(field().raw()), PP);
1795 1791
1796 __ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset(), PP); 1792 __ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset(), PP);
1797 __ CompareImmediate(temp2, kNullCid, PP); 1793 __ CompareImmediate(temp2, kNullCid, PP);
(...skipping 32 matching lines...)
1830 new StoreInstanceFieldSlowPath(this, compiler->double_class()); 1826 new StoreInstanceFieldSlowPath(this, compiler->double_class());
1831 compiler->AddSlowPathCode(slow_path); 1827 compiler->AddSlowPathCode(slow_path);
1832 1828
1833 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_, PP); 1829 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_, PP);
1834 __ CompareObject(temp, Object::null_object(), PP); 1830 __ CompareObject(temp, Object::null_object(), PP);
1835 __ b(&copy_double, NE); 1831 __ b(&copy_double, NE);
1836 1832
1837 __ TryAllocate(compiler->double_class(), 1833 __ TryAllocate(compiler->double_class(),
1838 slow_path->entry_label(), 1834 slow_path->entry_label(),
1839 temp, 1835 temp,
1840 temp2,
1841 PP); 1836 PP);
1842 __ Bind(slow_path->exit_label()); 1837 __ Bind(slow_path->exit_label());
1843 __ mov(temp2, temp); 1838 __ mov(temp2, temp);
1844 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP); 1839 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP);
1845 __ Bind(&copy_double); 1840 __ Bind(&copy_double);
1846 __ LoadDFieldFromOffset(fpu_temp, value_reg, Double::value_offset(), PP); 1841 __ LoadDFieldFromOffset(VTMP, value_reg, Double::value_offset(), PP);
1847 __ StoreDFieldToOffset(fpu_temp, temp, Double::value_offset(), PP); 1842 __ StoreDFieldToOffset(VTMP, temp, Double::value_offset(), PP);
1848 __ b(&skip_store); 1843 __ b(&skip_store);
1849 } 1844 }
1850 1845
1851 { 1846 {
1852 __ Bind(&store_float32x4); 1847 __ Bind(&store_float32x4);
1853 Label copy_float32x4; 1848 Label copy_float32x4;
1854 StoreInstanceFieldSlowPath* slow_path = 1849 StoreInstanceFieldSlowPath* slow_path =
1855 new StoreInstanceFieldSlowPath(this, compiler->float32x4_class()); 1850 new StoreInstanceFieldSlowPath(this, compiler->float32x4_class());
1856 compiler->AddSlowPathCode(slow_path); 1851 compiler->AddSlowPathCode(slow_path);
1857 1852
1858 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_, PP); 1853 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_, PP);
1859 __ CompareObject(temp, Object::null_object(), PP); 1854 __ CompareObject(temp, Object::null_object(), PP);
1860 __ b(&copy_float32x4, NE); 1855 __ b(&copy_float32x4, NE);
1861 1856
1862 __ TryAllocate(compiler->float32x4_class(), 1857 __ TryAllocate(compiler->float32x4_class(),
1863 slow_path->entry_label(), 1858 slow_path->entry_label(),
1864 temp, 1859 temp,
1865 temp2,
1866 PP); 1860 PP);
1867 __ Bind(slow_path->exit_label()); 1861 __ Bind(slow_path->exit_label());
1868 __ mov(temp2, temp); 1862 __ mov(temp2, temp);
1869 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP); 1863 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP);
1870 __ Bind(&copy_float32x4); 1864 __ Bind(&copy_float32x4);
1871 __ LoadQFieldFromOffset( 1865 __ LoadQFieldFromOffset(VTMP, value_reg, Float32x4::value_offset(), PP);
1872 fpu_temp, value_reg, Float32x4::value_offset(), PP); 1866 __ StoreQFieldToOffset(VTMP, temp, Float32x4::value_offset(), PP);
1873 __ StoreQFieldToOffset(
1874 fpu_temp, value_reg, Float32x4::value_offset(), PP);
1875 __ b(&skip_store); 1867 __ b(&skip_store);
1876 } 1868 }
1877 1869
1878 { 1870 {
1879 __ Bind(&store_float64x2); 1871 __ Bind(&store_float64x2);
1880 Label copy_float64x2; 1872 Label copy_float64x2;
1881 StoreInstanceFieldSlowPath* slow_path = 1873 StoreInstanceFieldSlowPath* slow_path =
1882 new StoreInstanceFieldSlowPath(this, compiler->float64x2_class()); 1874 new StoreInstanceFieldSlowPath(this, compiler->float64x2_class());
1883 compiler->AddSlowPathCode(slow_path); 1875 compiler->AddSlowPathCode(slow_path);
1884 1876
1885 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_, PP); 1877 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes_, PP);
1886 __ CompareObject(temp, Object::null_object(), PP); 1878 __ CompareObject(temp, Object::null_object(), PP);
1887 __ b(&copy_float64x2, NE); 1879 __ b(&copy_float64x2, NE);
1888 1880
1889 __ TryAllocate(compiler->float64x2_class(), 1881 __ TryAllocate(compiler->float64x2_class(),
1890 slow_path->entry_label(), 1882 slow_path->entry_label(),
1891 temp, 1883 temp,
1892 temp2,
1893 PP); 1884 PP);
1894 __ Bind(slow_path->exit_label()); 1885 __ Bind(slow_path->exit_label());
1895 __ mov(temp2, temp); 1886 __ mov(temp2, temp);
1896 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP); 1887 __ StoreIntoObjectOffset(instance_reg, offset_in_bytes_, temp2, PP);
1897 __ Bind(&copy_float64x2); 1888 __ Bind(&copy_float64x2);
1898 __ LoadQFieldFromOffset( 1889 __ LoadQFieldFromOffset(VTMP, value_reg, Float64x2::value_offset(), PP);
1899 fpu_temp, value_reg, Float64x2::value_offset(), PP); 1890 __ StoreQFieldToOffset(VTMP, temp, Float64x2::value_offset(), PP);
1900 __ StoreQFieldToOffset(
1901 fpu_temp, value_reg, Float64x2::value_offset(), PP);
1902 __ b(&skip_store); 1891 __ b(&skip_store);
1903 } 1892 }
1904 1893
1905 __ Bind(&store_pointer); 1894 __ Bind(&store_pointer);
1906 } 1895 }
1907 1896
1908 if (ShouldEmitStoreBarrier()) { 1897 if (ShouldEmitStoreBarrier()) {
1909 const Register value_reg = locs()->in(1).reg(); 1898 const Register value_reg = locs()->in(1).reg();
1910 __ StoreIntoObjectOffset( 1899 __ StoreIntoObjectOffset(
1911 instance_reg, offset_in_bytes_, value_reg, PP, CanValueBeSmi()); 1900 instance_reg, offset_in_bytes_, value_reg, PP, CanValueBeSmi());
(...skipping 27 matching lines...)
1939 return summary; 1928 return summary;
1940 } 1929 }
1941 1930
1942 1931
1943 // When the parser is building an implicit static getter for optimization, 1932 // When the parser is building an implicit static getter for optimization,
1944 // it can generate a function body where deoptimization ids do not line up 1933 // it can generate a function body where deoptimization ids do not line up
1945 // with the unoptimized code. 1934 // with the unoptimized code.
1946 // 1935 //
1947 // This is safe only so long as LoadStaticFieldInstr cannot deoptimize. 1936 // This is safe only so long as LoadStaticFieldInstr cannot deoptimize.
1948 void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1937 void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1949 Register field = locs()->in(0).reg(); 1938 const Register field = locs()->in(0).reg();
1950 Register result = locs()->out(0).reg(); 1939 const Register result = locs()->out(0).reg();
1951 __ LoadFieldFromOffset(result, field, Field::value_offset(), PP); 1940 __ LoadFieldFromOffset(result, field, Field::value_offset(), PP);
1952 } 1941 }
1953 1942
1954 1943
1955 LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(bool opt) const { 1944 LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(bool opt) const {
1956 LocationSummary* locs = new LocationSummary(1, 1, LocationSummary::kNoCall); 1945 LocationSummary* locs = new LocationSummary(1, 1, LocationSummary::kNoCall);
1957 locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister() 1946 locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister()
1958 : Location::RequiresRegister()); 1947 : Location::RequiresRegister());
1959 locs->set_temp(0, Location::RequiresRegister()); 1948 locs->set_temp(0, Location::RequiresRegister());
1960 return locs; 1949 return locs;
1961 } 1950 }
1962 1951
1963 1952
1964 void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1953 void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1965 Register value = locs()->in(0).reg(); 1954 const Register value = locs()->in(0).reg();
1966 Register temp = locs()->temp(0).reg(); 1955 const Register temp = locs()->temp(0).reg();
1967 1956
1968 __ LoadObject(temp, field(), PP); 1957 __ LoadObject(temp, field(), PP);
1969 if (this->value()->NeedsStoreBuffer()) { 1958 if (this->value()->NeedsStoreBuffer()) {
1970 __ StoreIntoObjectOffset( 1959 __ StoreIntoObjectOffset(
1971 temp, Field::value_offset(), value, PP, CanValueBeSmi()); 1960 temp, Field::value_offset(), value, PP, CanValueBeSmi());
1972 } else { 1961 } else {
1973 __ StoreIntoObjectOffsetNoBarrier(temp, Field::value_offset(), value, PP); 1962 __ StoreIntoObjectOffsetNoBarrier(temp, Field::value_offset(), value, PP);
1974 } 1963 }
1975 } 1964 }
1976 1965
(...skipping 74 matching lines...)
2051 compiler->RestoreLiveRegisters(locs); 2040 compiler->RestoreLiveRegisters(locs);
2052 2041
2053 __ b(exit_label()); 2042 __ b(exit_label());
2054 } 2043 }
2055 2044
2056 private: 2045 private:
2057 Instruction* instruction_; 2046 Instruction* instruction_;
2058 }; 2047 };
2059 2048
2060 2049
2050 class BoxFloat32x4SlowPath : public SlowPathCode {
2051 public:
2052 explicit BoxFloat32x4SlowPath(Instruction* instruction)
2053 : instruction_(instruction) { }
2054
2055 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2056 __ Comment("BoxFloat32x4SlowPath");
2057 __ Bind(entry_label());
2058 const Class& float32x4_class = compiler->float32x4_class();
2059 const Code& stub =
2060 Code::Handle(StubCode::GetAllocationStubForClass(float32x4_class));
2061 const ExternalLabel label(float32x4_class.ToCString(), stub.EntryPoint());
2062
2063 LocationSummary* locs = instruction_->locs();
2064 locs->live_registers()->Remove(locs->out(0));
2065
2066 compiler->SaveLiveRegisters(locs);
2067 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
2068 &label,
2069 PcDescriptors::kOther,
2070 locs);
2071 __ mov(locs->out(0).reg(), R0);
2072 compiler->RestoreLiveRegisters(locs);
2073
2074 __ b(exit_label());
2075 }
2076
2077 private:
2078 Instruction* instruction_;
2079 };
2080
2081
2082 class BoxFloat64x2SlowPath : public SlowPathCode {
2083 public:
2084 explicit BoxFloat64x2SlowPath(Instruction* instruction)
2085 : instruction_(instruction) { }
2086
2087 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2088 __ Comment("BoxFloat64x2SlowPath");
2089 __ Bind(entry_label());
2090 const Class& float64x2_class = compiler->float64x2_class();
2091 const Code& stub =
2092 Code::Handle(StubCode::GetAllocationStubForClass(float64x2_class));
2093 const ExternalLabel label(float64x2_class.ToCString(), stub.EntryPoint());
2094
2095 LocationSummary* locs = instruction_->locs();
2096 locs->live_registers()->Remove(locs->out(0));
2097
2098 compiler->SaveLiveRegisters(locs);
2099 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
2100 &label,
2101 PcDescriptors::kOther,
2102 locs);
2103 __ mov(locs->out(0).reg(), R0);
2104 compiler->RestoreLiveRegisters(locs);
2105
2106 __ b(exit_label());
2107 }
2108
2109 private:
2110 Instruction* instruction_;
2111 };
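BoxFloat32x4SlowPath and BoxFloat64x2SlowPath are copies of the existing BoxDoubleSlowPath, differing only in the class they allocate. A possible factoring (a sketch, not part of this patch) would parameterize the slow path on the class:

    class BoxAllocationSlowPath : public SlowPathCode {
     public:
      BoxAllocationSlowPath(Instruction* instruction, const Class& cls)
          : instruction_(instruction), cls_(cls) { }

      virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
        __ Bind(entry_label());
        const Code& stub =
            Code::Handle(StubCode::GetAllocationStubForClass(cls_));
        const ExternalLabel label(cls_.ToCString(), stub.EntryPoint());

        LocationSummary* locs = instruction_->locs();
        locs->live_registers()->Remove(locs->out(0));

        compiler->SaveLiveRegisters(locs);
        compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
                               &label,
                               PcDescriptors::kOther,
                               locs);
        __ mov(locs->out(0).reg(), R0);  // Allocation stubs return in R0.
        compiler->RestoreLiveRegisters(locs);

        __ b(exit_label());
      }

     private:
      Instruction* instruction_;
      const Class& cls_;
    };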
2112
2113
2061 LocationSummary* LoadFieldInstr::MakeLocationSummary(bool opt) const { 2114 LocationSummary* LoadFieldInstr::MakeLocationSummary(bool opt) const {
2062 const intptr_t kNumInputs = 1; 2115 const intptr_t kNumInputs = 1;
2063 const intptr_t kNumTemps = 0; 2116 const intptr_t kNumTemps = 0;
2064 LocationSummary* locs = 2117 LocationSummary* locs =
2065 new LocationSummary( 2118 new LocationSummary(
2066 kNumInputs, kNumTemps, 2119 kNumInputs, kNumTemps,
2067 (opt && !IsPotentialUnboxedLoad()) 2120 (opt && !IsPotentialUnboxedLoad())
2068 ? LocationSummary::kNoCall 2121 ? LocationSummary::kNoCall
2069 : LocationSummary::kCallOnSlowPath); 2122 : LocationSummary::kCallOnSlowPath);
2070 2123
2071 locs->set_in(0, Location::RequiresRegister()); 2124 locs->set_in(0, Location::RequiresRegister());
2072 2125
2073 if (IsUnboxedLoad() && opt) { 2126 if (IsUnboxedLoad() && opt) {
2074 locs->AddTemp(Location::RequiresRegister()); 2127 locs->AddTemp(Location::RequiresRegister());
2075 } else if (IsPotentialUnboxedLoad()) { 2128 } else if (IsPotentialUnboxedLoad()) {
2076 locs->AddTemp(opt ? Location::RequiresFpuRegister()
2077 : Location::FpuRegisterLocation(V1));
2078 locs->AddTemp(Location::RequiresRegister()); 2129 locs->AddTemp(Location::RequiresRegister());
2079 } 2130 }
2080 locs->set_out(0, Location::RequiresRegister()); 2131 locs->set_out(0, Location::RequiresRegister());
2081 return locs; 2132 return locs;
2082 } 2133 }
2083 2134
2084 2135
2085 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2136 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2086 Register instance_reg = locs()->in(0).reg(); 2137 const Register instance_reg = locs()->in(0).reg();
2087 if (IsUnboxedLoad() && compiler->is_optimizing()) { 2138 if (IsUnboxedLoad() && compiler->is_optimizing()) {
2088 const VRegister result = locs()->out(0).fpu_reg(); 2139 const VRegister result = locs()->out(0).fpu_reg();
2089 const Register temp = locs()->temp(0).reg(); 2140 const Register temp = locs()->temp(0).reg();
2090 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes(), PP); 2141 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes(), PP);
2091 const intptr_t cid = field()->UnboxedFieldCid(); 2142 const intptr_t cid = field()->UnboxedFieldCid();
2092 switch (cid) { 2143 switch (cid) {
2093 case kDoubleCid: 2144 case kDoubleCid:
2094 __ Comment("UnboxedDoubleLoadFieldInstr"); 2145 __ Comment("UnboxedDoubleLoadFieldInstr");
2095 __ LoadDFieldFromOffset(result, temp, Double::value_offset(), PP); 2146 __ LoadDFieldFromOffset(result, temp, Double::value_offset(), PP);
2096 break; 2147 break;
2097 case kFloat32x4Cid: 2148 case kFloat32x4Cid:
2149 __ LoadQFieldFromOffset(result, temp, Float32x4::value_offset(), PP);
2150 break;
2098 case kFloat64x2Cid: 2151 case kFloat64x2Cid:
2099 UNIMPLEMENTED(); 2152 __ LoadQFieldFromOffset(result, temp, Float64x2::value_offset(), PP);
2100 break; 2153 break;
2101 default: 2154 default:
2102 UNREACHABLE(); 2155 UNREACHABLE();
2103 } 2156 }
2104 return; 2157 return;
2105 } 2158 }
2106 2159
2107 Label done; 2160 Label done;
2108 Register result_reg = locs()->out(0).reg(); 2161 const Register result_reg = locs()->out(0).reg();
2109 if (IsPotentialUnboxedLoad()) { 2162 if (IsPotentialUnboxedLoad()) {
2110 const Register temp = locs()->temp(1).reg(); 2163 const Register temp = locs()->temp(0).reg();
2111 const VRegister value = locs()->temp(0).fpu_reg();
2112 2164
2113 Label load_pointer; 2165 Label load_pointer;
2114 Label load_double; 2166 Label load_double;
2115 Label load_float32x4; 2167 Label load_float32x4;
2116 Label load_float64x2; 2168 Label load_float64x2;
2117 2169
2118 __ LoadObject(result_reg, Field::ZoneHandle(field()->raw()), PP); 2170 __ LoadObject(result_reg, Field::ZoneHandle(field()->raw()), PP);
2119 2171
2120 FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset()); 2172 FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset());
2121 FieldAddress field_nullability_operand(result_reg, 2173 FieldAddress field_nullability_operand(result_reg,
(...skipping 23 matching lines...)
2145 } 2197 }
2146 2198
2147 { 2199 {
2148 __ Bind(&load_double); 2200 __ Bind(&load_double);
2149 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this); 2201 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
2150 compiler->AddSlowPathCode(slow_path); 2202 compiler->AddSlowPathCode(slow_path);
2151 2203
2152 __ TryAllocate(compiler->double_class(), 2204 __ TryAllocate(compiler->double_class(),
2153 slow_path->entry_label(), 2205 slow_path->entry_label(),
2154 result_reg, 2206 result_reg,
2155 temp,
2156 PP); 2207 PP);
2157 __ Bind(slow_path->exit_label()); 2208 __ Bind(slow_path->exit_label());
2158 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes(), PP); 2209 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes(), PP);
2159 __ LoadDFieldFromOffset(value, temp, Double::value_offset(), PP); 2210 __ LoadDFieldFromOffset(VTMP, temp, Double::value_offset(), PP);
2160 __ StoreDFieldToOffset(value, result_reg, Double::value_offset(), PP); 2211 __ StoreDFieldToOffset(VTMP, result_reg, Double::value_offset(), PP);
2161 __ b(&done); 2212 __ b(&done);
2162 } 2213 }
2163 2214
2164 // TODO(zra): Implement these when we add simd loads and stores.
2165 { 2215 {
2166 __ Bind(&load_float32x4); 2216 __ Bind(&load_float32x4);
2167 __ Stop("Float32x4 Unimplemented"); 2217 BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);
2218 compiler->AddSlowPathCode(slow_path);
2219
2220 __ TryAllocate(compiler->float32x4_class(),
2221 slow_path->entry_label(),
2222 result_reg,
2223 PP);
2224 __ Bind(slow_path->exit_label());
2225 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes(), PP);
2226 __ LoadQFieldFromOffset(VTMP, temp, Float32x4::value_offset(), PP);
2227 __ StoreQFieldToOffset(VTMP, result_reg, Float32x4::value_offset(), PP);
2228 __ b(&done);
2168 } 2229 }
2169 2230
2170 { 2231 {
2171 __ Bind(&load_float64x2); 2232 __ Bind(&load_float64x2);
2172 __ Stop("Float64x2 Unimplemented"); 2233 BoxFloat64x2SlowPath* slow_path = new BoxFloat64x2SlowPath(this);
2234 compiler->AddSlowPathCode(slow_path);
2235
2236 __ TryAllocate(compiler->float64x2_class(),
2237 slow_path->entry_label(),
2238 result_reg,
2239 PP);
2240 __ Bind(slow_path->exit_label());
2241 __ LoadFieldFromOffset(temp, instance_reg, offset_in_bytes(), PP);
2242 __ LoadQFieldFromOffset(VTMP, temp, Float64x2::value_offset(), PP);
2243 __ StoreQFieldToOffset(VTMP, result_reg, Float64x2::value_offset(), PP);
2244 __ b(&done);
2173 } 2245 }
2174 2246
2175 __ Bind(&load_pointer); 2247 __ Bind(&load_pointer);
2176 } 2248 }
2177 __ LoadFieldFromOffset(result_reg, instance_reg, offset_in_bytes(), PP); 2249 __ LoadFieldFromOffset(result_reg, instance_reg, offset_in_bytes(), PP);
2178 __ Bind(&done); 2250 __ Bind(&done);
2179 } 2251 }
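The new load_float32x4 and load_float64x2 blocks mirror the double path above. The boxing shape, in outline:

    // 1. TryAllocate(box class) -> result_reg; on failure, take the
    //    slow path, which calls the class's allocation stub.
    // 2. Reload the unboxed source: temp = [instance + offset_in_bytes].
    // 3. Copy the 128-bit payload through the scratch register:
    //    VTMP = [temp + value_offset]; [result_reg + value_offset] = VTMP.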
2180 2252
2181 2253
2182 LocationSummary* InstantiateTypeInstr::MakeLocationSummary(bool opt) const { 2254 LocationSummary* InstantiateTypeInstr::MakeLocationSummary(bool opt) const {
2183 const intptr_t kNumInputs = 1; 2255 const intptr_t kNumInputs = 1;
2184 const intptr_t kNumTemps = 0; 2256 const intptr_t kNumTemps = 0;
2185 LocationSummary* locs = 2257 LocationSummary* locs =
2186 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 2258 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
2187 locs->set_in(0, Location::RegisterLocation(R0)); 2259 locs->set_in(0, Location::RegisterLocation(R0));
2188 locs->set_out(0, Location::RegisterLocation(R0)); 2260 locs->set_out(0, Location::RegisterLocation(R0));
2189 return locs; 2261 return locs;
2190 } 2262 }
2191 2263
2192 2264
2193 void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2265 void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2194 Register instantiator_reg = locs()->in(0).reg(); 2266 const Register instantiator_reg = locs()->in(0).reg();
2195 Register result_reg = locs()->out(0).reg(); 2267 const Register result_reg = locs()->out(0).reg();
2196 2268
2197 // 'instantiator_reg' is the instantiator TypeArguments object (or null). 2269 // 'instantiator_reg' is the instantiator TypeArguments object (or null).
2198 // A runtime call to instantiate the type is required. 2270 // A runtime call to instantiate the type is required.
2199 __ PushObject(Object::ZoneHandle(), PP); // Make room for the result. 2271 __ PushObject(Object::ZoneHandle(), PP); // Make room for the result.
2200 __ PushObject(type(), PP); 2272 __ PushObject(type(), PP);
2201 __ Push(instantiator_reg); // Push instantiator type arguments. 2273 __ Push(instantiator_reg); // Push instantiator type arguments.
2202 compiler->GenerateRuntimeCall(token_pos(), 2274 compiler->GenerateRuntimeCall(token_pos(),
2203 deopt_id(), 2275 deopt_id(),
2204 kInstantiateTypeRuntimeEntry, 2276 kInstantiateTypeRuntimeEntry,
2205 2, 2277 2,
(...skipping 11 matching lines...)
2217 LocationSummary* locs = 2289 LocationSummary* locs =
2218 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 2290 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
2219 locs->set_in(0, Location::RegisterLocation(R0)); 2291 locs->set_in(0, Location::RegisterLocation(R0));
2220 locs->set_out(0, Location::RegisterLocation(R0)); 2292 locs->set_out(0, Location::RegisterLocation(R0));
2221 return locs; 2293 return locs;
2222 } 2294 }
2223 2295
2224 2296
2225 void InstantiateTypeArgumentsInstr::EmitNativeCode( 2297 void InstantiateTypeArgumentsInstr::EmitNativeCode(
2226 FlowGraphCompiler* compiler) { 2298 FlowGraphCompiler* compiler) {
2227 Register instantiator_reg = locs()->in(0).reg(); 2299 const Register instantiator_reg = locs()->in(0).reg();
2228 Register result_reg = locs()->out(0).reg(); 2300 const Register result_reg = locs()->out(0).reg();
2229 ASSERT(instantiator_reg == R0); 2301 ASSERT(instantiator_reg == R0);
2230 ASSERT(instantiator_reg == result_reg); 2302 ASSERT(instantiator_reg == result_reg);
2231 2303
2232 // 'instantiator_reg' is the instantiator TypeArguments object (or null). 2304 // 'instantiator_reg' is the instantiator TypeArguments object (or null).
2233 ASSERT(!type_arguments().IsUninstantiatedIdentity() && 2305 ASSERT(!type_arguments().IsUninstantiatedIdentity() &&
2234 !type_arguments().CanShareInstantiatorTypeArguments( 2306 !type_arguments().CanShareInstantiatorTypeArguments(
2235 instantiator_class())); 2307 instantiator_class()));
2236 // If the instantiator is null and if the type argument vector 2308 // If the instantiator is null and if the type argument vector
2237 // instantiated from null becomes a vector of dynamic, then use null as 2309 // instantiated from null becomes a vector of dynamic, then use null as
2238 // the type arguments. 2310 // the type arguments.
(...skipping 69 matching lines...)
2308 const intptr_t kNumTemps = 0; 2380 const intptr_t kNumTemps = 0;
2309 LocationSummary* locs = 2381 LocationSummary* locs =
2310 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 2382 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
2311 locs->set_in(0, Location::RegisterLocation(R0)); 2383 locs->set_in(0, Location::RegisterLocation(R0));
2312 locs->set_out(0, Location::RegisterLocation(R0)); 2384 locs->set_out(0, Location::RegisterLocation(R0));
2313 return locs; 2385 return locs;
2314 } 2386 }
2315 2387
2316 2388
2317 void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2389 void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2318 Register context_value = locs()->in(0).reg(); 2390 const Register context_value = locs()->in(0).reg();
2319 Register result = locs()->out(0).reg(); 2391 const Register result = locs()->out(0).reg();
2320 2392
2321 __ PushObject(Object::ZoneHandle(), PP); // Make room for the result. 2393 __ PushObject(Object::ZoneHandle(), PP); // Make room for the result.
2322 __ Push(context_value); 2394 __ Push(context_value);
2323 compiler->GenerateRuntimeCall(token_pos(), 2395 compiler->GenerateRuntimeCall(token_pos(),
2324 deopt_id(), 2396 deopt_id(),
2325 kCloneContextRuntimeEntry, 2397 kCloneContextRuntimeEntry,
2326 1, 2398 1,
2327 locs()); 2399 locs());
2328 __ Drop(1); // Remove argument. 2400 __ Drop(1); // Remove argument.
2329 __ Pop(result); // Get result (cloned context). 2401 __ Pop(result); // Get result (cloned context).
(...skipping 50 matching lines...)
2380 2452
2381 2453
2382 class CheckStackOverflowSlowPath : public SlowPathCode { 2454 class CheckStackOverflowSlowPath : public SlowPathCode {
2383 public: 2455 public:
2384 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction) 2456 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
2385 : instruction_(instruction) { } 2457 : instruction_(instruction) { }
2386 2458
2387 virtual void EmitNativeCode(FlowGraphCompiler* compiler) { 2459 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2388 if (FLAG_use_osr) { 2460 if (FLAG_use_osr) {
2389 uword flags_address = Isolate::Current()->stack_overflow_flags_address(); 2461 uword flags_address = Isolate::Current()->stack_overflow_flags_address();
2390 Register value = instruction_->locs()->temp(0).reg(); 2462 const Register value = instruction_->locs()->temp(0).reg();
2391 __ Comment("CheckStackOverflowSlowPathOsr"); 2463 __ Comment("CheckStackOverflowSlowPathOsr");
2392 __ Bind(osr_entry_label()); 2464 __ Bind(osr_entry_label());
2393 __ LoadImmediate(TMP, flags_address, PP); 2465 __ LoadImmediate(TMP, flags_address, PP);
2394 __ LoadImmediate(value, Isolate::kOsrRequest, PP); 2466 __ LoadImmediate(value, Isolate::kOsrRequest, PP);
2395 __ str(value, Address(TMP)); 2467 __ str(value, Address(TMP));
2396 } 2468 }
2397 __ Comment("CheckStackOverflowSlowPath"); 2469 __ Comment("CheckStackOverflowSlowPath");
2398 __ Bind(entry_label()); 2470 __ Bind(entry_label());
2399 compiler->SaveLiveRegisters(instruction_->locs()); 2471 compiler->SaveLiveRegisters(instruction_->locs());
2400 // pending_deoptimization_env_ is needed to generate a runtime call that 2472 // pending_deoptimization_env_ is needed to generate a runtime call that
(...skipping 31 matching lines...)
2432 2504
2433 void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2505 void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2434 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this); 2506 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
2435 compiler->AddSlowPathCode(slow_path); 2507 compiler->AddSlowPathCode(slow_path);
2436 2508
2437 __ LoadImmediate(TMP, Isolate::Current()->stack_limit_address(), PP); 2509 __ LoadImmediate(TMP, Isolate::Current()->stack_limit_address(), PP);
2438 __ ldr(TMP, Address(TMP)); 2510 __ ldr(TMP, Address(TMP));
2439 __ CompareRegisters(SP, TMP); 2511 __ CompareRegisters(SP, TMP);
2440 __ b(slow_path->entry_label(), LS); 2512 __ b(slow_path->entry_label(), LS);
2441 if (compiler->CanOSRFunction() && in_loop()) { 2513 if (compiler->CanOSRFunction() && in_loop()) {
2442 Register temp = locs()->temp(0).reg(); 2514 const Register temp = locs()->temp(0).reg();
2443 // In unoptimized code check the usage counter to trigger OSR at loop 2515 // In unoptimized code check the usage counter to trigger OSR at loop
2444 // stack checks. Use progressively higher thresholds for more deeply 2516 // stack checks. Use progressively higher thresholds for more deeply
2445 // nested loops to attempt to hit outer loops with OSR when possible. 2517 // nested loops to attempt to hit outer loops with OSR when possible.
2446 __ LoadObject(temp, compiler->parsed_function().function(), PP); 2518 __ LoadObject(temp, compiler->parsed_function().function(), PP);
2447 intptr_t threshold = 2519 intptr_t threshold =
2448 FLAG_optimization_counter_threshold * (loop_depth() + 1); 2520 FLAG_optimization_counter_threshold * (loop_depth() + 1);
2449 __ LoadFieldFromOffset(temp, temp, Function::usage_counter_offset(), PP); 2521 __ LoadFieldFromOffset(temp, temp, Function::usage_counter_offset(), PP);
2450 __ CompareImmediate(temp, threshold, PP); 2522 __ CompareImmediate(temp, threshold, PP);
2451 __ b(slow_path->osr_entry_label(), GE); 2523 __ b(slow_path->osr_entry_label(), GE);
2452 } 2524 }
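The OSR trigger above scales linearly with loop depth: an inner loop must run proportionally longer before it wins OSR over an enclosing loop. A minimal C++ sketch of the policy (the concrete counter value below is an assumption for illustration, not the flag's documented default):

  #include <cstdint>

  // Mirrors: threshold = FLAG_optimization_counter_threshold * (loop_depth + 1)
  int64_t OsrThreshold(int64_t counter_threshold, int64_t loop_depth) {
    return counter_threshold * (loop_depth + 1);
  }
  // With counter_threshold == 30000: depth 0 -> 30000, depth 1 -> 60000.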
(...skipping 15 matching lines...)
2468 __ cmp(TMP2, Operand(TMP, LSL, 1)); 2540 __ cmp(TMP2, Operand(TMP, LSL, 1));
2469 __ b(overflow, HI); 2541 __ b(overflow, HI);
2470 } 2542 }
2471 } 2543 }
2472 2544
2473 2545
2474 static void EmitSmiShiftLeft(FlowGraphCompiler* compiler, 2546 static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
2475 BinarySmiOpInstr* shift_left) { 2547 BinarySmiOpInstr* shift_left) {
2476 const bool is_truncating = shift_left->is_truncating(); 2548 const bool is_truncating = shift_left->is_truncating();
2477 const LocationSummary& locs = *shift_left->locs(); 2549 const LocationSummary& locs = *shift_left->locs();
2478 Register left = locs.in(0).reg(); 2550 const Register left = locs.in(0).reg();
2479 Register result = locs.out(0).reg(); 2551 const Register result = locs.out(0).reg();
2480 Label* deopt = shift_left->CanDeoptimize() ? 2552 Label* deopt = shift_left->CanDeoptimize() ?
2481 compiler->AddDeoptStub(shift_left->deopt_id(), ICData::kDeoptBinarySmiOp) 2553 compiler->AddDeoptStub(shift_left->deopt_id(), ICData::kDeoptBinarySmiOp)
2482 : NULL; 2554 : NULL;
2483 if (locs.in(1).IsConstant()) { 2555 if (locs.in(1).IsConstant()) {
2484 const Object& constant = locs.in(1).constant(); 2556 const Object& constant = locs.in(1).constant();
2485 ASSERT(constant.IsSmi()); 2557 ASSERT(constant.IsSmi());
2486 // Immediate shift operation takes 6 bits for the count. 2558 // Immediate shift operation takes 6 bits for the count.
2487 const intptr_t kCountLimit = 0x3F; 2559 const intptr_t kCountLimit = 0x3F;
2488 const intptr_t value = Smi::Cast(constant).Value(); 2560 const intptr_t value = Smi::Cast(constant).Value();
2489 if (value == 0) { 2561 if (value == 0) {
(...skipping 17 matching lines...)
2507 // Shift for result now that we know there is no overflow. 2579 // Shift for result now that we know there is no overflow.
2508 __ Lsl(result, left, value); 2580 __ Lsl(result, left, value);
2509 } 2581 }
2510 if (FLAG_throw_on_javascript_int_overflow) { 2582 if (FLAG_throw_on_javascript_int_overflow) {
2511 EmitJavascriptOverflowCheck(compiler, shift_left->range(), deopt, result); 2583 EmitJavascriptOverflowCheck(compiler, shift_left->range(), deopt, result);
2512 } 2584 }
2513 return; 2585 return;
2514 } 2586 }
2515 2587
2516 // Right (locs.in(1)) is not constant. 2588 // Right (locs.in(1)) is not constant.
2517 Register right = locs.in(1).reg(); 2589 const Register right = locs.in(1).reg();
2518 Range* right_range = shift_left->right()->definition()->range(); 2590 Range* right_range = shift_left->right()->definition()->range();
2519 if (shift_left->left()->BindsToConstant() && !is_truncating) { 2591 if (shift_left->left()->BindsToConstant() && !is_truncating) {
2520 // TODO(srdjan): Implement code below for is_truncating(). 2592 // TODO(srdjan): Implement code below for is_truncating().
2521 // If left is constant, we know the maximum allowed value for right. 2593 // If left is constant, we know the maximum allowed value for right.
2522 const Object& obj = shift_left->left()->BoundConstant(); 2594 const Object& obj = shift_left->left()->BoundConstant();
2523 if (obj.IsSmi()) { 2595 if (obj.IsSmi()) {
2524 const intptr_t left_int = Smi::Cast(obj).Value(); 2596 const intptr_t left_int = Smi::Cast(obj).Value();
2525 if (left_int == 0) { 2597 if (left_int == 0) {
2526 __ CompareRegisters(right, ZR); 2598 __ CompareRegisters(right, ZR);
2527 __ b(deopt, MI); 2599 __ b(deopt, MI);
(...skipping 45 matching lines...)
2573 if (right_needs_check) { 2645 if (right_needs_check) {
2574 ASSERT(shift_left->CanDeoptimize()); 2646 ASSERT(shift_left->CanDeoptimize());
2575 __ CompareImmediate( 2647 __ CompareImmediate(
2576 right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits)), PP); 2648 right, reinterpret_cast<int64_t>(Smi::New(Smi::kBits)), PP);
2577 __ b(deopt, CS); 2649 __ b(deopt, CS);
2578 } 2650 }
2579 // Left is not a constant. 2651 // Left is not a constant.
2580 // Check if the count is too large to handle inline. 2652 // Check if the count is too large to handle inline.
2581 __ Asr(TMP, right, kSmiTagSize); // SmiUntag right into TMP. 2653 __ Asr(TMP, right, kSmiTagSize); // SmiUntag right into TMP.
2582 // Overflow test (preserve left, right, and TMP). 2654 // Overflow test (preserve left, right, and TMP).
2583 Register temp = locs.temp(0).reg(); 2655 const Register temp = locs.temp(0).reg();
2584 __ lslv(temp, left, TMP); 2656 __ lslv(temp, left, TMP);
2585 __ asrv(TMP2, temp, TMP); 2657 __ asrv(TMP2, temp, TMP);
2586 __ CompareRegisters(left, TMP2); 2658 __ CompareRegisters(left, TMP2);
2587 __ b(deopt, NE); // Overflow. 2659 __ b(deopt, NE); // Overflow.
2588 // Shift for result now that we know there is no overflow. 2660 // Shift for result now that we know there is no overflow.
2589 __ lslv(result, left, TMP); 2661 __ lslv(result, left, TMP);
2590 } 2662 }
2591 if (FLAG_throw_on_javascript_int_overflow) { 2663 if (FLAG_throw_on_javascript_int_overflow) {
2592 EmitJavascriptOverflowCheck(compiler, shift_left->range(), deopt, result); 2664 EmitJavascriptOverflowCheck(compiler, shift_left->range(), deopt, result);
2593 } 2665 }
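The register-count case above uses the classic shift round-trip idiom: shift left, arithmetically shift the result back, and compare with the original; the values differ exactly when bits were lost. A C++ sketch of the predicate the lslv/asrv/CompareRegisters sequence computes (count is assumed already clamped to [0, 63] by the preceding range check):

  #include <cstdint>

  bool SmiShiftLeftOverflows(int64_t value, int64_t count) {
    const int64_t shifted =
        static_cast<int64_t>(static_cast<uint64_t>(value) << count);
    return (shifted >> count) != value;  // Arithmetic shift right.
  }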
(...skipping 170 matching lines...)
2764 default: 2836 default:
2765 UNREACHABLE(); 2837 UNREACHABLE();
2766 break; 2838 break;
2767 } 2839 }
2768 if (FLAG_throw_on_javascript_int_overflow) { 2840 if (FLAG_throw_on_javascript_int_overflow) {
2769 EmitJavascriptOverflowCheck(compiler, range(), deopt, result); 2841 EmitJavascriptOverflowCheck(compiler, range(), deopt, result);
2770 } 2842 }
2771 return; 2843 return;
2772 } 2844 }
2773 2845
2774 Register right = locs()->in(1).reg(); 2846 const Register right = locs()->in(1).reg();
2775 Range* right_range = this->right()->definition()->range(); 2847 Range* right_range = this->right()->definition()->range();
2776 switch (op_kind()) { 2848 switch (op_kind()) {
2777 case Token::kADD: { 2849 case Token::kADD: {
2778 if (deopt == NULL) { 2850 if (deopt == NULL) {
2779 __ add(result, left, Operand(right)); 2851 __ add(result, left, Operand(right));
2780 } else { 2852 } else {
2781 __ adds(result, left, Operand(right)); 2853 __ adds(result, left, Operand(right));
2782 __ b(deopt, VS); 2854 __ b(deopt, VS);
2783 } 2855 }
2784 break; 2856 break;
(...skipping 95 matching lines...)
2880 } 2952 }
2881 __ Asr(TMP, right, kSmiTagSize); // SmiUntag right into TMP. 2953 __ Asr(TMP, right, kSmiTagSize); // SmiUntag right into TMP.
2882 // The asrv operation masks the count to 6 bits. 2954 // The asrv operation masks the count to 6 bits.
2883 const intptr_t kCountLimit = 0x3F; 2955 const intptr_t kCountLimit = 0x3F;
2884 if ((right_range == NULL) || 2956 if ((right_range == NULL) ||
2885 !right_range->IsWithin(RangeBoundary::kMinusInfinity, kCountLimit)) { 2957 !right_range->IsWithin(RangeBoundary::kMinusInfinity, kCountLimit)) {
2886 __ LoadImmediate(TMP2, kCountLimit, PP); 2958 __ LoadImmediate(TMP2, kCountLimit, PP);
2887 __ CompareRegisters(TMP, TMP2); 2959 __ CompareRegisters(TMP, TMP2);
2888 __ csel(TMP, TMP2, TMP, GT); 2960 __ csel(TMP, TMP2, TMP, GT);
2889 } 2961 }
2890 Register temp = locs()->temp(0).reg(); 2962 const Register temp = locs()->temp(0).reg();
2891 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp. 2963 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
2892 __ asrv(result, temp, TMP); 2964 __ asrv(result, temp, TMP);
2893 __ SmiTag(result); 2965 __ SmiTag(result);
2894 break; 2966 break;
2895 } 2967 }
2896 case Token::kDIV: { 2968 case Token::kDIV: {
2897 // Dispatches to 'Double./'. 2969 // Dispatches to 'Double./'.
2898 // TODO(srdjan): Implement as conversion to double and double division. 2970 // TODO(srdjan): Implement as conversion to double and double division.
2899 UNREACHABLE(); 2971 UNREACHABLE();
2900 break; 2972 break;
(...skipping 27 matching lines...)
2928 summary->set_in(1, Location::RequiresRegister()); 3000 summary->set_in(1, Location::RequiresRegister());
2929 return summary; 3001 return summary;
2930 } 3002 }
2931 3003
2932 3004
2933 void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3005 void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2934 Label* deopt = compiler->AddDeoptStub(deopt_id(), 3006 Label* deopt = compiler->AddDeoptStub(deopt_id(),
2935 ICData::kDeoptBinaryDoubleOp); 3007 ICData::kDeoptBinaryDoubleOp);
2936 intptr_t left_cid = left()->Type()->ToCid(); 3008 intptr_t left_cid = left()->Type()->ToCid();
2937 intptr_t right_cid = right()->Type()->ToCid(); 3009 intptr_t right_cid = right()->Type()->ToCid();
2938 Register left = locs()->in(0).reg(); 3010 const Register left = locs()->in(0).reg();
2939 Register right = locs()->in(1).reg(); 3011 const Register right = locs()->in(1).reg();
2940 if (left_cid == kSmiCid) { 3012 if (left_cid == kSmiCid) {
2941 __ tsti(right, kSmiTagMask); 3013 __ tsti(right, kSmiTagMask);
2942 } else if (right_cid == kSmiCid) { 3014 } else if (right_cid == kSmiCid) {
2943 __ tsti(left, kSmiTagMask); 3015 __ tsti(left, kSmiTagMask);
2944 } else { 3016 } else {
2945 __ orr(TMP, left, Operand(right)); 3017 __ orr(TMP, left, Operand(right));
2946 __ tsti(TMP, kSmiTagMask); 3018 __ tsti(TMP, kSmiTagMask);
2947 } 3019 }
2948 __ b(deopt, EQ); 3020 __ b(deopt, EQ);
2949 } 3021 }
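The orr/tsti pair above classifies both inputs with one test: a Smi carries a 0 in its least-significant bit, and the OR of two words has a zero low bit only when both low bits are zero, that is, only when both values are Smis (the EQ case that deoptimizes). A sketch of the predicate, assuming the one-bit tag used throughout this file:

  #include <cstdint>

  const uint64_t kSmiTagMask = 1;  // Assumption: tag bit 0 means Smi.

  // True iff both tagged words are Smis, the condition CheckEitherNonSmi
  // deoptimizes on.
  bool BothAreSmis(uint64_t left, uint64_t right) {
    return ((left | right) & kSmiTagMask) == 0;
  }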
2950 3022
2951 3023
2952 LocationSummary* BoxDoubleInstr::MakeLocationSummary(bool opt) const { 3024 LocationSummary* BoxDoubleInstr::MakeLocationSummary(bool opt) const {
2953 const intptr_t kNumInputs = 1; 3025 const intptr_t kNumInputs = 1;
2954 const intptr_t kNumTemps = 1; 3026 const intptr_t kNumTemps = 0;
2955 LocationSummary* summary = 3027 LocationSummary* summary =
2956 new LocationSummary(kNumInputs, 3028 new LocationSummary(kNumInputs,
2957 kNumTemps, 3029 kNumTemps,
2958 LocationSummary::kCallOnSlowPath); 3030 LocationSummary::kCallOnSlowPath);
2959 summary->set_in(0, Location::RequiresFpuRegister()); 3031 summary->set_in(0, Location::RequiresFpuRegister());
2960 summary->set_temp(0, Location::RequiresRegister());
2961 summary->set_out(0, Location::RequiresRegister()); 3032 summary->set_out(0, Location::RequiresRegister());
2962 return summary; 3033 return summary;
2963 } 3034 }
2964 3035
2965 3036
2966 void BoxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3037 void BoxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2967 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this); 3038 BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
2968 compiler->AddSlowPathCode(slow_path); 3039 compiler->AddSlowPathCode(slow_path);
2969 3040
2970 const Register out_reg = locs()->out(0).reg(); 3041 const Register out_reg = locs()->out(0).reg();
2971 const VRegister value = locs()->in(0).fpu_reg(); 3042 const VRegister value = locs()->in(0).fpu_reg();
2972 3043
2973 __ TryAllocate(compiler->double_class(), 3044 __ TryAllocate(compiler->double_class(),
2974 slow_path->entry_label(), 3045 slow_path->entry_label(),
2975 out_reg, 3046 out_reg,
2976 locs()->temp(0).reg(),
2977 PP); 3047 PP);
2978 __ Bind(slow_path->exit_label()); 3048 __ Bind(slow_path->exit_label());
2979 __ StoreDFieldToOffset(value, out_reg, Double::value_offset(), PP); 3049 __ StoreDFieldToOffset(value, out_reg, Double::value_offset(), PP);
2980 } 3050 }
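Boxing here is a bump allocation plus one raw store: TryAllocate attempts an inline new-space allocation of a Double and branches to the slow path (an allocation-stub call) only on failure, after which the unboxed value is stored straight into the object. A pseudo-C++ sketch of the shape of the fast path; the helper names are hypothetical stand-ins, not VM API:

  struct RawDouble { double value_; };

  RawDouble* TryAllocateDouble();    // Hypothetical: nullptr when space is full.
  RawDouble* BoxDoubleViaRuntime();  // Hypothetical: the slow-path stub call.

  RawDouble* BoxDouble(double value) {
    RawDouble* box = TryAllocateDouble();
    if (box == nullptr) box = BoxDoubleViaRuntime();
    box->value_ = value;             // The StoreDFieldToOffset above.
    return box;
  }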
2981 3051
2982 3052
2983 LocationSummary* UnboxDoubleInstr::MakeLocationSummary(bool opt) const { 3053 LocationSummary* UnboxDoubleInstr::MakeLocationSummary(bool opt) const {
2984 const intptr_t kNumInputs = 1; 3054 const intptr_t kNumInputs = 1;
2985 const intptr_t kNumTemps = 0; 3055 const intptr_t kNumTemps = 0;
2986 LocationSummary* summary = 3056 LocationSummary* summary =
(...skipping 35 matching lines...)
3022 __ Bind(&is_smi); 3092 __ Bind(&is_smi);
3023 __ Asr(TMP, value, kSmiTagSize); // Copy and untag. 3093 __ Asr(TMP, value, kSmiTagSize); // Copy and untag.
3024 __ scvtfd(result, TMP); 3094 __ scvtfd(result, TMP);
3025 __ Bind(&done); 3095 __ Bind(&done);
3026 } 3096 }
3027 } 3097 }
3028 } 3098 }
3029 3099
3030 3100
3031 LocationSummary* BoxFloat32x4Instr::MakeLocationSummary(bool opt) const { 3101 LocationSummary* BoxFloat32x4Instr::MakeLocationSummary(bool opt) const {
3032 UNIMPLEMENTED(); 3102 const intptr_t kNumInputs = 1;
3033 return NULL; 3103 const intptr_t kNumTemps = 0;
3104 LocationSummary* summary =
3105 new LocationSummary(kNumInputs,
3106 kNumTemps,
3107 LocationSummary::kCallOnSlowPath);
3108 summary->set_in(0, Location::RequiresFpuRegister());
3109 summary->set_out(0, Location::RequiresRegister());
3110 return summary;
3034 } 3111 }
3035 3112
3036 3113
3037 void BoxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3114 void BoxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3038 UNIMPLEMENTED(); 3115 BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);
3116 compiler->AddSlowPathCode(slow_path);
3117
3118 const Register out_reg = locs()->out(0).reg();
3119 const VRegister value = locs()->in(0).fpu_reg();
3120
3121 __ TryAllocate(compiler->float32x4_class(),
3122 slow_path->entry_label(),
3123 out_reg,
3124 PP);
3125 __ Bind(slow_path->exit_label());
3126
3127 __ StoreQFieldToOffset(value, out_reg, Float32x4::value_offset(), PP);
3039 } 3128 }
3040 3129
3041 3130
3042 LocationSummary* UnboxFloat32x4Instr::MakeLocationSummary(bool opt) const { 3131 LocationSummary* UnboxFloat32x4Instr::MakeLocationSummary(bool opt) const {
3043 UNIMPLEMENTED(); 3132 const intptr_t kNumInputs = 1;
3044 return NULL; 3133 const intptr_t kNumTemps = 0;
3134 LocationSummary* summary =
3135 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3136 summary->set_in(0, Location::RequiresRegister());
3137 summary->set_out(0, Location::RequiresFpuRegister());
3138 return summary;
3045 } 3139 }
3046 3140
3047 3141
3048 void UnboxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3142 void UnboxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3049 UNIMPLEMENTED(); 3143 const intptr_t value_cid = value()->Type()->ToCid();
3144 const Register value = locs()->in(0).reg();
3145 const VRegister result = locs()->out(0).fpu_reg();
3146
3147 if (value_cid != kFloat32x4Cid) {
3148 Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass);
3149 __ tsti(value, kSmiTagMask);
3150 __ b(deopt, EQ);
3151 __ CompareClassId(value, kFloat32x4Cid, PP);
3152 __ b(deopt, NE);
3153 }
3154
3155 __ LoadQFieldFromOffset(result, value, Float32x4::value_offset(), PP);
3050 } 3156 }
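Unboxing is the mirror image: when the static type is not already known to be Float32x4, the value is checked to be a non-Smi with the right class id (deoptimizing otherwise), and the 128-bit payload is then read with a single quad-word load. A sketch of the guard; ClassIdOf and the cid value are illustrative assumptions:

  #include <cstdint>

  const uint64_t kSmiTagMask = 1;
  const intptr_t kFloat32x4Cid = 21;     // Illustrative value only.

  intptr_t ClassIdOf(uint64_t tagged);   // Hypothetical object-header read.

  // True iff the value must deoptimize before the LoadQFieldFromOffset.
  bool NeedsDeopt(uint64_t tagged) {
    if ((tagged & kSmiTagMask) == 0) return true;  // Smi: no SIMD payload.
    return ClassIdOf(tagged) != kFloat32x4Cid;     // Wrong class.
  }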
3051 3157
3052 3158
3053 LocationSummary* BoxFloat64x2Instr::MakeLocationSummary(bool opt) const { 3159 LocationSummary* BoxFloat64x2Instr::MakeLocationSummary(bool opt) const {
3054 UNIMPLEMENTED(); 3160 const intptr_t kNumInputs = 1;
3055 return NULL; 3161 const intptr_t kNumTemps = 0;
3162 LocationSummary* summary =
3163 new LocationSummary(kNumInputs,
3164 kNumTemps,
3165 LocationSummary::kCallOnSlowPath);
3166 summary->set_in(0, Location::RequiresFpuRegister());
3167 summary->set_out(0, Location::RequiresRegister());
3168 return summary;
3056 } 3169 }
3057 3170
3058 3171
3059 void BoxFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3172 void BoxFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3060 UNIMPLEMENTED(); 3173 BoxFloat64x2SlowPath* slow_path = new BoxFloat64x2SlowPath(this);
3174 compiler->AddSlowPathCode(slow_path);
3175
3176 const Register out_reg = locs()->out(0).reg();
3177 const VRegister value = locs()->in(0).fpu_reg();
3178
3179 __ TryAllocate(compiler->float64x2_class(),
3180 slow_path->entry_label(),
3181 out_reg,
3182 PP);
3183 __ Bind(slow_path->exit_label());
3184
3185 __ StoreQFieldToOffset(value, out_reg, Float64x2::value_offset(), PP);
3061 } 3186 }
3062 3187
3063 3188
3064 LocationSummary* UnboxFloat64x2Instr::MakeLocationSummary(bool opt) const { 3189 LocationSummary* UnboxFloat64x2Instr::MakeLocationSummary(bool opt) const {
3065 UNIMPLEMENTED(); 3190 const intptr_t kNumInputs = 1;
3066 return NULL; 3191 const intptr_t kNumTemps = 0;
3192 LocationSummary* summary =
3193 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3194 summary->set_in(0, Location::RequiresRegister());
3195 summary->set_out(0, Location::RequiresFpuRegister());
3196 return summary;
3067 } 3197 }
3068 3198
3069 3199
3070 void UnboxFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3200 void UnboxFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3071 UNIMPLEMENTED(); 3201 const intptr_t value_cid = value()->Type()->ToCid();
3202 const Register value = locs()->in(0).reg();
3203 const VRegister result = locs()->out(0).fpu_reg();
3204
3205 if (value_cid != kFloat64x2Cid) {
3206 Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass);
3207 __ tsti(value, kSmiTagMask);
3208 __ b(deopt, EQ);
3209 __ CompareClassId(value, kFloat64x2Cid, PP);
3210 __ b(deopt, NE);
3211 }
3212
3213 __ LoadQFieldFromOffset(result, value, Float64x2::value_offset(), PP);
3072 } 3214 }
3073 3215
3074 3216
3075 LocationSummary* BoxInt32x4Instr::MakeLocationSummary(bool opt) const { 3217 LocationSummary* BoxInt32x4Instr::MakeLocationSummary(bool opt) const {
3076 UNIMPLEMENTED(); 3218 const intptr_t kNumInputs = 1;
3077 return NULL; 3219 const intptr_t kNumTemps = 0;
3078 } 3220 LocationSummary* summary =
3221 new LocationSummary(kNumInputs,
3222 kNumTemps,
3223 LocationSummary::kCallOnSlowPath);
3224 summary->set_in(0, Location::RequiresFpuRegister());
3225 summary->set_out(0, Location::RequiresRegister());
3226 return summary;
3227 }
3228
3229
3230 class BoxInt32x4SlowPath : public SlowPathCode {
3231 public:
3232 explicit BoxInt32x4SlowPath(BoxInt32x4Instr* instruction)
3233 : instruction_(instruction) { }
3234
3235 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3236 __ Comment("BoxInt32x4SlowPath");
3237 __ Bind(entry_label());
3238 const Class& int32x4_class = compiler->int32x4_class();
3239 const Code& stub =
3240 Code::Handle(StubCode::GetAllocationStubForClass(int32x4_class));
3241 const ExternalLabel label(int32x4_class.ToCString(), stub.EntryPoint());
3242
3243 LocationSummary* locs = instruction_->locs();
3244 locs->live_registers()->Remove(locs->out(0));
3245
3246 compiler->SaveLiveRegisters(locs);
3247 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
3248 &label,
3249 PcDescriptors::kOther,
3250 locs);
3251 __ mov(locs->out(0).reg(), R0);
3252 compiler->RestoreLiveRegisters(locs);
3253
3254 __ b(exit_label());
3255 }
3256
3257 private:
3258 BoxInt32x4Instr* instruction_;
3259 };
3079 3260
3080 3261
3081 void BoxInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3262 void BoxInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3082 UNIMPLEMENTED(); 3263 BoxInt32x4SlowPath* slow_path = new BoxInt32x4SlowPath(this);
3264 compiler->AddSlowPathCode(slow_path);
3265
3266 const Register out_reg = locs()->out(0).reg();
3267 const VRegister value = locs()->in(0).fpu_reg();
3268
3269 __ TryAllocate(compiler->int32x4_class(),
3270 slow_path->entry_label(),
3271 out_reg,
3272 PP);
3273 __ Bind(slow_path->exit_label());
3274
3275 __ StoreQFieldToOffset(value, out_reg, Int32x4::value_offset(), PP);
3083 } 3276 }
3084 3277
3085 3278
3086 LocationSummary* UnboxInt32x4Instr::MakeLocationSummary(bool opt) const { 3279 LocationSummary* UnboxInt32x4Instr::MakeLocationSummary(bool opt) const {
3087 UNIMPLEMENTED(); 3280 const intptr_t kNumInputs = 1;
3088 return NULL; 3281 const intptr_t kNumTemps = 0;
3282 LocationSummary* summary =
3283 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3284 summary->set_in(0, Location::RequiresRegister());
3285 summary->set_out(0, Location::RequiresFpuRegister());
3286 return summary;
3089 } 3287 }
3090 3288
3091 3289
3092 void UnboxInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3290 void UnboxInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3093 UNIMPLEMENTED(); 3291 const intptr_t value_cid = value()->Type()->ToCid();
3094 } 3292 const Register value = locs()->in(0).reg();
3095 3293 const VRegister result = locs()->out(0).fpu_reg();
3096 3294
3295 if (value_cid != kInt32x4Cid) {
3296 Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass);
3297 __ tsti(value, kSmiTagMask);
3298 __ b(deopt, EQ);
3299 __ CompareClassId(value, kInt32x4Cid, PP);
3300 __ b(deopt, NE);
3301 }
3302
3303 __ LoadQFieldFromOffset(result, value, Int32x4::value_offset(), PP);
3304 }
3305
3306
3097 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(bool opt) const { 3307 LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(bool opt) const {
3098 const intptr_t kNumInputs = 2; 3308 const intptr_t kNumInputs = 2;
3099 const intptr_t kNumTemps = 0; 3309 const intptr_t kNumTemps = 0;
3100 LocationSummary* summary = 3310 LocationSummary* summary =
3101 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3311 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3102 summary->set_in(0, Location::RequiresFpuRegister()); 3312 summary->set_in(0, Location::RequiresFpuRegister());
3103 summary->set_in(1, Location::RequiresFpuRegister()); 3313 summary->set_in(1, Location::RequiresFpuRegister());
3104 summary->set_out(0, Location::RequiresFpuRegister()); 3314 summary->set_out(0, Location::RequiresFpuRegister());
3105 return summary; 3315 return summary;
3106 } 3316 }
3107 3317
3108 3318
3109 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3319 void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3110 const VRegister left = locs()->in(0).fpu_reg(); 3320 const VRegister left = locs()->in(0).fpu_reg();
3111 const VRegister right = locs()->in(1).fpu_reg(); 3321 const VRegister right = locs()->in(1).fpu_reg();
3112 const VRegister result = locs()->out(0).fpu_reg(); 3322 const VRegister result = locs()->out(0).fpu_reg();
3113 switch (op_kind()) { 3323 switch (op_kind()) {
3114 case Token::kADD: __ faddd(result, left, right); break; 3324 case Token::kADD: __ faddd(result, left, right); break;
3115 case Token::kSUB: __ fsubd(result, left, right); break; 3325 case Token::kSUB: __ fsubd(result, left, right); break;
3116 case Token::kMUL: __ fmuld(result, left, right); break; 3326 case Token::kMUL: __ fmuld(result, left, right); break;
3117 case Token::kDIV: __ fdivd(result, left, right); break; 3327 case Token::kDIV: __ fdivd(result, left, right); break;
3118 default: UNREACHABLE(); 3328 default: UNREACHABLE();
3119 } 3329 }
3120 } 3330 }
3121 3331
3122 3332
3123 LocationSummary* BinaryFloat32x4OpInstr::MakeLocationSummary(bool opt) const { 3333 LocationSummary* BinaryFloat32x4OpInstr::MakeLocationSummary(bool opt) const {
3124 UNIMPLEMENTED(); 3334 const intptr_t kNumInputs = 2;
3125 return NULL; 3335 const intptr_t kNumTemps = 0;
3336 LocationSummary* summary =
3337 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3338 summary->set_in(0, Location::RequiresFpuRegister());
3339 summary->set_in(1, Location::RequiresFpuRegister());
3340 summary->set_out(0, Location::RequiresFpuRegister());
3341 return summary;
3126 } 3342 }
3127 3343
3128 3344
3129 void BinaryFloat32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3345 void BinaryFloat32x4OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3130 UNIMPLEMENTED(); 3346 const VRegister left = locs()->in(0).fpu_reg();
3347 const VRegister right = locs()->in(1).fpu_reg();
3348 const VRegister result = locs()->out(0).fpu_reg();
3349
3350 switch (op_kind()) {
3351 case Token::kADD: __ vadds(result, left, right); break;
3352 case Token::kSUB: __ vsubs(result, left, right); break;
3353 case Token::kMUL: __ vmuls(result, left, right); break;
3354 case Token::kDIV: __ vdivs(result, left, right); break;
3355 default: UNREACHABLE();
3356 }
3131 } 3357 }
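Each of the vector instructions above is lane-wise: vadds, for example, adds the four single-precision lanes of its operands independently, with no cross-lane interaction. A scalar model of the kADD case (a sketch of the semantics, not VM code):

  #include <cstddef>

  struct Float32x4 { float lanes[4]; };

  // What vadds(result, left, right) computes, one lane at a time.
  Float32x4 AddLanes(const Float32x4& a, const Float32x4& b) {
    Float32x4 r;
    for (size_t i = 0; i < 4; ++i) r.lanes[i] = a.lanes[i] + b.lanes[i];
    return r;
  }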
3132 3358
3133 3359
3134 LocationSummary* BinaryFloat64x2OpInstr::MakeLocationSummary(bool opt) const { 3360 LocationSummary* BinaryFloat64x2OpInstr::MakeLocationSummary(bool opt) const {
3135 UNIMPLEMENTED(); 3361 const intptr_t kNumInputs = 2;
3136 return NULL; 3362 const intptr_t kNumTemps = 0;
3363 LocationSummary* summary =
3364 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3365 summary->set_in(0, Location::RequiresFpuRegister());
3366 summary->set_in(1, Location::RequiresFpuRegister());
3367 summary->set_out(0, Location::RequiresFpuRegister());
3368 return summary;
3137 } 3369 }
3138 3370
3139 3371
3140 void BinaryFloat64x2OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3372 void BinaryFloat64x2OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3141 UNIMPLEMENTED(); 3373 const VRegister left = locs()->in(0).fpu_reg();
3374 const VRegister right = locs()->in(1).fpu_reg();
3375 const VRegister result = locs()->out(0).fpu_reg();
3376
3377 switch (op_kind()) {
3378 case Token::kADD: __ vaddd(result, left, right); break;
3379 case Token::kSUB: __ vsubd(result, left, right); break;
3380 case Token::kMUL: __ vmuld(result, left, right); break;
3381 case Token::kDIV: __ vdivd(result, left, right); break;
3382 default: UNREACHABLE();
3383 }
3142 } 3384 }
3143 3385
3144 3386
3145 LocationSummary* Simd32x4ShuffleInstr::MakeLocationSummary(bool opt) const { 3387 LocationSummary* Simd32x4ShuffleInstr::MakeLocationSummary(bool opt) const {
3146 UNIMPLEMENTED(); 3388 UNIMPLEMENTED();
3147 return NULL; 3389 return NULL;
3148 } 3390 }
3149 3391
3150 3392
3151 void Simd32x4ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3393 void Simd32x4ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
(...skipping 18 matching lines...)
3170 } 3412 }
3171 3413
3172 3414
3173 void Simd32x4GetSignMaskInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3415 void Simd32x4GetSignMaskInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3174 UNIMPLEMENTED(); 3416 UNIMPLEMENTED();
3175 } 3417 }
3176 3418
3177 3419
3178 LocationSummary* Float32x4ConstructorInstr::MakeLocationSummary( 3420 LocationSummary* Float32x4ConstructorInstr::MakeLocationSummary(
3179 bool opt) const { 3421 bool opt) const {
3180 UNIMPLEMENTED(); 3422 const intptr_t kNumInputs = 4;
3181 return NULL; 3423 const intptr_t kNumTemps = 0;
3424 LocationSummary* summary =
3425 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3426 summary->set_in(0, Location::RequiresFpuRegister());
3427 summary->set_in(1, Location::RequiresFpuRegister());
3428 summary->set_in(2, Location::RequiresFpuRegister());
3429 summary->set_in(3, Location::RequiresFpuRegister());
3430 summary->set_out(0, Location::RequiresFpuRegister());
3431 return summary;
3182 } 3432 }
3183 3433
3184 3434
3185 void Float32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3435 void Float32x4ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3186 UNIMPLEMENTED(); 3436 const VRegister v0 = locs()->in(0).fpu_reg();
3437 const VRegister v1 = locs()->in(1).fpu_reg();
3438 const VRegister v2 = locs()->in(2).fpu_reg();
3439 const VRegister v3 = locs()->in(3).fpu_reg();
3440 const VRegister r = locs()->out(0).fpu_reg();
3441
3442 __ fcvtsd(VTMP, v0);  // Narrow to single; the result lands in lane 0.
3443 __ vinss(r, 0, VTMP, 0);
3444 __ fcvtsd(VTMP, v1);
3445 __ vinss(r, 1, VTMP, 0);
3446 __ fcvtsd(VTMP, v2);
3447 __ vinss(r, 2, VTMP, 0);
3448 __ fcvtsd(VTMP, v3);
3449 __ vinss(r, 3, VTMP, 0);
3187 } 3450 }
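The constructor narrows each double argument to a single and inserts it into the corresponding lane of the result; since fcvtsd always leaves the converted single in lane 0 of its destination, every vinss reads source lane 0 (converting through VTMP also keeps the inputs intact). A scalar model of the assembled value:

  struct Float32x4 { float lanes[4]; };

  // What the fcvtsd/vinss sequence above builds.
  Float32x4 MakeFloat32x4(double x, double y, double z, double w) {
    Float32x4 r;
    r.lanes[0] = static_cast<float>(x);
    r.lanes[1] = static_cast<float>(y);
    r.lanes[2] = static_cast<float>(z);
    r.lanes[3] = static_cast<float>(w);
    return r;
  }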
3188 3451
3189 3452
3190 LocationSummary* Float32x4ZeroInstr::MakeLocationSummary(bool opt) const { 3453 LocationSummary* Float32x4ZeroInstr::MakeLocationSummary(bool opt) const {
3191 UNIMPLEMENTED(); 3454 const intptr_t kNumInputs = 0;
3192 return NULL; 3455 const intptr_t kNumTemps = 0;
3456 LocationSummary* summary =
3457 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3458 summary->set_out(0, Location::RequiresFpuRegister());
3459 return summary;
3193 } 3460 }
3194 3461
3195 3462
3196 void Float32x4ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3463 void Float32x4ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3197 UNIMPLEMENTED(); 3464 const VRegister v = locs()->out(0).fpu_reg();
3465 __ LoadDImmediate(v, 0.0, PP);
3198 } 3466 }
3199 3467
3200 3468
3201 LocationSummary* Float32x4SplatInstr::MakeLocationSummary(bool opt) const { 3469 LocationSummary* Float32x4SplatInstr::MakeLocationSummary(bool opt) const {
3202 UNIMPLEMENTED(); 3470 const intptr_t kNumInputs = 1;
3203 return NULL; 3471 const intptr_t kNumTemps = 0;
3472 LocationSummary* summary =
3473 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3474 summary->set_in(0, Location::RequiresFpuRegister());
3475 summary->set_out(0, Location::RequiresFpuRegister());
3476 return summary;
3204 } 3477 }
3205 3478
3206 3479
3207 void Float32x4SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3480 void Float32x4SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3208 UNIMPLEMENTED(); 3481 const VRegister value = locs()->in(0).fpu_reg();
3482 const VRegister result = locs()->out(0).fpu_reg();
3483
3484 // Convert to Float32.
3485 __ fcvtsd(VTMP, value);
3486
3487 // Splat across all lanes.
3488 __ vdups(result, VTMP, 0);
3209 } 3489 }
3210 3490
3211 3491
3212 LocationSummary* Float32x4ComparisonInstr::MakeLocationSummary(bool opt) const { 3492 LocationSummary* Float32x4ComparisonInstr::MakeLocationSummary(bool opt) const {
3213 UNIMPLEMENTED(); 3493 UNIMPLEMENTED();
3214 return NULL; 3494 return NULL;
3215 } 3495 }
3216 3496
3217 3497
3218 void Float32x4ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3498 void Float32x4ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
(...skipping 17 matching lines...)
3236 return NULL; 3516 return NULL;
3237 } 3517 }
3238 3518
3239 3519
3240 void Float32x4SqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3520 void Float32x4SqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3241 UNIMPLEMENTED(); 3521 UNIMPLEMENTED();
3242 } 3522 }
3243 3523
3244 3524
3245 LocationSummary* Float32x4ScaleInstr::MakeLocationSummary(bool opt) const { 3525 LocationSummary* Float32x4ScaleInstr::MakeLocationSummary(bool opt) const {
3246 UNIMPLEMENTED(); 3526 const intptr_t kNumInputs = 2;
3247 return NULL; 3527 const intptr_t kNumTemps = 0;
3528 LocationSummary* summary =
3529 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3530 summary->set_in(0, Location::RequiresFpuRegister());
3531 summary->set_in(1, Location::RequiresFpuRegister());
3532 summary->set_out(0, Location::RequiresFpuRegister());
3533 return summary;
3248 } 3534 }
3249 3535
3250 3536
3251 void Float32x4ScaleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3537 void Float32x4ScaleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3252 UNIMPLEMENTED(); 3538 const VRegister left = locs()->in(0).fpu_reg();
3539 const VRegister right = locs()->in(1).fpu_reg();
3540 const VRegister result = locs()->out(0).fpu_reg();
3541
3542 switch (op_kind()) {
3543 case MethodRecognizer::kFloat32x4Scale:
3544 __ fcvtsd(VTMP, left);
3545 __ vdups(result, VTMP, 0);
3546 __ vmuls(result, result, right);
3547 break;
3548 default: UNREACHABLE();
3549 }
3253 } 3550 }
3254 3551
3255 3552
3256 LocationSummary* Float32x4ZeroArgInstr::MakeLocationSummary(bool opt) const { 3553 LocationSummary* Float32x4ZeroArgInstr::MakeLocationSummary(bool opt) const {
3257 UNIMPLEMENTED(); 3554 UNIMPLEMENTED();
3258 return NULL; 3555 return NULL;
3259 } 3556 }
3260 3557
3261 3558
3262 void Float32x4ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3559 void Float32x4ZeroArgInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
(...skipping 28 matching lines...)
3291 return NULL; 3588 return NULL;
3292 } 3589 }
3293 3590
3294 3591
3295 void Float32x4ToInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3592 void Float32x4ToInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3296 UNIMPLEMENTED(); 3593 UNIMPLEMENTED();
3297 } 3594 }
3298 3595
3299 3596
3300 LocationSummary* Simd64x2ShuffleInstr::MakeLocationSummary(bool opt) const { 3597 LocationSummary* Simd64x2ShuffleInstr::MakeLocationSummary(bool opt) const {
3301 UNIMPLEMENTED(); 3598 const intptr_t kNumInputs = 1;
3302 return NULL; 3599 const intptr_t kNumTemps = 0;
3600 LocationSummary* summary =
3601 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3602 summary->set_in(0, Location::RequiresFpuRegister());
3603 summary->set_out(0, Location::RequiresFpuRegister());
3604 return summary;
3303 } 3605 }
3304 3606
3305 3607
3306 void Simd64x2ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3608 void Simd64x2ShuffleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3307 UNIMPLEMENTED(); 3609 const VRegister value = locs()->in(0).fpu_reg();
3610 const VRegister result = locs()->out(0).fpu_reg();
3611
3612 switch (op_kind()) {
3613 case MethodRecognizer::kFloat64x2GetX:
3614 __ vinsd(result, 0, value, 0);
3615 break;
3616 case MethodRecognizer::kFloat64x2GetY:
3617 __ vinsd(result, 0, value, 1);
3618 break;
3619 default: UNREACHABLE();
3620 }
3308 } 3621 }
3309 3622
3310 3623
3311 LocationSummary* Float64x2ZeroInstr::MakeLocationSummary(bool opt) const { 3624 LocationSummary* Float64x2ZeroInstr::MakeLocationSummary(bool opt) const {
3312 UNIMPLEMENTED(); 3625 const intptr_t kNumInputs = 0;
3313 return NULL; 3626 const intptr_t kNumTemps = 0;
3627 LocationSummary* summary =
3628 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3629 summary->set_out(0, Location::RequiresFpuRegister());
3630 return summary;
3314 } 3631 }
3315 3632
3316 3633
3317 void Float64x2ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3634 void Float64x2ZeroInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3318 UNIMPLEMENTED(); 3635 const VRegister v = locs()->out(0).fpu_reg();
3636 __ LoadDImmediate(v, 0.0, PP);
3319 } 3637 }
3320 3638
3321 3639
3322 LocationSummary* Float64x2SplatInstr::MakeLocationSummary(bool opt) const { 3640 LocationSummary* Float64x2SplatInstr::MakeLocationSummary(bool opt) const {
3323 UNIMPLEMENTED(); 3641 const intptr_t kNumInputs = 1;
3324 return NULL; 3642 const intptr_t kNumTemps = 0;
3643 LocationSummary* summary =
3644 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3645 summary->set_in(0, Location::RequiresFpuRegister());
3646 summary->set_out(0, Location::RequiresFpuRegister());
3647 return summary;
3325 } 3648 }
3326 3649
3327 3650
3328 void Float64x2SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3651 void Float64x2SplatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3329 UNIMPLEMENTED(); 3652 const VRegister value = locs()->in(0).fpu_reg();
3653 const VRegister result = locs()->out(0).fpu_reg();
3654 __ vdupd(result, value, 0);
3330 } 3655 }
3331 3656
3332 3657
3333 LocationSummary* Float64x2ConstructorInstr::MakeLocationSummary( 3658 LocationSummary* Float64x2ConstructorInstr::MakeLocationSummary(
3334 bool opt) const { 3659 bool opt) const {
3335 UNIMPLEMENTED(); 3660 const intptr_t kNumInputs = 2;
3336 return NULL; 3661 const intptr_t kNumTemps = 0;
3662 LocationSummary* summary =
3663 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3664 summary->set_in(0, Location::RequiresFpuRegister());
3665 summary->set_in(1, Location::RequiresFpuRegister());
3666 summary->set_out(0, Location::RequiresFpuRegister());
3667 return summary;
3337 } 3668 }
3338 3669
3339 3670
3340 void Float64x2ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3671 void Float64x2ConstructorInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3341 UNIMPLEMENTED(); 3672 const VRegister v0 = locs()->in(0).fpu_reg();
3673 const VRegister v1 = locs()->in(1).fpu_reg();
3674 const VRegister r = locs()->out(0).fpu_reg();
3675 __ vinsd(r, 0, v0, 0);
3676 __ vinsd(r, 1, v1, 0);
3342 } 3677 }
3343 3678
3344 3679
3345 LocationSummary* Float64x2ToFloat32x4Instr::MakeLocationSummary( 3680 LocationSummary* Float64x2ToFloat32x4Instr::MakeLocationSummary(
3346 bool opt) const { 3681 bool opt) const {
3347 UNIMPLEMENTED(); 3682 UNIMPLEMENTED();
3348 return NULL; 3683 return NULL;
3349 } 3684 }
3350 3685
3351 3686
(...skipping 120 matching lines...)
3472 LocationSummary* summary = 3807 LocationSummary* summary =
3473 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3808 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3474 summary->set_in(0, Location::RequiresFpuRegister()); 3809 summary->set_in(0, Location::RequiresFpuRegister());
3475 summary->set_out(0, Location::RequiresFpuRegister()); 3810 summary->set_out(0, Location::RequiresFpuRegister());
3476 return summary; 3811 return summary;
3477 } 3812 }
3478 3813
3479 3814
3480 void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3815 void MathUnaryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3481 if (kind() == MathUnaryInstr::kSqrt) { 3816 if (kind() == MathUnaryInstr::kSqrt) {
3482 VRegister val = locs()->in(0).fpu_reg(); 3817 const VRegister val = locs()->in(0).fpu_reg();
3483 VRegister result = locs()->out(0).fpu_reg(); 3818 const VRegister result = locs()->out(0).fpu_reg();
3484 __ fsqrtd(result, val); 3819 __ fsqrtd(result, val);
3485 } else if (kind() == MathUnaryInstr::kDoubleSquare) { 3820 } else if (kind() == MathUnaryInstr::kDoubleSquare) {
3486 VRegister val = locs()->in(0).fpu_reg(); 3821 const VRegister val = locs()->in(0).fpu_reg();
3487 VRegister result = locs()->out(0).fpu_reg(); 3822 const VRegister result = locs()->out(0).fpu_reg();
3488 __ fmuld(result, val, val); 3823 __ fmuld(result, val, val);
3489 } else { 3824 } else {
3490 ASSERT((kind() == MathUnaryInstr::kSin) || 3825 ASSERT((kind() == MathUnaryInstr::kSin) ||
3491 (kind() == MathUnaryInstr::kCos)); 3826 (kind() == MathUnaryInstr::kCos));
3492 __ CallRuntime(TargetFunction(), InputCount()); 3827 __ CallRuntime(TargetFunction(), InputCount());
3493 } 3828 }
3494 } 3829 }
3495 3830
3496 3831
3497 LocationSummary* MathMinMaxInstr::MakeLocationSummary(bool opt) const { 3832 LocationSummary* MathMinMaxInstr::MakeLocationSummary(bool opt) const {
(...skipping 61 matching lines...)
3559 } else { 3894 } else {
3560 __ b(&done, GE); 3895 __ b(&done, GE);
3561 __ fmovdd(result, right); 3896 __ fmovdd(result, right);
3562 ASSERT(left == result); 3897 ASSERT(left == result);
3563 } 3898 }
3564 __ Bind(&done); 3899 __ Bind(&done);
3565 return; 3900 return;
3566 } 3901 }
3567 3902
3568 ASSERT(result_cid() == kSmiCid); 3903 ASSERT(result_cid() == kSmiCid);
3569 Register left = locs()->in(0).reg(); 3904 const Register left = locs()->in(0).reg();
3570 Register right = locs()->in(1).reg(); 3905 const Register right = locs()->in(1).reg();
3571 Register result = locs()->out(0).reg(); 3906 const Register result = locs()->out(0).reg();
3572 __ CompareRegisters(left, right); 3907 __ CompareRegisters(left, right);
3573 ASSERT(result == left); 3908 ASSERT(result == left);
3574 if (is_min) { 3909 if (is_min) {
3575 __ csel(result, right, left, GT); 3910 __ csel(result, right, left, GT);
3576 } else { 3911 } else {
3577 __ csel(result, right, left, LT); 3912 __ csel(result, right, left, LT);
3578 } 3913 }
3579 } 3914 }
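With csel the Smi min/max is branchless: one compare, then a conditional select of whichever operand survives (the result register starts out aliased to left). Scalar equivalents of the two csel forms above:

  #include <cstdint>

  int64_t SmiMin(int64_t left, int64_t right) {
    return (left > right) ? right : left;  // csel(result, right, left, GT)
  }
  int64_t SmiMax(int64_t left, int64_t right) {
    return (left < right) ? right : left;  // csel(result, right, left, LT)
  }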
3580 3915
3581 3916
3582 LocationSummary* UnarySmiOpInstr::MakeLocationSummary(bool opt) const { 3917 LocationSummary* UnarySmiOpInstr::MakeLocationSummary(bool opt) const {
3583 const intptr_t kNumInputs = 1; 3918 const intptr_t kNumInputs = 1;
3584 const intptr_t kNumTemps = 0; 3919 const intptr_t kNumTemps = 0;
3585 LocationSummary* summary = 3920 LocationSummary* summary =
3586 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3921 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3587 summary->set_in(0, Location::RequiresRegister()); 3922 summary->set_in(0, Location::RequiresRegister());
3588 // We make use of 3-operand instructions by not requiring the result register 3923 // We make use of 3-operand instructions by not requiring the result register
3589 // to be identical to the first input register, as it must be on Intel. 3924 // to be identical to the first input register, as it must be on Intel.
3590 summary->set_out(0, Location::RequiresRegister()); 3925 summary->set_out(0, Location::RequiresRegister());
3591 return summary; 3926 return summary;
3592 } 3927 }
3593 3928
3594 3929
3595 void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3930 void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3596 Register value = locs()->in(0).reg(); 3931 const Register value = locs()->in(0).reg();
3597 Register result = locs()->out(0).reg(); 3932 const Register result = locs()->out(0).reg();
3598 switch (op_kind()) { 3933 switch (op_kind()) {
3599 case Token::kNEGATE: { 3934 case Token::kNEGATE: {
3600 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp); 3935 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
3601 __ subs(result, ZR, Operand(value)); 3936 __ subs(result, ZR, Operand(value));
3602 __ b(deopt, VS); 3937 __ b(deopt, VS);
3603 if (FLAG_throw_on_javascript_int_overflow) { 3938 if (FLAG_throw_on_javascript_int_overflow) {
3604 EmitJavascriptOverflowCheck(compiler, range(), deopt, value); 3939 EmitJavascriptOverflowCheck(compiler, range(), deopt, value);
3605 } 3940 }
3606 break; 3941 break;
3607 } 3942 }
(...skipping 13 matching lines...) Expand all
3621 const intptr_t kNumTemps = 0; 3956 const intptr_t kNumTemps = 0;
3622 LocationSummary* summary = 3957 LocationSummary* summary =
3623 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3958 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3624 summary->set_in(0, Location::RequiresFpuRegister()); 3959 summary->set_in(0, Location::RequiresFpuRegister());
3625 summary->set_out(0, Location::RequiresFpuRegister()); 3960 summary->set_out(0, Location::RequiresFpuRegister());
3626 return summary; 3961 return summary;
3627 } 3962 }
3628 3963
3629 3964
3630 void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3965 void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3631 VRegister result = locs()->out(0).fpu_reg(); 3966 const VRegister result = locs()->out(0).fpu_reg();
3632 VRegister value = locs()->in(0).fpu_reg(); 3967 const VRegister value = locs()->in(0).fpu_reg();
3633 __ fnegd(result, value); 3968 __ fnegd(result, value);
3634 } 3969 }
3635 3970
3636 3971
3637 LocationSummary* SmiToDoubleInstr::MakeLocationSummary(bool opt) const { 3972 LocationSummary* SmiToDoubleInstr::MakeLocationSummary(bool opt) const {
3638 const intptr_t kNumInputs = 1; 3973 const intptr_t kNumInputs = 1;
3639 const intptr_t kNumTemps = 0; 3974 const intptr_t kNumTemps = 0;
3640 LocationSummary* result = 3975 LocationSummary* result =
3641 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 3976 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
3642 result->set_in(0, Location::WritableRegister()); 3977 result->set_in(0, Location::WritableRegister());
3643 result->set_out(0, Location::RequiresFpuRegister()); 3978 result->set_out(0, Location::RequiresFpuRegister());
3644 return result; 3979 return result;
3645 } 3980 }
3646 3981
3647 3982
3648 void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3983 void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3649 Register value = locs()->in(0).reg(); 3984 const Register value = locs()->in(0).reg();
3650 VRegister result = locs()->out(0).fpu_reg(); 3985 const VRegister result = locs()->out(0).fpu_reg();
3651 __ SmiUntag(value); 3986 __ SmiUntag(value);
3652 __ scvtfd(result, value); 3987 __ scvtfd(result, value);
3653 } 3988 }
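SmiUntag is just an arithmetic shift right by the one-bit tag, after which scvtfd performs a signed 64-bit integer to double conversion. Equivalent scalar code (assuming the one-bit Smi tag used throughout this file and an arithmetic >> on negative values):

  #include <cstdint>

  double SmiToDouble(int64_t tagged_smi) {
    const int64_t untagged = tagged_smi >> 1;  // SmiUntag.
    return static_cast<double>(untagged);      // scvtfd.
  }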
3654 3989
3655 3990
3656 LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(bool opt) const { 3991 LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(bool opt) const {
3657 const intptr_t kNumInputs = 1; 3992 const intptr_t kNumInputs = 1;
3658 const intptr_t kNumTemps = 0; 3993 const intptr_t kNumTemps = 0;
3659 LocationSummary* result = 3994 LocationSummary* result =
3660 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall); 3995 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
(...skipping 293 matching lines...)
3954 } 4289 }
3955 return summary; 4290 return summary;
3956 } 4291 }
3957 4292
3958 4293
3959 void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4294 void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3960 ASSERT(locs()->in(0).IsPairLocation()); 4295 ASSERT(locs()->in(0).IsPairLocation());
3961 PairLocation* pair = locs()->in(0).AsPairLocation(); 4296 PairLocation* pair = locs()->in(0).AsPairLocation();
3962 Location in_loc = pair->At(index()); 4297 Location in_loc = pair->At(index());
3963 if (representation() == kUnboxedDouble) { 4298 if (representation() == kUnboxedDouble) {
3964 VRegister out = locs()->out(0).fpu_reg(); 4299 const VRegister out = locs()->out(0).fpu_reg();
3965 VRegister in = in_loc.fpu_reg(); 4300 const VRegister in = in_loc.fpu_reg();
3966 __ fmovdd(out, in); 4301 __ fmovdd(out, in);
3967 } else { 4302 } else {
3968 ASSERT(representation() == kTagged); 4303 ASSERT(representation() == kTagged);
3969 Register out = locs()->out(0).reg(); 4304 const Register out = locs()->out(0).reg();
3970 Register in = in_loc.reg(); 4305 const Register in = in_loc.reg();
3971 __ mov(out, in); 4306 __ mov(out, in);
3972 } 4307 }
3973 } 4308 }
3974 4309
3975 4310
3976 LocationSummary* MergedMathInstr::MakeLocationSummary(bool opt) const { 4311 LocationSummary* MergedMathInstr::MakeLocationSummary(bool opt) const {
3977 if (kind() == MergedMathInstr::kTruncDivMod) { 4312 if (kind() == MergedMathInstr::kTruncDivMod) {
3978 const intptr_t kNumInputs = 2; 4313 const intptr_t kNumInputs = 2;
3979 const intptr_t kNumTemps = 0; 4314 const intptr_t kNumTemps = 0;
3980 LocationSummary* summary = 4315 LocationSummary* summary =
(...skipping 147 matching lines...)
4128 ICData::kDeoptHoistedCheckClass : ICData::kDeoptCheckClass; 4463 ICData::kDeoptHoistedCheckClass : ICData::kDeoptCheckClass;
4129 if (IsNullCheck()) { 4464 if (IsNullCheck()) {
4130 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason); 4465 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason);
4131 __ CompareObject(locs()->in(0).reg(), Object::null_object(), PP); 4466 __ CompareObject(locs()->in(0).reg(), Object::null_object(), PP);
4132 __ b(deopt, EQ); 4467 __ b(deopt, EQ);
4133 return; 4468 return;
4134 } 4469 }
4135 4470
4136 ASSERT((unary_checks().GetReceiverClassIdAt(0) != kSmiCid) || 4471 ASSERT((unary_checks().GetReceiverClassIdAt(0) != kSmiCid) ||
4137 (unary_checks().NumberOfChecks() > 1)); 4472 (unary_checks().NumberOfChecks() > 1));
4138 Register value = locs()->in(0).reg(); 4473 const Register value = locs()->in(0).reg();
4139 Register temp = locs()->temp(0).reg(); 4474 const Register temp = locs()->temp(0).reg();
4140 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason); 4475 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason);
4141 Label is_ok; 4476 Label is_ok;
4142 intptr_t cix = 0; 4477 intptr_t cix = 0;
4143 if (unary_checks().GetReceiverClassIdAt(cix) == kSmiCid) { 4478 if (unary_checks().GetReceiverClassIdAt(cix) == kSmiCid) {
4144 __ tsti(value, kSmiTagMask); 4479 __ tsti(value, kSmiTagMask);
4145 __ b(&is_ok, EQ); 4480 __ b(&is_ok, EQ);
4146 cix++; // Skip first check. 4481 cix++; // Skip first check.
4147 } else { 4482 } else {
4148 __ tsti(value, kSmiTagMask); 4483 __ tsti(value, kSmiTagMask);
4149 __ b(deopt, EQ); 4484 __ b(deopt, EQ);
(...skipping 17 matching lines...)
4167 const intptr_t kNumInputs = 1; 4502 const intptr_t kNumInputs = 1;
4168 const intptr_t kNumTemps = 0; 4503 const intptr_t kNumTemps = 0;
4169 LocationSummary* summary = 4504 LocationSummary* summary =
4170 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall); 4505 new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
4171 summary->set_in(0, Location::RequiresRegister()); 4506 summary->set_in(0, Location::RequiresRegister());
4172 return summary; 4507 return summary;
4173 } 4508 }
4174 4509
4175 4510
4176 void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4511 void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4177 Register value = locs()->in(0).reg(); 4512 const Register value = locs()->in(0).reg();
4178 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi); 4513 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
4179 __ tsti(value, kSmiTagMask); 4514 __ tsti(value, kSmiTagMask);
4180 __ b(deopt, NE); 4515 __ b(deopt, NE);
4181 } 4516 }
4182 4517
4183 4518
4184 LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(bool opt) const { 4519 LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(bool opt) const {
4185 const intptr_t kNumInputs = 2; 4520 const intptr_t kNumInputs = 2;
4186 const intptr_t kNumTemps = 0; 4521 const intptr_t kNumTemps = 0;
4187 LocationSummary* locs = 4522 LocationSummary* locs =
(...skipping 22 matching lines...)
4210 ASSERT((Smi::Cast(length_loc.constant()).Value() <= 4545 ASSERT((Smi::Cast(length_loc.constant()).Value() <=
4211 Smi::Cast(index_loc.constant()).Value()) || 4546 Smi::Cast(index_loc.constant()).Value()) ||
4212 (Smi::Cast(index_loc.constant()).Value() < 0)); 4547 (Smi::Cast(index_loc.constant()).Value() < 0));
4213 // Unconditionally deoptimize for constant bounds checks because they 4548 // Unconditionally deoptimize for constant bounds checks because they
4214 // only occur when the index is out-of-bounds. 4549 // only occur when the index is out-of-bounds.
4215 __ b(deopt); 4550 __ b(deopt);
4216 return; 4551 return;
4217 } 4552 }
4218 4553
4219 if (index_loc.IsConstant()) { 4554 if (index_loc.IsConstant()) {
4220 Register length = length_loc.reg(); 4555 const Register length = length_loc.reg();
4221 const Smi& index = Smi::Cast(index_loc.constant()); 4556 const Smi& index = Smi::Cast(index_loc.constant());
4222 __ CompareImmediate(length, reinterpret_cast<int64_t>(index.raw()), PP); 4557 __ CompareImmediate(length, reinterpret_cast<int64_t>(index.raw()), PP);
4223 __ b(deopt, LS); 4558 __ b(deopt, LS);
4224 } else if (length_loc.IsConstant()) { 4559 } else if (length_loc.IsConstant()) {
4225 const Smi& length = Smi::Cast(length_loc.constant()); 4560 const Smi& length = Smi::Cast(length_loc.constant());
4226 Register index = index_loc.reg(); 4561 const Register index = index_loc.reg();
4227 __ CompareImmediate(index, reinterpret_cast<int64_t>(length.raw()), PP); 4562 __ CompareImmediate(index, reinterpret_cast<int64_t>(length.raw()), PP);
4228 __ b(deopt, CS); 4563 __ b(deopt, CS);
4229 } else { 4564 } else {
4230 Register length = length_loc.reg(); 4565 const Register length = length_loc.reg();
4231 Register index = index_loc.reg(); 4566 const Register index = index_loc.reg();
4232 __ CompareRegisters(index, length); 4567 __ CompareRegisters(index, length);
4233 __ b(deopt, CS); 4568 __ b(deopt, CS);
4234 } 4569 }
4235 } 4570 }
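The register/register case needs only one unsigned comparison: reinterpreted as unsigned, a negative index becomes a huge value, so index >= length (condition CS) rejects both negative and too-large indices at once. Because index and length are both tagged Smis, tagging preserves their ordering and the compare can run on the tagged values directly. Sketch:

  #include <cstdint>

  // Mirrors CompareRegisters(index, length) + b(deopt, CS) above.
  bool BoundsCheckFails(int64_t index, int64_t length) {
    return static_cast<uint64_t>(index) >= static_cast<uint64_t>(length);
  }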
4236 4571
4237 4572
4238 LocationSummary* UnboxIntegerInstr::MakeLocationSummary(bool opt) const { 4573 LocationSummary* UnboxIntegerInstr::MakeLocationSummary(bool opt) const {
4239 UNIMPLEMENTED(); 4574 UNIMPLEMENTED();
4240 return NULL; 4575 return NULL;
4241 } 4576 }
(...skipping 197 matching lines...)
4439 4774
4440 void StrictCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4775 void StrictCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4441 __ Comment("StrictCompareInstr"); 4776 __ Comment("StrictCompareInstr");
4442 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT); 4777 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
4443 4778
4444 Label is_true, is_false; 4779 Label is_true, is_false;
4445 BranchLabels labels = { &is_true, &is_false, &is_false }; 4780 BranchLabels labels = { &is_true, &is_false, &is_false };
4446 Condition true_condition = EmitComparisonCode(compiler, labels); 4781 Condition true_condition = EmitComparisonCode(compiler, labels);
4447 EmitBranchOnCondition(compiler, true_condition, labels); 4782 EmitBranchOnCondition(compiler, true_condition, labels);
4448 4783
4449 Register result = locs()->out(0).reg(); 4784 const Register result = locs()->out(0).reg();
4450 Label done; 4785 Label done;
4451 __ Bind(&is_false); 4786 __ Bind(&is_false);
4452 __ LoadObject(result, Bool::False(), PP); 4787 __ LoadObject(result, Bool::False(), PP);
4453 __ b(&done); 4788 __ b(&done);
4454 __ Bind(&is_true); 4789 __ Bind(&is_true);
4455 __ LoadObject(result, Bool::True(), PP); 4790 __ LoadObject(result, Bool::True(), PP);
4456 __ Bind(&done); 4791 __ Bind(&done);
4457 } 4792 }
4458 4793
4459 4794
4460 void StrictCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler, 4795 void StrictCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler,
4461 BranchInstr* branch) { 4796 BranchInstr* branch) {
4462 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT); 4797 ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
4463 4798
4464 BranchLabels labels = compiler->CreateBranchLabels(branch); 4799 BranchLabels labels = compiler->CreateBranchLabels(branch);
4465 Condition true_condition = EmitComparisonCode(compiler, labels); 4800 Condition true_condition = EmitComparisonCode(compiler, labels);
4466 EmitBranchOnCondition(compiler, true_condition, labels); 4801 EmitBranchOnCondition(compiler, true_condition, labels);
4467 } 4802 }
4468 4803
4469 4804
4470 LocationSummary* BooleanNegateInstr::MakeLocationSummary(bool opt) const { 4805 LocationSummary* BooleanNegateInstr::MakeLocationSummary(bool opt) const {
4471 return LocationSummary::Make(1, 4806 return LocationSummary::Make(1,
4472 Location::RequiresRegister(), 4807 Location::RequiresRegister(),
4473 LocationSummary::kNoCall); 4808 LocationSummary::kNoCall);
4474 } 4809 }
4475 4810
4476 4811
4477 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4812 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4478 Register value = locs()->in(0).reg(); 4813 const Register value = locs()->in(0).reg();
4479 Register result = locs()->out(0).reg(); 4814 const Register result = locs()->out(0).reg();
4480 4815
4481 __ LoadObject(result, Bool::True(), PP); 4816 __ LoadObject(result, Bool::True(), PP);
4482 __ LoadObject(TMP, Bool::False(), PP); 4817 __ LoadObject(TMP, Bool::False(), PP);
4483 __ CompareRegisters(result, value); 4818 __ CompareRegisters(result, value);
4484 __ csel(result, TMP, result, EQ); 4819 __ csel(result, TMP, result, EQ);
4485 } 4820 }
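Boolean negation is branchless as well: load true, compare it against the input, and csel false exactly when they matched. In effect (the constants below are hypothetical tagged pointers standing in for Bool::True() and Bool::False()):

  #include <cstdint>

  extern const uint64_t kTrueObject;   // Stand-in for Bool::True().
  extern const uint64_t kFalseObject;  // Stand-in for Bool::False().

  uint64_t BooleanNegate(uint64_t value) {
    return (value == kTrueObject) ? kFalseObject : kTrueObject;
  }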
4486 4821
4487 4822
4488 LocationSummary* AllocateObjectInstr::MakeLocationSummary(bool opt) const { 4823 LocationSummary* AllocateObjectInstr::MakeLocationSummary(bool opt) const {
4489 return MakeCallSummary(); 4824 return MakeCallSummary();
4490 } 4825 }
4491 4826
4492 4827
4493 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 4828 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4494 const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls())); 4829 const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls()));
4495 const ExternalLabel label(cls().ToCString(), stub.EntryPoint()); 4830 const ExternalLabel label(cls().ToCString(), stub.EntryPoint());
4496 compiler->GenerateCall(token_pos(), 4831 compiler->GenerateCall(token_pos(),
4497 &label, 4832 &label,
4498 PcDescriptors::kOther, 4833 PcDescriptors::kOther,
4499 locs()); 4834 locs());
4500 __ Drop(ArgumentCount()); // Discard arguments. 4835 __ Drop(ArgumentCount()); // Discard arguments.
4501 } 4836 }
4502 4837
4503 } // namespace dart 4838 } // namespace dart
4504 4839
4505 #endif // defined TARGET_ARCH_ARM64 4840 #endif // defined TARGET_ARCH_ARM64