Chromium Code Reviews

Side by Side Diff: runtime/vm/intermediate_language_arm.cc

Issue 297163012: Rename ShifterOperand to Operand on ARM. (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 6 months ago
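
What the rename means at call sites: wherever the old column below builds a flexible second operand with ShifterOperand(...), for example __ sub(R2, SP, ShifterOperand(FP)) or __ mov(result, ShifterOperand(1), true_condition), the new column spells the same class Operand; the shifted-register form Operand(index, LSL, shift) and the static helper Operand::CanHold(imm, &o) are renamed the same way, with no change in behavior. The sketch below is a minimal, hypothetical standalone C++ model of such a class and of the immediate-encoding check that CanHold performs (an ARM data-processing immediate is an 8-bit value rotated right by an even amount). Its names and structure are illustrative assumptions, not the Dart VM sources.

```cpp
// Hypothetical, standalone model of the renamed class; not the Dart VM sources.
#include <cstdint>
#include <iostream>

enum Register { R0, R1, R2, SP, FP };
enum Shift { LSL, LSR, ASR, ROR };

class Operand {  // formerly ShifterOperand; call sites change only in the name
 public:
  explicit Operand(int32_t immediate) : kind_(kImmediate), imm_(immediate) {}
  explicit Operand(Register reg) : kind_(kRegister), reg_(reg) {}
  Operand(Register reg, Shift shift, int amount)
      : kind_(kShiftedRegister), reg_(reg), shift_(shift), amount_(amount) {}

  // True if imm fits an ARM data-processing immediate: an 8-bit value
  // rotated right by an even amount. On success, *out holds the operand.
  static bool CanHold(uint32_t imm, Operand* out) {
    for (int rot = 0; rot < 32; rot += 2) {
      // Undo a rotate-right-by-rot encoding by rotating left by rot.
      const uint32_t undone =
          (rot == 0) ? imm : ((imm << rot) | (imm >> (32 - rot)));
      if (undone <= 0xFF) {
        *out = Operand(static_cast<int32_t>(imm));
        return true;
      }
    }
    return false;
  }

  bool is_immediate() const { return kind_ == kImmediate; }
  int32_t immediate() const { return imm_; }

 private:
  enum Kind { kImmediate, kRegister, kShiftedRegister } kind_;
  int32_t imm_ = 0;
  Register reg_ = R0;
  Shift shift_ = LSL;
  int amount_ = 0;
};

int main() {
  Operand op(0);
  std::cout << Operand::CanHold(0xFF, &op) << "\n";         // 1: fits in 8 bits
  std::cout << Operand::CanHold(0xFF000000u, &op) << "\n";  // 1: 0xFF rotated right by 8
  std::cout << Operand::CanHold(0x101, &op) << "\n";        // 0: needs 9 significant bits
  return 0;
}
```

When the check fails, the call sites in the diff fall back to loading the constant into IP and passing a register operand instead, as in __ LoadImmediate(IP, imm); __ and_(result, left, Operand(IP));.
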
OLD | NEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM. 5 #include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6 #if defined(TARGET_ARCH_ARM) 6 #if defined(TARGET_ARCH_ARM)
7 7
8 #include "vm/intermediate_language.h" 8 #include "vm/intermediate_language.h"
9 9
10 #include "vm/cpu.h" 10 #include "vm/cpu.h"
(...skipping 73 matching lines...)
84 // that will be overwritten by the patch instructions: a branch macro sequence. 84 // that will be overwritten by the patch instructions: a branch macro sequence.
85 void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 85 void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
86 const Register result = locs()->in(0).reg(); 86 const Register result = locs()->in(0).reg();
87 ASSERT(result == R0); 87 ASSERT(result == R0);
88 #if defined(DEBUG) 88 #if defined(DEBUG)
89 Label stack_ok; 89 Label stack_ok;
90 __ Comment("Stack Check"); 90 __ Comment("Stack Check");
91 const intptr_t fp_sp_dist = 91 const intptr_t fp_sp_dist =
92 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize; 92 (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize;
93 ASSERT(fp_sp_dist <= 0); 93 ASSERT(fp_sp_dist <= 0);
94 __ sub(R2, SP, ShifterOperand(FP)); 94 __ sub(R2, SP, Operand(FP));
95 __ CompareImmediate(R2, fp_sp_dist); 95 __ CompareImmediate(R2, fp_sp_dist);
96 __ b(&stack_ok, EQ); 96 __ b(&stack_ok, EQ);
97 __ bkpt(0); 97 __ bkpt(0);
98 __ Bind(&stack_ok); 98 __ Bind(&stack_ok);
99 #endif 99 #endif
100 __ LeaveDartFrame(); 100 __ LeaveDartFrame();
101 __ Ret(); 101 __ Ret();
102 } 102 }
103 103
104 104
(...skipping 31 matching lines...)
136 136
137 137
138 void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 138 void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
139 const Register result = locs()->out(0).reg(); 139 const Register result = locs()->out(0).reg();
140 140
141 Location left = locs()->in(0); 141 Location left = locs()->in(0);
142 Location right = locs()->in(1); 142 Location right = locs()->in(1);
143 ASSERT(!left.IsConstant() || !right.IsConstant()); 143 ASSERT(!left.IsConstant() || !right.IsConstant());
144 144
145 // Clear out register. 145 // Clear out register.
146 __ eor(result, result, ShifterOperand(result)); 146 __ eor(result, result, Operand(result));
147 147
148 // Emit comparison code. This must not overwrite the result register. 148 // Emit comparison code. This must not overwrite the result register.
149 BranchLabels labels = { NULL, NULL, NULL }; 149 BranchLabels labels = { NULL, NULL, NULL };
150 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels); 150 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
151 151
152 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_); 152 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
153 153
154 intptr_t true_value = if_true_; 154 intptr_t true_value = if_true_;
155 intptr_t false_value = if_false_; 155 intptr_t false_value = if_false_;
156 156
157 if (is_power_of_two_kind) { 157 if (is_power_of_two_kind) {
158 if (true_value == 0) { 158 if (true_value == 0) {
159 // We need to have zero in result on true_condition. 159 // We need to have zero in result on true_condition.
160 true_condition = NegateCondition(true_condition); 160 true_condition = NegateCondition(true_condition);
161 } 161 }
162 } else { 162 } else {
163 if (true_value == 0) { 163 if (true_value == 0) {
164 // Swap values so that false_value is zero. 164 // Swap values so that false_value is zero.
165 intptr_t temp = true_value; 165 intptr_t temp = true_value;
166 true_value = false_value; 166 true_value = false_value;
167 false_value = temp; 167 false_value = temp;
168 } else { 168 } else {
169 true_condition = NegateCondition(true_condition); 169 true_condition = NegateCondition(true_condition);
170 } 170 }
171 } 171 }
172 172
173 __ mov(result, ShifterOperand(1), true_condition); 173 __ mov(result, Operand(1), true_condition);
174 174
175 if (is_power_of_two_kind) { 175 if (is_power_of_two_kind) {
176 const intptr_t shift = 176 const intptr_t shift =
177 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value)); 177 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
178 __ Lsl(result, result, shift + kSmiTagSize); 178 __ Lsl(result, result, shift + kSmiTagSize);
179 } else { 179 } else {
180 __ sub(result, result, ShifterOperand(1)); 180 __ sub(result, result, Operand(1));
181 const int32_t val = 181 const int32_t val =
182 Smi::RawValue(true_value) - Smi::RawValue(false_value); 182 Smi::RawValue(true_value) - Smi::RawValue(false_value);
183 __ AndImmediate(result, result, val); 183 __ AndImmediate(result, result, val);
184 if (false_value != 0) { 184 if (false_value != 0) {
185 __ AddImmediate(result, Smi::RawValue(false_value)); 185 __ AddImmediate(result, Smi::RawValue(false_value));
186 } 186 }
187 } 187 }
188 } 188 }
189 189
190 190
(...skipping 242 matching lines...)
433 return NULL; 433 return NULL;
434 } 434 }
435 435
436 436
437 static void LoadValueCid(FlowGraphCompiler* compiler, 437 static void LoadValueCid(FlowGraphCompiler* compiler,
438 Register value_cid_reg, 438 Register value_cid_reg,
439 Register value_reg, 439 Register value_reg,
440 Label* value_is_smi = NULL) { 440 Label* value_is_smi = NULL) {
441 Label done; 441 Label done;
442 if (value_is_smi == NULL) { 442 if (value_is_smi == NULL) {
443 __ mov(value_cid_reg, ShifterOperand(kSmiCid)); 443 __ mov(value_cid_reg, Operand(kSmiCid));
444 } 444 }
445 __ tst(value_reg, ShifterOperand(kSmiTagMask)); 445 __ tst(value_reg, Operand(kSmiTagMask));
446 if (value_is_smi == NULL) { 446 if (value_is_smi == NULL) {
447 __ b(&done, EQ); 447 __ b(&done, EQ);
448 } else { 448 } else {
449 __ b(value_is_smi, EQ); 449 __ b(value_is_smi, EQ);
450 } 450 }
451 __ LoadClassId(value_cid_reg, value_reg); 451 __ LoadClassId(value_cid_reg, value_reg);
452 __ Bind(&done); 452 __ Bind(&done);
453 } 453 }
454 454
455 455
(...skipping 43 matching lines...)
499 ASSERT(!left.IsConstant() || !right.IsConstant()); 499 ASSERT(!left.IsConstant() || !right.IsConstant());
500 500
501 Condition true_condition = TokenKindToSmiCondition(kind); 501 Condition true_condition = TokenKindToSmiCondition(kind);
502 502
503 if (left.IsConstant()) { 503 if (left.IsConstant()) {
504 __ CompareObject(right.reg(), left.constant()); 504 __ CompareObject(right.reg(), left.constant());
505 true_condition = FlipCondition(true_condition); 505 true_condition = FlipCondition(true_condition);
506 } else if (right.IsConstant()) { 506 } else if (right.IsConstant()) {
507 __ CompareObject(left.reg(), right.constant()); 507 __ CompareObject(left.reg(), right.constant());
508 } else { 508 } else {
509 __ cmp(left.reg(), ShifterOperand(right.reg())); 509 __ cmp(left.reg(), Operand(right.reg()));
510 } 510 }
511 return true_condition; 511 return true_condition;
512 } 512 }
513 513
514 514
515 static Condition TokenKindToMintCondition(Token::Kind kind) { 515 static Condition TokenKindToMintCondition(Token::Kind kind) {
516 switch (kind) { 516 switch (kind) {
517 case Token::kEQ: return EQ; 517 case Token::kEQ: return EQ;
518 case Token::kNE: return NE; 518 case Token::kNE: return NE;
519 case Token::kLT: return LT; 519 case Token::kLT: return LT;
(...skipping 12 matching lines...)
532 Token::Kind kind) { 532 Token::Kind kind) {
533 ASSERT(Token::IsEqualityOperator(kind)); 533 ASSERT(Token::IsEqualityOperator(kind));
534 PairLocation* left_pair = locs->in(0).AsPairLocation(); 534 PairLocation* left_pair = locs->in(0).AsPairLocation();
535 Register left1 = left_pair->At(0).reg(); 535 Register left1 = left_pair->At(0).reg();
536 Register left2 = left_pair->At(1).reg(); 536 Register left2 = left_pair->At(1).reg();
537 PairLocation* right_pair = locs->in(1).AsPairLocation(); 537 PairLocation* right_pair = locs->in(1).AsPairLocation();
538 Register right1 = right_pair->At(0).reg(); 538 Register right1 = right_pair->At(0).reg();
539 Register right2 = right_pair->At(1).reg(); 539 Register right2 = right_pair->At(1).reg();
540 540
541 // Compare lower. 541 // Compare lower.
542 __ cmp(left1, ShifterOperand(right1)); 542 __ cmp(left1, Operand(right1));
543 // Compare upper if lower is equal. 543 // Compare upper if lower is equal.
544 __ cmp(left2, ShifterOperand(right2), EQ); 544 __ cmp(left2, Operand(right2), EQ);
545 return TokenKindToMintCondition(kind); 545 return TokenKindToMintCondition(kind);
546 } 546 }
547 547
548 548
549 static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler, 549 static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
550 LocationSummary* locs, 550 LocationSummary* locs,
551 Token::Kind kind) { 551 Token::Kind kind) {
552 PairLocation* left_pair = locs->in(0).AsPairLocation(); 552 PairLocation* left_pair = locs->in(0).AsPairLocation();
553 Register left1 = left_pair->At(0).reg(); 553 Register left1 = left_pair->At(0).reg();
554 Register left2 = left_pair->At(1).reg(); 554 Register left2 = left_pair->At(1).reg();
(...skipping 18 matching lines...)
573 hi_false_cond = LT; 573 hi_false_cond = LT;
574 lo_false_cond = (kind == Token::kGT) ? LS : CC; 574 lo_false_cond = (kind == Token::kGT) ? LS : CC;
575 break; 575 break;
576 default: 576 default:
577 UNREACHABLE(); 577 UNREACHABLE();
578 hi_true_cond = hi_false_cond = lo_false_cond = VS; 578 hi_true_cond = hi_false_cond = lo_false_cond = VS;
579 } 579 }
580 580
581 Label is_true, is_false, done; 581 Label is_true, is_false, done;
582 // Compare upper halves first. 582 // Compare upper halves first.
583 __ cmp(left2, ShifterOperand(right2)); 583 __ cmp(left2, Operand(right2));
584 __ LoadImmediate(out, 0, hi_false_cond); 584 __ LoadImmediate(out, 0, hi_false_cond);
585 __ LoadImmediate(out, 1, hi_true_cond); 585 __ LoadImmediate(out, 1, hi_true_cond);
586 // If higher words aren't equal, skip comparing lower words. 586 // If higher words aren't equal, skip comparing lower words.
587 __ b(&done, NE); 587 __ b(&done, NE);
588 588
589 __ cmp(left1, ShifterOperand(right1)); 589 __ cmp(left1, Operand(right1));
590 __ LoadImmediate(out, 1); 590 __ LoadImmediate(out, 1);
591 __ LoadImmediate(out, 0, lo_false_cond); 591 __ LoadImmediate(out, 0, lo_false_cond);
592 __ Bind(&done); 592 __ Bind(&done);
593 593
594 return NegateCondition(lo_false_cond); 594 return NegateCondition(lo_false_cond);
595 } 595 }
596 596
597 597
598 static Condition TokenKindToDoubleCondition(Token::Kind kind) { 598 static Condition TokenKindToDoubleCondition(Token::Kind kind) {
599 switch (kind) { 599 switch (kind) {
(...skipping 94 matching lines...)
694 Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler, 694 Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
695 BranchLabels labels) { 695 BranchLabels labels) {
696 const Register left = locs()->in(0).reg(); 696 const Register left = locs()->in(0).reg();
697 Location right = locs()->in(1); 697 Location right = locs()->in(1);
698 if (right.IsConstant()) { 698 if (right.IsConstant()) {
699 ASSERT(right.constant().IsSmi()); 699 ASSERT(right.constant().IsSmi());
700 const int32_t imm = 700 const int32_t imm =
701 reinterpret_cast<int32_t>(right.constant().raw()); 701 reinterpret_cast<int32_t>(right.constant().raw());
702 __ TestImmediate(left, imm); 702 __ TestImmediate(left, imm);
703 } else { 703 } else {
704 __ tst(left, ShifterOperand(right.reg())); 704 __ tst(left, Operand(right.reg()));
705 } 705 }
706 Condition true_condition = (kind() == Token::kNE) ? NE : EQ; 706 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
707 return true_condition; 707 return true_condition;
708 } 708 }
709 709
710 710
711 void TestSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 711 void TestSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
712 // Never emitted outside of the BranchInstr. 712 // Never emitted outside of the BranchInstr.
713 UNREACHABLE(); 713 UNREACHABLE();
714 } 714 }
(...skipping 26 matching lines...)
741 const Register val_reg = locs()->in(0).reg(); 741 const Register val_reg = locs()->in(0).reg();
742 const Register cid_reg = locs()->temp(0).reg(); 742 const Register cid_reg = locs()->temp(0).reg();
743 743
744 Label* deopt = CanDeoptimize() ? 744 Label* deopt = CanDeoptimize() ?
745 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL; 745 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids) : NULL;
746 746
747 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0; 747 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
748 const ZoneGrowableArray<intptr_t>& data = cid_results(); 748 const ZoneGrowableArray<intptr_t>& data = cid_results();
749 ASSERT(data[0] == kSmiCid); 749 ASSERT(data[0] == kSmiCid);
750 bool result = data[1] == true_result; 750 bool result = data[1] == true_result;
751 __ tst(val_reg, ShifterOperand(kSmiTagMask)); 751 __ tst(val_reg, Operand(kSmiTagMask));
752 __ b(result ? labels.true_label : labels.false_label, EQ); 752 __ b(result ? labels.true_label : labels.false_label, EQ);
753 __ LoadClassId(cid_reg, val_reg); 753 __ LoadClassId(cid_reg, val_reg);
754 754
755 for (intptr_t i = 2; i < data.length(); i += 2) { 755 for (intptr_t i = 2; i < data.length(); i += 2) {
756 const intptr_t test_cid = data[i]; 756 const intptr_t test_cid = data[i];
757 ASSERT(test_cid != kSmiCid); 757 ASSERT(test_cid != kSmiCid);
758 result = data[i + 1] == true_result; 758 result = data[i + 1] == true_result;
759 __ CompareImmediate(cid_reg, test_cid); 759 __ CompareImmediate(cid_reg, test_cid);
760 __ b(result ? labels.true_label : labels.false_label, EQ); 760 __ b(result ? labels.true_label : labels.false_label, EQ);
761 } 761 }
(...skipping 223 matching lines...)
985 Location::RequiresRegister(), 985 Location::RequiresRegister(),
986 LocationSummary::kNoCall); 986 LocationSummary::kNoCall);
987 } 987 }
988 988
989 989
990 void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 990 void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
991 ASSERT(cid_ == kOneByteStringCid); 991 ASSERT(cid_ == kOneByteStringCid);
992 const Register str = locs()->in(0).reg(); 992 const Register str = locs()->in(0).reg();
993 const Register result = locs()->out(0).reg(); 993 const Register result = locs()->out(0).reg();
994 __ ldr(result, FieldAddress(str, String::length_offset())); 994 __ ldr(result, FieldAddress(str, String::length_offset()));
995 __ cmp(result, ShifterOperand(Smi::RawValue(1))); 995 __ cmp(result, Operand(Smi::RawValue(1)));
996 __ LoadImmediate(result, -1, NE); 996 __ LoadImmediate(result, -1, NE);
997 __ ldrb(result, FieldAddress(str, OneByteString::data_offset()), EQ); 997 __ ldrb(result, FieldAddress(str, OneByteString::data_offset()), EQ);
998 __ SmiTag(result); 998 __ SmiTag(result);
999 } 999 }
1000 1000
1001 1001
1002 LocationSummary* StringInterpolateInstr::MakeLocationSummary(Isolate* isolate, 1002 LocationSummary* StringInterpolateInstr::MakeLocationSummary(Isolate* isolate,
1003 bool opt) const { 1003 bool opt) const {
1004 const intptr_t kNumInputs = 1; 1004 const intptr_t kNumInputs = 1;
1005 const intptr_t kNumTemps = 0; 1005 const intptr_t kNumTemps = 0;
(...skipping 44 matching lines...)
1050 kNumInputs, 1050 kNumInputs,
1051 Location::RequiresRegister(), 1051 Location::RequiresRegister(),
1052 LocationSummary::kNoCall); 1052 LocationSummary::kNoCall);
1053 } 1053 }
1054 1054
1055 1055
1056 void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 1056 void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1057 const Register object = locs()->in(0).reg(); 1057 const Register object = locs()->in(0).reg();
1058 const Register result = locs()->out(0).reg(); 1058 const Register result = locs()->out(0).reg();
1059 Label load, done; 1059 Label load, done;
1060 __ tst(object, ShifterOperand(kSmiTagMask)); 1060 __ tst(object, Operand(kSmiTagMask));
1061 __ b(&load, NE); 1061 __ b(&load, NE);
1062 __ LoadImmediate(result, Smi::RawValue(kSmiCid)); 1062 __ LoadImmediate(result, Smi::RawValue(kSmiCid));
1063 __ b(&done); 1063 __ b(&done);
1064 __ Bind(&load); 1064 __ Bind(&load);
1065 __ LoadClassId(result, object); 1065 __ LoadClassId(result, object);
1066 __ SmiTag(result); 1066 __ SmiTag(result);
1067 __ Bind(&done); 1067 __ Bind(&done);
1068 } 1068 }
1069 1069
1070 1070
(...skipping 162 matching lines...)
1233 int32_t offset = 1233 int32_t offset =
1234 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag); 1234 is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
1235 const OperandSize size = Address::OperandSizeFor(cid); 1235 const OperandSize size = Address::OperandSizeFor(cid);
1236 ASSERT(array != IP); 1236 ASSERT(array != IP);
1237 ASSERT(index != IP); 1237 ASSERT(index != IP);
1238 const Register base = is_load ? IP : index; 1238 const Register base = is_load ? IP : index;
1239 if ((offset != 0) || 1239 if ((offset != 0) ||
1240 (size == kSWord) || (size == kDWord) || (size == kRegList)) { 1240 (size == kSWord) || (size == kDWord) || (size == kRegList)) {
1241 if (shift < 0) { 1241 if (shift < 0) {
1242 ASSERT(shift == -1); 1242 ASSERT(shift == -1);
1243 assembler->add(base, array, ShifterOperand(index, ASR, 1)); 1243 assembler->add(base, array, Operand(index, ASR, 1));
1244 } else { 1244 } else {
1245 assembler->add(base, array, ShifterOperand(index, LSL, shift)); 1245 assembler->add(base, array, Operand(index, LSL, shift));
1246 } 1246 }
1247 } else { 1247 } else {
1248 if (shift < 0) { 1248 if (shift < 0) {
1249 ASSERT(shift == -1); 1249 ASSERT(shift == -1);
1250 return Address(array, index, ASR, 1); 1250 return Address(array, index, ASR, 1);
1251 } else { 1251 } else {
1252 return Address(array, index, LSL, shift); 1252 return Address(array, index, LSL, shift);
1253 } 1253 }
1254 } 1254 }
1255 int32_t offset_mask = 0; 1255 int32_t offset_mask = 0;
(...skipping 61 matching lines...)
1317 case kTypedDataInt32ArrayCid: 1317 case kTypedDataInt32ArrayCid:
1318 // Load low word. 1318 // Load low word.
1319 __ ldr(result1, element_address); 1319 __ ldr(result1, element_address);
1320 // Sign extend into high word. 1320 // Sign extend into high word.
1321 __ SignFill(result2, result1); 1321 __ SignFill(result2, result1);
1322 break; 1322 break;
1323 case kTypedDataUint32ArrayCid: 1323 case kTypedDataUint32ArrayCid:
1324 // Load low word. 1324 // Load low word.
1325 __ ldr(result1, element_address); 1325 __ ldr(result1, element_address);
1326 // Zero high word. 1326 // Zero high word.
1327 __ eor(result2, result2, ShifterOperand(result2)); 1327 __ eor(result2, result2, Operand(result2));
1328 break; 1328 break;
1329 default: 1329 default:
1330 UNREACHABLE(); 1330 UNREACHABLE();
1331 break; 1331 break;
1332 } 1332 }
1333 return; 1333 return;
1334 } 1334 }
1335 1335
1336 ASSERT(representation() == kTagged); 1336 ASSERT(representation() == kTagged);
1337 1337
(...skipping 197 matching lines...)
1535 value = 0xFF; 1535 value = 0xFF;
1536 } else if (value < 0) { 1536 } else if (value < 0) {
1537 value = 0; 1537 value = 0;
1538 } 1538 }
1539 __ LoadImmediate(IP, static_cast<int8_t>(value)); 1539 __ LoadImmediate(IP, static_cast<int8_t>(value));
1540 __ strb(IP, element_address); 1540 __ strb(IP, element_address);
1541 } else { 1541 } else {
1542 const Register value = locs()->in(2).reg(); 1542 const Register value = locs()->in(2).reg();
1543 Label store_value; 1543 Label store_value;
1544 __ SmiUntag(value); 1544 __ SmiUntag(value);
1545 __ cmp(value, ShifterOperand(0xFF)); 1545 __ cmp(value, Operand(0xFF));
1546 // Clamp to 0x00 or 0xFF respectively. 1546 // Clamp to 0x00 or 0xFF respectively.
1547 __ b(&store_value, LS); 1547 __ b(&store_value, LS);
1548 __ mov(value, ShifterOperand(0x00), LE); 1548 __ mov(value, Operand(0x00), LE);
1549 __ mov(value, ShifterOperand(0xFF), GT); 1549 __ mov(value, Operand(0xFF), GT);
1550 __ Bind(&store_value); 1550 __ Bind(&store_value);
1551 __ strb(value, element_address); 1551 __ strb(value, element_address);
1552 } 1552 }
1553 break; 1553 break;
1554 } 1554 }
1555 case kTypedDataInt16ArrayCid: 1555 case kTypedDataInt16ArrayCid:
1556 case kTypedDataUint16ArrayCid: { 1556 case kTypedDataUint16ArrayCid: {
1557 const Register value = locs()->in(2).reg(); 1557 const Register value = locs()->in(2).reg();
1558 __ SmiUntag(value); 1558 __ SmiUntag(value);
1559 __ strh(value, element_address); 1559 __ strh(value, element_address);
(...skipping 110 matching lines...)
1670 FieldAddress field_length_operand( 1670 FieldAddress field_length_operand(
1671 field_reg, Field::guarded_list_length_offset()); 1671 field_reg, Field::guarded_list_length_offset());
1672 1672
1673 ASSERT(value_cid_reg != kNoRegister); 1673 ASSERT(value_cid_reg != kNoRegister);
1674 ASSERT((value_cid_reg != value_reg) && (field_reg != value_cid_reg)); 1674 ASSERT((value_cid_reg != value_reg) && (field_reg != value_cid_reg));
1675 1675
1676 if (value_cid == kDynamicCid) { 1676 if (value_cid == kDynamicCid) {
1677 LoadValueCid(compiler, value_cid_reg, value_reg); 1677 LoadValueCid(compiler, value_cid_reg, value_reg);
1678 Label skip_length_check; 1678 Label skip_length_check;
1679 __ ldr(IP, field_cid_operand); 1679 __ ldr(IP, field_cid_operand);
1680 __ cmp(value_cid_reg, ShifterOperand(IP)); 1680 __ cmp(value_cid_reg, Operand(IP));
1681 __ b(&skip_length_check, NE); 1681 __ b(&skip_length_check, NE);
1682 if (field_has_length) { 1682 if (field_has_length) {
1683 ASSERT(temp_reg != kNoRegister); 1683 ASSERT(temp_reg != kNoRegister);
1684 // Field guard may have remembered list length, check it. 1684 // Field guard may have remembered list length, check it.
1685 if ((field_cid == kArrayCid) || (field_cid == kImmutableArrayCid)) { 1685 if ((field_cid == kArrayCid) || (field_cid == kImmutableArrayCid)) {
1686 __ ldr(temp_reg, 1686 __ ldr(temp_reg,
1687 FieldAddress(value_reg, Array::length_offset())); 1687 FieldAddress(value_reg, Array::length_offset()));
1688 __ CompareImmediate(temp_reg, Smi::RawValue(field_length)); 1688 __ CompareImmediate(temp_reg, Smi::RawValue(field_length));
1689 } else if (RawObject::IsTypedDataClassId(field_cid)) { 1689 } else if (RawObject::IsTypedDataClassId(field_cid)) {
1690 __ ldr(temp_reg, 1690 __ ldr(temp_reg,
(...skipping 18 matching lines...)
1709 __ b(&no_fixed_length, EQ); 1709 __ b(&no_fixed_length, EQ);
1710 // Check for typed data array. 1710 // Check for typed data array.
1711 __ CompareImmediate(value_cid_reg, kTypedDataInt32x4ArrayCid); 1711 __ CompareImmediate(value_cid_reg, kTypedDataInt32x4ArrayCid);
1712 __ b(&no_fixed_length, GT); 1712 __ b(&no_fixed_length, GT);
1713 __ CompareImmediate(value_cid_reg, kTypedDataInt8ArrayCid); 1713 __ CompareImmediate(value_cid_reg, kTypedDataInt8ArrayCid);
1714 // Could still be a regular array. 1714 // Could still be a regular array.
1715 __ b(&check_array, LT); 1715 __ b(&check_array, LT);
1716 __ ldr(temp_reg, 1716 __ ldr(temp_reg,
1717 FieldAddress(value_reg, TypedData::length_offset())); 1717 FieldAddress(value_reg, TypedData::length_offset()));
1718 __ ldr(IP, field_length_operand); 1718 __ ldr(IP, field_length_operand);
1719 __ cmp(temp_reg, ShifterOperand(IP)); 1719 __ cmp(temp_reg, Operand(IP));
1720 __ b(&length_compared); 1720 __ b(&length_compared);
1721 // Check for regular array. 1721 // Check for regular array.
1722 __ Bind(&check_array); 1722 __ Bind(&check_array);
1723 __ CompareImmediate(value_cid_reg, kImmutableArrayCid); 1723 __ CompareImmediate(value_cid_reg, kImmutableArrayCid);
1724 __ b(&no_fixed_length, GT); 1724 __ b(&no_fixed_length, GT);
1725 __ CompareImmediate(value_cid_reg, kArrayCid); 1725 __ CompareImmediate(value_cid_reg, kArrayCid);
1726 __ b(&no_fixed_length, LT); 1726 __ b(&no_fixed_length, LT);
1727 __ ldr(temp_reg, 1727 __ ldr(temp_reg,
1728 FieldAddress(value_reg, Array::length_offset())); 1728 FieldAddress(value_reg, Array::length_offset()));
1729 __ ldr(IP, field_length_operand); 1729 __ ldr(IP, field_length_operand);
1730 __ cmp(temp_reg, ShifterOperand(IP)); 1730 __ cmp(temp_reg, Operand(IP));
1731 __ b(&length_compared); 1731 __ b(&length_compared);
1732 __ Bind(&no_fixed_length); 1732 __ Bind(&no_fixed_length);
1733 __ b(fail); 1733 __ b(fail);
1734 __ Bind(&length_compared); 1734 __ Bind(&length_compared);
1735 // Following branch cannot occur, fall through. 1735 // Following branch cannot occur, fall through.
1736 } 1736 }
1737 __ b(fail, NE); 1737 __ b(fail, NE);
1738 } 1738 }
1739 __ Bind(&skip_length_check); 1739 __ Bind(&skip_length_check);
1740 __ ldr(IP, field_nullability_operand); 1740 __ ldr(IP, field_nullability_operand);
1741 __ cmp(value_cid_reg, ShifterOperand(IP)); 1741 __ cmp(value_cid_reg, Operand(IP));
1742 } else if (value_cid == kNullCid) { 1742 } else if (value_cid == kNullCid) {
1743 __ ldr(value_cid_reg, field_nullability_operand); 1743 __ ldr(value_cid_reg, field_nullability_operand);
1744 __ CompareImmediate(value_cid_reg, value_cid); 1744 __ CompareImmediate(value_cid_reg, value_cid);
1745 } else { 1745 } else {
1746 Label skip_length_check; 1746 Label skip_length_check;
1747 __ ldr(value_cid_reg, field_cid_operand); 1747 __ ldr(value_cid_reg, field_cid_operand);
1748 __ CompareImmediate(value_cid_reg, value_cid); 1748 __ CompareImmediate(value_cid_reg, value_cid);
1749 __ b(&skip_length_check, NE); 1749 __ b(&skip_length_check, NE);
1750 if (field_has_length) { 1750 if (field_has_length) {
1751 ASSERT(value_cid_reg != kNoRegister); 1751 ASSERT(value_cid_reg != kNoRegister);
(...skipping 101 matching lines...)
1853 } 1853 }
1854 } else { 1854 } else {
1855 ASSERT(compiler->is_optimizing()); 1855 ASSERT(compiler->is_optimizing());
1856 ASSERT(deopt != NULL); 1856 ASSERT(deopt != NULL);
1857 // Field guard class has been initialized and is known. 1857 // Field guard class has been initialized and is known.
1858 if (field_reg != kNoRegister) { 1858 if (field_reg != kNoRegister) {
1859 __ LoadObject(field_reg, Field::ZoneHandle(field().raw())); 1859 __ LoadObject(field_reg, Field::ZoneHandle(field().raw()));
1860 } 1860 }
1861 if (value_cid == kDynamicCid) { 1861 if (value_cid == kDynamicCid) {
1862 // Field's guarded class id is fixed but value's class id is not known. 1862 // Field's guarded class id is fixed but value's class id is not known.
1863 __ tst(value_reg, ShifterOperand(kSmiTagMask)); 1863 __ tst(value_reg, Operand(kSmiTagMask));
1864 1864
1865 if (field_cid != kSmiCid) { 1865 if (field_cid != kSmiCid) {
1866 __ b(fail, EQ); 1866 __ b(fail, EQ);
1867 __ LoadClassId(value_cid_reg, value_reg); 1867 __ LoadClassId(value_cid_reg, value_reg);
1868 __ CompareImmediate(value_cid_reg, field_cid); 1868 __ CompareImmediate(value_cid_reg, field_cid);
1869 } 1869 }
1870 1870
1871 if (field_has_length) { 1871 if (field_has_length) {
1872 __ b(fail, NE); 1872 __ b(fail, NE);
1873 // Classes are same, perform guarded list length check. 1873 // Classes are same, perform guarded list length check.
1874 ASSERT(field_reg != kNoRegister); 1874 ASSERT(field_reg != kNoRegister);
1875 ASSERT(value_cid_reg != kNoRegister); 1875 ASSERT(value_cid_reg != kNoRegister);
1876 FieldAddress field_length_operand( 1876 FieldAddress field_length_operand(
1877 field_reg, Field::guarded_list_length_offset()); 1877 field_reg, Field::guarded_list_length_offset());
1878 if ((field_cid == kArrayCid) || (field_cid == kImmutableArrayCid)) { 1878 if ((field_cid == kArrayCid) || (field_cid == kImmutableArrayCid)) {
1879 // Destroy value_cid_reg (safe because we are finished with it). 1879 // Destroy value_cid_reg (safe because we are finished with it).
1880 __ ldr(value_cid_reg, 1880 __ ldr(value_cid_reg,
1881 FieldAddress(value_reg, Array::length_offset())); 1881 FieldAddress(value_reg, Array::length_offset()));
1882 } else if (RawObject::IsTypedDataClassId(field_cid)) { 1882 } else if (RawObject::IsTypedDataClassId(field_cid)) {
1883 // Destroy value_cid_reg (safe because we are finished with it). 1883 // Destroy value_cid_reg (safe because we are finished with it).
1884 __ ldr(value_cid_reg, 1884 __ ldr(value_cid_reg,
1885 FieldAddress(value_reg, TypedData::length_offset())); 1885 FieldAddress(value_reg, TypedData::length_offset()));
1886 } 1886 }
1887 __ ldr(IP, field_length_operand); 1887 __ ldr(IP, field_length_operand);
1888 __ cmp(value_cid_reg, ShifterOperand(IP)); 1888 __ cmp(value_cid_reg, Operand(IP));
1889 } 1889 }
1890 1890
1891 if (field().is_nullable() && (field_cid != kNullCid)) { 1891 if (field().is_nullable() && (field_cid != kNullCid)) {
1892 __ b(&ok, EQ); 1892 __ b(&ok, EQ);
1893 __ CompareImmediate(value_reg, 1893 __ CompareImmediate(value_reg,
1894 reinterpret_cast<intptr_t>(Object::null())); 1894 reinterpret_cast<intptr_t>(Object::null()));
1895 } 1895 }
1896 __ b(fail, NE); 1896 __ b(fail, NE);
1897 } else { 1897 } else {
1898 // Both value's and field's class id is known. 1898 // Both value's and field's class id is known.
(...skipping 164 matching lines...)
2063 Label store_float32x4; 2063 Label store_float32x4;
2064 Label store_float64x2; 2064 Label store_float64x2;
2065 2065
2066 __ LoadObject(temp, Field::ZoneHandle(field().raw())); 2066 __ LoadObject(temp, Field::ZoneHandle(field().raw()));
2067 2067
2068 __ ldr(temp2, FieldAddress(temp, Field::is_nullable_offset())); 2068 __ ldr(temp2, FieldAddress(temp, Field::is_nullable_offset()));
2069 __ CompareImmediate(temp2, kNullCid); 2069 __ CompareImmediate(temp2, kNullCid);
2070 __ b(&store_pointer, EQ); 2070 __ b(&store_pointer, EQ);
2071 2071
2072 __ ldrb(temp2, FieldAddress(temp, Field::kind_bits_offset())); 2072 __ ldrb(temp2, FieldAddress(temp, Field::kind_bits_offset()));
2073 __ tst(temp2, ShifterOperand(1 << Field::kUnboxingCandidateBit)); 2073 __ tst(temp2, Operand(1 << Field::kUnboxingCandidateBit));
2074 __ b(&store_pointer, EQ); 2074 __ b(&store_pointer, EQ);
2075 2075
2076 __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset())); 2076 __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
2077 __ CompareImmediate(temp2, kDoubleCid); 2077 __ CompareImmediate(temp2, kDoubleCid);
2078 __ b(&store_double, EQ); 2078 __ b(&store_double, EQ);
2079 2079
2080 __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset())); 2080 __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
2081 __ CompareImmediate(temp2, kFloat32x4Cid); 2081 __ CompareImmediate(temp2, kFloat32x4Cid);
2082 __ b(&store_float32x4, EQ); 2082 __ b(&store_float32x4, EQ);
2083 2083
(...skipping 219 matching lines...)
2303 __ LoadImmediate(R6, heap->TopAddress()); 2303 __ LoadImmediate(R6, heap->TopAddress());
2304 __ ldr(R0, Address(R6, 0)); // Potential new object start. 2304 __ ldr(R0, Address(R6, 0)); // Potential new object start.
2305 __ AddImmediate(R7, R0, kArraySize); // Potential next object start. 2305 __ AddImmediate(R7, R0, kArraySize); // Potential next object start.
2306 __ b(slow_path, VS); 2306 __ b(slow_path, VS);
2307 2307
2308 // Check if the allocation fits into the remaining space. 2308 // Check if the allocation fits into the remaining space.
2309 // R0: potential new object start. 2309 // R0: potential new object start.
2310 // R7: potential next object start. 2310 // R7: potential next object start.
2311 __ LoadImmediate(R3, heap->EndAddress()); 2311 __ LoadImmediate(R3, heap->EndAddress());
2312 __ ldr(R3, Address(R3, 0)); 2312 __ ldr(R3, Address(R3, 0));
2313 __ cmp(R7, ShifterOperand(R3)); 2313 __ cmp(R7, Operand(R3));
2314 __ b(slow_path, CS); 2314 __ b(slow_path, CS);
2315 2315
2316 // Successfully allocated the object(s), now update top to point to 2316 // Successfully allocated the object(s), now update top to point to
2317 // next object start and initialize the object. 2317 // next object start and initialize the object.
2318 __ str(R7, Address(R6, 0)); 2318 __ str(R7, Address(R6, 0));
2319 __ add(R0, R0, ShifterOperand(kHeapObjectTag)); 2319 __ add(R0, R0, Operand(kHeapObjectTag));
2320 __ LoadImmediate(R8, heap->TopAddress()); 2320 __ LoadImmediate(R8, heap->TopAddress());
2321 __ UpdateAllocationStatsWithSize(kArrayCid, R8, R4); 2321 __ UpdateAllocationStatsWithSize(kArrayCid, R8, R4);
2322 2322
2323 2323
2324 // Initialize the tags. 2324 // Initialize the tags.
2325 // R0: new object start as a tagged pointer. 2325 // R0: new object start as a tagged pointer.
2326 { 2326 {
2327 uword tags = 0; 2327 uword tags = 0;
2328 tags = RawObject::ClassIdTag::update(kArrayCid, tags); 2328 tags = RawObject::ClassIdTag::update(kArrayCid, tags);
2329 tags = RawObject::SizeTag::update(kArraySize, tags); 2329 tags = RawObject::SizeTag::update(kArraySize, tags);
(...skipping 17 matching lines...)
2347 // R0: new object start as a tagged pointer. 2347 // R0: new object start as a tagged pointer.
2348 // R7: new object end address. 2348 // R7: new object end address.
2349 // R8: iterator which initially points to the start of the variable 2349 // R8: iterator which initially points to the start of the variable
2350 // data area to be initialized. 2350 // data area to be initialized.
2351 // R3: null 2351 // R3: null
2352 __ LoadImmediate(R3, reinterpret_cast<intptr_t>(Object::null())); 2352 __ LoadImmediate(R3, reinterpret_cast<intptr_t>(Object::null()));
2353 __ AddImmediate(R8, R0, sizeof(RawArray) - kHeapObjectTag); 2353 __ AddImmediate(R8, R0, sizeof(RawArray) - kHeapObjectTag);
2354 2354
2355 Label init_loop; 2355 Label init_loop;
2356 __ Bind(&init_loop); 2356 __ Bind(&init_loop);
2357 __ cmp(R8, ShifterOperand(R7)); 2357 __ cmp(R8, Operand(R7));
2358 __ str(R3, Address(R8, 0), CC); 2358 __ str(R3, Address(R8, 0), CC);
2359 __ AddImmediate(R8, kWordSize, CC); 2359 __ AddImmediate(R8, kWordSize, CC);
2360 __ b(&init_loop, CC); 2360 __ b(&init_loop, CC);
2361 __ b(done); 2361 __ b(done);
2362 } 2362 }
2363 2363
2364 2364
2365 void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2365 void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2366 const Register kLengthReg = R2; 2366 const Register kLengthReg = R2;
2367 const Register kElemTypeReg = R1; 2367 const Register kElemTypeReg = R1;
(...skipping 78 matching lines...)
2446 const ExternalLabel label(stub.EntryPoint()); 2446 const ExternalLabel label(stub.EntryPoint());
2447 2447
2448 LocationSummary* locs = instruction_->locs(); 2448 LocationSummary* locs = instruction_->locs();
2449 locs->live_registers()->Remove(locs->out(0)); 2449 locs->live_registers()->Remove(locs->out(0));
2450 2450
2451 compiler->SaveLiveRegisters(locs); 2451 compiler->SaveLiveRegisters(locs);
2452 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position. 2452 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
2453 &label, 2453 &label,
2454 PcDescriptors::kOther, 2454 PcDescriptors::kOther,
2455 locs); 2455 locs);
2456 __ mov(locs->out(0).reg(), ShifterOperand(R0)); 2456 __ mov(locs->out(0).reg(), Operand(R0));
2457 compiler->RestoreLiveRegisters(locs); 2457 compiler->RestoreLiveRegisters(locs);
2458 2458
2459 __ b(exit_label()); 2459 __ b(exit_label());
2460 } 2460 }
2461 2461
2462 private: 2462 private:
2463 Instruction* instruction_; 2463 Instruction* instruction_;
2464 }; 2464 };
2465 2465
2466 2466
(...skipping 11 matching lines...)
2478 const ExternalLabel label(stub.EntryPoint()); 2478 const ExternalLabel label(stub.EntryPoint());
2479 2479
2480 LocationSummary* locs = instruction_->locs(); 2480 LocationSummary* locs = instruction_->locs();
2481 locs->live_registers()->Remove(locs->out(0)); 2481 locs->live_registers()->Remove(locs->out(0));
2482 2482
2483 compiler->SaveLiveRegisters(locs); 2483 compiler->SaveLiveRegisters(locs);
2484 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position. 2484 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
2485 &label, 2485 &label,
2486 PcDescriptors::kOther, 2486 PcDescriptors::kOther,
2487 locs); 2487 locs);
2488 __ mov(locs->out(0).reg(), ShifterOperand(R0)); 2488 __ mov(locs->out(0).reg(), Operand(R0));
2489 compiler->RestoreLiveRegisters(locs); 2489 compiler->RestoreLiveRegisters(locs);
2490 2490
2491 __ b(exit_label()); 2491 __ b(exit_label());
2492 } 2492 }
2493 2493
2494 private: 2494 private:
2495 Instruction* instruction_; 2495 Instruction* instruction_;
2496 }; 2496 };
2497 2497
2498 2498
(...skipping 200 matching lines...)
2699 ASSERT(!type_arguments().IsUninstantiatedIdentity() && 2699 ASSERT(!type_arguments().IsUninstantiatedIdentity() &&
2700 !type_arguments().CanShareInstantiatorTypeArguments( 2700 !type_arguments().CanShareInstantiatorTypeArguments(
2701 instantiator_class())); 2701 instantiator_class()));
2702 // If the instantiator is null and if the type argument vector 2702 // If the instantiator is null and if the type argument vector
2703 // instantiated from null becomes a vector of dynamic, then use null as 2703 // instantiated from null becomes a vector of dynamic, then use null as
2704 // the type arguments. 2704 // the type arguments.
2705 Label type_arguments_instantiated; 2705 Label type_arguments_instantiated;
2706 const intptr_t len = type_arguments().Length(); 2706 const intptr_t len = type_arguments().Length();
2707 if (type_arguments().IsRawInstantiatedRaw(len)) { 2707 if (type_arguments().IsRawInstantiatedRaw(len)) {
2708 __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null())); 2708 __ LoadImmediate(IP, reinterpret_cast<intptr_t>(Object::null()));
2709 __ cmp(instantiator_reg, ShifterOperand(IP)); 2709 __ cmp(instantiator_reg, Operand(IP));
2710 __ b(&type_arguments_instantiated, EQ); 2710 __ b(&type_arguments_instantiated, EQ);
2711 } 2711 }
2712 2712
2713 __ LoadObject(R2, type_arguments()); 2713 __ LoadObject(R2, type_arguments());
2714 __ ldr(R2, FieldAddress(R2, TypeArguments::instantiations_offset())); 2714 __ ldr(R2, FieldAddress(R2, TypeArguments::instantiations_offset()));
2715 __ AddImmediate(R2, Array::data_offset() - kHeapObjectTag); 2715 __ AddImmediate(R2, Array::data_offset() - kHeapObjectTag);
2716 // The instantiations cache is initialized with Object::zero_array() and is 2716 // The instantiations cache is initialized with Object::zero_array() and is
2717 // therefore guaranteed to contain kNoInstantiator. No length check needed. 2717 // therefore guaranteed to contain kNoInstantiator. No length check needed.
2718 Label loop, found, slow_case; 2718 Label loop, found, slow_case;
2719 __ Bind(&loop); 2719 __ Bind(&loop);
2720 __ ldr(R1, Address(R2, 0 * kWordSize)); // Cached instantiator. 2720 __ ldr(R1, Address(R2, 0 * kWordSize)); // Cached instantiator.
2721 __ cmp(R1, ShifterOperand(R0)); 2721 __ cmp(R1, Operand(R0));
2722 __ b(&found, EQ); 2722 __ b(&found, EQ);
2723 __ AddImmediate(R2, 2 * kWordSize); 2723 __ AddImmediate(R2, 2 * kWordSize);
2724 __ CompareImmediate(R1, Smi::RawValue(StubCode::kNoInstantiator)); 2724 __ CompareImmediate(R1, Smi::RawValue(StubCode::kNoInstantiator));
2725 __ b(&loop, NE); 2725 __ b(&loop, NE);
2726 __ b(&slow_case); 2726 __ b(&slow_case);
2727 __ Bind(&found); 2727 __ Bind(&found);
2728 __ ldr(R0, Address(R2, 1 * kWordSize)); // Cached instantiated args. 2728 __ ldr(R0, Address(R2, 1 * kWordSize)); // Cached instantiated args.
2729 __ b(&type_arguments_instantiated); 2729 __ b(&type_arguments_instantiated);
2730 2730
2731 __ Bind(&slow_case); 2731 __ Bind(&slow_case);
(...skipping 167 matching lines...)
2899 Label osr_entry_label_; 2899 Label osr_entry_label_;
2900 }; 2900 };
2901 2901
2902 2902
2903 void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 2903 void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2904 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this); 2904 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
2905 compiler->AddSlowPathCode(slow_path); 2905 compiler->AddSlowPathCode(slow_path);
2906 2906
2907 __ LoadImmediate(IP, Isolate::Current()->stack_limit_address()); 2907 __ LoadImmediate(IP, Isolate::Current()->stack_limit_address());
2908 __ ldr(IP, Address(IP)); 2908 __ ldr(IP, Address(IP));
2909 __ cmp(SP, ShifterOperand(IP)); 2909 __ cmp(SP, Operand(IP));
2910 __ b(slow_path->entry_label(), LS); 2910 __ b(slow_path->entry_label(), LS);
2911 if (compiler->CanOSRFunction() && in_loop()) { 2911 if (compiler->CanOSRFunction() && in_loop()) {
2912 const Register temp = locs()->temp(0).reg(); 2912 const Register temp = locs()->temp(0).reg();
2913 // In unoptimized code check the usage counter to trigger OSR at loop 2913 // In unoptimized code check the usage counter to trigger OSR at loop
2914 // stack checks. Use progressively higher thresholds for more deeply 2914 // stack checks. Use progressively higher thresholds for more deeply
2915 // nested loops to attempt to hit outer loops with OSR when possible. 2915 // nested loops to attempt to hit outer loops with OSR when possible.
2916 __ LoadObject(temp, compiler->parsed_function().function()); 2916 __ LoadObject(temp, compiler->parsed_function().function());
2917 intptr_t threshold = 2917 intptr_t threshold =
2918 FLAG_optimization_counter_threshold * (loop_depth() + 1); 2918 FLAG_optimization_counter_threshold * (loop_depth() + 1);
2919 __ ldr(temp, FieldAddress(temp, Function::usage_counter_offset())); 2919 __ ldr(temp, FieldAddress(temp, Function::usage_counter_offset()));
(...skipping 21 matching lines...)
2941 ASSERT(constant.IsSmi()); 2941 ASSERT(constant.IsSmi());
2942 // Immediate shift operation takes 5 bits for the count. 2942 // Immediate shift operation takes 5 bits for the count.
2943 const intptr_t kCountLimit = 0x1F; 2943 const intptr_t kCountLimit = 0x1F;
2944 const intptr_t value = Smi::Cast(constant).Value(); 2944 const intptr_t value = Smi::Cast(constant).Value();
2945 if (value == 0) { 2945 if (value == 0) {
2946 __ MoveRegister(result, left); 2946 __ MoveRegister(result, left);
2947 } else if ((value < 0) || (value >= kCountLimit)) { 2947 } else if ((value < 0) || (value >= kCountLimit)) {
2948 // This condition may not be known earlier in some cases because 2948 // This condition may not be known earlier in some cases because
2949 // of constant propagation, inlining, etc. 2949 // of constant propagation, inlining, etc.
2950 if ((value >= kCountLimit) && is_truncating) { 2950 if ((value >= kCountLimit) && is_truncating) {
2951 __ mov(result, ShifterOperand(0)); 2951 __ mov(result, Operand(0));
2952 } else { 2952 } else {
2953 // Result is Mint or exception. 2953 // Result is Mint or exception.
2954 __ b(deopt); 2954 __ b(deopt);
2955 } 2955 }
2956 } else { 2956 } else {
2957 if (!is_truncating) { 2957 if (!is_truncating) {
2958 // Check for overflow (preserve left). 2958 // Check for overflow (preserve left).
2959 __ Lsl(IP, left, value); 2959 __ Lsl(IP, left, value);
2960 __ cmp(left, ShifterOperand(IP, ASR, value)); 2960 __ cmp(left, Operand(IP, ASR, value));
2961 __ b(deopt, NE); // Overflow. 2961 __ b(deopt, NE); // Overflow.
2962 } 2962 }
2963 // Shift for result now we know there is no overflow. 2963 // Shift for result now we know there is no overflow.
2964 __ Lsl(result, left, value); 2964 __ Lsl(result, left, value);
2965 } 2965 }
2966 return; 2966 return;
2967 } 2967 }
2968 2968
2969 // Right (locs.in(1)) is not constant. 2969 // Right (locs.in(1)) is not constant.
2970 const Register right = locs.in(1).reg(); 2970 const Register right = locs.in(1).reg();
2971 Range* right_range = shift_left->right()->definition()->range(); 2971 Range* right_range = shift_left->right()->definition()->range();
2972 if (shift_left->left()->BindsToConstant() && !is_truncating) { 2972 if (shift_left->left()->BindsToConstant() && !is_truncating) {
2973 // TODO(srdjan): Implement code below for is_truncating(). 2973 // TODO(srdjan): Implement code below for is_truncating().
2974 // If left is constant, we know the maximal allowed size for right. 2974 // If left is constant, we know the maximal allowed size for right.
2975 const Object& obj = shift_left->left()->BoundConstant(); 2975 const Object& obj = shift_left->left()->BoundConstant();
2976 if (obj.IsSmi()) { 2976 if (obj.IsSmi()) {
2977 const intptr_t left_int = Smi::Cast(obj).Value(); 2977 const intptr_t left_int = Smi::Cast(obj).Value();
2978 if (left_int == 0) { 2978 if (left_int == 0) {
2979 __ cmp(right, ShifterOperand(0)); 2979 __ cmp(right, Operand(0));
2980 __ b(deopt, MI); 2980 __ b(deopt, MI);
2981 __ mov(result, ShifterOperand(0)); 2981 __ mov(result, Operand(0));
2982 return; 2982 return;
2983 } 2983 }
2984 const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int); 2984 const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
2985 const bool right_needs_check = 2985 const bool right_needs_check =
2986 (right_range == NULL) || 2986 (right_range == NULL) ||
2987 !right_range->IsWithin(0, max_right - 1); 2987 !right_range->IsWithin(0, max_right - 1);
2988 if (right_needs_check) { 2988 if (right_needs_check) {
2989 __ cmp(right, 2989 __ cmp(right, Operand(reinterpret_cast<int32_t>(Smi::New(max_right))));
2990 ShifterOperand(reinterpret_cast<int32_t>(Smi::New(max_right))));
2991 __ b(deopt, CS); 2990 __ b(deopt, CS);
2992 } 2991 }
2993 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 2992 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
2994 __ Lsl(result, left, IP); 2993 __ Lsl(result, left, IP);
2995 } 2994 }
2996 return; 2995 return;
2997 } 2996 }
2998 2997
2999 const bool right_needs_check = 2998 const bool right_needs_check =
3000 (right_range == NULL) || !right_range->IsWithin(0, (Smi::kBits - 1)); 2999 (right_range == NULL) || !right_range->IsWithin(0, (Smi::kBits - 1));
3001 if (is_truncating) { 3000 if (is_truncating) {
3002 if (right_needs_check) { 3001 if (right_needs_check) {
3003 const bool right_may_be_negative = 3002 const bool right_may_be_negative =
3004 (right_range == NULL) || 3003 (right_range == NULL) ||
3005 !right_range->IsWithin(0, RangeBoundary::kPlusInfinity); 3004 !right_range->IsWithin(0, RangeBoundary::kPlusInfinity);
3006 if (right_may_be_negative) { 3005 if (right_may_be_negative) {
3007 ASSERT(shift_left->CanDeoptimize()); 3006 ASSERT(shift_left->CanDeoptimize());
3008 __ cmp(right, ShifterOperand(0)); 3007 __ cmp(right, Operand(0));
3009 __ b(deopt, MI); 3008 __ b(deopt, MI);
3010 } 3009 }
3011 3010
3012 __ cmp(right, 3011 __ cmp(right, Operand(reinterpret_cast<int32_t>(Smi::New(Smi::kBits))));
3013 ShifterOperand(reinterpret_cast<int32_t>(Smi::New(Smi::kBits)))); 3012 __ mov(result, Operand(0), CS);
3014 __ mov(result, ShifterOperand(0), CS);
3015 __ Asr(IP, right, kSmiTagSize, CC); // SmiUntag right into IP if CC. 3013 __ Asr(IP, right, kSmiTagSize, CC); // SmiUntag right into IP if CC.
3016 __ Lsl(result, left, IP, CC); 3014 __ Lsl(result, left, IP, CC);
3017 } else { 3015 } else {
3018 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3016 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3019 __ Lsl(result, left, IP); 3017 __ Lsl(result, left, IP);
3020 } 3018 }
3021 } else { 3019 } else {
3022 if (right_needs_check) { 3020 if (right_needs_check) {
3023 ASSERT(shift_left->CanDeoptimize()); 3021 ASSERT(shift_left->CanDeoptimize());
3024 __ cmp(right, 3022 __ cmp(right, Operand(reinterpret_cast<int32_t>(Smi::New(Smi::kBits))));
3025 ShifterOperand(reinterpret_cast<int32_t>(Smi::New(Smi::kBits))));
3026 __ b(deopt, CS); 3023 __ b(deopt, CS);
3027 } 3024 }
3028 // Left is not a constant. 3025 // Left is not a constant.
3029 // Check if count too large for handling it inlined. 3026 // Check if count too large for handling it inlined.
3030 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3027 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3031 // Overflow test (preserve left, right, and IP); 3028 // Overflow test (preserve left, right, and IP);
3032 const Register temp = locs.temp(0).reg(); 3029 const Register temp = locs.temp(0).reg();
3033 __ Lsl(temp, left, IP); 3030 __ Lsl(temp, left, IP);
3034 __ cmp(left, ShifterOperand(temp, ASR, IP)); 3031 __ cmp(left, Operand(temp, ASR, IP));
3035 __ b(deopt, NE); // Overflow. 3032 __ b(deopt, NE); // Overflow.
3036 // Shift for result now we know there is no overflow. 3033 // Shift for result now we know there is no overflow.
3037 __ Lsl(result, left, IP); 3034 __ Lsl(result, left, IP);
3038 } 3035 }
3039 } 3036 }
3040 3037
3041 3038
3042 LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Isolate* isolate, 3039 LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Isolate* isolate,
3043 bool opt) const { 3040 bool opt) const {
3044 const intptr_t kNumInputs = 2; 3041 const intptr_t kNumInputs = 2;
(...skipping 77 matching lines...)
3122 __ SubImmediateSetFlags(result, left, imm); 3119 __ SubImmediateSetFlags(result, left, imm);
3123 __ b(deopt, VS); 3120 __ b(deopt, VS);
3124 } 3121 }
3125 break; 3122 break;
3126 } 3123 }
3127 case Token::kMUL: { 3124 case Token::kMUL: {
3128 // Keep left value tagged and untag right value. 3125 // Keep left value tagged and untag right value.
3129 const intptr_t value = Smi::Cast(constant).Value(); 3126 const intptr_t value = Smi::Cast(constant).Value();
3130 if (deopt == NULL) { 3127 if (deopt == NULL) {
3131 if (value == 2) { 3128 if (value == 2) {
3132 __ mov(result, ShifterOperand(left, LSL, 1)); 3129 __ mov(result, Operand(left, LSL, 1));
3133 } else { 3130 } else {
3134 __ LoadImmediate(IP, value); 3131 __ LoadImmediate(IP, value);
3135 __ mul(result, left, IP); 3132 __ mul(result, left, IP);
3136 } 3133 }
3137 } else { 3134 } else {
3138 if (value == 2) { 3135 if (value == 2) {
3139 __ mov(IP, ShifterOperand(left, ASR, 31)); // IP = sign of left. 3136 __ mov(IP, Operand(left, ASR, 31)); // IP = sign of left.
3140 __ mov(result, ShifterOperand(left, LSL, 1)); 3137 __ mov(result, Operand(left, LSL, 1));
3141 // IP: result bits 32..63. 3138 // IP: result bits 32..63.
3142 __ cmp(IP, ShifterOperand(result, ASR, 31)); 3139 __ cmp(IP, Operand(result, ASR, 31));
3143 __ b(deopt, NE); 3140 __ b(deopt, NE);
3144 } else { 3141 } else {
3145 if (TargetCPUFeatures::arm_version() == ARMv7) { 3142 if (TargetCPUFeatures::arm_version() == ARMv7) {
3146 __ LoadImmediate(IP, value); 3143 __ LoadImmediate(IP, value);
3147 __ smull(result, IP, left, IP); 3144 __ smull(result, IP, left, IP);
3148 // IP: result bits 32..63. 3145 // IP: result bits 32..63.
3149 __ cmp(IP, ShifterOperand(result, ASR, 31)); 3146 __ cmp(IP, Operand(result, ASR, 31));
3150 __ b(deopt, NE); 3147 __ b(deopt, NE);
3151 } else { 3148 } else {
3152 const QRegister qtmp = locs()->temp(0).fpu_reg(); 3149 const QRegister qtmp = locs()->temp(0).fpu_reg();
3153 const DRegister dtmp0 = EvenDRegisterOf(qtmp); 3150 const DRegister dtmp0 = EvenDRegisterOf(qtmp);
3154 const DRegister dtmp1 = OddDRegisterOf(qtmp); 3151 const DRegister dtmp1 = OddDRegisterOf(qtmp);
3155 __ LoadImmediate(IP, value); 3152 __ LoadImmediate(IP, value);
3156 __ CheckMultSignedOverflow(left, IP, result, dtmp0, dtmp1, deopt); 3153 __ CheckMultSignedOverflow(left, IP, result, dtmp0, dtmp1, deopt);
3157 __ mul(result, left, IP); 3154 __ mul(result, left, IP);
3158 } 3155 }
3159 } 3156 }
3160 } 3157 }
3161 break; 3158 break;
3162 } 3159 }
3163 case Token::kTRUNCDIV: { 3160 case Token::kTRUNCDIV: {
3164 const intptr_t value = Smi::Cast(constant).Value(); 3161 const intptr_t value = Smi::Cast(constant).Value();
3165 if (value == 1) { 3162 if (value == 1) {
3166 __ MoveRegister(result, left); 3163 __ MoveRegister(result, left);
3167 break; 3164 break;
3168 } else if (value == -1) { 3165 } else if (value == -1) {
3169 // Check the corner case of dividing the 'MIN_SMI' with -1, in which 3166 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
3170 // case we cannot negate the result. 3167 // case we cannot negate the result.
3171 __ CompareImmediate(left, 0x80000000); 3168 __ CompareImmediate(left, 0x80000000);
3172 __ b(deopt, EQ); 3169 __ b(deopt, EQ);
3173 __ rsb(result, left, ShifterOperand(0)); 3170 __ rsb(result, left, Operand(0));
3174 break; 3171 break;
3175 } 3172 }
3176 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value))); 3173 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
3177 const intptr_t shift_count = 3174 const intptr_t shift_count =
3178 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize; 3175 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
3179 ASSERT(kSmiTagSize == 1); 3176 ASSERT(kSmiTagSize == 1);
3180 __ mov(IP, ShifterOperand(left, ASR, 31)); 3177 __ mov(IP, Operand(left, ASR, 31));
3181 ASSERT(shift_count > 1); // 1, -1 case handled above. 3178 ASSERT(shift_count > 1); // 1, -1 case handled above.
3182 const Register temp = locs()->temp(0).reg(); 3179 const Register temp = locs()->temp(0).reg();
3183 __ add(temp, left, ShifterOperand(IP, LSR, 32 - shift_count)); 3180 __ add(temp, left, Operand(IP, LSR, 32 - shift_count));
3184 ASSERT(shift_count > 0); 3181 ASSERT(shift_count > 0);
3185 __ mov(result, ShifterOperand(temp, ASR, shift_count)); 3182 __ mov(result, Operand(temp, ASR, shift_count));
3186 if (value < 0) { 3183 if (value < 0) {
3187 __ rsb(result, result, ShifterOperand(0)); 3184 __ rsb(result, result, Operand(0));
3188 } 3185 }
3189 __ SmiTag(result); 3186 __ SmiTag(result);
3190 break; 3187 break;
3191 } 3188 }
3192 case Token::kBIT_AND: { 3189 case Token::kBIT_AND: {
3193 // No overflow check. 3190 // No overflow check.
3194 ShifterOperand shifter_op; 3191 Operand o;
3195 if (ShifterOperand::CanHold(imm, &shifter_op)) { 3192 if (Operand::CanHold(imm, &o)) {
3196 __ and_(result, left, shifter_op); 3193 __ and_(result, left, o);
3197 } else if (ShifterOperand::CanHold(~imm, &shifter_op)) { 3194 } else if (Operand::CanHold(~imm, &o)) {
3198 __ bic(result, left, shifter_op); 3195 __ bic(result, left, o);
3199 } else { 3196 } else {
3200 __ LoadImmediate(IP, imm); 3197 __ LoadImmediate(IP, imm);
3201 __ and_(result, left, ShifterOperand(IP)); 3198 __ and_(result, left, Operand(IP));
3202 } 3199 }
3203 break; 3200 break;
3204 } 3201 }
3205 case Token::kBIT_OR: { 3202 case Token::kBIT_OR: {
3206 // No overflow check. 3203 // No overflow check.
3207 ShifterOperand shifter_op; 3204 Operand o;
3208 if (ShifterOperand::CanHold(imm, &shifter_op)) { 3205 if (Operand::CanHold(imm, &o)) {
3209 __ orr(result, left, shifter_op); 3206 __ orr(result, left, o);
3210 } else { 3207 } else {
3211 __ LoadImmediate(IP, imm); 3208 __ LoadImmediate(IP, imm);
3212 __ orr(result, left, ShifterOperand(IP)); 3209 __ orr(result, left, Operand(IP));
3213 } 3210 }
3214 break; 3211 break;
3215 } 3212 }
3216 case Token::kBIT_XOR: { 3213 case Token::kBIT_XOR: {
3217 // No overflow check. 3214 // No overflow check.
3218 ShifterOperand shifter_op; 3215 Operand o;
3219 if (ShifterOperand::CanHold(imm, &shifter_op)) { 3216 if (Operand::CanHold(imm, &o)) {
3220 __ eor(result, left, shifter_op); 3217 __ eor(result, left, o);
3221 } else { 3218 } else {
3222 __ LoadImmediate(IP, imm); 3219 __ LoadImmediate(IP, imm);
3223 __ eor(result, left, ShifterOperand(IP)); 3220 __ eor(result, left, Operand(IP));
3224 } 3221 }
3225 break; 3222 break;
3226 } 3223 }
3227 case Token::kSHR: { 3224 case Token::kSHR: {
3228 // sarl operation masks the count to 5 bits. 3225 // sarl operation masks the count to 5 bits.
3229 const intptr_t kCountLimit = 0x1F; 3226 const intptr_t kCountLimit = 0x1F;
3230 intptr_t value = Smi::Cast(constant).Value(); 3227 intptr_t value = Smi::Cast(constant).Value();
3231 3228
3232 if (value == 0) { 3229 if (value == 0) {
3233 // TODO(vegorov): should be handled outside. 3230 // TODO(vegorov): should be handled outside.
(...skipping 20 matching lines...)
3254 break; 3251 break;
3255 } 3252 }
3256 return; 3253 return;
3257 } 3254 }
3258 3255
3259 const Register right = locs()->in(1).reg(); 3256 const Register right = locs()->in(1).reg();
3260 Range* right_range = this->right()->definition()->range(); 3257 Range* right_range = this->right()->definition()->range();
3261 switch (op_kind()) { 3258 switch (op_kind()) {
3262 case Token::kADD: { 3259 case Token::kADD: {
3263 if (deopt == NULL) { 3260 if (deopt == NULL) {
3264 __ add(result, left, ShifterOperand(right)); 3261 __ add(result, left, Operand(right));
3265 } else { 3262 } else {
3266 __ adds(result, left, ShifterOperand(right)); 3263 __ adds(result, left, Operand(right));
3267 __ b(deopt, VS); 3264 __ b(deopt, VS);
3268 } 3265 }
3269 break; 3266 break;
3270 } 3267 }
3271 case Token::kSUB: { 3268 case Token::kSUB: {
3272 if (deopt == NULL) { 3269 if (deopt == NULL) {
3273 __ sub(result, left, ShifterOperand(right)); 3270 __ sub(result, left, Operand(right));
3274 } else { 3271 } else {
3275 __ subs(result, left, ShifterOperand(right)); 3272 __ subs(result, left, Operand(right));
3276 __ b(deopt, VS); 3273 __ b(deopt, VS);
3277 } 3274 }
3278 break; 3275 break;
3279 } 3276 }
3280 case Token::kMUL: { 3277 case Token::kMUL: {
3281 __ Asr(IP, left, kSmiTagSize); // SmiUntag left into IP. 3278 __ Asr(IP, left, kSmiTagSize); // SmiUntag left into IP.
3282 if (deopt == NULL) { 3279 if (deopt == NULL) {
3283 __ mul(result, IP, right); 3280 __ mul(result, IP, right);
3284 } else { 3281 } else {
3285 if (TargetCPUFeatures::arm_version() == ARMv7) { 3282 if (TargetCPUFeatures::arm_version() == ARMv7) {
3286 __ smull(result, IP, IP, right); 3283 __ smull(result, IP, IP, right);
3287 // IP: result bits 32..63. 3284 // IP: result bits 32..63.
3288 __ cmp(IP, ShifterOperand(result, ASR, 31)); 3285 __ cmp(IP, Operand(result, ASR, 31));
3289 __ b(deopt, NE); 3286 __ b(deopt, NE);
3290 } else { 3287 } else {
3291 const QRegister qtmp = locs()->temp(0).fpu_reg(); 3288 const QRegister qtmp = locs()->temp(0).fpu_reg();
3292 const DRegister dtmp0 = EvenDRegisterOf(qtmp); 3289 const DRegister dtmp0 = EvenDRegisterOf(qtmp);
3293 const DRegister dtmp1 = OddDRegisterOf(qtmp); 3290 const DRegister dtmp1 = OddDRegisterOf(qtmp);
3294 __ CheckMultSignedOverflow(IP, right, result, dtmp0, dtmp1, deopt); 3291 __ CheckMultSignedOverflow(IP, right, result, dtmp0, dtmp1, deopt);
3295 __ mul(result, IP, right); 3292 __ mul(result, IP, right);
3296 } 3293 }
3297 } 3294 }
3298 break; 3295 break;
3299 } 3296 }
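
Not part of the diff, just an illustration: on the ARMv7 path, smull produces the full 64-bit product and the deopt fires when its high word is not simply the sign extension of the low word, i.e. when the product does not fit in 32 bits. A hedged C++ sketch of that test (helper name and sample values are mine; assumes two's complement and arithmetic right shifts):

#include <cassert>
#include <cstdint>

bool MulOverflows32Sketch(int32_t a, int32_t b, int32_t* lo_out) {
  int64_t product = int64_t(a) * int64_t(b);
  int32_t lo = static_cast<int32_t>(static_cast<uint32_t>(product));  // low 32 bits
  int32_t hi = static_cast<int32_t>(product >> 32);                   // high 32 bits
  *lo_out = lo;
  return hi != (lo >> 31);  // mirrors cmp(IP, Operand(result, ASR, 31)) + b(deopt, NE)
}

int main() {
  int32_t lo;
  assert(!MulOverflows32Sketch(40000, 50000, &lo));  // 2,000,000,000 fits in int32
  assert(MulOverflows32Sketch(50000, 50000, &lo));   // 2,500,000,000 does not
  return 0;
}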
3300 case Token::kBIT_AND: { 3297 case Token::kBIT_AND: {
3301 // No overflow check. 3298 // No overflow check.
3302 __ and_(result, left, ShifterOperand(right)); 3299 __ and_(result, left, Operand(right));
3303 break; 3300 break;
3304 } 3301 }
3305 case Token::kBIT_OR: { 3302 case Token::kBIT_OR: {
3306 // No overflow check. 3303 // No overflow check.
3307 __ orr(result, left, ShifterOperand(right)); 3304 __ orr(result, left, Operand(right));
3308 break; 3305 break;
3309 } 3306 }
3310 case Token::kBIT_XOR: { 3307 case Token::kBIT_XOR: {
3311 // No overflow check. 3308 // No overflow check.
3312 __ eor(result, left, ShifterOperand(right)); 3309 __ eor(result, left, Operand(right));
3313 break; 3310 break;
3314 } 3311 }
3315 case Token::kTRUNCDIV: { 3312 case Token::kTRUNCDIV: {
3316 if ((right_range == NULL) || right_range->Overlaps(0, 0)) { 3313 if ((right_range == NULL) || right_range->Overlaps(0, 0)) {
3317 // Handle divide by zero in runtime. 3314 // Handle divide by zero in runtime.
3318 __ cmp(right, ShifterOperand(0)); 3315 __ cmp(right, Operand(0));
3319 __ b(deopt, EQ); 3316 __ b(deopt, EQ);
3320 } 3317 }
3321 const Register temp = locs()->temp(0).reg(); 3318 const Register temp = locs()->temp(0).reg();
3322 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg()); 3319 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3323 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp. 3320 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
3324 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3321 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3325 3322
3326 __ IntegerDivide(result, temp, IP, dtemp, DTMP); 3323 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3327 3324
3328 // Check the corner case of dividing the 'MIN_SMI' by -1, in which 3325 // Check the corner case of dividing the 'MIN_SMI' by -1, in which
3329 // case we cannot tag the result. 3326 // case we cannot tag the result.
3330 __ CompareImmediate(result, 0x40000000); 3327 __ CompareImmediate(result, 0x40000000);
3331 __ b(deopt, EQ); 3328 __ b(deopt, EQ);
3332 __ SmiTag(result); 3329 __ SmiTag(result);
3333 break; 3330 break;
3334 } 3331 }
3335 case Token::kMOD: { 3332 case Token::kMOD: {
3336 if ((right_range == NULL) || right_range->Overlaps(0, 0)) { 3333 if ((right_range == NULL) || right_range->Overlaps(0, 0)) {
3337 // Handle divide by zero in runtime. 3334 // Handle divide by zero in runtime.
3338 __ cmp(right, ShifterOperand(0)); 3335 __ cmp(right, Operand(0));
3339 __ b(deopt, EQ); 3336 __ b(deopt, EQ);
3340 } 3337 }
3341 const Register temp = locs()->temp(0).reg(); 3338 const Register temp = locs()->temp(0).reg();
3342 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg()); 3339 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3343 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp. 3340 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
3344 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3341 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3345 3342
3346 __ IntegerDivide(result, temp, IP, dtemp, DTMP); 3343 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3347 3344
3348 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3345 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3349 __ mls(result, IP, result, temp); // result <- left - right * result 3346 __ mls(result, IP, result, temp); // result <- left - right * result
3350 __ SmiTag(result); 3347 __ SmiTag(result);
3351 // res = left % right; 3348 // res = left % right;
3352 // if (res < 0) { 3349 // if (res < 0) {
3353 // if (right < 0) { 3350 // if (right < 0) {
3354 // res = res - right; 3351 // res = res - right;
3355 // } else { 3352 // } else {
3356 // res = res + right; 3353 // res = res + right;
3357 // } 3354 // }
3358 // } 3355 // }
3359 Label done; 3356 Label done;
3360 __ cmp(result, ShifterOperand(0)); 3357 __ cmp(result, Operand(0));
3361 __ b(&done, GE); 3358 __ b(&done, GE);
3362 // Result is negative, adjust it. 3359 // Result is negative, adjust it.
3363 __ cmp(right, ShifterOperand(0)); 3360 __ cmp(right, Operand(0));
3364 __ sub(result, result, ShifterOperand(right), LT); 3361 __ sub(result, result, Operand(right), LT);
3365 __ add(result, result, ShifterOperand(right), GE); 3362 __ add(result, result, Operand(right), GE);
3366 __ Bind(&done); 3363 __ Bind(&done);
3367 break; 3364 break;
3368 } 3365 }
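
Not part of the diff: the commented fix-up above converts the truncated remainder produced by IntegerDivide/mls into Dart's always-non-negative % result. A small C++ sketch of the same correction (helper name and test values are mine):

#include <cassert>

int DartModSketch(int left, int right) {
  int res = left % right;                       // truncated remainder, may be negative
  if (res < 0) {
    res += (right < 0) ? -right : right;        // the sub/add guarded by LT/GE above
  }
  return res;
}

int main() {
  assert(DartModSketch(-7, 3) == 2);
  assert(DartModSketch(-7, -3) == 2);
  assert(DartModSketch(7, -3) == 1);
  return 0;
}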
3369 case Token::kSHR: { 3366 case Token::kSHR: {
3370 if (CanDeoptimize()) { 3367 if (CanDeoptimize()) {
3371 __ CompareImmediate(right, 0); 3368 __ CompareImmediate(right, 0);
3372 __ b(deopt, LT); 3369 __ b(deopt, LT);
3373 } 3370 }
3374 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 3371 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
3375 // sarl operation masks the count to 5 bits. 3372 // sarl operation masks the count to 5 bits.
(...skipping 45 matching lines...)
3421 3418
3422 3419
3423 void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 3420 void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3424 Label* deopt = compiler->AddDeoptStub(deopt_id(), 3421 Label* deopt = compiler->AddDeoptStub(deopt_id(),
3425 ICData::kDeoptBinaryDoubleOp); 3422 ICData::kDeoptBinaryDoubleOp);
3426 intptr_t left_cid = left()->Type()->ToCid(); 3423 intptr_t left_cid = left()->Type()->ToCid();
3427 intptr_t right_cid = right()->Type()->ToCid(); 3424 intptr_t right_cid = right()->Type()->ToCid();
3428 const Register left = locs()->in(0).reg(); 3425 const Register left = locs()->in(0).reg();
3429 const Register right = locs()->in(1).reg(); 3426 const Register right = locs()->in(1).reg();
3430 if (this->left()->definition() == this->right()->definition()) { 3427 if (this->left()->definition() == this->right()->definition()) {
3431 __ tst(left, ShifterOperand(kSmiTagMask)); 3428 __ tst(left, Operand(kSmiTagMask));
3432 } else if (left_cid == kSmiCid) { 3429 } else if (left_cid == kSmiCid) {
3433 __ tst(right, ShifterOperand(kSmiTagMask)); 3430 __ tst(right, Operand(kSmiTagMask));
3434 } else if (right_cid == kSmiCid) { 3431 } else if (right_cid == kSmiCid) {
3435 __ tst(left, ShifterOperand(kSmiTagMask)); 3432 __ tst(left, Operand(kSmiTagMask));
3436 } else { 3433 } else {
3437 __ orr(IP, left, ShifterOperand(right)); 3434 __ orr(IP, left, Operand(right));
3438 __ tst(IP, ShifterOperand(kSmiTagMask)); 3435 __ tst(IP, Operand(kSmiTagMask));
3439 } 3436 }
3440 __ b(deopt, EQ); 3437 __ b(deopt, EQ);
3441 } 3438 }
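
Reviewer note, not part of this CL: the orr/tst pair works because of the Smi tagging convention (assumed here: kSmiTag == 0, kSmiTagMask == 1), so OR-ing the two tagged words and testing the low bit answers "are both operands Smis?" with a single flag check. A minimal C++ sketch of that predicate (names and sample values are mine):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kSmiTagMaskSketch = 1;  // assumption: 1-bit tag, Smi tag == 0

bool BothAreSmisSketch(uintptr_t left, uintptr_t right) {
  return ((left | right) & kSmiTagMaskSketch) == 0;  // mirrors orr + tst, branch on EQ
}

int main() {
  assert(BothAreSmisSketch(4, 6));         // two tagged Smis (2 and 3)
  assert(!BothAreSmisSketch(4, 0x1001));   // second word has the heap-object tag bit set
  return 0;
}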
3442 3439
3443 3440
3444 LocationSummary* BoxDoubleInstr::MakeLocationSummary(Isolate* isolate, 3441 LocationSummary* BoxDoubleInstr::MakeLocationSummary(Isolate* isolate,
3445 bool opt) const { 3442 bool opt) const {
3446 const intptr_t kNumInputs = 1; 3443 const intptr_t kNumInputs = 1;
3447 const intptr_t kNumTemps = 1; 3444 const intptr_t kNumTemps = 1;
3448 LocationSummary* summary = new(isolate) LocationSummary( 3445 LocationSummary* summary = new(isolate) LocationSummary(
(...skipping 59 matching lines...)
3508 const Register temp = locs()->temp(0).reg(); 3505 const Register temp = locs()->temp(0).reg();
3509 if (value_type->is_nullable() && 3506 if (value_type->is_nullable() &&
3510 (value_type->ToNullableCid() == kDoubleCid)) { 3507 (value_type->ToNullableCid() == kDoubleCid)) {
3511 __ CompareImmediate(value, reinterpret_cast<intptr_t>(Object::null())); 3508 __ CompareImmediate(value, reinterpret_cast<intptr_t>(Object::null()));
3512 __ b(deopt, EQ); 3509 __ b(deopt, EQ);
3513 // It must be double now. 3510 // It must be double now.
3514 __ LoadDFromOffset(result, value, 3511 __ LoadDFromOffset(result, value,
3515 Double::value_offset() - kHeapObjectTag); 3512 Double::value_offset() - kHeapObjectTag);
3516 } else { 3513 } else {
3517 Label is_smi, done; 3514 Label is_smi, done;
3518 __ tst(value, ShifterOperand(kSmiTagMask)); 3515 __ tst(value, Operand(kSmiTagMask));
3519 __ b(&is_smi, EQ); 3516 __ b(&is_smi, EQ);
3520 __ CompareClassId(value, kDoubleCid, temp); 3517 __ CompareClassId(value, kDoubleCid, temp);
3521 __ b(deopt, NE); 3518 __ b(deopt, NE);
3522 __ LoadDFromOffset(result, value, 3519 __ LoadDFromOffset(result, value,
3523 Double::value_offset() - kHeapObjectTag); 3520 Double::value_offset() - kHeapObjectTag);
3524 __ b(&done); 3521 __ b(&done);
3525 __ Bind(&is_smi); 3522 __ Bind(&is_smi);
3526 // TODO(regis): Why do we preserve value here but not above? 3523 // TODO(regis): Why do we preserve value here but not above?
3527 __ mov(IP, ShifterOperand(value, ASR, 1)); // Copy and untag. 3524 __ mov(IP, Operand(value, ASR, 1)); // Copy and untag.
3528 __ vmovsr(STMP, IP); 3525 __ vmovsr(STMP, IP);
3529 __ vcvtdi(result, STMP); 3526 __ vcvtdi(result, STMP);
3530 __ Bind(&done); 3527 __ Bind(&done);
3531 } 3528 }
3532 } 3529 }
3533 } 3530 }
3534 3531
3535 3532
3536 LocationSummary* BoxFloat32x4Instr::MakeLocationSummary(Isolate* isolate, 3533 LocationSummary* BoxFloat32x4Instr::MakeLocationSummary(Isolate* isolate,
3537 bool opt) const { 3534 bool opt) const {
(...skipping 47 matching lines...)
3585 3582
3586 3583
3587 void UnboxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3584 void UnboxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3588 const intptr_t value_cid = value()->Type()->ToCid(); 3585 const intptr_t value_cid = value()->Type()->ToCid();
3589 const Register value = locs()->in(0).reg(); 3586 const Register value = locs()->in(0).reg();
3590 const QRegister result = locs()->out(0).fpu_reg(); 3587 const QRegister result = locs()->out(0).fpu_reg();
3591 3588
3592 if (value_cid != kFloat32x4Cid) { 3589 if (value_cid != kFloat32x4Cid) {
3593 const Register temp = locs()->temp(0).reg(); 3590 const Register temp = locs()->temp(0).reg();
3594 Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass); 3591 Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass);
3595 __ tst(value, ShifterOperand(kSmiTagMask)); 3592 __ tst(value, Operand(kSmiTagMask));
3596 __ b(deopt, EQ); 3593 __ b(deopt, EQ);
3597 __ CompareClassId(value, kFloat32x4Cid, temp); 3594 __ CompareClassId(value, kFloat32x4Cid, temp);
3598 __ b(deopt, NE); 3595 __ b(deopt, NE);
3599 } 3596 }
3600 3597
3601 const DRegister dresult0 = EvenDRegisterOf(result); 3598 const DRegister dresult0 = EvenDRegisterOf(result);
3602 __ LoadMultipleDFromOffset(dresult0, 2, value, 3599 __ LoadMultipleDFromOffset(dresult0, 2, value,
3603 Float32x4::value_offset() - kHeapObjectTag); 3600 Float32x4::value_offset() - kHeapObjectTag);
3604 } 3601 }
3605 3602
(...skipping 50 matching lines...)
3656 3653
3657 3654
3658 void UnboxFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3655 void UnboxFloat64x2Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3659 const intptr_t value_cid = value()->Type()->ToCid(); 3656 const intptr_t value_cid = value()->Type()->ToCid();
3660 const Register value = locs()->in(0).reg(); 3657 const Register value = locs()->in(0).reg();
3661 const QRegister result = locs()->out(0).fpu_reg(); 3658 const QRegister result = locs()->out(0).fpu_reg();
3662 3659
3663 if (value_cid != kFloat64x2Cid) { 3660 if (value_cid != kFloat64x2Cid) {
3664 const Register temp = locs()->temp(0).reg(); 3661 const Register temp = locs()->temp(0).reg();
3665 Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass); 3662 Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass);
3666 __ tst(value, ShifterOperand(kSmiTagMask)); 3663 __ tst(value, Operand(kSmiTagMask));
3667 __ b(deopt, EQ); 3664 __ b(deopt, EQ);
3668 __ CompareClassId(value, kFloat64x2Cid, temp); 3665 __ CompareClassId(value, kFloat64x2Cid, temp);
3669 __ b(deopt, NE); 3666 __ b(deopt, NE);
3670 } 3667 }
3671 3668
3672 const DRegister dresult0 = EvenDRegisterOf(result); 3669 const DRegister dresult0 = EvenDRegisterOf(result);
3673 __ LoadMultipleDFromOffset(dresult0, 2, value, 3670 __ LoadMultipleDFromOffset(dresult0, 2, value,
3674 Float64x2::value_offset() - kHeapObjectTag); 3671 Float64x2::value_offset() - kHeapObjectTag);
3675 } 3672 }
3676 3673
(...skipping 27 matching lines...)
3704 const ExternalLabel label(stub.EntryPoint()); 3701 const ExternalLabel label(stub.EntryPoint());
3705 3702
3706 LocationSummary* locs = instruction_->locs(); 3703 LocationSummary* locs = instruction_->locs();
3707 locs->live_registers()->Remove(locs->out(0)); 3704 locs->live_registers()->Remove(locs->out(0));
3708 3705
3709 compiler->SaveLiveRegisters(locs); 3706 compiler->SaveLiveRegisters(locs);
3710 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position. 3707 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
3711 &label, 3708 &label,
3712 PcDescriptors::kOther, 3709 PcDescriptors::kOther,
3713 locs); 3710 locs);
3714 __ mov(locs->out(0).reg(), ShifterOperand(R0)); 3711 __ mov(locs->out(0).reg(), Operand(R0));
3715 compiler->RestoreLiveRegisters(locs); 3712 compiler->RestoreLiveRegisters(locs);
3716 3713
3717 __ b(exit_label()); 3714 __ b(exit_label());
3718 } 3715 }
3719 3716
3720 private: 3717 private:
3721 BoxInt32x4Instr* instruction_; 3718 BoxInt32x4Instr* instruction_;
3722 }; 3719 };
3723 3720
3724 3721
(...skipping 33 matching lines...)
3758 3755
3759 3756
3760 void UnboxInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) { 3757 void UnboxInt32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3761 const intptr_t value_cid = value()->Type()->ToCid(); 3758 const intptr_t value_cid = value()->Type()->ToCid();
3762 const Register value = locs()->in(0).reg(); 3759 const Register value = locs()->in(0).reg();
3763 const QRegister result = locs()->out(0).fpu_reg(); 3760 const QRegister result = locs()->out(0).fpu_reg();
3764 3761
3765 if (value_cid != kInt32x4Cid) { 3762 if (value_cid != kInt32x4Cid) {
3766 const Register temp = locs()->temp(0).reg(); 3763 const Register temp = locs()->temp(0).reg();
3767 Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass); 3764 Label* deopt = compiler->AddDeoptStub(deopt_id_, ICData::kDeoptCheckClass);
3768 __ tst(value, ShifterOperand(kSmiTagMask)); 3765 __ tst(value, Operand(kSmiTagMask));
3769 __ b(deopt, EQ); 3766 __ b(deopt, EQ);
3770 __ CompareClassId(value, kInt32x4Cid, temp); 3767 __ CompareClassId(value, kInt32x4Cid, temp);
3771 __ b(deopt, NE); 3768 __ b(deopt, NE);
3772 } 3769 }
3773 3770
3774 const DRegister dresult0 = EvenDRegisterOf(result); 3771 const DRegister dresult0 = EvenDRegisterOf(result);
3775 __ LoadMultipleDFromOffset(dresult0, 2, value, 3772 __ LoadMultipleDFromOffset(dresult0, 2, value,
3776 Int32x4::value_offset() - kHeapObjectTag); 3773 Int32x4::value_offset() - kHeapObjectTag);
3777 } 3774 }
3778 3775
(...skipping 260 matching lines...)
4039 4036
4040 const Register out = locs()->out(0).reg(); 4037 const Register out = locs()->out(0).reg();
4041 const Register temp = locs()->temp(0).reg(); 4038 const Register temp = locs()->temp(0).reg();
4042 4039
4043 // X lane. 4040 // X lane.
4044 __ vmovrs(out, EvenSRegisterOf(dvalue0)); 4041 __ vmovrs(out, EvenSRegisterOf(dvalue0));
4045 __ Lsr(out, out, 31); 4042 __ Lsr(out, out, 31);
4046 // Y lane. 4043 // Y lane.
4047 __ vmovrs(temp, OddSRegisterOf(dvalue0)); 4044 __ vmovrs(temp, OddSRegisterOf(dvalue0));
4048 __ Lsr(temp, temp, 31); 4045 __ Lsr(temp, temp, 31);
4049 __ orr(out, out, ShifterOperand(temp, LSL, 1)); 4046 __ orr(out, out, Operand(temp, LSL, 1));
4050 // Z lane. 4047 // Z lane.
4051 __ vmovrs(temp, EvenSRegisterOf(dvalue1)); 4048 __ vmovrs(temp, EvenSRegisterOf(dvalue1));
4052 __ Lsr(temp, temp, 31); 4049 __ Lsr(temp, temp, 31);
4053 __ orr(out, out, ShifterOperand(temp, LSL, 2)); 4050 __ orr(out, out, Operand(temp, LSL, 2));
4054 // W lane. 4051 // W lane.
4055 __ vmovrs(temp, OddSRegisterOf(dvalue1)); 4052 __ vmovrs(temp, OddSRegisterOf(dvalue1));
4056 __ Lsr(temp, temp, 31); 4053 __ Lsr(temp, temp, 31);
4057 __ orr(out, out, ShifterOperand(temp, LSL, 3)); 4054 __ orr(out, out, Operand(temp, LSL, 3));
4058 // Tag. 4055 // Tag.
4059 __ SmiTag(out); 4056 __ SmiTag(out);
4060 } 4057 }
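
Not part of the diff: the lane-by-lane vmovrs/Lsr/orr sequence above collects the four lane sign bits into bits 0..3 of the result before tagging it. A short C++ sketch of the value it computes (helper name and sample lanes are mine):

#include <cassert>
#include <cstdint>
#include <cstring>

int SignMask4Sketch(const float lanes[4]) {
  int mask = 0;
  for (int i = 0; i < 4; i++) {
    uint32_t bits;
    std::memcpy(&bits, &lanes[i], sizeof(bits));
    mask |= int(bits >> 31) << i;          // lane i's sign bit lands in mask bit i
  }
  return mask;
}

int main() {
  const float lanes[4] = {1.0f, -2.0f, 3.0f, -0.0f};
  assert(SignMask4Sketch(lanes) == 0b1010);  // Y and W lanes are negative
  return 0;
}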
4061 4058
4062 4059
4063 LocationSummary* Float32x4ConstructorInstr::MakeLocationSummary( 4060 LocationSummary* Float32x4ConstructorInstr::MakeLocationSummary(
4064 Isolate* isolate, bool opt) const { 4061 Isolate* isolate, bool opt) const {
4065 const intptr_t kNumInputs = 4; 4062 const intptr_t kNumInputs = 4;
4066 const intptr_t kNumTemps = 0; 4063 const intptr_t kNumTemps = 0;
4067 LocationSummary* summary = new(isolate) LocationSummary( 4064 LocationSummary* summary = new(isolate) LocationSummary(
(...skipping 524 matching lines...)
4592 const DRegister dvalue1 = OddDRegisterOf(q); 4589 const DRegister dvalue1 = OddDRegisterOf(q);
4593 4590
4594 const Register out = locs()->out(0).reg(); 4591 const Register out = locs()->out(0).reg();
4595 4592
4596 // Upper 32-bits of X lane. 4593 // Upper 32-bits of X lane.
4597 __ vmovrs(out, OddSRegisterOf(dvalue0)); 4594 __ vmovrs(out, OddSRegisterOf(dvalue0));
4598 __ Lsr(out, out, 31); 4595 __ Lsr(out, out, 31);
4599 // Upper 32-bits of Y lane. 4596 // Upper 32-bits of Y lane.
4600 __ vmovrs(TMP, OddSRegisterOf(dvalue1)); 4597 __ vmovrs(TMP, OddSRegisterOf(dvalue1));
4601 __ Lsr(TMP, TMP, 31); 4598 __ Lsr(TMP, TMP, 31);
4602 __ orr(out, out, ShifterOperand(TMP, LSL, 1)); 4599 __ orr(out, out, Operand(TMP, LSL, 1));
4603 // Tag. 4600 // Tag.
4604 __ SmiTag(out); 4601 __ SmiTag(out);
4605 return; 4602 return;
4606 } 4603 }
4607 ASSERT(representation() == kUnboxedFloat64x2); 4604 ASSERT(representation() == kUnboxedFloat64x2);
4608 const QRegister r = locs()->out(0).fpu_reg(); 4605 const QRegister r = locs()->out(0).fpu_reg();
4609 4606
4610 const DRegister dvalue0 = EvenDRegisterOf(q); 4607 const DRegister dvalue0 = EvenDRegisterOf(q);
4611 const DRegister dvalue1 = OddDRegisterOf(q); 4608 const DRegister dvalue1 = OddDRegisterOf(q);
4612 const DRegister dresult0 = EvenDRegisterOf(r); 4609 const DRegister dresult0 = EvenDRegisterOf(r);
(...skipping 171 matching lines...)
4784 break; 4781 break;
4785 case MethodRecognizer::kInt32x4GetFlagZ: 4782 case MethodRecognizer::kInt32x4GetFlagZ:
4786 __ vmovrs(result, svalue2); 4783 __ vmovrs(result, svalue2);
4787 break; 4784 break;
4788 case MethodRecognizer::kInt32x4GetFlagW: 4785 case MethodRecognizer::kInt32x4GetFlagW:
4789 __ vmovrs(result, svalue3); 4786 __ vmovrs(result, svalue3);
4790 break; 4787 break;
4791 default: UNREACHABLE(); 4788 default: UNREACHABLE();
4792 } 4789 }
4793 4790
4794 __ tst(result, ShifterOperand(result)); 4791 __ tst(result, Operand(result));
4795 __ LoadObject(result, Bool::True(), NE); 4792 __ LoadObject(result, Bool::True(), NE);
4796 __ LoadObject(result, Bool::False(), EQ); 4793 __ LoadObject(result, Bool::False(), EQ);
4797 } 4794 }
4798 4795
4799 4796
4800 LocationSummary* Int32x4SelectInstr::MakeLocationSummary(Isolate* isolate, 4797 LocationSummary* Int32x4SelectInstr::MakeLocationSummary(Isolate* isolate,
4801 bool opt) const { 4798 bool opt) const {
4802 const intptr_t kNumInputs = 3; 4799 const intptr_t kNumInputs = 3;
4803 const intptr_t kNumTemps = 1; 4800 const intptr_t kNumTemps = 1;
4804 LocationSummary* summary = new(isolate) LocationSummary( 4801 LocationSummary* summary = new(isolate) LocationSummary(
(...skipping 238 matching lines...)
5043 __ b(&done); 5040 __ b(&done);
5044 5041
5045 __ Bind(&are_equal); 5042 __ Bind(&are_equal);
5046 // Check for negative zero: -0.0 is equal to 0.0 but min or max must return 5043 // Check for negative zero: -0.0 is equal to 0.0 but min or max must return
5047 // -0.0 or 0.0 respectively. 5044 // -0.0 or 0.0 respectively.
5048 // Check for negative left value (get the sign bit): 5045 // Check for negative left value (get the sign bit):
5049 // - min -> left is negative ? left : right. 5046 // - min -> left is negative ? left : right.
5050 // - max -> left is negative ? right : left 5047 // - max -> left is negative ? right : left
5051 // Check the sign bit. 5048 // Check the sign bit.
5052 __ vmovrrd(IP, temp, left); // Sign bit is in bit 31 of temp. 5049 __ vmovrrd(IP, temp, left); // Sign bit is in bit 31 of temp.
5053 __ cmp(temp, ShifterOperand(0)); 5050 __ cmp(temp, Operand(0));
5054 if (is_min) { 5051 if (is_min) {
5055 ASSERT(left == result); 5052 ASSERT(left == result);
5056 __ vmovd(result, right, GE); 5053 __ vmovd(result, right, GE);
5057 } else { 5054 } else {
5058 __ vmovd(result, right, LT); 5055 __ vmovd(result, right, LT);
5059 ASSERT(left == result); 5056 ASSERT(left == result);
5060 } 5057 }
5061 __ Bind(&done); 5058 __ Bind(&done);
5062 return; 5059 return;
5063 } 5060 }
5064 5061
5065 ASSERT(result_cid() == kSmiCid); 5062 ASSERT(result_cid() == kSmiCid);
5066 const Register left = locs()->in(0).reg(); 5063 const Register left = locs()->in(0).reg();
5067 const Register right = locs()->in(1).reg(); 5064 const Register right = locs()->in(1).reg();
5068 const Register result = locs()->out(0).reg(); 5065 const Register result = locs()->out(0).reg();
5069 __ cmp(left, ShifterOperand(right)); 5066 __ cmp(left, Operand(right));
5070 ASSERT(result == left); 5067 ASSERT(result == left);
5071 if (is_min) { 5068 if (is_min) {
5072 __ mov(result, ShifterOperand(right), GT); 5069 __ mov(result, Operand(right), GT);
5073 } else { 5070 } else {
5074 __ mov(result, ShifterOperand(right), LT); 5071 __ mov(result, Operand(right), LT);
5075 } 5072 }
5076 } 5073 }
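
Side note, not part of this CL: the equal-operands branch above exists because -0.0 == 0.0 compares equal, so min/max of (-0.0, 0.0) cannot be decided by the comparison alone and the sign bit of the left operand breaks the tie. A C++ sketch of the min case only (helper name is mine; NaN handling is outside this snippet):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

double MinWithNegZeroSketch(double left, double right) {
  if (left != right) return (left < right) ? left : right;
  // Operands compare equal (covers -0.0 vs 0.0): pick left iff its sign bit is set.
  uint64_t bits;
  std::memcpy(&bits, &left, sizeof(bits));
  return (bits >> 63) ? left : right;
}

int main() {
  assert(std::signbit(MinWithNegZeroSketch(-0.0, 0.0)));
  assert(std::signbit(MinWithNegZeroSketch(0.0, -0.0)));
  assert(MinWithNegZeroSketch(1.0, 2.0) == 1.0);
  return 0;
}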
5077 5074
5078 5075
5079 LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Isolate* isolate, 5076 LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Isolate* isolate,
5080 bool opt) const { 5077 bool opt) const {
5081 const intptr_t kNumInputs = 1; 5078 const intptr_t kNumInputs = 1;
5082 const intptr_t kNumTemps = 0; 5079 const intptr_t kNumTemps = 0;
5083 LocationSummary* summary = new(isolate) LocationSummary( 5080 LocationSummary* summary = new(isolate) LocationSummary(
5084 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 5081 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5085 summary->set_in(0, Location::RequiresRegister()); 5082 summary->set_in(0, Location::RequiresRegister());
5086 // We make use of 3-operand instructions by not requiring result register 5083 // We make use of 3-operand instructions by not requiring result register
5087 // to be identical to first input register as on Intel. 5084 // to be identical to first input register as on Intel.
5088 summary->set_out(0, Location::RequiresRegister()); 5085 summary->set_out(0, Location::RequiresRegister());
5089 return summary; 5086 return summary;
5090 } 5087 }
5091 5088
5092 5089
5093 void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5090 void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5094 const Register value = locs()->in(0).reg(); 5091 const Register value = locs()->in(0).reg();
5095 const Register result = locs()->out(0).reg(); 5092 const Register result = locs()->out(0).reg();
5096 switch (op_kind()) { 5093 switch (op_kind()) {
5097 case Token::kNEGATE: { 5094 case Token::kNEGATE: {
5098 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp); 5095 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
5099 __ rsbs(result, value, ShifterOperand(0)); 5096 __ rsbs(result, value, Operand(0));
5100 __ b(deopt, VS); 5097 __ b(deopt, VS);
5101 break; 5098 break;
5102 } 5099 }
5103 case Token::kBIT_NOT: 5100 case Token::kBIT_NOT:
5104 __ mvn(result, ShifterOperand(value)); 5101 __ mvn(result, Operand(value));
5105 // Remove inverted smi-tag. 5102 // Remove inverted smi-tag.
5106 __ bic(result, result, ShifterOperand(kSmiTagMask)); 5103 __ bic(result, result, Operand(kSmiTagMask));
5107 break; 5104 break;
5108 default: 5105 default:
5109 UNREACHABLE(); 5106 UNREACHABLE();
5110 } 5107 }
5111 } 5108 }
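
Not part of the diff: with Smis tagged as value << 1, the mvn of the tagged word is already the tagged bitwise-NOT except for bit 0, which the following bic clears. A tiny C++ sketch (helper name is mine, assuming the kSmiTag == 0 convention):

#include <cassert>
#include <cstdint>

int32_t SmiBitNotSketch(int32_t tagged) {   // tagged == value << 1
  return ~tagged & ~int32_t(1);             // mvn, then bic kSmiTagMask
}

int main() {
  assert(SmiBitNotSketch(5 << 1) == -12);   // -12 is the Smi-tagged ~5 (i.e. -6)
  return 0;
}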
5112 5109
5113 5110
5114 LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Isolate* isolate, 5111 LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Isolate* isolate,
5115 bool opt) const { 5112 bool opt) const {
5116 const intptr_t kNumInputs = 1; 5113 const intptr_t kNumInputs = 1;
(...skipping 394 matching lines...) Expand 10 before | Expand all | Expand 10 after
5511 PairLocation* pair = locs()->in(0).AsPairLocation(); 5508 PairLocation* pair = locs()->in(0).AsPairLocation();
5512 Location in_loc = pair->At(index()); 5509 Location in_loc = pair->At(index());
5513 if (representation() == kUnboxedDouble) { 5510 if (representation() == kUnboxedDouble) {
5514 const QRegister out = locs()->out(0).fpu_reg(); 5511 const QRegister out = locs()->out(0).fpu_reg();
5515 const QRegister in = in_loc.fpu_reg(); 5512 const QRegister in = in_loc.fpu_reg();
5516 __ vmovq(out, in); 5513 __ vmovq(out, in);
5517 } else { 5514 } else {
5518 ASSERT(representation() == kTagged); 5515 ASSERT(representation() == kTagged);
5519 const Register out = locs()->out(0).reg(); 5516 const Register out = locs()->out(0).reg();
5520 const Register in = in_loc.reg(); 5517 const Register in = in_loc.reg();
5521 __ mov(out, ShifterOperand(in)); 5518 __ mov(out, Operand(in));
5522 } 5519 }
5523 } 5520 }
5524 5521
5525 5522
5526 LocationSummary* MergedMathInstr::MakeLocationSummary(Isolate* isolate, 5523 LocationSummary* MergedMathInstr::MakeLocationSummary(Isolate* isolate,
5527 bool opt) const { 5524 bool opt) const {
5528 if (kind() == MergedMathInstr::kTruncDivMod) { 5525 if (kind() == MergedMathInstr::kTruncDivMod) {
5529 const intptr_t kNumInputs = 2; 5526 const intptr_t kNumInputs = 2;
5530 const intptr_t kNumTemps = 2; 5527 const intptr_t kNumTemps = 2;
5531 LocationSummary* summary = new(isolate) LocationSummary( 5528 LocationSummary* summary = new(isolate) LocationSummary(
(...skipping 20 matching lines...)
5552 if (kind() == MergedMathInstr::kTruncDivMod) { 5549 if (kind() == MergedMathInstr::kTruncDivMod) {
5553 const Register left = locs()->in(0).reg(); 5550 const Register left = locs()->in(0).reg();
5554 const Register right = locs()->in(1).reg(); 5551 const Register right = locs()->in(1).reg();
5555 ASSERT(locs()->out(0).IsPairLocation()); 5552 ASSERT(locs()->out(0).IsPairLocation());
5556 PairLocation* pair = locs()->out(0).AsPairLocation(); 5553 PairLocation* pair = locs()->out(0).AsPairLocation();
5557 const Register result_div = pair->At(0).reg(); 5554 const Register result_div = pair->At(0).reg();
5558 const Register result_mod = pair->At(1).reg(); 5555 const Register result_mod = pair->At(1).reg();
5559 Range* right_range = InputAt(1)->definition()->range(); 5556 Range* right_range = InputAt(1)->definition()->range();
5560 if ((right_range == NULL) || right_range->Overlaps(0, 0)) { 5557 if ((right_range == NULL) || right_range->Overlaps(0, 0)) {
5561 // Handle divide by zero in runtime. 5558 // Handle divide by zero in runtime.
5562 __ cmp(right, ShifterOperand(0)); 5559 __ cmp(right, Operand(0));
5563 __ b(deopt, EQ); 5560 __ b(deopt, EQ);
5564 } 5561 }
5565 const Register temp = locs()->temp(0).reg(); 5562 const Register temp = locs()->temp(0).reg();
5566 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg()); 5563 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
5567 5564
5568 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp. 5565 __ Asr(temp, left, kSmiTagSize); // SmiUntag left into temp.
5569 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 5566 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
5570 5567
5571 __ IntegerDivide(result_div, temp, IP, dtemp, DTMP); 5568 __ IntegerDivide(result_div, temp, IP, dtemp, DTMP);
5572 5569
5573 // Check the corner case of dividing the 'MIN_SMI' by -1, in which 5570 // Check the corner case of dividing the 'MIN_SMI' by -1, in which
5574 // case we cannot tag the result. 5571 // case we cannot tag the result.
5575 __ CompareImmediate(result_div, 0x40000000); 5572 __ CompareImmediate(result_div, 0x40000000);
5576 __ b(deopt, EQ); 5573 __ b(deopt, EQ);
5577 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP. 5574 __ Asr(IP, right, kSmiTagSize); // SmiUntag right into IP.
5578 // result_mod <- left - right * result_div. 5575 // result_mod <- left - right * result_div.
5579 __ mls(result_mod, IP, result_div, temp); 5576 __ mls(result_mod, IP, result_div, temp);
5580 __ SmiTag(result_div); 5577 __ SmiTag(result_div);
5581 __ SmiTag(result_mod); 5578 __ SmiTag(result_mod);
5582 // Correct MOD result: 5579 // Correct MOD result:
5583 // res = left % right; 5580 // res = left % right;
5584 // if (res < 0) { 5581 // if (res < 0) {
5585 // if (right < 0) { 5582 // if (right < 0) {
5586 // res = res - right; 5583 // res = res - right;
5587 // } else { 5584 // } else {
5588 // res = res + right; 5585 // res = res + right;
5589 // } 5586 // }
5590 // } 5587 // }
5591 Label done; 5588 Label done;
5592 __ cmp(result_mod, ShifterOperand(0)); 5589 __ cmp(result_mod, Operand(0));
5593 __ b(&done, GE); 5590 __ b(&done, GE);
5594 // Result is negative, adjust it. 5591 // Result is negative, adjust it.
5595 __ cmp(right, ShifterOperand(0)); 5592 __ cmp(right, Operand(0));
5596 __ sub(result_mod, result_mod, ShifterOperand(right), LT); 5593 __ sub(result_mod, result_mod, Operand(right), LT);
5597 __ add(result_mod, result_mod, ShifterOperand(right), GE); 5594 __ add(result_mod, result_mod, Operand(right), GE);
5598 __ Bind(&done); 5595 __ Bind(&done);
5599 5596
5600 return; 5597 return;
5601 } 5598 }
5602 if (kind() == MergedMathInstr::kSinCos) { 5599 if (kind() == MergedMathInstr::kSinCos) {
5603 UNIMPLEMENTED(); 5600 UNIMPLEMENTED();
5604 } 5601 }
5605 UNIMPLEMENTED(); 5602 UNIMPLEMENTED();
5606 } 5603 }
5607 5604
(...skipping 82 matching lines...)
5690 } 5687 }
5691 5688
5692 ASSERT((unary_checks().GetReceiverClassIdAt(0) != kSmiCid) || 5689 ASSERT((unary_checks().GetReceiverClassIdAt(0) != kSmiCid) ||
5693 (unary_checks().NumberOfChecks() > 1)); 5690 (unary_checks().NumberOfChecks() > 1));
5694 const Register value = locs()->in(0).reg(); 5691 const Register value = locs()->in(0).reg();
5695 const Register temp = locs()->temp(0).reg(); 5692 const Register temp = locs()->temp(0).reg();
5696 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason); 5693 Label* deopt = compiler->AddDeoptStub(deopt_id(), deopt_reason);
5697 Label is_ok; 5694 Label is_ok;
5698 intptr_t cix = 0; 5695 intptr_t cix = 0;
5699 if (unary_checks().GetReceiverClassIdAt(cix) == kSmiCid) { 5696 if (unary_checks().GetReceiverClassIdAt(cix) == kSmiCid) {
5700 __ tst(value, ShifterOperand(kSmiTagMask)); 5697 __ tst(value, Operand(kSmiTagMask));
5701 __ b(&is_ok, EQ); 5698 __ b(&is_ok, EQ);
5702 cix++; // Skip first check. 5699 cix++; // Skip first check.
5703 } else { 5700 } else {
5704 __ tst(value, ShifterOperand(kSmiTagMask)); 5701 __ tst(value, Operand(kSmiTagMask));
5705 __ b(deopt, EQ); 5702 __ b(deopt, EQ);
5706 } 5703 }
5707 __ LoadClassId(temp, value); 5704 __ LoadClassId(temp, value);
5708 const intptr_t num_checks = unary_checks().NumberOfChecks(); 5705 const intptr_t num_checks = unary_checks().NumberOfChecks();
5709 for (intptr_t i = cix; i < num_checks; i++) { 5706 for (intptr_t i = cix; i < num_checks; i++) {
5710 ASSERT(unary_checks().GetReceiverClassIdAt(i) != kSmiCid); 5707 ASSERT(unary_checks().GetReceiverClassIdAt(i) != kSmiCid);
5711 __ CompareImmediate(temp, unary_checks().GetReceiverClassIdAt(i)); 5708 __ CompareImmediate(temp, unary_checks().GetReceiverClassIdAt(i));
5712 if (i == (num_checks - 1)) { 5709 if (i == (num_checks - 1)) {
5713 __ b(deopt, NE); 5710 __ b(deopt, NE);
5714 } else { 5711 } else {
(...skipping 11 matching lines...)
5726 LocationSummary* summary = new(isolate) LocationSummary( 5723 LocationSummary* summary = new(isolate) LocationSummary(
5727 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 5724 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5728 summary->set_in(0, Location::RequiresRegister()); 5725 summary->set_in(0, Location::RequiresRegister());
5729 return summary; 5726 return summary;
5730 } 5727 }
5731 5728
5732 5729
5733 void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 5730 void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5734 const Register value = locs()->in(0).reg(); 5731 const Register value = locs()->in(0).reg();
5735 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi); 5732 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
5736 __ tst(value, ShifterOperand(kSmiTagMask)); 5733 __ tst(value, Operand(kSmiTagMask));
5737 __ b(deopt, NE); 5734 __ b(deopt, NE);
5738 } 5735 }
5739 5736
5740 5737
5741 LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Isolate* isolate, 5738 LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Isolate* isolate,
5742 bool opt) const { 5739 bool opt) const {
5743 const intptr_t kNumInputs = 2; 5740 const intptr_t kNumInputs = 2;
5744 const intptr_t kNumTemps = 0; 5741 const intptr_t kNumTemps = 0;
5745 LocationSummary* locs = new(isolate) LocationSummary( 5742 LocationSummary* locs = new(isolate) LocationSummary(
5746 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall); 5743 isolate, kNumInputs, kNumTemps, LocationSummary::kNoCall);
(...skipping 26 matching lines...)
5773 __ CompareImmediate(length, reinterpret_cast<int32_t>(index.raw())); 5770 __ CompareImmediate(length, reinterpret_cast<int32_t>(index.raw()));
5774 __ b(deopt, LS); 5771 __ b(deopt, LS);
5775 } else if (length_loc.IsConstant()) { 5772 } else if (length_loc.IsConstant()) {
5776 const Smi& length = Smi::Cast(length_loc.constant()); 5773 const Smi& length = Smi::Cast(length_loc.constant());
5777 const Register index = index_loc.reg(); 5774 const Register index = index_loc.reg();
5778 __ CompareImmediate(index, reinterpret_cast<int32_t>(length.raw())); 5775 __ CompareImmediate(index, reinterpret_cast<int32_t>(length.raw()));
5779 __ b(deopt, CS); 5776 __ b(deopt, CS);
5780 } else { 5777 } else {
5781 const Register length = length_loc.reg(); 5778 const Register length = length_loc.reg();
5782 const Register index = index_loc.reg(); 5779 const Register index = index_loc.reg();
5783 __ cmp(index, ShifterOperand(length)); 5780 __ cmp(index, Operand(length));
5784 __ b(deopt, CS); 5781 __ b(deopt, CS);
5785 } 5782 }
5786 } 5783 }
5787 5784
5788 5785
5789 static void EmitJavascriptIntOverflowCheck(FlowGraphCompiler* compiler, 5786 static void EmitJavascriptIntOverflowCheck(FlowGraphCompiler* compiler,
5790 Label* overflow, 5787 Label* overflow,
5791 Register result_lo, 5788 Register result_lo,
5792 Register result_hi) { 5789 Register result_hi) {
5793 // Compare upper half. 5790 // Compare upper half.
(...skipping 43 matching lines...)
5837 result_lo, 5834 result_lo,
5838 value, 5835 value,
5839 Mint::value_offset() - kHeapObjectTag); 5836 Mint::value_offset() - kHeapObjectTag);
5840 // Load high word. 5837 // Load high word.
5841 __ LoadFromOffset(kWord, 5838 __ LoadFromOffset(kWord,
5842 result_hi, 5839 result_hi,
5843 value, 5840 value,
5844 Mint::value_offset() - kHeapObjectTag + kWordSize); 5841 Mint::value_offset() - kHeapObjectTag + kWordSize);
5845 } else if (value_cid == kSmiCid) { 5842 } else if (value_cid == kSmiCid) {
5846 // Load Smi into result_lo. 5843 // Load Smi into result_lo.
5847 __ mov(result_lo, ShifterOperand(value)); 5844 __ mov(result_lo, Operand(value));
5848 // Untag. 5845 // Untag.
5849 __ SmiUntag(result_lo); 5846 __ SmiUntag(result_lo);
5850 __ SignFill(result_hi, result_lo); 5847 __ SignFill(result_hi, result_lo);
5851 } else { 5848 } else {
5852 const Register temp = locs()->temp(0).reg(); 5849 const Register temp = locs()->temp(0).reg();
5853 Label* deopt = compiler->AddDeoptStub(deopt_id_, 5850 Label* deopt = compiler->AddDeoptStub(deopt_id_,
5854 ICData::kDeoptUnboxInteger); 5851 ICData::kDeoptUnboxInteger);
5855 Label is_smi, done; 5852 Label is_smi, done;
5856 __ tst(value, ShifterOperand(kSmiTagMask)); 5853 __ tst(value, Operand(kSmiTagMask));
5857 __ b(&is_smi, EQ); 5854 __ b(&is_smi, EQ);
5858 __ CompareClassId(value, kMintCid, temp); 5855 __ CompareClassId(value, kMintCid, temp);
5859 __ b(deopt, NE); 5856 __ b(deopt, NE);
5860 5857
5861 // It's a Mint. 5858 // It's a Mint.
5862 // Load low word. 5859 // Load low word.
5863 __ LoadFromOffset(kWord, 5860 __ LoadFromOffset(kWord,
5864 result_lo, 5861 result_lo,
5865 value, 5862 value,
5866 Mint::value_offset() - kHeapObjectTag); 5863 Mint::value_offset() - kHeapObjectTag);
5867 // Load high word. 5864 // Load high word.
5868 __ LoadFromOffset(kWord, 5865 __ LoadFromOffset(kWord,
5869 result_hi, 5866 result_hi,
5870 value, 5867 value,
5871 Mint::value_offset() - kHeapObjectTag + kWordSize); 5868 Mint::value_offset() - kHeapObjectTag + kWordSize);
5872 __ b(&done); 5869 __ b(&done);
5873 5870
5874 // It's a Smi. 5871 // It's a Smi.
5875 __ Bind(&is_smi); 5872 __ Bind(&is_smi);
5876 // Load Smi into result_lo. 5873 // Load Smi into result_lo.
5877 __ mov(result_lo, ShifterOperand(value)); 5874 __ mov(result_lo, Operand(value));
5878 // Untag. 5875 // Untag.
5879 __ SmiUntag(result_lo); 5876 __ SmiUntag(result_lo);
5880 // Sign extend result_lo into result_hi. 5877 // Sign extend result_lo into result_hi.
5881 __ SignFill(result_hi, result_lo); 5878 __ SignFill(result_hi, result_lo);
5882 __ Bind(&done); 5879 __ Bind(&done);
5883 } 5880 }
5884 } 5881 }
5885 5882
5886 5883
5887 LocationSummary* BoxIntegerInstr::MakeLocationSummary(Isolate* isolate, 5884 LocationSummary* BoxIntegerInstr::MakeLocationSummary(Isolate* isolate,
(...skipping 27 matching lines...)
5915 const ExternalLabel label(stub.EntryPoint()); 5912 const ExternalLabel label(stub.EntryPoint());
5916 5913
5917 LocationSummary* locs = instruction_->locs(); 5914 LocationSummary* locs = instruction_->locs();
5918 locs->live_registers()->Remove(locs->out(0)); 5915 locs->live_registers()->Remove(locs->out(0));
5919 5916
5920 compiler->SaveLiveRegisters(locs); 5917 compiler->SaveLiveRegisters(locs);
5921 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position. 5918 compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
5922 &label, 5919 &label,
5923 PcDescriptors::kOther, 5920 PcDescriptors::kOther,
5924 locs); 5921 locs);
5925 __ mov(locs->out(0).reg(), ShifterOperand(R0)); 5922 __ mov(locs->out(0).reg(), Operand(R0));
5926 compiler->RestoreLiveRegisters(locs); 5923 compiler->RestoreLiveRegisters(locs);
5927 5924
5928 __ b(exit_label()); 5925 __ b(exit_label());
5929 } 5926 }
5930 5927
5931 private: 5928 private:
5932 BoxIntegerInstr* instruction_; 5929 BoxIntegerInstr* instruction_;
5933 }; 5930 };
5934 5931
5935 5932
(...skipping 24 matching lines...)
5960 __ b(&not_smi); 5957 __ b(&not_smi);
5961 5958
5962 __ Bind(&maybe_neg_smi); 5959 __ Bind(&maybe_neg_smi);
5963 __ CompareImmediate(value_lo, 0); 5960 __ CompareImmediate(value_lo, 0);
5964 __ b(&not_smi, GE); 5961 __ b(&not_smi, GE);
5965 __ CompareImmediate(value_lo, kSmiMin); 5962 __ CompareImmediate(value_lo, kSmiMin);
5966 __ b(&not_smi, LT); 5963 __ b(&not_smi, LT);
5967 5964
5968 // lo is a Smi. Tag it and return. 5965 // lo is a Smi. Tag it and return.
5969 __ Bind(&is_smi); 5966 __ Bind(&is_smi);
5970 __ mov(out_reg, ShifterOperand(value_lo)); 5967 __ mov(out_reg, Operand(value_lo));
5971 __ SmiTag(out_reg); 5968 __ SmiTag(out_reg);
5972 __ b(&done); 5969 __ b(&done);
5973 5970
5974 // Not a smi. Box it. 5971 // Not a smi. Box it.
5975 __ Bind(&not_smi); 5972 __ Bind(&not_smi);
5976 __ TryAllocate( 5973 __ TryAllocate(
5977 Class::ZoneHandle(Isolate::Current()->object_store()->mint_class()), 5974 Class::ZoneHandle(Isolate::Current()->object_store()->mint_class()),
5978 slow_path->entry_label(), 5975 slow_path->entry_label(),
5979 out_reg, 5976 out_reg,
5980 tmp); 5977 tmp);
(...skipping 36 matching lines...)
6017 PairLocation* out_pair = locs()->out(0).AsPairLocation(); 6014 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6018 Register out_lo = out_pair->At(0).reg(); 6015 Register out_lo = out_pair->At(0).reg();
6019 Register out_hi = out_pair->At(1).reg(); 6016 Register out_hi = out_pair->At(1).reg();
6020 6017
6021 Label* deopt = NULL; 6018 Label* deopt = NULL;
6022 if (FLAG_throw_on_javascript_int_overflow) { 6019 if (FLAG_throw_on_javascript_int_overflow) {
6023 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); 6020 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
6024 } 6021 }
6025 switch (op_kind()) { 6022 switch (op_kind()) {
6026 case Token::kBIT_AND: { 6023 case Token::kBIT_AND: {
6027 __ and_(out_lo, left_lo, ShifterOperand(right_lo)); 6024 __ and_(out_lo, left_lo, Operand(right_lo));
6028 __ and_(out_hi, left_hi, ShifterOperand(right_hi)); 6025 __ and_(out_hi, left_hi, Operand(right_hi));
6029 } 6026 }
6030 break; 6027 break;
6031 case Token::kBIT_OR: { 6028 case Token::kBIT_OR: {
6032 __ orr(out_lo, left_lo, ShifterOperand(right_lo)); 6029 __ orr(out_lo, left_lo, Operand(right_lo));
6033 __ orr(out_hi, left_hi, ShifterOperand(right_hi)); 6030 __ orr(out_hi, left_hi, Operand(right_hi));
6034 } 6031 }
6035 break; 6032 break;
6036 case Token::kBIT_XOR: { 6033 case Token::kBIT_XOR: {
6037 __ eor(out_lo, left_lo, ShifterOperand(right_lo)); 6034 __ eor(out_lo, left_lo, Operand(right_lo));
6038 __ eor(out_hi, left_hi, ShifterOperand(right_hi)); 6035 __ eor(out_hi, left_hi, Operand(right_hi));
6039 } 6036 }
6040 break; 6037 break;
6041 case Token::kADD: 6038 case Token::kADD:
6042 case Token::kSUB: { 6039 case Token::kSUB: {
6043 if (!FLAG_throw_on_javascript_int_overflow) { 6040 if (!FLAG_throw_on_javascript_int_overflow) {
6044 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp); 6041 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryMintOp);
6045 } 6042 }
6046 if (op_kind() == Token::kADD) { 6043 if (op_kind() == Token::kADD) {
6047 __ adds(out_lo, left_lo, ShifterOperand(right_lo)); 6044 __ adds(out_lo, left_lo, Operand(right_lo));
6048 __ adcs(out_hi, left_hi, ShifterOperand(right_hi)); 6045 __ adcs(out_hi, left_hi, Operand(right_hi));
6049 } else { 6046 } else {
6050 ASSERT(op_kind() == Token::kSUB); 6047 ASSERT(op_kind() == Token::kSUB);
6051 __ subs(out_lo, left_lo, ShifterOperand(right_lo)); 6048 __ subs(out_lo, left_lo, Operand(right_lo));
6052 __ sbcs(out_hi, left_hi, ShifterOperand(right_hi)); 6049 __ sbcs(out_hi, left_hi, Operand(right_hi));
6053 } 6050 }
6054 // Deopt on overflow. 6051 // Deopt on overflow.
6055 __ b(deopt, VS); 6052 __ b(deopt, VS);
6056 break; 6053 break;
6057 } 6054 }
6058 default: 6055 default:
6059 UNREACHABLE(); 6056 UNREACHABLE();
6060 break; 6057 break;
6061 } 6058 }
6062 if (FLAG_throw_on_javascript_int_overflow) { 6059 if (FLAG_throw_on_javascript_int_overflow) {
(...skipping 26 matching lines...)
6089 PairLocation* out_pair = locs()->out(0).AsPairLocation(); 6086 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6090 Register out_lo = out_pair->At(0).reg(); 6087 Register out_lo = out_pair->At(0).reg();
6091 Register out_hi = out_pair->At(1).reg(); 6088 Register out_hi = out_pair->At(1).reg();
6092 Register temp = locs()->temp(0).reg(); 6089 Register temp = locs()->temp(0).reg();
6093 6090
6094 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptShiftMintOp); 6091 Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptShiftMintOp);
6095 Label done; 6092 Label done;
6096 6093
6097 // Early out if shift is 0. 6094 // Early out if shift is 0.
6098 __ CompareImmediate(shift, 0); 6095 __ CompareImmediate(shift, 0);
6099 __ mov(out_lo, ShifterOperand(left_lo)); 6096 __ mov(out_lo, Operand(left_lo));
6100 __ mov(out_hi, ShifterOperand(left_hi)); 6097 __ mov(out_hi, Operand(left_hi));
6101 __ b(&done, EQ); 6098 __ b(&done, EQ);
6102 6099
6103 // Untag shift count. 6100 // Untag shift count.
6104 __ SmiUntag(shift); 6101 __ SmiUntag(shift);
6105 6102
6106 // Deopt if shift is negative. 6103 // Deopt if shift is negative.
6107 __ CompareImmediate(shift, 1); 6104 __ CompareImmediate(shift, 1);
6108 __ b(deopt, LT); 6105 __ b(deopt, LT);
6109 6106
6110 // Deopt if shift is larger than 63. 6107 // Deopt if shift is larger than 63.
6111 __ CompareImmediate(shift, 63); 6108 __ CompareImmediate(shift, 63);
6112 __ b(deopt, GT); 6109 __ b(deopt, GT);
6113 6110
6114 switch (op_kind()) { 6111 switch (op_kind()) {
6115 case Token::kSHR: { 6112 case Token::kSHR: {
6116 __ cmp(shift, ShifterOperand(32)); 6113 __ cmp(shift, Operand(32));
6117 6114
6118 __ mov(out_lo, ShifterOperand(out_hi), HI); 6115 __ mov(out_lo, Operand(out_hi), HI);
6119 __ Asr(out_hi, out_hi, 31, HI); 6116 __ Asr(out_hi, out_hi, 31, HI);
6120 __ sub(shift, shift, ShifterOperand(32), HI); 6117 __ sub(shift, shift, Operand(32), HI);
6121 6118
6122 __ rsb(temp, shift, ShifterOperand(32)); 6119 __ rsb(temp, shift, Operand(32));
6123 __ mov(temp, ShifterOperand(out_hi, LSL, temp)); 6120 __ mov(temp, Operand(out_hi, LSL, temp));
6124 __ orr(out_lo, temp, ShifterOperand(out_lo, LSR, shift)); 6121 __ orr(out_lo, temp, Operand(out_lo, LSR, shift));
6125 __ Asr(out_hi, out_hi, shift); 6122 __ Asr(out_hi, out_hi, shift);
6126 break; 6123 break;
6127 } 6124 }
6128 case Token::kSHL: { 6125 case Token::kSHL: {
6129 __ rsbs(temp, shift, ShifterOperand(32)); 6126 __ rsbs(temp, shift, Operand(32));
6130 __ sub(temp, shift, ShifterOperand(32), MI); 6127 __ sub(temp, shift, Operand(32), MI);
6131 __ mov(out_hi, ShifterOperand(out_lo, LSL, temp), MI); 6128 __ mov(out_hi, Operand(out_lo, LSL, temp), MI);
6132 __ mov(out_hi, ShifterOperand(out_hi, LSL, shift), PL); 6129 __ mov(out_hi, Operand(out_hi, LSL, shift), PL);
6133 __ orr(out_hi, out_hi, ShifterOperand(out_lo, LSR, temp), PL); 6130 __ orr(out_hi, out_hi, Operand(out_lo, LSR, temp), PL);
6134 __ mov(out_lo, ShifterOperand(out_lo, LSL, shift)); 6131 __ mov(out_lo, Operand(out_lo, LSL, shift));
6135 6132
6136 // Check for overflow. 6133 // Check for overflow.
6137 6134
6138 // Copy high word from output. 6135 // Copy high word from output.
6139 __ mov(temp, ShifterOperand(out_hi)); 6136 __ mov(temp, Operand(out_hi));
6140 // Shift copy right. 6137 // Shift copy right.
6141 __ Asr(temp, temp, shift); 6138 __ Asr(temp, temp, shift);
6142 // Compare with high word from input. 6139 // Compare with high word from input.
6143 __ cmp(temp, ShifterOperand(left_hi)); 6140 __ cmp(temp, Operand(left_hi));
6144 // Overflow if they aren't equal. 6141 // Overflow if they aren't equal.
6145 __ b(deopt, NE); 6142 __ b(deopt, NE);
6146 break; 6143 break;
6147 } 6144 }
6148 default: 6145 default:
6149 UNREACHABLE(); 6146 UNREACHABLE();
6150 break; 6147 break;
6151 } 6148 }
6152 6149
6153 __ Bind(&done); 6150 __ Bind(&done);
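
Reviewer note, not part of this CL: the register-pair code above synthesizes a 64-bit shift out of 32-bit halves for counts 1..63, plus the overflow check for kSHL. A compact C++ sketch of the kSHR (arithmetic right shift) case only (helper name and test value are mine; assumes arithmetic right shift on signed types, as on ARM):

#include <cassert>
#include <cstdint>

void AsrInt64Sketch(int32_t* lo, int32_t* hi, int shift) {  // 1 <= shift <= 63
  if (shift >= 32) {
    *lo = *hi >> (shift - 32);   // the shifted high word becomes the low word
    *hi = *hi >> 31;             // sign-fill the high word
  } else {
    *lo = int32_t((uint32_t(*lo) >> shift) | (uint32_t(*hi) << (32 - shift)));
    *hi = *hi >> shift;
  }
}

int main() {
  int32_t lo = 0, hi = -2;            // the 64-bit value -2 * 2^32
  AsrInt64Sketch(&lo, &hi, 33);
  assert(lo == -1 && hi == -1);       // (-2 * 2^32) >> 33 == -1
  return 0;
}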
(...skipping 25 matching lines...)
6179 6176
6180 PairLocation* out_pair = locs()->out(0).AsPairLocation(); 6177 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6181 Register out_lo = out_pair->At(0).reg(); 6178 Register out_lo = out_pair->At(0).reg();
6182 Register out_hi = out_pair->At(1).reg(); 6179 Register out_hi = out_pair->At(1).reg();
6183 6180
6184 Label* deopt = NULL; 6181 Label* deopt = NULL;
6185 6182
6186 if (FLAG_throw_on_javascript_int_overflow) { 6183 if (FLAG_throw_on_javascript_int_overflow) {
6187 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryMintOp); 6184 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryMintOp);
6188 } 6185 }
6189 __ mvn(out_lo, ShifterOperand(left_lo)); 6186 __ mvn(out_lo, Operand(left_lo));
6190 __ mvn(out_hi, ShifterOperand(left_hi)); 6187 __ mvn(out_hi, Operand(left_hi));
6191 if (FLAG_throw_on_javascript_int_overflow) { 6188 if (FLAG_throw_on_javascript_int_overflow) {
6192 EmitJavascriptIntOverflowCheck(compiler, deopt, out_lo, out_hi); 6189 EmitJavascriptIntOverflowCheck(compiler, deopt, out_lo, out_hi);
6193 } 6190 }
6194 } 6191 }
6195 6192
6196 6193
6197 LocationSummary* ThrowInstr::MakeLocationSummary(Isolate* isolate, 6194 LocationSummary* ThrowInstr::MakeLocationSummary(Isolate* isolate,
6198 bool opt) const { 6195 bool opt) const {
6199 return new(isolate) LocationSummary(isolate, 0, 0, LocationSummary::kCall); 6196 return new(isolate) LocationSummary(isolate, 0, 0, LocationSummary::kCall);
6200 } 6197 }
(...skipping 88 matching lines...)
6289 LocationSummary* CurrentContextInstr::MakeLocationSummary(Isolate* isolate, 6286 LocationSummary* CurrentContextInstr::MakeLocationSummary(Isolate* isolate,
6290 bool opt) const { 6287 bool opt) const {
6291 return LocationSummary::Make(isolate, 6288 return LocationSummary::Make(isolate,
6292 0, 6289 0,
6293 Location::RequiresRegister(), 6290 Location::RequiresRegister(),
6294 LocationSummary::kNoCall); 6291 LocationSummary::kNoCall);
6295 } 6292 }
6296 6293
6297 6294
6298 void CurrentContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 6295 void CurrentContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6299 __ mov(locs()->out(0).reg(), ShifterOperand(CTX)); 6296 __ mov(locs()->out(0).reg(), Operand(CTX));
6300 } 6297 }
6301 6298
6302 6299
6303 LocationSummary* StrictCompareInstr::MakeLocationSummary(Isolate* isolate, 6300 LocationSummary* StrictCompareInstr::MakeLocationSummary(Isolate* isolate,
6304 bool opt) const { 6301 bool opt) const {
6305 const intptr_t kNumInputs = 2; 6302 const intptr_t kNumInputs = 2;
6306 const intptr_t kNumTemps = 0; 6303 const intptr_t kNumTemps = 0;
6307 if (needs_number_check()) { 6304 if (needs_number_check()) {
6308 LocationSummary* locs = new(isolate) LocationSummary( 6305 LocationSummary* locs = new(isolate) LocationSummary(
6309 isolate, kNumInputs, kNumTemps, LocationSummary::kCall); 6306 isolate, kNumInputs, kNumTemps, LocationSummary::kCall);
(...skipping 71 matching lines...)
6381 Location::RequiresRegister(), 6378 Location::RequiresRegister(),
6382 LocationSummary::kNoCall); 6379 LocationSummary::kNoCall);
6383 } 6380 }
6384 6381
6385 6382
6386 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 6383 void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6387 const Register value = locs()->in(0).reg(); 6384 const Register value = locs()->in(0).reg();
6388 const Register result = locs()->out(0).reg(); 6385 const Register result = locs()->out(0).reg();
6389 6386
6390 __ LoadObject(result, Bool::True()); 6387 __ LoadObject(result, Bool::True());
6391 __ cmp(result, ShifterOperand(value)); 6388 __ cmp(result, Operand(value));
6392 __ LoadObject(result, Bool::False(), EQ); 6389 __ LoadObject(result, Bool::False(), EQ);
6393 } 6390 }
6394 6391
6395 6392
6396 LocationSummary* AllocateObjectInstr::MakeLocationSummary(Isolate* isolate, 6393 LocationSummary* AllocateObjectInstr::MakeLocationSummary(Isolate* isolate,
6397 bool opt) const { 6394 bool opt) const {
6398 return MakeCallSummary(); 6395 return MakeCallSummary();
6399 } 6396 }
6400 6397
6401 6398
6402 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) { 6399 void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6403 const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls())); 6400 const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls()));
6404 const ExternalLabel label(stub.EntryPoint()); 6401 const ExternalLabel label(stub.EntryPoint());
6405 compiler->GenerateCall(token_pos(), 6402 compiler->GenerateCall(token_pos(),
6406 &label, 6403 &label,
6407 PcDescriptors::kOther, 6404 PcDescriptors::kOther,
6408 locs()); 6405 locs());
6409 __ Drop(ArgumentCount()); // Discard arguments. 6406 __ Drop(ArgumentCount()); // Discard arguments.
6410 } 6407 }
6411 6408
6412 } // namespace dart 6409 } // namespace dart
6413 6410
6414 #endif // defined TARGET_ARCH_ARM 6411 #endif // defined TARGET_ARCH_ARM