Chromium Code Reviews

Side by Side Diff: src/mips64/macro-assembler-mips64.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes Created 6 years, 4 months ago
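For context on the macros being renamed: in V8, ASSERT (now DCHECK) is a debug-only check that is compiled out of release builds, while CHECK stays enabled everywhere. Below is a minimal sketch of that distinction, assuming a DEBUG define along the lines of V8's; it is illustrative only and not V8's actual macro definitions, which also report file/line and route through V8's own fatal-error handling.

  // Illustrative sketch only: a debug-only DCHECK vs. an always-on CHECK.
  #include <cstdio>
  #include <cstdlib>

  #ifdef DEBUG
  #define DCHECK(condition)                                           \
    do {                                                              \
      if (!(condition)) {                                             \
        std::fprintf(stderr, "Debug check failed: %s\n", #condition); \
        std::abort();                                                 \
      }                                                               \
    } while (false)
  #else
  #define DCHECK(condition) ((void)0)  // Compiled out of release builds.
  #endif

  #define CHECK(condition)                                            \
    do {                                                              \
      if (!(condition)) {                                             \
        std::fprintf(stderr, "Check failed: %s\n", #condition);       \
        std::abort();                                                 \
      }                                                               \
    } while (false)

With that in mind, the listing below (old code on the left, new code on the right) is a mechanical rename: each ASSERT, ASSERT_EQ, and so on becomes the corresponding DCHECK or DCHECK_EQ, and the MIPS64 code emitted by the macro assembler is unchanged.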
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <limits.h> // For LONG_MIN, LONG_MAX. 5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6 6
7 #include "src/v8.h" 7 #include "src/v8.h"
8 8
9 #if V8_TARGET_ARCH_MIPS64 9 #if V8_TARGET_ARCH_MIPS64
10 10
(...skipping 14 matching lines...)
25 if (isolate() != NULL) { 25 if (isolate() != NULL) {
26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), 26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
27 isolate()); 27 isolate());
28 } 28 }
29 } 29 }
30 30
31 31
32 void MacroAssembler::Load(Register dst, 32 void MacroAssembler::Load(Register dst,
33 const MemOperand& src, 33 const MemOperand& src,
34 Representation r) { 34 Representation r) {
35 ASSERT(!r.IsDouble()); 35 DCHECK(!r.IsDouble());
36 if (r.IsInteger8()) { 36 if (r.IsInteger8()) {
37 lb(dst, src); 37 lb(dst, src);
38 } else if (r.IsUInteger8()) { 38 } else if (r.IsUInteger8()) {
39 lbu(dst, src); 39 lbu(dst, src);
40 } else if (r.IsInteger16()) { 40 } else if (r.IsInteger16()) {
41 lh(dst, src); 41 lh(dst, src);
42 } else if (r.IsUInteger16()) { 42 } else if (r.IsUInteger16()) {
43 lhu(dst, src); 43 lhu(dst, src);
44 } else if (r.IsInteger32()) { 44 } else if (r.IsInteger32()) {
45 lw(dst, src); 45 lw(dst, src);
46 } else { 46 } else {
47 ld(dst, src); 47 ld(dst, src);
48 } 48 }
49 } 49 }
50 50
51 51
52 void MacroAssembler::Store(Register src, 52 void MacroAssembler::Store(Register src,
53 const MemOperand& dst, 53 const MemOperand& dst,
54 Representation r) { 54 Representation r) {
55 ASSERT(!r.IsDouble()); 55 DCHECK(!r.IsDouble());
56 if (r.IsInteger8() || r.IsUInteger8()) { 56 if (r.IsInteger8() || r.IsUInteger8()) {
57 sb(src, dst); 57 sb(src, dst);
58 } else if (r.IsInteger16() || r.IsUInteger16()) { 58 } else if (r.IsInteger16() || r.IsUInteger16()) {
59 sh(src, dst); 59 sh(src, dst);
60 } else if (r.IsInteger32()) { 60 } else if (r.IsInteger32()) {
61 sw(src, dst); 61 sw(src, dst);
62 } else { 62 } else {
63 if (r.IsHeapObject()) { 63 if (r.IsHeapObject()) {
64 AssertNotSmi(src); 64 AssertNotSmi(src);
65 } else if (r.IsSmi()) { 65 } else if (r.IsSmi()) {
(...skipping 32 matching lines...)
98 Branch(2, NegateCondition(cond), src1, src2); 98 Branch(2, NegateCondition(cond), src1, src2);
99 sd(source, MemOperand(s6, index << kPointerSizeLog2)); 99 sd(source, MemOperand(s6, index << kPointerSizeLog2));
100 } 100 }
101 101
102 102
103 // Push and pop all registers that can hold pointers. 103 // Push and pop all registers that can hold pointers.
104 void MacroAssembler::PushSafepointRegisters() { 104 void MacroAssembler::PushSafepointRegisters() {
105 // Safepoints expect a block of kNumSafepointRegisters values on the 105 // Safepoints expect a block of kNumSafepointRegisters values on the
106 // stack, so adjust the stack for unsaved registers. 106 // stack, so adjust the stack for unsaved registers.
107 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 107 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
108 ASSERT(num_unsaved >= 0); 108 DCHECK(num_unsaved >= 0);
109 if (num_unsaved > 0) { 109 if (num_unsaved > 0) {
110 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize)); 110 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
111 } 111 }
112 MultiPush(kSafepointSavedRegisters); 112 MultiPush(kSafepointSavedRegisters);
113 } 113 }
114 114
115 115
116 void MacroAssembler::PopSafepointRegisters() { 116 void MacroAssembler::PopSafepointRegisters() {
117 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 117 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
118 MultiPop(kSafepointSavedRegisters); 118 MultiPop(kSafepointSavedRegisters);
(...skipping 31 matching lines...)
150 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize; 150 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
151 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; 151 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
152 return MemOperand(sp, doubles_size + register_offset); 152 return MemOperand(sp, doubles_size + register_offset);
153 } 153 }
154 154
155 155
156 void MacroAssembler::InNewSpace(Register object, 156 void MacroAssembler::InNewSpace(Register object,
157 Register scratch, 157 Register scratch,
158 Condition cc, 158 Condition cc,
159 Label* branch) { 159 Label* branch) {
160 ASSERT(cc == eq || cc == ne); 160 DCHECK(cc == eq || cc == ne);
161 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); 161 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
162 Branch(branch, cc, scratch, 162 Branch(branch, cc, scratch,
163 Operand(ExternalReference::new_space_start(isolate()))); 163 Operand(ExternalReference::new_space_start(isolate())));
164 } 164 }
165 165
166 166
167 void MacroAssembler::RecordWriteField( 167 void MacroAssembler::RecordWriteField(
168 Register object, 168 Register object,
169 int offset, 169 int offset,
170 Register value, 170 Register value,
171 Register dst, 171 Register dst,
172 RAStatus ra_status, 172 RAStatus ra_status,
173 SaveFPRegsMode save_fp, 173 SaveFPRegsMode save_fp,
174 RememberedSetAction remembered_set_action, 174 RememberedSetAction remembered_set_action,
175 SmiCheck smi_check, 175 SmiCheck smi_check,
176 PointersToHereCheck pointers_to_here_check_for_value) { 176 PointersToHereCheck pointers_to_here_check_for_value) {
177 ASSERT(!AreAliased(value, dst, t8, object)); 177 DCHECK(!AreAliased(value, dst, t8, object));
178 // First, check if a write barrier is even needed. The tests below 178 // First, check if a write barrier is even needed. The tests below
179 // catch stores of Smis. 179 // catch stores of Smis.
180 Label done; 180 Label done;
181 181
182 // Skip barrier if writing a smi. 182 // Skip barrier if writing a smi.
183 if (smi_check == INLINE_SMI_CHECK) { 183 if (smi_check == INLINE_SMI_CHECK) {
184 JumpIfSmi(value, &done); 184 JumpIfSmi(value, &done);
185 } 185 }
186 186
187 // Although the object register is tagged, the offset is relative to the start 187 // Although the object register is tagged, the offset is relative to the start
188 // of the object, so the offset must be a multiple of kPointerSize. 188 // of the object, so the offset must be a multiple of kPointerSize.
189 ASSERT(IsAligned(offset, kPointerSize)); 189 DCHECK(IsAligned(offset, kPointerSize));
190 190
191 Daddu(dst, object, Operand(offset - kHeapObjectTag)); 191 Daddu(dst, object, Operand(offset - kHeapObjectTag));
192 if (emit_debug_code()) { 192 if (emit_debug_code()) {
193 Label ok; 193 Label ok;
194 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1)); 194 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
195 Branch(&ok, eq, t8, Operand(zero_reg)); 195 Branch(&ok, eq, t8, Operand(zero_reg));
196 stop("Unaligned cell in write barrier"); 196 stop("Unaligned cell in write barrier");
197 bind(&ok); 197 bind(&ok);
198 } 198 }
199 199
(...skipping 18 matching lines...)
218 218
219 219
220 // Will clobber 4 registers: object, map, dst, ip. The 220 // Will clobber 4 registers: object, map, dst, ip. The
221 // register 'object' contains a heap object pointer. 221 // register 'object' contains a heap object pointer.
222 void MacroAssembler::RecordWriteForMap(Register object, 222 void MacroAssembler::RecordWriteForMap(Register object,
223 Register map, 223 Register map,
224 Register dst, 224 Register dst,
225 RAStatus ra_status, 225 RAStatus ra_status,
226 SaveFPRegsMode fp_mode) { 226 SaveFPRegsMode fp_mode) {
227 if (emit_debug_code()) { 227 if (emit_debug_code()) {
228 ASSERT(!dst.is(at)); 228 DCHECK(!dst.is(at));
229 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset)); 229 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
230 Check(eq, 230 Check(eq,
231 kWrongAddressOrValuePassedToRecordWrite, 231 kWrongAddressOrValuePassedToRecordWrite,
232 dst, 232 dst,
233 Operand(isolate()->factory()->meta_map())); 233 Operand(isolate()->factory()->meta_map()));
234 } 234 }
235 235
236 if (!FLAG_incremental_marking) { 236 if (!FLAG_incremental_marking) {
237 return; 237 return;
238 } 238 }
(...skipping 58 matching lines...)
297 // tag is shifted away. 297 // tag is shifted away.
298 void MacroAssembler::RecordWrite( 298 void MacroAssembler::RecordWrite(
299 Register object, 299 Register object,
300 Register address, 300 Register address,
301 Register value, 301 Register value,
302 RAStatus ra_status, 302 RAStatus ra_status,
303 SaveFPRegsMode fp_mode, 303 SaveFPRegsMode fp_mode,
304 RememberedSetAction remembered_set_action, 304 RememberedSetAction remembered_set_action,
305 SmiCheck smi_check, 305 SmiCheck smi_check,
306 PointersToHereCheck pointers_to_here_check_for_value) { 306 PointersToHereCheck pointers_to_here_check_for_value) {
307 ASSERT(!AreAliased(object, address, value, t8)); 307 DCHECK(!AreAliased(object, address, value, t8));
308 ASSERT(!AreAliased(object, address, value, t9)); 308 DCHECK(!AreAliased(object, address, value, t9));
309 309
310 if (emit_debug_code()) { 310 if (emit_debug_code()) {
311 ld(at, MemOperand(address)); 311 ld(at, MemOperand(address));
312 Assert( 312 Assert(
313 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value)); 313 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
314 } 314 }
315 315
316 if (remembered_set_action == OMIT_REMEMBERED_SET && 316 if (remembered_set_action == OMIT_REMEMBERED_SET &&
317 !FLAG_incremental_marking) { 317 !FLAG_incremental_marking) {
318 return; 318 return;
319 } 319 }
320 320
321 // First, check if a write barrier is even needed. The tests below 321 // First, check if a write barrier is even needed. The tests below
322 // catch stores of smis and stores into the young generation. 322 // catch stores of smis and stores into the young generation.
323 Label done; 323 Label done;
324 324
325 if (smi_check == INLINE_SMI_CHECK) { 325 if (smi_check == INLINE_SMI_CHECK) {
326 ASSERT_EQ(0, kSmiTag); 326 DCHECK_EQ(0, kSmiTag);
327 JumpIfSmi(value, &done); 327 JumpIfSmi(value, &done);
328 } 328 }
329 329
330 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { 330 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
331 CheckPageFlag(value, 331 CheckPageFlag(value,
332 value, // Used as scratch. 332 value, // Used as scratch.
333 MemoryChunk::kPointersToHereAreInterestingMask, 333 MemoryChunk::kPointersToHereAreInterestingMask,
334 eq, 334 eq,
335 &done); 335 &done);
336 } 336 }
(...skipping 48 matching lines...)
385 li(t8, Operand(store_buffer)); 385 li(t8, Operand(store_buffer));
386 ld(scratch, MemOperand(t8)); 386 ld(scratch, MemOperand(t8));
387 // Store pointer to buffer and increment buffer top. 387 // Store pointer to buffer and increment buffer top.
388 sd(address, MemOperand(scratch)); 388 sd(address, MemOperand(scratch));
389 Daddu(scratch, scratch, kPointerSize); 389 Daddu(scratch, scratch, kPointerSize);
390 // Write back new top of buffer. 390 // Write back new top of buffer.
391 sd(scratch, MemOperand(t8)); 391 sd(scratch, MemOperand(t8));
392 // Call stub on end of buffer. 392 // Call stub on end of buffer.
393 // Check for end of buffer. 393 // Check for end of buffer.
394 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); 394 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
395 ASSERT(!scratch.is(t8)); 395 DCHECK(!scratch.is(t8));
396 if (and_then == kFallThroughAtEnd) { 396 if (and_then == kFallThroughAtEnd) {
397 Branch(&done, eq, t8, Operand(zero_reg)); 397 Branch(&done, eq, t8, Operand(zero_reg));
398 } else { 398 } else {
399 ASSERT(and_then == kReturnAtEnd); 399 DCHECK(and_then == kReturnAtEnd);
400 Ret(eq, t8, Operand(zero_reg)); 400 Ret(eq, t8, Operand(zero_reg));
401 } 401 }
402 push(ra); 402 push(ra);
403 StoreBufferOverflowStub store_buffer_overflow = 403 StoreBufferOverflowStub store_buffer_overflow =
404 StoreBufferOverflowStub(isolate(), fp_mode); 404 StoreBufferOverflowStub(isolate(), fp_mode);
405 CallStub(&store_buffer_overflow); 405 CallStub(&store_buffer_overflow);
406 pop(ra); 406 pop(ra);
407 bind(&done); 407 bind(&done);
408 if (and_then == kReturnAtEnd) { 408 if (and_then == kReturnAtEnd) {
409 Ret(); 409 Ret();
410 } 410 }
411 } 411 }
412 412
413 413
414 // ----------------------------------------------------------------------------- 414 // -----------------------------------------------------------------------------
415 // Allocation support. 415 // Allocation support.
416 416
417 417
418 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, 418 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
419 Register scratch, 419 Register scratch,
420 Label* miss) { 420 Label* miss) {
421 Label same_contexts; 421 Label same_contexts;
422 422
423 ASSERT(!holder_reg.is(scratch)); 423 DCHECK(!holder_reg.is(scratch));
424 ASSERT(!holder_reg.is(at)); 424 DCHECK(!holder_reg.is(at));
425 ASSERT(!scratch.is(at)); 425 DCHECK(!scratch.is(at));
426 426
427 // Load current lexical context from the stack frame. 427 // Load current lexical context from the stack frame.
428 ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); 428 ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
429 // In debug mode, make sure the lexical context is set. 429 // In debug mode, make sure the lexical context is set.
430 #ifdef DEBUG 430 #ifdef DEBUG
431 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext, 431 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
432 scratch, Operand(zero_reg)); 432 scratch, Operand(zero_reg));
433 #endif 433 #endif
434 434
435 // Load the native context of the current context. 435 // Load the native context of the current context.
(...skipping 135 matching lines...)
571 for (int i = 0; i < kNumberDictionaryProbes; i++) { 571 for (int i = 0; i < kNumberDictionaryProbes; i++) {
572 // Use reg2 for index calculations and keep the hash intact in reg0. 572 // Use reg2 for index calculations and keep the hash intact in reg0.
573 mov(reg2, reg0); 573 mov(reg2, reg0);
574 // Compute the masked index: (hash + i + i * i) & mask. 574 // Compute the masked index: (hash + i + i * i) & mask.
575 if (i > 0) { 575 if (i > 0) {
576 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i))); 576 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
577 } 577 }
578 and_(reg2, reg2, reg1); 578 and_(reg2, reg2, reg1);
579 579
580 // Scale the index by multiplying by the element size. 580 // Scale the index by multiplying by the element size.
581 ASSERT(SeededNumberDictionary::kEntrySize == 3); 581 DCHECK(SeededNumberDictionary::kEntrySize == 3);
582 dsll(at, reg2, 1); // 2x. 582 dsll(at, reg2, 1); // 2x.
583 daddu(reg2, reg2, at); // reg2 = reg2 * 3. 583 daddu(reg2, reg2, at); // reg2 = reg2 * 3.
584 584
585 // Check if the key is identical to the name. 585 // Check if the key is identical to the name.
586 dsll(at, reg2, kPointerSizeLog2); 586 dsll(at, reg2, kPointerSizeLog2);
587 daddu(reg2, elements, at); 587 daddu(reg2, elements, at);
588 588
589 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset)); 589 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
590 if (i != kNumberDictionaryProbes - 1) { 590 if (i != kNumberDictionaryProbes - 1) {
591 Branch(&done, eq, key, Operand(at)); 591 Branch(&done, eq, key, Operand(at));
(...skipping 22 matching lines...)
614 // Instruction macros. 614 // Instruction macros.
615 615
616 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { 616 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
617 if (rt.is_reg()) { 617 if (rt.is_reg()) {
618 addu(rd, rs, rt.rm()); 618 addu(rd, rs, rt.rm());
619 } else { 619 } else {
620 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 620 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
621 addiu(rd, rs, rt.imm64_); 621 addiu(rd, rs, rt.imm64_);
622 } else { 622 } else {
623 // li handles the relocation. 623 // li handles the relocation.
624 ASSERT(!rs.is(at)); 624 DCHECK(!rs.is(at));
625 li(at, rt); 625 li(at, rt);
626 addu(rd, rs, at); 626 addu(rd, rs, at);
627 } 627 }
628 } 628 }
629 } 629 }
630 630
631 631
632 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) { 632 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
633 if (rt.is_reg()) { 633 if (rt.is_reg()) {
634 daddu(rd, rs, rt.rm()); 634 daddu(rd, rs, rt.rm());
635 } else { 635 } else {
636 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 636 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
637 daddiu(rd, rs, rt.imm64_); 637 daddiu(rd, rs, rt.imm64_);
638 } else { 638 } else {
639 // li handles the relocation. 639 // li handles the relocation.
640 ASSERT(!rs.is(at)); 640 DCHECK(!rs.is(at));
641 li(at, rt); 641 li(at, rt);
642 daddu(rd, rs, at); 642 daddu(rd, rs, at);
643 } 643 }
644 } 644 }
645 } 645 }
646 646
647 647
648 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { 648 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
649 if (rt.is_reg()) { 649 if (rt.is_reg()) {
650 subu(rd, rs, rt.rm()); 650 subu(rd, rs, rt.rm());
651 } else { 651 } else {
652 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 652 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
653 addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm). 653 addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm).
654 } else { 654 } else {
655 // li handles the relocation. 655 // li handles the relocation.
656 ASSERT(!rs.is(at)); 656 DCHECK(!rs.is(at));
657 li(at, rt); 657 li(at, rt);
658 subu(rd, rs, at); 658 subu(rd, rs, at);
659 } 659 }
660 } 660 }
661 } 661 }
662 662
663 663
664 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { 664 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
665 if (rt.is_reg()) { 665 if (rt.is_reg()) {
666 dsubu(rd, rs, rt.rm()); 666 dsubu(rd, rs, rt.rm());
667 } else { 667 } else {
668 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 668 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
669 daddiu(rd, rs, -rt.imm64_); // No dsubiu instr, use daddiu(x, y, -imm). 669 daddiu(rd, rs, -rt.imm64_); // No dsubiu instr, use daddiu(x, y, -imm).
670 } else { 670 } else {
671 // li handles the relocation. 671 // li handles the relocation.
672 ASSERT(!rs.is(at)); 672 DCHECK(!rs.is(at));
673 li(at, rt); 673 li(at, rt);
674 dsubu(rd, rs, at); 674 dsubu(rd, rs, at);
675 } 675 }
676 } 676 }
677 } 677 }
678 678
679 679
680 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { 680 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
681 if (rt.is_reg()) { 681 if (rt.is_reg()) {
682 mul(rd, rs, rt.rm()); 682 mul(rd, rs, rt.rm());
683 } else { 683 } else {
684 // li handles the relocation. 684 // li handles the relocation.
685 ASSERT(!rs.is(at)); 685 DCHECK(!rs.is(at));
686 li(at, rt); 686 li(at, rt);
687 mul(rd, rs, at); 687 mul(rd, rs, at);
688 } 688 }
689 } 689 }
690 690
691 691
692 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { 692 void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
693 if (rt.is_reg()) { 693 if (rt.is_reg()) {
694 if (kArchVariant != kMips64r6) { 694 if (kArchVariant != kMips64r6) {
695 mult(rs, rt.rm()); 695 mult(rs, rt.rm());
696 mfhi(rd); 696 mfhi(rd);
697 } else { 697 } else {
698 muh(rd, rs, rt.rm()); 698 muh(rd, rs, rt.rm());
699 } 699 }
700 } else { 700 } else {
701 // li handles the relocation. 701 // li handles the relocation.
702 ASSERT(!rs.is(at)); 702 DCHECK(!rs.is(at));
703 li(at, rt); 703 li(at, rt);
704 if (kArchVariant != kMips64r6) { 704 if (kArchVariant != kMips64r6) {
705 mult(rs, at); 705 mult(rs, at);
706 mfhi(rd); 706 mfhi(rd);
707 } else { 707 } else {
708 muh(rd, rs, at); 708 muh(rd, rs, at);
709 } 709 }
710 } 710 }
711 } 711 }
712 712
713 713
714 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { 714 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
715 if (rt.is_reg()) { 715 if (rt.is_reg()) {
716 if (kArchVariant == kMips64r6) { 716 if (kArchVariant == kMips64r6) {
717 dmul(rd, rs, rt.rm()); 717 dmul(rd, rs, rt.rm());
718 } else { 718 } else {
719 dmult(rs, rt.rm()); 719 dmult(rs, rt.rm());
720 mflo(rd); 720 mflo(rd);
721 } 721 }
722 } else { 722 } else {
723 // li handles the relocation. 723 // li handles the relocation.
724 ASSERT(!rs.is(at)); 724 DCHECK(!rs.is(at));
725 li(at, rt); 725 li(at, rt);
726 if (kArchVariant == kMips64r6) { 726 if (kArchVariant == kMips64r6) {
727 dmul(rd, rs, at); 727 dmul(rd, rs, at);
728 } else { 728 } else {
729 dmult(rs, at); 729 dmult(rs, at);
730 mflo(rd); 730 mflo(rd);
731 } 731 }
732 } 732 }
733 } 733 }
734 734
735 735
736 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { 736 void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
737 if (rt.is_reg()) { 737 if (rt.is_reg()) {
738 if (kArchVariant == kMips64r6) { 738 if (kArchVariant == kMips64r6) {
739 dmuh(rd, rs, rt.rm()); 739 dmuh(rd, rs, rt.rm());
740 } else { 740 } else {
741 dmult(rs, rt.rm()); 741 dmult(rs, rt.rm());
742 mfhi(rd); 742 mfhi(rd);
743 } 743 }
744 } else { 744 } else {
745 // li handles the relocation. 745 // li handles the relocation.
746 ASSERT(!rs.is(at)); 746 DCHECK(!rs.is(at));
747 li(at, rt); 747 li(at, rt);
748 if (kArchVariant == kMips64r6) { 748 if (kArchVariant == kMips64r6) {
749 dmuh(rd, rs, at); 749 dmuh(rd, rs, at);
750 } else { 750 } else {
751 dmult(rs, at); 751 dmult(rs, at);
752 mfhi(rd); 752 mfhi(rd);
753 } 753 }
754 } 754 }
755 } 755 }
756 756
757 757
758 void MacroAssembler::Mult(Register rs, const Operand& rt) { 758 void MacroAssembler::Mult(Register rs, const Operand& rt) {
759 if (rt.is_reg()) { 759 if (rt.is_reg()) {
760 mult(rs, rt.rm()); 760 mult(rs, rt.rm());
761 } else { 761 } else {
762 // li handles the relocation. 762 // li handles the relocation.
763 ASSERT(!rs.is(at)); 763 DCHECK(!rs.is(at));
764 li(at, rt); 764 li(at, rt);
765 mult(rs, at); 765 mult(rs, at);
766 } 766 }
767 } 767 }
768 768
769 769
770 void MacroAssembler::Dmult(Register rs, const Operand& rt) { 770 void MacroAssembler::Dmult(Register rs, const Operand& rt) {
771 if (rt.is_reg()) { 771 if (rt.is_reg()) {
772 dmult(rs, rt.rm()); 772 dmult(rs, rt.rm());
773 } else { 773 } else {
774 // li handles the relocation. 774 // li handles the relocation.
775 ASSERT(!rs.is(at)); 775 DCHECK(!rs.is(at));
776 li(at, rt); 776 li(at, rt);
777 dmult(rs, at); 777 dmult(rs, at);
778 } 778 }
779 } 779 }
780 780
781 781
782 void MacroAssembler::Multu(Register rs, const Operand& rt) { 782 void MacroAssembler::Multu(Register rs, const Operand& rt) {
783 if (rt.is_reg()) { 783 if (rt.is_reg()) {
784 multu(rs, rt.rm()); 784 multu(rs, rt.rm());
785 } else { 785 } else {
786 // li handles the relocation. 786 // li handles the relocation.
787 ASSERT(!rs.is(at)); 787 DCHECK(!rs.is(at));
788 li(at, rt); 788 li(at, rt);
789 multu(rs, at); 789 multu(rs, at);
790 } 790 }
791 } 791 }
792 792
793 793
794 void MacroAssembler::Dmultu(Register rs, const Operand& rt) { 794 void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
795 if (rt.is_reg()) { 795 if (rt.is_reg()) {
796 dmultu(rs, rt.rm()); 796 dmultu(rs, rt.rm());
797 } else { 797 } else {
798 // li handles the relocation. 798 // li handles the relocation.
799 ASSERT(!rs.is(at)); 799 DCHECK(!rs.is(at));
800 li(at, rt); 800 li(at, rt);
801 dmultu(rs, at); 801 dmultu(rs, at);
802 } 802 }
803 } 803 }
804 804
805 805
806 void MacroAssembler::Div(Register rs, const Operand& rt) { 806 void MacroAssembler::Div(Register rs, const Operand& rt) {
807 if (rt.is_reg()) { 807 if (rt.is_reg()) {
808 div(rs, rt.rm()); 808 div(rs, rt.rm());
809 } else { 809 } else {
810 // li handles the relocation. 810 // li handles the relocation.
811 ASSERT(!rs.is(at)); 811 DCHECK(!rs.is(at));
812 li(at, rt); 812 li(at, rt);
813 div(rs, at); 813 div(rs, at);
814 } 814 }
815 } 815 }
816 816
817 817
818 void MacroAssembler::Ddiv(Register rs, const Operand& rt) { 818 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
819 if (rt.is_reg()) { 819 if (rt.is_reg()) {
820 ddiv(rs, rt.rm()); 820 ddiv(rs, rt.rm());
821 } else { 821 } else {
822 // li handles the relocation. 822 // li handles the relocation.
823 ASSERT(!rs.is(at)); 823 DCHECK(!rs.is(at));
824 li(at, rt); 824 li(at, rt);
825 ddiv(rs, at); 825 ddiv(rs, at);
826 } 826 }
827 } 827 }
828 828
829 829
830 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { 830 void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
831 if (kArchVariant != kMips64r6) { 831 if (kArchVariant != kMips64r6) {
832 if (rt.is_reg()) { 832 if (rt.is_reg()) {
833 ddiv(rs, rt.rm()); 833 ddiv(rs, rt.rm());
834 mflo(rd); 834 mflo(rd);
835 } else { 835 } else {
836 // li handles the relocation. 836 // li handles the relocation.
837 ASSERT(!rs.is(at)); 837 DCHECK(!rs.is(at));
838 li(at, rt); 838 li(at, rt);
839 ddiv(rs, at); 839 ddiv(rs, at);
840 mflo(rd); 840 mflo(rd);
841 } 841 }
842 } else { 842 } else {
843 if (rt.is_reg()) { 843 if (rt.is_reg()) {
844 ddiv(rd, rs, rt.rm()); 844 ddiv(rd, rs, rt.rm());
845 } else { 845 } else {
846 // li handles the relocation. 846 // li handles the relocation.
847 ASSERT(!rs.is(at)); 847 DCHECK(!rs.is(at));
848 li(at, rt); 848 li(at, rt);
849 ddiv(rd, rs, at); 849 ddiv(rd, rs, at);
850 } 850 }
851 } 851 }
852 } 852 }
853 853
854 854
855 void MacroAssembler::Divu(Register rs, const Operand& rt) { 855 void MacroAssembler::Divu(Register rs, const Operand& rt) {
856 if (rt.is_reg()) { 856 if (rt.is_reg()) {
857 divu(rs, rt.rm()); 857 divu(rs, rt.rm());
858 } else { 858 } else {
859 // li handles the relocation. 859 // li handles the relocation.
860 ASSERT(!rs.is(at)); 860 DCHECK(!rs.is(at));
861 li(at, rt); 861 li(at, rt);
862 divu(rs, at); 862 divu(rs, at);
863 } 863 }
864 } 864 }
865 865
866 866
867 void MacroAssembler::Ddivu(Register rs, const Operand& rt) { 867 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
868 if (rt.is_reg()) { 868 if (rt.is_reg()) {
869 ddivu(rs, rt.rm()); 869 ddivu(rs, rt.rm());
870 } else { 870 } else {
871 // li handles the relocation. 871 // li handles the relocation.
872 ASSERT(!rs.is(at)); 872 DCHECK(!rs.is(at));
873 li(at, rt); 873 li(at, rt);
874 ddivu(rs, at); 874 ddivu(rs, at);
875 } 875 }
876 } 876 }
877 877
878 878
879 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) { 879 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
880 if (kArchVariant != kMips64r6) { 880 if (kArchVariant != kMips64r6) {
881 if (rt.is_reg()) { 881 if (rt.is_reg()) {
882 ddiv(rs, rt.rm()); 882 ddiv(rs, rt.rm());
883 mfhi(rd); 883 mfhi(rd);
884 } else { 884 } else {
885 // li handles the relocation. 885 // li handles the relocation.
886 ASSERT(!rs.is(at)); 886 DCHECK(!rs.is(at));
887 li(at, rt); 887 li(at, rt);
888 ddiv(rs, at); 888 ddiv(rs, at);
889 mfhi(rd); 889 mfhi(rd);
890 } 890 }
891 } else { 891 } else {
892 if (rt.is_reg()) { 892 if (rt.is_reg()) {
893 dmod(rd, rs, rt.rm()); 893 dmod(rd, rs, rt.rm());
894 } else { 894 } else {
895 // li handles the relocation. 895 // li handles the relocation.
896 ASSERT(!rs.is(at)); 896 DCHECK(!rs.is(at));
897 li(at, rt); 897 li(at, rt);
898 dmod(rd, rs, at); 898 dmod(rd, rs, at);
899 } 899 }
900 } 900 }
901 } 901 }
902 902
903 903
904 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { 904 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
905 if (rt.is_reg()) { 905 if (rt.is_reg()) {
906 and_(rd, rs, rt.rm()); 906 and_(rd, rs, rt.rm());
907 } else { 907 } else {
908 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 908 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
909 andi(rd, rs, rt.imm64_); 909 andi(rd, rs, rt.imm64_);
910 } else { 910 } else {
911 // li handles the relocation. 911 // li handles the relocation.
912 ASSERT(!rs.is(at)); 912 DCHECK(!rs.is(at));
913 li(at, rt); 913 li(at, rt);
914 and_(rd, rs, at); 914 and_(rd, rs, at);
915 } 915 }
916 } 916 }
917 } 917 }
918 918
919 919
920 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { 920 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
921 if (rt.is_reg()) { 921 if (rt.is_reg()) {
922 or_(rd, rs, rt.rm()); 922 or_(rd, rs, rt.rm());
923 } else { 923 } else {
924 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 924 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
925 ori(rd, rs, rt.imm64_); 925 ori(rd, rs, rt.imm64_);
926 } else { 926 } else {
927 // li handles the relocation. 927 // li handles the relocation.
928 ASSERT(!rs.is(at)); 928 DCHECK(!rs.is(at));
929 li(at, rt); 929 li(at, rt);
930 or_(rd, rs, at); 930 or_(rd, rs, at);
931 } 931 }
932 } 932 }
933 } 933 }
934 934
935 935
936 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { 936 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
937 if (rt.is_reg()) { 937 if (rt.is_reg()) {
938 xor_(rd, rs, rt.rm()); 938 xor_(rd, rs, rt.rm());
939 } else { 939 } else {
940 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 940 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
941 xori(rd, rs, rt.imm64_); 941 xori(rd, rs, rt.imm64_);
942 } else { 942 } else {
943 // li handles the relocation. 943 // li handles the relocation.
944 ASSERT(!rs.is(at)); 944 DCHECK(!rs.is(at));
945 li(at, rt); 945 li(at, rt);
946 xor_(rd, rs, at); 946 xor_(rd, rs, at);
947 } 947 }
948 } 948 }
949 } 949 }
950 950
951 951
952 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { 952 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
953 if (rt.is_reg()) { 953 if (rt.is_reg()) {
954 nor(rd, rs, rt.rm()); 954 nor(rd, rs, rt.rm());
955 } else { 955 } else {
956 // li handles the relocation. 956 // li handles the relocation.
957 ASSERT(!rs.is(at)); 957 DCHECK(!rs.is(at));
958 li(at, rt); 958 li(at, rt);
959 nor(rd, rs, at); 959 nor(rd, rs, at);
960 } 960 }
961 } 961 }
962 962
963 963
964 void MacroAssembler::Neg(Register rs, const Operand& rt) { 964 void MacroAssembler::Neg(Register rs, const Operand& rt) {
965 ASSERT(rt.is_reg()); 965 DCHECK(rt.is_reg());
966 ASSERT(!at.is(rs)); 966 DCHECK(!at.is(rs));
967 ASSERT(!at.is(rt.rm())); 967 DCHECK(!at.is(rt.rm()));
968 li(at, -1); 968 li(at, -1);
969 xor_(rs, rt.rm(), at); 969 xor_(rs, rt.rm(), at);
970 } 970 }
971 971
972 972
973 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { 973 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
974 if (rt.is_reg()) { 974 if (rt.is_reg()) {
975 slt(rd, rs, rt.rm()); 975 slt(rd, rs, rt.rm());
976 } else { 976 } else {
977 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 977 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
978 slti(rd, rs, rt.imm64_); 978 slti(rd, rs, rt.imm64_);
979 } else { 979 } else {
980 // li handles the relocation. 980 // li handles the relocation.
981 ASSERT(!rs.is(at)); 981 DCHECK(!rs.is(at));
982 li(at, rt); 982 li(at, rt);
983 slt(rd, rs, at); 983 slt(rd, rs, at);
984 } 984 }
985 } 985 }
986 } 986 }
987 987
988 988
989 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { 989 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
990 if (rt.is_reg()) { 990 if (rt.is_reg()) {
991 sltu(rd, rs, rt.rm()); 991 sltu(rd, rs, rt.rm());
992 } else { 992 } else {
993 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) { 993 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
994 sltiu(rd, rs, rt.imm64_); 994 sltiu(rd, rs, rt.imm64_);
995 } else { 995 } else {
996 // li handles the relocation. 996 // li handles the relocation.
997 ASSERT(!rs.is(at)); 997 DCHECK(!rs.is(at));
998 li(at, rt); 998 li(at, rt);
999 sltu(rd, rs, at); 999 sltu(rd, rs, at);
1000 } 1000 }
1001 } 1001 }
1002 } 1002 }
1003 1003
1004 1004
1005 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { 1005 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1006 if (kArchVariant == kMips64r2) { 1006 if (kArchVariant == kMips64r2) {
1007 if (rt.is_reg()) { 1007 if (rt.is_reg()) {
(...skipping 46 matching lines...)
1054 swr(rd, rs); 1054 swr(rd, rs);
1055 swl(rd, MemOperand(rs.rm(), rs.offset() + 3)); 1055 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
1056 } 1056 }
1057 1057
1058 1058
1059 // Do 64-bit load from unaligned address. Note this only handles 1059 // Do 64-bit load from unaligned address. Note this only handles
1060 // the specific case of a 32-bit-aligned, but not 64-bit-aligned, address. 1060 // the specific case of a 32-bit-aligned, but not 64-bit-aligned, address.
1061 void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) { 1061 void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
1062 // The assert fails if the offset from the start of the object IS aligned. 1062 // The assert fails if the offset from the start of the object IS aligned.
1063 // ONLY use with known misalignment, since there is a performance cost. 1063 // ONLY use with known misalignment, since there is a performance cost.
1064 ASSERT((rs.offset() + kHeapObjectTag) & (kPointerSize - 1)); 1064 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1065 // TODO(plind): endian dependency. 1065 // TODO(plind): endian dependency.
1066 lwu(rd, rs); 1066 lwu(rd, rs);
1067 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); 1067 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1068 dsll32(scratch, scratch, 0); 1068 dsll32(scratch, scratch, 0);
1069 Daddu(rd, rd, scratch); 1069 Daddu(rd, rd, scratch);
1070 } 1070 }
1071 1071
1072 1072
1073 // Do 64-bit store to unaligned address. Note this only handles 1073 // Do 64-bit store to unaligned address. Note this only handles
1074 // the specific case of a 32-bit-aligned, but not 64-bit-aligned, address. 1074 // the specific case of a 32-bit-aligned, but not 64-bit-aligned, address.
1075 void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) { 1075 void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
1076 // The assert fails if the offset from the start of the object IS aligned. 1076 // The assert fails if the offset from the start of the object IS aligned.
1077 // ONLY use with known misalignment, since there is a performance cost. 1077 // ONLY use with known misalignment, since there is a performance cost.
1078 ASSERT((rs.offset() + kHeapObjectTag) & (kPointerSize - 1)); 1078 DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1079 // TODO(plind): endian dependency. 1079 // TODO(plind): endian dependency.
1080 sw(rd, rs); 1080 sw(rd, rs);
1081 dsrl32(scratch, rd, 0); 1081 dsrl32(scratch, rd, 0);
1082 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); 1082 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1083 } 1083 }
1084 1084
1085 1085
1086 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { 1086 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1087 AllowDeferredHandleDereference smi_check; 1087 AllowDeferredHandleDereference smi_check;
1088 if (value->IsSmi()) { 1088 if (value->IsSmi()) {
1089 li(dst, Operand(value), mode); 1089 li(dst, Operand(value), mode);
1090 } else { 1090 } else {
1091 ASSERT(value->IsHeapObject()); 1091 DCHECK(value->IsHeapObject());
1092 if (isolate()->heap()->InNewSpace(*value)) { 1092 if (isolate()->heap()->InNewSpace(*value)) {
1093 Handle<Cell> cell = isolate()->factory()->NewCell(value); 1093 Handle<Cell> cell = isolate()->factory()->NewCell(value);
1094 li(dst, Operand(cell)); 1094 li(dst, Operand(cell));
1095 ld(dst, FieldMemOperand(dst, Cell::kValueOffset)); 1095 ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
1096 } else { 1096 } else {
1097 li(dst, Operand(value)); 1097 li(dst, Operand(value));
1098 } 1098 }
1099 } 1099 }
1100 } 1100 }
1101 1101
1102 1102
1103 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { 1103 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1104 ASSERT(!j.is_reg()); 1104 DCHECK(!j.is_reg());
1105 BlockTrampolinePoolScope block_trampoline_pool(this); 1105 BlockTrampolinePoolScope block_trampoline_pool(this);
1106 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { 1106 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1107 // Normal load of an immediate value which does not need Relocation Info. 1107 // Normal load of an immediate value which does not need Relocation Info.
1108 if (is_int32(j.imm64_)) { 1108 if (is_int32(j.imm64_)) {
1109 if (is_int16(j.imm64_)) { 1109 if (is_int16(j.imm64_)) {
1110 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); 1110 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1111 } else if (!(j.imm64_ & kHiMask)) { 1111 } else if (!(j.imm64_ & kHiMask)) {
1112 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); 1112 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1113 } else if (!(j.imm64_ & kImm16Mask)) { 1113 } else if (!(j.imm64_ & kImm16Mask)) {
1114 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); 1114 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
(...skipping 153 matching lines...)
1268 li(a1, instructions * kInstrSize); 1268 li(a1, instructions * kInstrSize);
1269 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2); 1269 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1270 MultiPop(saved_regs); 1270 MultiPop(saved_regs);
1271 } 1271 }
1272 1272
1273 1273
1274 void MacroAssembler::Ext(Register rt, 1274 void MacroAssembler::Ext(Register rt,
1275 Register rs, 1275 Register rs,
1276 uint16_t pos, 1276 uint16_t pos,
1277 uint16_t size) { 1277 uint16_t size) {
1278 ASSERT(pos < 32); 1278 DCHECK(pos < 32);
1279 ASSERT(pos + size < 33); 1279 DCHECK(pos + size < 33);
1280 ext_(rt, rs, pos, size); 1280 ext_(rt, rs, pos, size);
1281 } 1281 }
1282 1282
1283 1283
1284 void MacroAssembler::Ins(Register rt, 1284 void MacroAssembler::Ins(Register rt,
1285 Register rs, 1285 Register rs,
1286 uint16_t pos, 1286 uint16_t pos,
1287 uint16_t size) { 1287 uint16_t size) {
1288 ASSERT(pos < 32); 1288 DCHECK(pos < 32);
1289 ASSERT(pos + size <= 32); 1289 DCHECK(pos + size <= 32);
1290 ASSERT(size != 0); 1290 DCHECK(size != 0);
1291 ins_(rt, rs, pos, size); 1291 ins_(rt, rs, pos, size);
1292 } 1292 }
1293 1293
1294 1294
1295 void MacroAssembler::Cvt_d_uw(FPURegister fd, 1295 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1296 FPURegister fs, 1296 FPURegister fs,
1297 FPURegister scratch) { 1297 FPURegister scratch) {
1298 // Move the data from fs to t8. 1298 // Move the data from fs to t8.
1299 mfc1(t8, fs); 1299 mfc1(t8, fs);
1300 Cvt_d_uw(fd, t8, scratch); 1300 Cvt_d_uw(fd, t8, scratch);
1301 } 1301 }
1302 1302
1303 1303
1304 void MacroAssembler::Cvt_d_uw(FPURegister fd, 1304 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1305 Register rs, 1305 Register rs,
1306 FPURegister scratch) { 1306 FPURegister scratch) {
1307 // Convert rs to a FP value in fd (and fd + 1). 1307 // Convert rs to a FP value in fd (and fd + 1).
1308 // We do this by converting rs minus the MSB to avoid sign conversion, 1308 // We do this by converting rs minus the MSB to avoid sign conversion,
1309 // then adding 2^31 to the result (if needed). 1309 // then adding 2^31 to the result (if needed).
1310 1310
1311 ASSERT(!fd.is(scratch)); 1311 DCHECK(!fd.is(scratch));
1312 ASSERT(!rs.is(t9)); 1312 DCHECK(!rs.is(t9));
1313 ASSERT(!rs.is(at)); 1313 DCHECK(!rs.is(at));
1314 1314
1315 // Save rs's MSB to t9. 1315 // Save rs's MSB to t9.
1316 Ext(t9, rs, 31, 1); 1316 Ext(t9, rs, 31, 1);
1317 // Remove rs's MSB. 1317 // Remove rs's MSB.
1318 Ext(at, rs, 0, 31); 1318 Ext(at, rs, 0, 31);
1319 // Move the result to fd. 1319 // Move the result to fd.
1320 mtc1(at, fd); 1320 mtc1(at, fd);
1321 mthc1(zero_reg, fd); 1321 mthc1(zero_reg, fd);
1322 1322
1323 // Convert fd to a real FP value. 1323 // Convert fd to a real FP value.
(...skipping 73 matching lines...)
1397 1397
1398 1398
1399 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { 1399 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1400 ceil_w_d(fd, fs); 1400 ceil_w_d(fd, fs);
1401 } 1401 }
1402 1402
1403 1403
1404 void MacroAssembler::Trunc_uw_d(FPURegister fd, 1404 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1405 Register rs, 1405 Register rs,
1406 FPURegister scratch) { 1406 FPURegister scratch) {
1407 ASSERT(!fd.is(scratch)); 1407 DCHECK(!fd.is(scratch));
1408 ASSERT(!rs.is(at)); 1408 DCHECK(!rs.is(at));
1409 1409
1410 // Load 2^31 into scratch as its float representation. 1410 // Load 2^31 into scratch as its float representation.
1411 li(at, 0x41E00000); 1411 li(at, 0x41E00000);
1412 mtc1(zero_reg, scratch); 1412 mtc1(zero_reg, scratch);
1413 mthc1(at, scratch); 1413 mthc1(at, scratch);
1414 // Test if scratch > fd. 1414 // Test if scratch > fd.
1415 // If fd < 2^31 we can convert it normally. 1415 // If fd < 2^31 we can convert it normally.
1416 Label simple_convert; 1416 Label simple_convert;
1417 BranchF(&simple_convert, NULL, lt, fd, scratch); 1417 BranchF(&simple_convert, NULL, lt, fd, scratch);
1418 1418
(...skipping 14 matching lines...)
1433 bind(&done); 1433 bind(&done);
1434 } 1434 }
1435 1435
1436 1436
1437 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, 1437 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
1438 FPURegister ft, FPURegister scratch) { 1438 FPURegister ft, FPURegister scratch) {
1439 if (0) { // TODO(plind): find reasonable arch-variant symbol names. 1439 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
1440 madd_d(fd, fr, fs, ft); 1440 madd_d(fd, fr, fs, ft);
1441 } else { 1441 } else {
1442 // Cannot change the source registers' values. 1442 // Cannot change the source registers' values.
1443 ASSERT(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch)); 1443 DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
1444 mul_d(scratch, fs, ft); 1444 mul_d(scratch, fs, ft);
1445 add_d(fd, fr, scratch); 1445 add_d(fd, fr, scratch);
1446 } 1446 }
1447 } 1447 }
1448 1448
1449 1449
1450 void MacroAssembler::BranchF(Label* target, 1450 void MacroAssembler::BranchF(Label* target,
1451 Label* nan, 1451 Label* nan,
1452 Condition cc, 1452 Condition cc,
1453 FPURegister cmp1, 1453 FPURegister cmp1,
1454 FPURegister cmp2, 1454 FPURegister cmp2,
1455 BranchDelaySlot bd) { 1455 BranchDelaySlot bd) {
1456 BlockTrampolinePoolScope block_trampoline_pool(this); 1456 BlockTrampolinePoolScope block_trampoline_pool(this);
1457 if (cc == al) { 1457 if (cc == al) {
1458 Branch(bd, target); 1458 Branch(bd, target);
1459 return; 1459 return;
1460 } 1460 }
1461 1461
1462 ASSERT(nan || target); 1462 DCHECK(nan || target);
1463 // Check for unordered (NaN) cases. 1463 // Check for unordered (NaN) cases.
1464 if (nan) { 1464 if (nan) {
1465 if (kArchVariant != kMips64r6) { 1465 if (kArchVariant != kMips64r6) {
1466 c(UN, D, cmp1, cmp2); 1466 c(UN, D, cmp1, cmp2);
1467 bc1t(nan); 1467 bc1t(nan);
1468 } else { 1468 } else {
1469 // Use f31 for comparison result. It has to be unavailable to lithium 1469 // Use f31 for comparison result. It has to be unavailable to lithium
1470 // register allocator. 1470 // register allocator.
1471 ASSERT(!cmp1.is(f31) && !cmp2.is(f31)); 1471 DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
1472 cmp(UN, L, f31, cmp1, cmp2); 1472 cmp(UN, L, f31, cmp1, cmp2);
1473 bc1nez(nan, f31); 1473 bc1nez(nan, f31);
1474 } 1474 }
1475 } 1475 }
1476 1476
1477 if (kArchVariant != kMips64r6) { 1477 if (kArchVariant != kMips64r6) {
1478 if (target) { 1478 if (target) {
1479 // Here NaN cases were either handled by this function or are assumed to 1479 // Here NaN cases were either handled by this function or are assumed to
1480 // have been handled by the caller. 1480 // have been handled by the caller.
1481 switch (cc) { 1481 switch (cc) {
(...skipping 32 matching lines...)
1514 default: 1514 default:
1515 CHECK(0); 1515 CHECK(0);
1516 } 1516 }
1517 } 1517 }
1518 } else { 1518 } else {
1519 if (target) { 1519 if (target) {
1520 // Here NaN cases were either handled by this function or are assumed to 1520 // Here NaN cases were either handled by this function or are assumed to
1521 // have been handled by the caller. 1521 // have been handled by the caller.
1522 // Unsigned conditions are treated as their signed counterpart. 1522 // Unsigned conditions are treated as their signed counterpart.
1523 // Use f31 for the comparison result; it is valid in fp64 (FR = 1) mode. 1523 // Use f31 for the comparison result; it is valid in fp64 (FR = 1) mode.
1524 ASSERT(!cmp1.is(f31) && !cmp2.is(f31)); 1524 DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
1525 switch (cc) { 1525 switch (cc) {
1526 case lt: 1526 case lt:
1527 cmp(OLT, L, f31, cmp1, cmp2); 1527 cmp(OLT, L, f31, cmp1, cmp2);
1528 bc1nez(target, f31); 1528 bc1nez(target, f31);
1529 break; 1529 break;
1530 case gt: 1530 case gt:
1531 cmp(ULE, L, f31, cmp1, cmp2); 1531 cmp(ULE, L, f31, cmp1, cmp2);
1532 bc1eqz(target, f31); 1532 bc1eqz(target, f31);
1533 break; 1533 break;
1534 case ge: 1534 case ge:
(...skipping 104 matching lines...)
1639 } 1639 }
1640 1640
1641 1641
1642 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, 1642 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1643 Register result, 1643 Register result,
1644 DoubleRegister double_input, 1644 DoubleRegister double_input,
1645 Register scratch, 1645 Register scratch,
1646 DoubleRegister double_scratch, 1646 DoubleRegister double_scratch,
1647 Register except_flag, 1647 Register except_flag,
1648 CheckForInexactConversion check_inexact) { 1648 CheckForInexactConversion check_inexact) {
1649 ASSERT(!result.is(scratch)); 1649 DCHECK(!result.is(scratch));
1650 ASSERT(!double_input.is(double_scratch)); 1650 DCHECK(!double_input.is(double_scratch));
1651 ASSERT(!except_flag.is(scratch)); 1651 DCHECK(!except_flag.is(scratch));
1652 1652
1653 Label done; 1653 Label done;
1654 1654
1655 // Clear the except flag (0 = no exception) 1655 // Clear the except flag (0 = no exception)
1656 mov(except_flag, zero_reg); 1656 mov(except_flag, zero_reg);
1657 1657
1658 // Test for values that can be exactly represented as a signed 32-bit integer. 1658 // Test for values that can be exactly represented as a signed 32-bit integer.
1659 cvt_w_d(double_scratch, double_input); 1659 cvt_w_d(double_scratch, double_input);
1660 mfc1(result, double_scratch); 1660 mfc1(result, double_scratch);
1661 cvt_d_w(double_scratch, double_scratch); 1661 cvt_d_w(double_scratch, double_scratch);
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
1745 Daddu(sp, sp, Operand(kDoubleSize)); 1745 Daddu(sp, sp, Operand(kDoubleSize));
1746 pop(ra); 1746 pop(ra);
1747 1747
1748 bind(&done); 1748 bind(&done);
1749 } 1749 }
1750 1750
1751 1751
1752 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { 1752 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1753 Label done; 1753 Label done;
1754 DoubleRegister double_scratch = f12; 1754 DoubleRegister double_scratch = f12;
1755 ASSERT(!result.is(object)); 1755 DCHECK(!result.is(object));
1756 1756
1757 ldc1(double_scratch, 1757 ldc1(double_scratch,
1758 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); 1758 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1759 TryInlineTruncateDoubleToI(result, double_scratch, &done); 1759 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1760 1760
1761 // If we fell through then inline version didn't succeed - call stub instead. 1761 // If we fell through then inline version didn't succeed - call stub instead.
1762 push(ra); 1762 push(ra);
1763 DoubleToIStub stub(isolate(), 1763 DoubleToIStub stub(isolate(),
1764 object, 1764 object,
1765 result, 1765 result,
1766 HeapNumber::kValueOffset - kHeapObjectTag, 1766 HeapNumber::kValueOffset - kHeapObjectTag,
1767 true, 1767 true,
1768 true); 1768 true);
1769 CallStub(&stub); 1769 CallStub(&stub);
1770 pop(ra); 1770 pop(ra);
1771 1771
1772 bind(&done); 1772 bind(&done);
1773 } 1773 }
1774 1774
1775 1775
1776 void MacroAssembler::TruncateNumberToI(Register object, 1776 void MacroAssembler::TruncateNumberToI(Register object,
1777 Register result, 1777 Register result,
1778 Register heap_number_map, 1778 Register heap_number_map,
1779 Register scratch, 1779 Register scratch,
1780 Label* not_number) { 1780 Label* not_number) {
1781 Label done; 1781 Label done;
1782 ASSERT(!result.is(object)); 1782 DCHECK(!result.is(object));
1783 1783
1784 UntagAndJumpIfSmi(result, object, &done); 1784 UntagAndJumpIfSmi(result, object, &done);
1785 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); 1785 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1786 TruncateHeapNumberToI(result, object); 1786 TruncateHeapNumberToI(result, object);
1787 1787
1788 bind(&done); 1788 bind(&done);
1789 } 1789 }
1790 1790
1791 1791
1792 void MacroAssembler::GetLeastBitsFromSmi(Register dst, 1792 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1793 Register src, 1793 Register src,
1794 int num_least_bits) { 1794 int num_least_bits) {
1795 // Ext(dst, src, kSmiTagSize, num_least_bits); 1795 // Ext(dst, src, kSmiTagSize, num_least_bits);
1796 SmiUntag(dst, src); 1796 SmiUntag(dst, src);
1797 And(dst, dst, Operand((1 << num_least_bits) - 1)); 1797 And(dst, dst, Operand((1 << num_least_bits) - 1));
1798 } 1798 }
1799 1799
1800 1800
1801 void MacroAssembler::GetLeastBitsFromInt32(Register dst, 1801 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1802 Register src, 1802 Register src,
1803 int num_least_bits) { 1803 int num_least_bits) {
1804 ASSERT(!src.is(dst)); 1804 DCHECK(!src.is(dst));
1805 And(dst, src, Operand((1 << num_least_bits) - 1)); 1805 And(dst, src, Operand((1 << num_least_bits) - 1));
1806 } 1806 }
1807 1807
1808 1808
1809 // Emulated conditional branches do not emit a nop in the branch delay slot. 1809 // Emulated conditional branches do not emit a nop in the branch delay slot.
1810 // 1810 //
1811 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. 1811 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1812 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ 1812 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
1813 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ 1813 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1814 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) 1814 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1815 1815
1816 1816
1817 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { 1817 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1818 BranchShort(offset, bdslot); 1818 BranchShort(offset, bdslot);
1819 } 1819 }
1820 1820
1821 1821
1822 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs, 1822 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
(...skipping 71 matching lines...)
1894 // Emit a nop in the branch delay slot if required. 1894 // Emit a nop in the branch delay slot if required.
1895 if (bdslot == PROTECT) 1895 if (bdslot == PROTECT)
1896 nop(); 1896 nop();
1897 } 1897 }
1898 1898
1899 1899
1900 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, 1900 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1901 const Operand& rt, 1901 const Operand& rt,
1902 BranchDelaySlot bdslot) { 1902 BranchDelaySlot bdslot) {
1903 BRANCH_ARGS_CHECK(cond, rs, rt); 1903 BRANCH_ARGS_CHECK(cond, rs, rt);
1904 ASSERT(!rs.is(zero_reg)); 1904 DCHECK(!rs.is(zero_reg));
1905 Register r2 = no_reg; 1905 Register r2 = no_reg;
1906 Register scratch = at; 1906 Register scratch = at;
1907 1907
1908 if (rt.is_reg()) { 1908 if (rt.is_reg()) {
1909 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or 1909 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1910 // rt. 1910 // rt.
1911 BlockTrampolinePoolScope block_trampoline_pool(this); 1911 BlockTrampolinePoolScope block_trampoline_pool(this);
1912 r2 = rt.rm_; 1912 r2 = rt.rm_;
1913 switch (cond) { 1913 switch (cond) {
1914 case cc_always: 1914 case cc_always:
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after
1994 // Be careful to always use shifted_branch_offset only just before the 1994 // Be careful to always use shifted_branch_offset only just before the
1995 // branch instruction, as the location will be remembered for patching the 1995 // branch instruction, as the location will be remembered for patching the
1996 // target. 1996 // target.
1997 BlockTrampolinePoolScope block_trampoline_pool(this); 1997 BlockTrampolinePoolScope block_trampoline_pool(this);
1998 switch (cond) { 1998 switch (cond) {
1999 case cc_always: 1999 case cc_always:
2000 b(offset); 2000 b(offset);
2001 break; 2001 break;
2002 case eq: 2002 case eq:
2003 // We don't want any other register but scratch clobbered. 2003 // We don't want any other register but scratch clobbered.
2004 ASSERT(!scratch.is(rs)); 2004 DCHECK(!scratch.is(rs));
2005 r2 = scratch; 2005 r2 = scratch;
2006 li(r2, rt); 2006 li(r2, rt);
2007 beq(rs, r2, offset); 2007 beq(rs, r2, offset);
2008 break; 2008 break;
2009 case ne: 2009 case ne:
2010 // We don't want any other register but scratch clobbered. 2010 // We don't want any other register but scratch clobbered.
2011 ASSERT(!scratch.is(rs)); 2011 DCHECK(!scratch.is(rs));
2012 r2 = scratch; 2012 r2 = scratch;
2013 li(r2, rt); 2013 li(r2, rt);
2014 bne(rs, r2, offset); 2014 bne(rs, r2, offset);
2015 break; 2015 break;
2016 // Signed comparison. 2016 // Signed comparison.
2017 case greater: 2017 case greater:
2018 if (rt.imm64_ == 0) { 2018 if (rt.imm64_ == 0) {
2019 bgtz(rs, offset); 2019 bgtz(rs, offset);
2020 } else { 2020 } else {
2021 r2 = scratch; 2021 r2 = scratch;
(...skipping 224 matching lines...)
2246 // Be careful to always use shifted_branch_offset only just before the 2246 // Be careful to always use shifted_branch_offset only just before the
2247 // branch instruction, as the location will be remember for patching the 2247 // branch instruction, as the location will be remember for patching the
2248 // target. 2248 // target.
2249 BlockTrampolinePoolScope block_trampoline_pool(this); 2249 BlockTrampolinePoolScope block_trampoline_pool(this);
2250 switch (cond) { 2250 switch (cond) {
2251 case cc_always: 2251 case cc_always:
2252 offset = shifted_branch_offset(L, false); 2252 offset = shifted_branch_offset(L, false);
2253 b(offset); 2253 b(offset);
2254 break; 2254 break;
2255 case eq: 2255 case eq:
2256 ASSERT(!scratch.is(rs)); 2256 DCHECK(!scratch.is(rs));
2257 r2 = scratch; 2257 r2 = scratch;
2258 li(r2, rt); 2258 li(r2, rt);
2259 offset = shifted_branch_offset(L, false); 2259 offset = shifted_branch_offset(L, false);
2260 beq(rs, r2, offset); 2260 beq(rs, r2, offset);
2261 break; 2261 break;
2262 case ne: 2262 case ne:
2263 ASSERT(!scratch.is(rs)); 2263 DCHECK(!scratch.is(rs));
2264 r2 = scratch; 2264 r2 = scratch;
2265 li(r2, rt); 2265 li(r2, rt);
2266 offset = shifted_branch_offset(L, false); 2266 offset = shifted_branch_offset(L, false);
2267 bne(rs, r2, offset); 2267 bne(rs, r2, offset);
2268 break; 2268 break;
2269 // Signed comparison. 2269 // Signed comparison.
2270 case greater: 2270 case greater:
2271 if (rt.imm64_ == 0) { 2271 if (rt.imm64_ == 0) {
2272 offset = shifted_branch_offset(L, false); 2272 offset = shifted_branch_offset(L, false);
2273 bgtz(rs, offset); 2273 bgtz(rs, offset);
2274 } else { 2274 } else {
2275 ASSERT(!scratch.is(rs)); 2275 DCHECK(!scratch.is(rs));
2276 r2 = scratch; 2276 r2 = scratch;
2277 li(r2, rt); 2277 li(r2, rt);
2278 slt(scratch, r2, rs); 2278 slt(scratch, r2, rs);
2279 offset = shifted_branch_offset(L, false); 2279 offset = shifted_branch_offset(L, false);
2280 bne(scratch, zero_reg, offset); 2280 bne(scratch, zero_reg, offset);
2281 } 2281 }
2282 break; 2282 break;
2283 case greater_equal: 2283 case greater_equal:
2284 if (rt.imm64_ == 0) { 2284 if (rt.imm64_ == 0) {
2285 offset = shifted_branch_offset(L, false); 2285 offset = shifted_branch_offset(L, false);
2286 bgez(rs, offset); 2286 bgez(rs, offset);
2287 } else if (is_int16(rt.imm64_)) { 2287 } else if (is_int16(rt.imm64_)) {
2288 slti(scratch, rs, rt.imm64_); 2288 slti(scratch, rs, rt.imm64_);
2289 offset = shifted_branch_offset(L, false); 2289 offset = shifted_branch_offset(L, false);
2290 beq(scratch, zero_reg, offset); 2290 beq(scratch, zero_reg, offset);
2291 } else { 2291 } else {
2292 ASSERT(!scratch.is(rs)); 2292 DCHECK(!scratch.is(rs));
2293 r2 = scratch; 2293 r2 = scratch;
2294 li(r2, rt); 2294 li(r2, rt);
2295 slt(scratch, rs, r2); 2295 slt(scratch, rs, r2);
2296 offset = shifted_branch_offset(L, false); 2296 offset = shifted_branch_offset(L, false);
2297 beq(scratch, zero_reg, offset); 2297 beq(scratch, zero_reg, offset);
2298 } 2298 }
2299 break; 2299 break;
2300 case less: 2300 case less:
2301 if (rt.imm64_ == 0) { 2301 if (rt.imm64_ == 0) {
2302 offset = shifted_branch_offset(L, false); 2302 offset = shifted_branch_offset(L, false);
2303 bltz(rs, offset); 2303 bltz(rs, offset);
2304 } else if (is_int16(rt.imm64_)) { 2304 } else if (is_int16(rt.imm64_)) {
2305 slti(scratch, rs, rt.imm64_); 2305 slti(scratch, rs, rt.imm64_);
2306 offset = shifted_branch_offset(L, false); 2306 offset = shifted_branch_offset(L, false);
2307 bne(scratch, zero_reg, offset); 2307 bne(scratch, zero_reg, offset);
2308 } else { 2308 } else {
2309 ASSERT(!scratch.is(rs)); 2309 DCHECK(!scratch.is(rs));
2310 r2 = scratch; 2310 r2 = scratch;
2311 li(r2, rt); 2311 li(r2, rt);
2312 slt(scratch, rs, r2); 2312 slt(scratch, rs, r2);
2313 offset = shifted_branch_offset(L, false); 2313 offset = shifted_branch_offset(L, false);
2314 bne(scratch, zero_reg, offset); 2314 bne(scratch, zero_reg, offset);
2315 } 2315 }
2316 break; 2316 break;
2317 case less_equal: 2317 case less_equal:
2318 if (rt.imm64_ == 0) { 2318 if (rt.imm64_ == 0) {
2319 offset = shifted_branch_offset(L, false); 2319 offset = shifted_branch_offset(L, false);
2320 blez(rs, offset); 2320 blez(rs, offset);
2321 } else { 2321 } else {
2322 ASSERT(!scratch.is(rs)); 2322 DCHECK(!scratch.is(rs));
2323 r2 = scratch; 2323 r2 = scratch;
2324 li(r2, rt); 2324 li(r2, rt);
2325 slt(scratch, r2, rs); 2325 slt(scratch, r2, rs);
2326 offset = shifted_branch_offset(L, false); 2326 offset = shifted_branch_offset(L, false);
2327 beq(scratch, zero_reg, offset); 2327 beq(scratch, zero_reg, offset);
2328 } 2328 }
2329 break; 2329 break;
2330 // Unsigned comparison. 2330 // Unsigned comparison.
2331 case Ugreater: 2331 case Ugreater:
2332 if (rt.imm64_ == 0) { 2332 if (rt.imm64_ == 0) {
2333 offset = shifted_branch_offset(L, false); 2333 offset = shifted_branch_offset(L, false);
2334 bne(rs, zero_reg, offset); 2334 bne(rs, zero_reg, offset);
2335 } else { 2335 } else {
2336 ASSERT(!scratch.is(rs)); 2336 DCHECK(!scratch.is(rs));
2337 r2 = scratch; 2337 r2 = scratch;
2338 li(r2, rt); 2338 li(r2, rt);
2339 sltu(scratch, r2, rs); 2339 sltu(scratch, r2, rs);
2340 offset = shifted_branch_offset(L, false); 2340 offset = shifted_branch_offset(L, false);
2341 bne(scratch, zero_reg, offset); 2341 bne(scratch, zero_reg, offset);
2342 } 2342 }
2343 break; 2343 break;
2344 case Ugreater_equal: 2344 case Ugreater_equal:
2345 if (rt.imm64_ == 0) { 2345 if (rt.imm64_ == 0) {
2346 offset = shifted_branch_offset(L, false); 2346 offset = shifted_branch_offset(L, false);
2347 bgez(rs, offset); 2347 bgez(rs, offset);
2348 } else if (is_int16(rt.imm64_)) { 2348 } else if (is_int16(rt.imm64_)) {
2349 sltiu(scratch, rs, rt.imm64_); 2349 sltiu(scratch, rs, rt.imm64_);
2350 offset = shifted_branch_offset(L, false); 2350 offset = shifted_branch_offset(L, false);
2351 beq(scratch, zero_reg, offset); 2351 beq(scratch, zero_reg, offset);
2352 } else { 2352 } else {
2353 ASSERT(!scratch.is(rs)); 2353 DCHECK(!scratch.is(rs));
2354 r2 = scratch; 2354 r2 = scratch;
2355 li(r2, rt); 2355 li(r2, rt);
2356 sltu(scratch, rs, r2); 2356 sltu(scratch, rs, r2);
2357 offset = shifted_branch_offset(L, false); 2357 offset = shifted_branch_offset(L, false);
2358 beq(scratch, zero_reg, offset); 2358 beq(scratch, zero_reg, offset);
2359 } 2359 }
2360 break; 2360 break;
2361 case Uless: 2361 case Uless:
2362 if (rt.imm64_ == 0) { 2362 if (rt.imm64_ == 0) {
2363 // No code needs to be emitted. 2363 // No code needs to be emitted.
2364 return; 2364 return;
2365 } else if (is_int16(rt.imm64_)) { 2365 } else if (is_int16(rt.imm64_)) {
2366 sltiu(scratch, rs, rt.imm64_); 2366 sltiu(scratch, rs, rt.imm64_);
2367 offset = shifted_branch_offset(L, false); 2367 offset = shifted_branch_offset(L, false);
2368 bne(scratch, zero_reg, offset); 2368 bne(scratch, zero_reg, offset);
2369 } else { 2369 } else {
2370 ASSERT(!scratch.is(rs)); 2370 DCHECK(!scratch.is(rs));
2371 r2 = scratch; 2371 r2 = scratch;
2372 li(r2, rt); 2372 li(r2, rt);
2373 sltu(scratch, rs, r2); 2373 sltu(scratch, rs, r2);
2374 offset = shifted_branch_offset(L, false); 2374 offset = shifted_branch_offset(L, false);
2375 bne(scratch, zero_reg, offset); 2375 bne(scratch, zero_reg, offset);
2376 } 2376 }
2377 break; 2377 break;
2378 case Uless_equal: 2378 case Uless_equal:
2379 if (rt.imm64_ == 0) { 2379 if (rt.imm64_ == 0) {
2380 offset = shifted_branch_offset(L, false); 2380 offset = shifted_branch_offset(L, false);
2381 beq(rs, zero_reg, offset); 2381 beq(rs, zero_reg, offset);
2382 } else { 2382 } else {
2383 ASSERT(!scratch.is(rs)); 2383 DCHECK(!scratch.is(rs));
2384 r2 = scratch; 2384 r2 = scratch;
2385 li(r2, rt); 2385 li(r2, rt);
2386 sltu(scratch, r2, rs); 2386 sltu(scratch, r2, rs);
2387 offset = shifted_branch_offset(L, false); 2387 offset = shifted_branch_offset(L, false);
2388 beq(scratch, zero_reg, offset); 2388 beq(scratch, zero_reg, offset);
2389 } 2389 }
2390 break; 2390 break;
2391 default: 2391 default:
2392 UNREACHABLE(); 2392 UNREACHABLE();
2393 } 2393 }
2394 } 2394 }
2395 // Check that the offset actually fits in an int16_t. 2395 // Check that the offset actually fits in an int16_t.
2396 ASSERT(is_int16(offset)); 2396 DCHECK(is_int16(offset));
2397 // Emit a nop in the branch delay slot if required. 2397 // Emit a nop in the branch delay slot if required.
2398 if (bdslot == PROTECT) 2398 if (bdslot == PROTECT)
2399 nop(); 2399 nop();
2400 } 2400 }
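Reviewer note, not part of the patch: when no single MIPS branch instruction matches the condition, the switch above synthesizes the comparison from a set-on-less-than plus a branch against zero_reg. A minimal C++ sketch of the signed greater-than-immediate case (helper name is illustrative only):

  #include <cstdint>
  // Sketch only: "branch if rs > imm" as emitted above -- li the immediate
  // into a scratch register, slt(scratch, imm_reg, rs), bne(scratch, zero_reg).
  bool GreaterBranchTaken(int64_t rs, int64_t imm) {
    int64_t scratch = (imm < rs) ? 1 : 0;  // slt: scratch = (imm < rs)
    return scratch != 0;                   // bne scratch, zero_reg
  }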
2401 2401
2402 2402
2403 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) { 2403 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2404 BranchAndLinkShort(offset, bdslot); 2404 BranchAndLinkShort(offset, bdslot);
2405 } 2405 }
2406 2406
(...skipping 274 matching lines...)
2681 nop(); 2681 nop();
2682 offset = shifted_branch_offset(L, false); 2682 offset = shifted_branch_offset(L, false);
2683 bal(offset); 2683 bal(offset);
2684 break; 2684 break;
2685 2685
2686 default: 2686 default:
2687 UNREACHABLE(); 2687 UNREACHABLE();
2688 } 2688 }
2689 } 2689 }
2690 // Check that the offset actually fits in an int16_t. 2690 // Check that the offset actually fits in an int16_t.
2691 ASSERT(is_int16(offset)); 2691 DCHECK(is_int16(offset));
2692 2692
2693 // Emit a nop in the branch delay slot if required. 2693 // Emit a nop in the branch delay slot if required.
2694 if (bdslot == PROTECT) 2694 if (bdslot == PROTECT)
2695 nop(); 2695 nop();
2696 } 2696 }
2697 2697
2698 2698
2699 void MacroAssembler::Jump(Register target, 2699 void MacroAssembler::Jump(Register target,
2700 Condition cond, 2700 Condition cond,
2701 Register rs, 2701 Register rs,
(...skipping 30 matching lines...)
2732 bind(&skip); 2732 bind(&skip);
2733 } 2733 }
2734 2734
2735 2735
2736 void MacroAssembler::Jump(Address target, 2736 void MacroAssembler::Jump(Address target,
2737 RelocInfo::Mode rmode, 2737 RelocInfo::Mode rmode,
2738 Condition cond, 2738 Condition cond,
2739 Register rs, 2739 Register rs,
2740 const Operand& rt, 2740 const Operand& rt,
2741 BranchDelaySlot bd) { 2741 BranchDelaySlot bd) {
2742 ASSERT(!RelocInfo::IsCodeTarget(rmode)); 2742 DCHECK(!RelocInfo::IsCodeTarget(rmode));
2743 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd); 2743 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2744 } 2744 }
2745 2745
2746 2746
2747 void MacroAssembler::Jump(Handle<Code> code, 2747 void MacroAssembler::Jump(Handle<Code> code,
2748 RelocInfo::Mode rmode, 2748 RelocInfo::Mode rmode,
2749 Condition cond, 2749 Condition cond,
2750 Register rs, 2750 Register rs,
2751 const Operand& rt, 2751 const Operand& rt,
2752 BranchDelaySlot bd) { 2752 BranchDelaySlot bd) {
2753 ASSERT(RelocInfo::IsCodeTarget(rmode)); 2753 DCHECK(RelocInfo::IsCodeTarget(rmode));
2754 AllowDeferredHandleDereference embedding_raw_address; 2754 AllowDeferredHandleDereference embedding_raw_address;
2755 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd); 2755 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2756 } 2756 }
2757 2757
2758 2758
2759 int MacroAssembler::CallSize(Register target, 2759 int MacroAssembler::CallSize(Register target,
2760 Condition cond, 2760 Condition cond,
2761 Register rs, 2761 Register rs,
2762 const Operand& rt, 2762 const Operand& rt,
2763 BranchDelaySlot bd) { 2763 BranchDelaySlot bd) {
(...skipping 25 matching lines...)
2789 jalr(target); 2789 jalr(target);
2790 } else { 2790 } else {
2791 BRANCH_ARGS_CHECK(cond, rs, rt); 2791 BRANCH_ARGS_CHECK(cond, rs, rt);
2792 Branch(2, NegateCondition(cond), rs, rt); 2792 Branch(2, NegateCondition(cond), rs, rt);
2793 jalr(target); 2793 jalr(target);
2794 } 2794 }
2795 // Emit a nop in the branch delay slot if required. 2795 // Emit a nop in the branch delay slot if required.
2796 if (bd == PROTECT) 2796 if (bd == PROTECT)
2797 nop(); 2797 nop();
2798 2798
2799 ASSERT_EQ(CallSize(target, cond, rs, rt, bd), 2799 DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
2800 SizeOfCodeGeneratedSince(&start)); 2800 SizeOfCodeGeneratedSince(&start));
2801 } 2801 }
2802 2802
2803 2803
2804 int MacroAssembler::CallSize(Address target, 2804 int MacroAssembler::CallSize(Address target,
2805 RelocInfo::Mode rmode, 2805 RelocInfo::Mode rmode,
2806 Condition cond, 2806 Condition cond,
2807 Register rs, 2807 Register rs,
2808 const Operand& rt, 2808 const Operand& rt,
2809 BranchDelaySlot bd) { 2809 BranchDelaySlot bd) {
(...skipping 10 matching lines...)
2820 BranchDelaySlot bd) { 2820 BranchDelaySlot bd) {
2821 BlockTrampolinePoolScope block_trampoline_pool(this); 2821 BlockTrampolinePoolScope block_trampoline_pool(this);
2822 Label start; 2822 Label start;
2823 bind(&start); 2823 bind(&start);
2824 int64_t target_int = reinterpret_cast<int64_t>(target); 2824 int64_t target_int = reinterpret_cast<int64_t>(target);
2825 // Must record previous source positions before the 2825 // Must record previous source positions before the
2826 // li() generates a new code target. 2826 // li() generates a new code target.
2827 positions_recorder()->WriteRecordedPositions(); 2827 positions_recorder()->WriteRecordedPositions();
2828 li(t9, Operand(target_int, rmode), ADDRESS_LOAD); 2828 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
2829 Call(t9, cond, rs, rt, bd); 2829 Call(t9, cond, rs, rt, bd);
2830 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd), 2830 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2831 SizeOfCodeGeneratedSince(&start)); 2831 SizeOfCodeGeneratedSince(&start));
2832 } 2832 }
2833 2833
2834 2834
2835 int MacroAssembler::CallSize(Handle<Code> code, 2835 int MacroAssembler::CallSize(Handle<Code> code,
2836 RelocInfo::Mode rmode, 2836 RelocInfo::Mode rmode,
2837 TypeFeedbackId ast_id, 2837 TypeFeedbackId ast_id,
2838 Condition cond, 2838 Condition cond,
2839 Register rs, 2839 Register rs,
2840 const Operand& rt, 2840 const Operand& rt,
2841 BranchDelaySlot bd) { 2841 BranchDelaySlot bd) {
2842 AllowDeferredHandleDereference using_raw_address; 2842 AllowDeferredHandleDereference using_raw_address;
2843 return CallSize(reinterpret_cast<Address>(code.location()), 2843 return CallSize(reinterpret_cast<Address>(code.location()),
2844 rmode, cond, rs, rt, bd); 2844 rmode, cond, rs, rt, bd);
2845 } 2845 }
2846 2846
2847 2847
2848 void MacroAssembler::Call(Handle<Code> code, 2848 void MacroAssembler::Call(Handle<Code> code,
2849 RelocInfo::Mode rmode, 2849 RelocInfo::Mode rmode,
2850 TypeFeedbackId ast_id, 2850 TypeFeedbackId ast_id,
2851 Condition cond, 2851 Condition cond,
2852 Register rs, 2852 Register rs,
2853 const Operand& rt, 2853 const Operand& rt,
2854 BranchDelaySlot bd) { 2854 BranchDelaySlot bd) {
2855 BlockTrampolinePoolScope block_trampoline_pool(this); 2855 BlockTrampolinePoolScope block_trampoline_pool(this);
2856 Label start; 2856 Label start;
2857 bind(&start); 2857 bind(&start);
2858 ASSERT(RelocInfo::IsCodeTarget(rmode)); 2858 DCHECK(RelocInfo::IsCodeTarget(rmode));
2859 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { 2859 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2860 SetRecordedAstId(ast_id); 2860 SetRecordedAstId(ast_id);
2861 rmode = RelocInfo::CODE_TARGET_WITH_ID; 2861 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2862 } 2862 }
2863 AllowDeferredHandleDereference embedding_raw_address; 2863 AllowDeferredHandleDereference embedding_raw_address;
2864 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd); 2864 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2865 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd), 2865 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2866 SizeOfCodeGeneratedSince(&start)); 2866 SizeOfCodeGeneratedSince(&start));
2867 } 2867 }
2868 2868
2869 2869
2870 void MacroAssembler::Ret(Condition cond, 2870 void MacroAssembler::Ret(Condition cond,
2871 Register rs, 2871 Register rs,
2872 const Operand& rt, 2872 const Operand& rt,
2873 BranchDelaySlot bd) { 2873 BranchDelaySlot bd) {
2874 Jump(ra, cond, rs, rt, bd); 2874 Jump(ra, cond, rs, rt, bd);
2875 } 2875 }
(...skipping 122 matching lines...)
2998 } 2998 }
2999 2999
3000 3000
3001 void MacroAssembler::Push(Handle<Object> handle) { 3001 void MacroAssembler::Push(Handle<Object> handle) {
3002 li(at, Operand(handle)); 3002 li(at, Operand(handle));
3003 push(at); 3003 push(at);
3004 } 3004 }
3005 3005
3006 3006
3007 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) { 3007 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
3008 ASSERT(!src.is(scratch)); 3008 DCHECK(!src.is(scratch));
3009 mov(scratch, src); 3009 mov(scratch, src);
3010 dsrl32(src, src, 0); 3010 dsrl32(src, src, 0);
3011 dsll32(src, src, 0); 3011 dsll32(src, src, 0);
3012 push(src); 3012 push(src);
3013 dsll32(scratch, scratch, 0); 3013 dsll32(scratch, scratch, 0);
3014 push(scratch); 3014 push(scratch);
3015 } 3015 }
3016 3016
3017 3017
3018 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) { 3018 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
3019 ASSERT(!dst.is(scratch)); 3019 DCHECK(!dst.is(scratch));
3020 pop(scratch); 3020 pop(scratch);
3021 dsrl32(scratch, scratch, 0); 3021 dsrl32(scratch, scratch, 0);
3022 pop(dst); 3022 pop(dst);
3023 dsrl32(dst, dst, 0); 3023 dsrl32(dst, dst, 0);
3024 dsll32(dst, dst, 0); 3024 dsll32(dst, dst, 0);
3025 or_(dst, dst, scratch); 3025 or_(dst, dst, scratch);
3026 } 3026 }
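Reviewer note, not part of the patch: a plain C++ sketch of the bit manipulation the dsrl32/dsll32 pairs above perform. The 64-bit value is split into two words, each left in the upper half of a register so it reads as a smi, and recombined on pop (helper names are illustrative):

  #include <cstdint>
  // Sketch only: split as performed by PushRegisterAsTwoSmis.
  void SplitAsTwoSmis(uint64_t v, uint64_t* high_smi, uint64_t* low_smi) {
    *high_smi = (v >> 32) << 32;  // dsrl32 + dsll32: high word in smi position
    *low_smi  = v << 32;          // dsll32: low word in smi position
  }
  // Sketch only: recombine as performed by PopRegisterAsTwoSmis.
  uint64_t CombineFromTwoSmis(uint64_t high_smi, uint64_t low_smi) {
    uint64_t high = (high_smi >> 32) << 32;  // dsrl32 + dsll32
    uint64_t low  = low_smi >> 32;           // dsrl32
    return high | low;                       // or_
  }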
3027 3027
3028 3028
3029 void MacroAssembler::DebugBreak() { 3029 void MacroAssembler::DebugBreak() {
3030 PrepareCEntryArgs(0); 3030 PrepareCEntryArgs(0);
3031 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate())); 3031 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
3032 CEntryStub ces(isolate(), 1); 3032 CEntryStub ces(isolate(), 1);
3033 ASSERT(AllowThisStubCall(&ces)); 3033 DCHECK(AllowThisStubCall(&ces));
3034 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 3034 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3035 } 3035 }
3036 3036
3037 3037
3038 // --------------------------------------------------------------------------- 3038 // ---------------------------------------------------------------------------
3039 // Exception handling. 3039 // Exception handling.
3040 3040
3041 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, 3041 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3042 int handler_index) { 3042 int handler_index) {
3043 // Adjust this code if not the case. 3043 // Adjust this code if not the case.
3044 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); 3044 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3045 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); 3045 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3046 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); 3046 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3047 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); 3047 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3048 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); 3048 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3049 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); 3049 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3050 3050
3051 // For the JSEntry handler, we must preserve a0-a3 and s0. 3051 // For the JSEntry handler, we must preserve a0-a3 and s0.
3052 // a5-a7 are available. We will build up the handler from the bottom by 3052 // a5-a7 are available. We will build up the handler from the bottom by
3053 // pushing on the stack. 3053 // pushing on the stack.
3054 // Set up the code object (a5) and the state (a6) for pushing. 3054 // Set up the code object (a5) and the state (a6) for pushing.
3055 unsigned state = 3055 unsigned state =
3056 StackHandler::IndexField::encode(handler_index) | 3056 StackHandler::IndexField::encode(handler_index) |
3057 StackHandler::KindField::encode(kind); 3057 StackHandler::KindField::encode(kind);
3058 li(a5, Operand(CodeObject()), CONSTANT_SIZE); 3058 li(a5, Operand(CodeObject()), CONSTANT_SIZE);
3059 li(a6, Operand(state)); 3059 li(a6, Operand(state));
3060 3060
3061 // Push the frame pointer, context, state, and code object. 3061 // Push the frame pointer, context, state, and code object.
3062 if (kind == StackHandler::JS_ENTRY) { 3062 if (kind == StackHandler::JS_ENTRY) {
3063 ASSERT_EQ(Smi::FromInt(0), 0); 3063 DCHECK_EQ(Smi::FromInt(0), 0);
3064 // The second zero_reg indicates no context. 3064 // The second zero_reg indicates no context.
3065 // The first zero_reg is the NULL frame pointer. 3065 // The first zero_reg is the NULL frame pointer.
3066 // The operands are reversed to match the order of MultiPush/Pop. 3066 // The operands are reversed to match the order of MultiPush/Pop.
3067 Push(zero_reg, zero_reg, a6, a5); 3067 Push(zero_reg, zero_reg, a6, a5);
3068 } else { 3068 } else {
3069 MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit()); 3069 MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit());
3070 } 3070 }
3071 3071
3072 // Link the current handler as the next handler. 3072 // Link the current handler as the next handler.
3073 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); 3073 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
(...skipping 107 matching lines...)
3181 JumpToHandlerEntry(); 3181 JumpToHandlerEntry();
3182 } 3182 }
3183 3183
3184 3184
3185 void MacroAssembler::Allocate(int object_size, 3185 void MacroAssembler::Allocate(int object_size,
3186 Register result, 3186 Register result,
3187 Register scratch1, 3187 Register scratch1,
3188 Register scratch2, 3188 Register scratch2,
3189 Label* gc_required, 3189 Label* gc_required,
3190 AllocationFlags flags) { 3190 AllocationFlags flags) {
3191 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); 3191 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3192 if (!FLAG_inline_new) { 3192 if (!FLAG_inline_new) {
3193 if (emit_debug_code()) { 3193 if (emit_debug_code()) {
3194 // Trash the registers to simulate an allocation failure. 3194 // Trash the registers to simulate an allocation failure.
3195 li(result, 0x7091); 3195 li(result, 0x7091);
3196 li(scratch1, 0x7191); 3196 li(scratch1, 0x7191);
3197 li(scratch2, 0x7291); 3197 li(scratch2, 0x7291);
3198 } 3198 }
3199 jmp(gc_required); 3199 jmp(gc_required);
3200 return; 3200 return;
3201 } 3201 }
3202 3202
3203 ASSERT(!result.is(scratch1)); 3203 DCHECK(!result.is(scratch1));
3204 ASSERT(!result.is(scratch2)); 3204 DCHECK(!result.is(scratch2));
3205 ASSERT(!scratch1.is(scratch2)); 3205 DCHECK(!scratch1.is(scratch2));
3206 ASSERT(!scratch1.is(t9)); 3206 DCHECK(!scratch1.is(t9));
3207 ASSERT(!scratch2.is(t9)); 3207 DCHECK(!scratch2.is(t9));
3208 ASSERT(!result.is(t9)); 3208 DCHECK(!result.is(t9));
3209 3209
3210 // Make object size into bytes. 3210 // Make object size into bytes.
3211 if ((flags & SIZE_IN_WORDS) != 0) { 3211 if ((flags & SIZE_IN_WORDS) != 0) {
3212 object_size *= kPointerSize; 3212 object_size *= kPointerSize;
3213 } 3213 }
3214 ASSERT(0 == (object_size & kObjectAlignmentMask)); 3214 DCHECK(0 == (object_size & kObjectAlignmentMask));
3215 3215
3216 // Check relative positions of allocation top and limit addresses. 3216 // Check relative positions of allocation top and limit addresses.
3217 // ARM adds additional checks to make sure the ldm instruction can be 3217 // ARM adds additional checks to make sure the ldm instruction can be
3218 // used. On MIPS we don't have ldm so we don't need additional checks either. 3218 // used. On MIPS we don't have ldm so we don't need additional checks either.
3219 ExternalReference allocation_top = 3219 ExternalReference allocation_top =
3220 AllocationUtils::GetAllocationTopReference(isolate(), flags); 3220 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3221 ExternalReference allocation_limit = 3221 ExternalReference allocation_limit =
3222 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 3222 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3223 3223
3224 intptr_t top = 3224 intptr_t top =
3225 reinterpret_cast<intptr_t>(allocation_top.address()); 3225 reinterpret_cast<intptr_t>(allocation_top.address());
3226 intptr_t limit = 3226 intptr_t limit =
3227 reinterpret_cast<intptr_t>(allocation_limit.address()); 3227 reinterpret_cast<intptr_t>(allocation_limit.address());
3228 ASSERT((limit - top) == kPointerSize); 3228 DCHECK((limit - top) == kPointerSize);
3229 3229
3230 // Set up allocation top address and object size registers. 3230 // Set up allocation top address and object size registers.
3231 Register topaddr = scratch1; 3231 Register topaddr = scratch1;
3232 li(topaddr, Operand(allocation_top)); 3232 li(topaddr, Operand(allocation_top));
3233 3233
3234 // This code stores a temporary value in t9. 3234 // This code stores a temporary value in t9.
3235 if ((flags & RESULT_CONTAINS_TOP) == 0) { 3235 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3236 // Load allocation top into result and allocation limit into t9. 3236 // Load allocation top into result and allocation limit into t9.
3237 ld(result, MemOperand(topaddr)); 3237 ld(result, MemOperand(topaddr));
3238 ld(t9, MemOperand(topaddr, kPointerSize)); 3238 ld(t9, MemOperand(topaddr, kPointerSize));
3239 } else { 3239 } else {
3240 if (emit_debug_code()) { 3240 if (emit_debug_code()) {
3241 // Assert that result actually contains top on entry. t9 is used 3241 // Assert that result actually contains top on entry. t9 is used
3242 // immediately below so this use of t9 does not cause a difference with 3242 // immediately below so this use of t9 does not cause a difference with
3243 // respect to register content between debug and release mode. 3243 // respect to register content between debug and release mode.
3244 ld(t9, MemOperand(topaddr)); 3244 ld(t9, MemOperand(topaddr));
3245 Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); 3245 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3246 } 3246 }
3247 // Load allocation limit into t9. Result already contains allocation top. 3247 // Load allocation limit into t9. Result already contains allocation top.
3248 ld(t9, MemOperand(topaddr, limit - top)); 3248 ld(t9, MemOperand(topaddr, limit - top));
3249 } 3249 }
3250 3250
3251 ASSERT(kPointerSize == kDoubleSize); 3251 DCHECK(kPointerSize == kDoubleSize);
3252 if (emit_debug_code()) { 3252 if (emit_debug_code()) {
3253 And(at, result, Operand(kDoubleAlignmentMask)); 3253 And(at, result, Operand(kDoubleAlignmentMask));
3254 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); 3254 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3255 } 3255 }
3256 3256
3257 // Calculate new top and bail out if new space is exhausted. Use result 3257 // Calculate new top and bail out if new space is exhausted. Use result
3258 // to calculate the new top. 3258 // to calculate the new top.
3259 Daddu(scratch2, result, Operand(object_size)); 3259 Daddu(scratch2, result, Operand(object_size));
3260 Branch(gc_required, Ugreater, scratch2, Operand(t9)); 3260 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3261 sd(scratch2, MemOperand(topaddr)); 3261 sd(scratch2, MemOperand(topaddr));
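Reviewer note, not part of the patch: the Daddu/Branch/sd triple above is the usual bump-pointer allocation. A hedged C++ sketch, relying on top and limit living in adjacent words as the earlier DCHECK((limit - top) == kPointerSize) verifies (names illustrative):

  #include <cstddef>
  #include <cstdint>
  // Sketch only: returns nullptr where the generated code branches to
  // gc_required; otherwise publishes the new allocation top.
  uint8_t* BumpAllocate(uint8_t** top_addr, uint8_t* limit, size_t object_size) {
    uint8_t* result = *top_addr;
    uint8_t* new_top = result + object_size;
    if (new_top > limit) return nullptr;  // new space exhausted
    *top_addr = new_top;
    return result;
  }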
(...skipping 15 matching lines...)
3277 if (emit_debug_code()) { 3277 if (emit_debug_code()) {
3278 // Trash the registers to simulate an allocation failure. 3278 // Trash the registers to simulate an allocation failure.
3279 li(result, 0x7091); 3279 li(result, 0x7091);
3280 li(scratch1, 0x7191); 3280 li(scratch1, 0x7191);
3281 li(scratch2, 0x7291); 3281 li(scratch2, 0x7291);
3282 } 3282 }
3283 jmp(gc_required); 3283 jmp(gc_required);
3284 return; 3284 return;
3285 } 3285 }
3286 3286
3287 ASSERT(!result.is(scratch1)); 3287 DCHECK(!result.is(scratch1));
3288 ASSERT(!result.is(scratch2)); 3288 DCHECK(!result.is(scratch2));
3289 ASSERT(!scratch1.is(scratch2)); 3289 DCHECK(!scratch1.is(scratch2));
3290 ASSERT(!object_size.is(t9)); 3290 DCHECK(!object_size.is(t9));
3291 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9)); 3291 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3292 3292
3293 // Check relative positions of allocation top and limit addresses. 3293 // Check relative positions of allocation top and limit addresses.
3294 // ARM adds additional checks to make sure the ldm instruction can be 3294 // ARM adds additional checks to make sure the ldm instruction can be
3295 // used. On MIPS we don't have ldm so we don't need additional checks either. 3295 // used. On MIPS we don't have ldm so we don't need additional checks either.
3296 ExternalReference allocation_top = 3296 ExternalReference allocation_top =
3297 AllocationUtils::GetAllocationTopReference(isolate(), flags); 3297 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3298 ExternalReference allocation_limit = 3298 ExternalReference allocation_limit =
3299 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 3299 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3300 intptr_t top = 3300 intptr_t top =
3301 reinterpret_cast<intptr_t>(allocation_top.address()); 3301 reinterpret_cast<intptr_t>(allocation_top.address());
3302 intptr_t limit = 3302 intptr_t limit =
3303 reinterpret_cast<intptr_t>(allocation_limit.address()); 3303 reinterpret_cast<intptr_t>(allocation_limit.address());
3304 ASSERT((limit - top) == kPointerSize); 3304 DCHECK((limit - top) == kPointerSize);
3305 3305
3306 // Set up allocation top address and object size registers. 3306 // Set up allocation top address and object size registers.
3307 Register topaddr = scratch1; 3307 Register topaddr = scratch1;
3308 li(topaddr, Operand(allocation_top)); 3308 li(topaddr, Operand(allocation_top));
3309 3309
3310 // This code stores a temporary value in t9. 3310 // This code stores a temporary value in t9.
3311 if ((flags & RESULT_CONTAINS_TOP) == 0) { 3311 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3312 // Load allocation top into result and allocation limit into t9. 3312 // Load allocation top into result and allocation limit into t9.
3313 ld(result, MemOperand(topaddr)); 3313 ld(result, MemOperand(topaddr));
3314 ld(t9, MemOperand(topaddr, kPointerSize)); 3314 ld(t9, MemOperand(topaddr, kPointerSize));
3315 } else { 3315 } else {
3316 if (emit_debug_code()) { 3316 if (emit_debug_code()) {
3317 // Assert that result actually contains top on entry. t9 is used 3317 // Assert that result actually contains top on entry. t9 is used
3318 // immediately below so this use of t9 does not cause a difference with 3318 // immediately below so this use of t9 does not cause a difference with
3319 // respect to register content between debug and release mode. 3319 // respect to register content between debug and release mode.
3320 ld(t9, MemOperand(topaddr)); 3320 ld(t9, MemOperand(topaddr));
3321 Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); 3321 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3322 } 3322 }
3323 // Load allocation limit into t9. Result already contains allocation top. 3323 // Load allocation limit into t9. Result already contains allocation top.
3324 ld(t9, MemOperand(topaddr, limit - top)); 3324 ld(t9, MemOperand(topaddr, limit - top));
3325 } 3325 }
3326 3326
3327 ASSERT(kPointerSize == kDoubleSize); 3327 DCHECK(kPointerSize == kDoubleSize);
3328 if (emit_debug_code()) { 3328 if (emit_debug_code()) {
3329 And(at, result, Operand(kDoubleAlignmentMask)); 3329 And(at, result, Operand(kDoubleAlignmentMask));
3330 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); 3330 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3331 } 3331 }
3332 3332
3333 // Calculate new top and bail out if new space is exhausted. Use result 3333 // Calculate new top and bail out if new space is exhausted. Use result
3334 // to calculate the new top. Object size may be in words so a shift is 3334 // to calculate the new top. Object size may be in words so a shift is
3335 // required to get the number of bytes. 3335 // required to get the number of bytes.
3336 if ((flags & SIZE_IN_WORDS) != 0) { 3336 if ((flags & SIZE_IN_WORDS) != 0) {
3337 dsll(scratch2, object_size, kPointerSizeLog2); 3337 dsll(scratch2, object_size, kPointerSizeLog2);
(...skipping 38 matching lines...)
3376 3376
3377 3377
3378 void MacroAssembler::AllocateTwoByteString(Register result, 3378 void MacroAssembler::AllocateTwoByteString(Register result,
3379 Register length, 3379 Register length,
3380 Register scratch1, 3380 Register scratch1,
3381 Register scratch2, 3381 Register scratch2,
3382 Register scratch3, 3382 Register scratch3,
3383 Label* gc_required) { 3383 Label* gc_required) {
3384 // Calculate the number of bytes needed for the characters in the string while 3384 // Calculate the number of bytes needed for the characters in the string while
3385 // observing object alignment. 3385 // observing object alignment.
3386 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3386 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3387 dsll(scratch1, length, 1); // Length in bytes, not chars. 3387 dsll(scratch1, length, 1); // Length in bytes, not chars.
3388 daddiu(scratch1, scratch1, 3388 daddiu(scratch1, scratch1,
3389 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); 3389 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3390 And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); 3390 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3391 3391
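Reviewer note, not part of the patch: the dsll/daddiu/And sequence above computes the aligned allocation size. A short C++ sketch, assuming kObjectAlignmentMask is the object alignment minus one:

  #include <cstddef>
  // Sketch only: 2 * length + header, rounded up to the object alignment.
  size_t TwoByteStringSizeInBytes(size_t length, size_t header, size_t mask) {
    return (2 * length + mask + header) & ~mask;
  }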
3392 // Allocate two-byte string in new space. 3392 // Allocate two-byte string in new space.
3393 Allocate(scratch1, 3393 Allocate(scratch1,
3394 result, 3394 result,
3395 scratch2, 3395 scratch2,
3396 scratch3, 3396 scratch3,
(...skipping 10 matching lines...)
3407 3407
3408 3408
3409 void MacroAssembler::AllocateAsciiString(Register result, 3409 void MacroAssembler::AllocateAsciiString(Register result,
3410 Register length, 3410 Register length,
3411 Register scratch1, 3411 Register scratch1,
3412 Register scratch2, 3412 Register scratch2,
3413 Register scratch3, 3413 Register scratch3,
3414 Label* gc_required) { 3414 Label* gc_required) {
3415 // Calculate the number of bytes needed for the characters in the string 3415 // Calculate the number of bytes needed for the characters in the string
3416 // while observing object alignment. 3416 // while observing object alignment.
3417 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3417 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3418 ASSERT(kCharSize == 1); 3418 DCHECK(kCharSize == 1);
3419 daddiu(scratch1, length, 3419 daddiu(scratch1, length,
3420 kObjectAlignmentMask + SeqOneByteString::kHeaderSize); 3420 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3421 And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); 3421 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3422 3422
3423 // Allocate ASCII string in new space. 3423 // Allocate ASCII string in new space.
3424 Allocate(scratch1, 3424 Allocate(scratch1,
3425 result, 3425 result,
3426 scratch2, 3426 scratch2,
3427 scratch3, 3427 scratch3,
3428 gc_required, 3428 gc_required,
(...skipping 124 matching lines...)
3553 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required); 3553 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3554 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); 3554 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3555 } 3555 }
3556 3556
3557 3557
3558 // Copies a fixed number of fields of heap objects from src to dst. 3558 // Copies a fixed number of fields of heap objects from src to dst.
3559 void MacroAssembler::CopyFields(Register dst, 3559 void MacroAssembler::CopyFields(Register dst,
3560 Register src, 3560 Register src,
3561 RegList temps, 3561 RegList temps,
3562 int field_count) { 3562 int field_count) {
3563 ASSERT((temps & dst.bit()) == 0); 3563 DCHECK((temps & dst.bit()) == 0);
3564 ASSERT((temps & src.bit()) == 0); 3564 DCHECK((temps & src.bit()) == 0);
3565 // Primitive implementation using only one temporary register. 3565 // Primitive implementation using only one temporary register.
3566 3566
3567 Register tmp = no_reg; 3567 Register tmp = no_reg;
3568 // Find a temp register in temps list. 3568 // Find a temp register in temps list.
3569 for (int i = 0; i < kNumRegisters; i++) { 3569 for (int i = 0; i < kNumRegisters; i++) {
3570 if ((temps & (1 << i)) != 0) { 3570 if ((temps & (1 << i)) != 0) {
3571 tmp.code_ = i; 3571 tmp.code_ = i;
3572 break; 3572 break;
3573 } 3573 }
3574 } 3574 }
3575 ASSERT(!tmp.is(no_reg)); 3575 DCHECK(!tmp.is(no_reg));
3576 3576
3577 for (int i = 0; i < field_count; i++) { 3577 for (int i = 0; i < field_count; i++) {
3578 ld(tmp, FieldMemOperand(src, i * kPointerSize)); 3578 ld(tmp, FieldMemOperand(src, i * kPointerSize));
3579 sd(tmp, FieldMemOperand(dst, i * kPointerSize)); 3579 sd(tmp, FieldMemOperand(dst, i * kPointerSize));
3580 } 3580 }
3581 } 3581 }
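Reviewer note, not part of the patch: a small C++ sketch of the temp-register scan above, treating RegList as the bit set of register codes it is (helper name illustrative):

  #include <cstdint>
  // Sketch only: pick the lowest register code whose bit is set in temps.
  // CopyFields DCHECKs that dst and src are not in the set and that a temp
  // register exists.
  int FirstRegisterFromList(uint64_t temps, int num_registers) {
    for (int i = 0; i < num_registers; i++) {
      if ((temps & (1ull << i)) != 0) return i;
    }
    return -1;  // no temp available
  }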
3582 3582
3583 3583
3584 void MacroAssembler::CopyBytes(Register src, 3584 void MacroAssembler::CopyBytes(Register src,
3585 Register dst, 3585 Register dst,
(...skipping 280 matching lines...)
3866 Move(v0, v1, src); 3866 Move(v0, v1, src);
3867 } 3867 }
3868 } 3868 }
3869 3869
3870 3870
3871 void MacroAssembler::MovToFloatParameters(DoubleRegister src1, 3871 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3872 DoubleRegister src2) { 3872 DoubleRegister src2) {
3873 if (!IsMipsSoftFloatABI) { 3873 if (!IsMipsSoftFloatABI) {
3874 const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14; 3874 const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
3875 if (src2.is(f12)) { 3875 if (src2.is(f12)) {
3876 ASSERT(!src1.is(fparg2)); 3876 DCHECK(!src1.is(fparg2));
3877 Move(fparg2, src2); 3877 Move(fparg2, src2);
3878 Move(f12, src1); 3878 Move(f12, src1);
3879 } else { 3879 } else {
3880 Move(f12, src1); 3880 Move(f12, src1);
3881 Move(fparg2, src2); 3881 Move(fparg2, src2);
3882 } 3882 }
3883 } else { 3883 } else {
3884 Move(a0, a1, src1); 3884 Move(a0, a1, src1);
3885 Move(a2, a3, src2); 3885 Move(a2, a3, src2);
3886 } 3886 }
(...skipping 17 matching lines...)
3904 3904
3905 // Check whether the expected and actual arguments count match. If not, 3905 // Check whether the expected and actual arguments count match. If not,
3906 // setup registers according to contract with ArgumentsAdaptorTrampoline: 3906 // setup registers according to contract with ArgumentsAdaptorTrampoline:
3907 // a0: actual arguments count 3907 // a0: actual arguments count
3908 // a1: function (passed through to callee) 3908 // a1: function (passed through to callee)
3909 // a2: expected arguments count 3909 // a2: expected arguments count
3910 3910
3911 // The code below is made a lot easier because the calling code already sets 3911 // The code below is made a lot easier because the calling code already sets
3912 // up actual and expected registers according to the contract if values are 3912 // up actual and expected registers according to the contract if values are
3913 // passed in registers. 3913 // passed in registers.
3914 ASSERT(actual.is_immediate() || actual.reg().is(a0)); 3914 DCHECK(actual.is_immediate() || actual.reg().is(a0));
3915 ASSERT(expected.is_immediate() || expected.reg().is(a2)); 3915 DCHECK(expected.is_immediate() || expected.reg().is(a2));
3916 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); 3916 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3917 3917
3918 if (expected.is_immediate()) { 3918 if (expected.is_immediate()) {
3919 ASSERT(actual.is_immediate()); 3919 DCHECK(actual.is_immediate());
3920 if (expected.immediate() == actual.immediate()) { 3920 if (expected.immediate() == actual.immediate()) {
3921 definitely_matches = true; 3921 definitely_matches = true;
3922 } else { 3922 } else {
3923 li(a0, Operand(actual.immediate())); 3923 li(a0, Operand(actual.immediate()));
3924 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3924 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3925 if (expected.immediate() == sentinel) { 3925 if (expected.immediate() == sentinel) {
3926 // Don't worry about adapting arguments for builtins that 3926 // Don't worry about adapting arguments for builtins that
3927 // don't want that done. Skip adaption code by making it look 3927 // don't want that done. Skip adaption code by making it look
3928 // like we have a match between expected and actual number of 3928 // like we have a match between expected and actual number of
3929 // arguments. 3929 // arguments.
(...skipping 32 matching lines...)
3962 } 3962 }
3963 } 3963 }
3964 3964
3965 3965
3966 void MacroAssembler::InvokeCode(Register code, 3966 void MacroAssembler::InvokeCode(Register code,
3967 const ParameterCount& expected, 3967 const ParameterCount& expected,
3968 const ParameterCount& actual, 3968 const ParameterCount& actual,
3969 InvokeFlag flag, 3969 InvokeFlag flag,
3970 const CallWrapper& call_wrapper) { 3970 const CallWrapper& call_wrapper) {
3971 // You can't call a function without a valid frame. 3971 // You can't call a function without a valid frame.
3972 ASSERT(flag == JUMP_FUNCTION || has_frame()); 3972 DCHECK(flag == JUMP_FUNCTION || has_frame());
3973 3973
3974 Label done; 3974 Label done;
3975 3975
3976 bool definitely_mismatches = false; 3976 bool definitely_mismatches = false;
3977 InvokePrologue(expected, actual, Handle<Code>::null(), code, 3977 InvokePrologue(expected, actual, Handle<Code>::null(), code,
3978 &done, &definitely_mismatches, flag, 3978 &done, &definitely_mismatches, flag,
3979 call_wrapper); 3979 call_wrapper);
3980 if (!definitely_mismatches) { 3980 if (!definitely_mismatches) {
3981 if (flag == CALL_FUNCTION) { 3981 if (flag == CALL_FUNCTION) {
3982 call_wrapper.BeforeCall(CallSize(code)); 3982 call_wrapper.BeforeCall(CallSize(code));
3983 Call(code); 3983 Call(code);
3984 call_wrapper.AfterCall(); 3984 call_wrapper.AfterCall();
3985 } else { 3985 } else {
3986 ASSERT(flag == JUMP_FUNCTION); 3986 DCHECK(flag == JUMP_FUNCTION);
3987 Jump(code); 3987 Jump(code);
3988 } 3988 }
3989 // Continue here if InvokePrologue does handle the invocation due to 3989 // Continue here if InvokePrologue does handle the invocation due to
3990 // mismatched parameter counts. 3990 // mismatched parameter counts.
3991 bind(&done); 3991 bind(&done);
3992 } 3992 }
3993 } 3993 }
3994 3994
3995 3995
3996 void MacroAssembler::InvokeFunction(Register function, 3996 void MacroAssembler::InvokeFunction(Register function,
3997 const ParameterCount& actual, 3997 const ParameterCount& actual,
3998 InvokeFlag flag, 3998 InvokeFlag flag,
3999 const CallWrapper& call_wrapper) { 3999 const CallWrapper& call_wrapper) {
4000 // You can't call a function without a valid frame. 4000 // You can't call a function without a valid frame.
4001 ASSERT(flag == JUMP_FUNCTION || has_frame()); 4001 DCHECK(flag == JUMP_FUNCTION || has_frame());
4002 4002
4003 // Contract with called JS functions requires that function is passed in a1. 4003 // Contract with called JS functions requires that function is passed in a1.
4004 ASSERT(function.is(a1)); 4004 DCHECK(function.is(a1));
4005 Register expected_reg = a2; 4005 Register expected_reg = a2;
4006 Register code_reg = a3; 4006 Register code_reg = a3;
4007 ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); 4007 ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4008 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 4008 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4009 // The argument count is stored as int32_t on 64-bit platforms. 4009 // The argument count is stored as int32_t on 64-bit platforms.
4010 // TODO(plind): Smi on 32-bit platforms. 4010 // TODO(plind): Smi on 32-bit platforms.
4011 lw(expected_reg, 4011 lw(expected_reg,
4012 FieldMemOperand(code_reg, 4012 FieldMemOperand(code_reg,
4013 SharedFunctionInfo::kFormalParameterCountOffset)); 4013 SharedFunctionInfo::kFormalParameterCountOffset));
4014 ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 4014 ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4015 ParameterCount expected(expected_reg); 4015 ParameterCount expected(expected_reg);
4016 InvokeCode(code_reg, expected, actual, flag, call_wrapper); 4016 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
4017 } 4017 }
4018 4018
4019 4019
4020 void MacroAssembler::InvokeFunction(Register function, 4020 void MacroAssembler::InvokeFunction(Register function,
4021 const ParameterCount& expected, 4021 const ParameterCount& expected,
4022 const ParameterCount& actual, 4022 const ParameterCount& actual,
4023 InvokeFlag flag, 4023 InvokeFlag flag,
4024 const CallWrapper& call_wrapper) { 4024 const CallWrapper& call_wrapper) {
4025 // You can't call a function without a valid frame. 4025 // You can't call a function without a valid frame.
4026 ASSERT(flag == JUMP_FUNCTION || has_frame()); 4026 DCHECK(flag == JUMP_FUNCTION || has_frame());
4027 4027
4028 // Contract with called JS functions requires that function is passed in a1. 4028 // Contract with called JS functions requires that function is passed in a1.
4029 ASSERT(function.is(a1)); 4029 DCHECK(function.is(a1));
4030 4030
4031 // Get the function and setup the context. 4031 // Get the function and setup the context.
4032 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 4032 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4033 4033
4034 // We call indirectly through the code field in the function to 4034 // We call indirectly through the code field in the function to
4035 // allow recompilation to take effect without changing any of the 4035 // allow recompilation to take effect without changing any of the
4036 // call sites. 4036 // call sites.
4037 ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 4037 ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4038 InvokeCode(a3, expected, actual, flag, call_wrapper); 4038 InvokeCode(a3, expected, actual, flag, call_wrapper);
4039 } 4039 }
(...skipping 23 matching lines...)
4063 Label* fail) { 4063 Label* fail) {
4064 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); 4064 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
4065 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 4065 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4066 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); 4066 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4067 } 4067 }
4068 4068
4069 4069
4070 void MacroAssembler::IsObjectJSStringType(Register object, 4070 void MacroAssembler::IsObjectJSStringType(Register object,
4071 Register scratch, 4071 Register scratch,
4072 Label* fail) { 4072 Label* fail) {
4073 ASSERT(kNotStringTag != 0); 4073 DCHECK(kNotStringTag != 0);
4074 4074
4075 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 4075 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4076 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 4076 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4077 And(scratch, scratch, Operand(kIsNotStringMask)); 4077 And(scratch, scratch, Operand(kIsNotStringMask));
4078 Branch(fail, ne, scratch, Operand(zero_reg)); 4078 Branch(fail, ne, scratch, Operand(zero_reg));
4079 } 4079 }
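Reviewer note, not part of the patch: the And/Branch pair above is just the string-tag predicate. As a sketch, with the mask passed in rather than taken from the real headers:

  #include <cstdint>
  // Sketch only: an instance type denotes a string iff the not-string bit is
  // clear.
  bool InstanceTypeIsString(uint32_t instance_type, uint32_t is_not_string_mask) {
    return (instance_type & is_not_string_mask) == 0;
  }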
4080 4080
4081 4081
4082 void MacroAssembler::IsObjectNameType(Register object, 4082 void MacroAssembler::IsObjectNameType(Register object,
4083 Register scratch, 4083 Register scratch,
(...skipping 78 matching lines...)
4162 4162
4163 // ----------------------------------------------------------------------------- 4163 // -----------------------------------------------------------------------------
4164 // Runtime calls. 4164 // Runtime calls.
4165 4165
4166 void MacroAssembler::CallStub(CodeStub* stub, 4166 void MacroAssembler::CallStub(CodeStub* stub,
4167 TypeFeedbackId ast_id, 4167 TypeFeedbackId ast_id,
4168 Condition cond, 4168 Condition cond,
4169 Register r1, 4169 Register r1,
4170 const Operand& r2, 4170 const Operand& r2,
4171 BranchDelaySlot bd) { 4171 BranchDelaySlot bd) {
4172 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 4172 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
4173 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, 4173 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4174 cond, r1, r2, bd); 4174 cond, r1, r2, bd);
4175 } 4175 }
4176 4176
4177 4177
4178 void MacroAssembler::TailCallStub(CodeStub* stub, 4178 void MacroAssembler::TailCallStub(CodeStub* stub,
4179 Condition cond, 4179 Condition cond,
4180 Register r1, 4180 Register r1,
4181 const Operand& r2, 4181 const Operand& r2,
4182 BranchDelaySlot bd) { 4182 BranchDelaySlot bd) {
4183 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd); 4183 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4184 } 4184 }
4185 4185
4186 4186
4187 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { 4187 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4188 int64_t offset = (ref0.address() - ref1.address()); 4188 int64_t offset = (ref0.address() - ref1.address());
4189 ASSERT(static_cast<int>(offset) == offset); 4189 DCHECK(static_cast<int>(offset) == offset);
4190 return static_cast<int>(offset); 4190 return static_cast<int>(offset);
4191 } 4191 }
4192 4192
4193 4193
4194 void MacroAssembler::CallApiFunctionAndReturn( 4194 void MacroAssembler::CallApiFunctionAndReturn(
4195 Register function_address, 4195 Register function_address,
4196 ExternalReference thunk_ref, 4196 ExternalReference thunk_ref,
4197 int stack_space, 4197 int stack_space,
4198 MemOperand return_value_operand, 4198 MemOperand return_value_operand,
4199 MemOperand* context_restore_operand) { 4199 MemOperand* context_restore_operand) {
4200 ExternalReference next_address = 4200 ExternalReference next_address =
4201 ExternalReference::handle_scope_next_address(isolate()); 4201 ExternalReference::handle_scope_next_address(isolate());
4202 const int kNextOffset = 0; 4202 const int kNextOffset = 0;
4203 const int kLimitOffset = AddressOffset( 4203 const int kLimitOffset = AddressOffset(
4204 ExternalReference::handle_scope_limit_address(isolate()), 4204 ExternalReference::handle_scope_limit_address(isolate()),
4205 next_address); 4205 next_address);
4206 const int kLevelOffset = AddressOffset( 4206 const int kLevelOffset = AddressOffset(
4207 ExternalReference::handle_scope_level_address(isolate()), 4207 ExternalReference::handle_scope_level_address(isolate()),
4208 next_address); 4208 next_address);
4209 4209
4210 ASSERT(function_address.is(a1) || function_address.is(a2)); 4210 DCHECK(function_address.is(a1) || function_address.is(a2));
4211 4211
4212 Label profiler_disabled; 4212 Label profiler_disabled;
4213 Label end_profiler_check; 4213 Label end_profiler_check;
4214 li(t9, Operand(ExternalReference::is_profiling_address(isolate()))); 4214 li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
4215 lb(t9, MemOperand(t9, 0)); 4215 lb(t9, MemOperand(t9, 0));
4216 Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); 4216 Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
4217 4217
4218 // Additional parameter is the address of the actual callback. 4218 // Additional parameter is the address of the actual callback.
4219 li(t9, Operand(thunk_ref)); 4219 li(t9, Operand(thunk_ref));
4220 jmp(&end_profiler_check); 4220 jmp(&end_profiler_check);
(...skipping 97 matching lines...)
4318 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { 4318 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4319 return has_frame_ || !stub->SometimesSetsUpAFrame(); 4319 return has_frame_ || !stub->SometimesSetsUpAFrame();
4320 } 4320 }
4321 4321
4322 4322
4323 void MacroAssembler::IndexFromHash(Register hash, Register index) { 4323 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4324 // If the hash field contains an array index pick it out. The assert checks 4324 // If the hash field contains an array index pick it out. The assert checks
4325 // that the constants for the maximum number of digits for an array index 4325 // that the constants for the maximum number of digits for an array index
4326 // cached in the hash field and the number of bits reserved for it do not 4326 // cached in the hash field and the number of bits reserved for it do not
4327 // conflict. 4327 // conflict.
4328 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < 4328 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4329 (1 << String::kArrayIndexValueBits)); 4329 (1 << String::kArrayIndexValueBits));
4330 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); 4330 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4331 } 4331 }
4332 4332
4333 4333
4334 void MacroAssembler::ObjectToDoubleFPURegister(Register object, 4334 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4335 FPURegister result, 4335 FPURegister result,
4336 Register scratch1, 4336 Register scratch1,
4337 Register scratch2, 4337 Register scratch2,
4338 Register heap_number_map, 4338 Register heap_number_map,
(...skipping 38 matching lines...)
4377 mtc1(scratch1, value); 4377 mtc1(scratch1, value);
4378 cvt_d_w(value, value); 4378 cvt_d_w(value, value);
4379 } 4379 }
4380 4380
4381 4381
4382 void MacroAssembler::AdduAndCheckForOverflow(Register dst, 4382 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4383 Register left, 4383 Register left,
4384 Register right, 4384 Register right,
4385 Register overflow_dst, 4385 Register overflow_dst,
4386 Register scratch) { 4386 Register scratch) {
4387 ASSERT(!dst.is(overflow_dst)); 4387 DCHECK(!dst.is(overflow_dst));
4388 ASSERT(!dst.is(scratch)); 4388 DCHECK(!dst.is(scratch));
4389 ASSERT(!overflow_dst.is(scratch)); 4389 DCHECK(!overflow_dst.is(scratch));
4390 ASSERT(!overflow_dst.is(left)); 4390 DCHECK(!overflow_dst.is(left));
4391 ASSERT(!overflow_dst.is(right)); 4391 DCHECK(!overflow_dst.is(right));
4392 4392
4393 if (left.is(right) && dst.is(left)) { 4393 if (left.is(right) && dst.is(left)) {
4394 ASSERT(!dst.is(t9)); 4394 DCHECK(!dst.is(t9));
4395 ASSERT(!scratch.is(t9)); 4395 DCHECK(!scratch.is(t9));
4396 ASSERT(!left.is(t9)); 4396 DCHECK(!left.is(t9));
4397 ASSERT(!right.is(t9)); 4397 DCHECK(!right.is(t9));
4398 ASSERT(!overflow_dst.is(t9)); 4398 DCHECK(!overflow_dst.is(t9));
4399 mov(t9, right); 4399 mov(t9, right);
4400 right = t9; 4400 right = t9;
4401 } 4401 }
4402 4402
4403 if (dst.is(left)) { 4403 if (dst.is(left)) {
4404 mov(scratch, left); // Preserve left. 4404 mov(scratch, left); // Preserve left.
4405 daddu(dst, left, right); // Left is overwritten. 4405 daddu(dst, left, right); // Left is overwritten.
4406 xor_(scratch, dst, scratch); // Original left. 4406 xor_(scratch, dst, scratch); // Original left.
4407 xor_(overflow_dst, dst, right); 4407 xor_(overflow_dst, dst, right);
4408 and_(overflow_dst, overflow_dst, scratch); 4408 and_(overflow_dst, overflow_dst, scratch);
(...skipping 10 matching lines...)
4419 and_(overflow_dst, scratch, overflow_dst); 4419 and_(overflow_dst, scratch, overflow_dst);
4420 } 4420 }
4421 } 4421 }
4422 4422
4423 4423
4424 void MacroAssembler::SubuAndCheckForOverflow(Register dst, 4424 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4425 Register left, 4425 Register left,
4426 Register right, 4426 Register right,
4427 Register overflow_dst, 4427 Register overflow_dst,
4428 Register scratch) { 4428 Register scratch) {
4429 ASSERT(!dst.is(overflow_dst)); 4429 DCHECK(!dst.is(overflow_dst));
4430 ASSERT(!dst.is(scratch)); 4430 DCHECK(!dst.is(scratch));
4431 ASSERT(!overflow_dst.is(scratch)); 4431 DCHECK(!overflow_dst.is(scratch));
4432 ASSERT(!overflow_dst.is(left)); 4432 DCHECK(!overflow_dst.is(left));
4433 ASSERT(!overflow_dst.is(right)); 4433 DCHECK(!overflow_dst.is(right));
4434 ASSERT(!scratch.is(left)); 4434 DCHECK(!scratch.is(left));
4435 ASSERT(!scratch.is(right)); 4435 DCHECK(!scratch.is(right));
4436 4436
4437 // This happens with some crankshaft code. Since Subu works fine if 4437 // This happens with some crankshaft code. Since Subu works fine if
4438 // left == right, let's not make that restriction here. 4438 // left == right, let's not make that restriction here.
4439 if (left.is(right)) { 4439 if (left.is(right)) {
4440 mov(dst, zero_reg); 4440 mov(dst, zero_reg);
4441 mov(overflow_dst, zero_reg); 4441 mov(overflow_dst, zero_reg);
4442 return; 4442 return;
4443 } 4443 }
4444 4444
4445 if (dst.is(left)) { 4445 if (dst.is(left)) {
(...skipping 80 matching lines...)
4526 zero_reg, 4526 zero_reg,
4527 Operand(zero_reg), 4527 Operand(zero_reg),
4528 bd); 4528 bd);
4529 } 4529 }
4530 4530
4531 4531
4532 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 4532 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4533 InvokeFlag flag, 4533 InvokeFlag flag,
4534 const CallWrapper& call_wrapper) { 4534 const CallWrapper& call_wrapper) {
4535 // You can't call a builtin without a valid frame. 4535 // You can't call a builtin without a valid frame.
4536 ASSERT(flag == JUMP_FUNCTION || has_frame()); 4536 DCHECK(flag == JUMP_FUNCTION || has_frame());
4537 4537
4538 GetBuiltinEntry(t9, id); 4538 GetBuiltinEntry(t9, id);
4539 if (flag == CALL_FUNCTION) { 4539 if (flag == CALL_FUNCTION) {
4540 call_wrapper.BeforeCall(CallSize(t9)); 4540 call_wrapper.BeforeCall(CallSize(t9));
4541 Call(t9); 4541 Call(t9);
4542 call_wrapper.AfterCall(); 4542 call_wrapper.AfterCall();
4543 } else { 4543 } else {
4544 ASSERT(flag == JUMP_FUNCTION); 4544 DCHECK(flag == JUMP_FUNCTION);
4545 Jump(t9); 4545 Jump(t9);
4546 } 4546 }
4547 } 4547 }
4548 4548
4549 4549
4550 void MacroAssembler::GetBuiltinFunction(Register target, 4550 void MacroAssembler::GetBuiltinFunction(Register target,
4551 Builtins::JavaScript id) { 4551 Builtins::JavaScript id) {
4552 // Load the builtins object into target register. 4552 // Load the builtins object into target register.
4553 ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 4553 ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4554 ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); 4554 ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4555 // Load the JavaScript builtin function from the builtins object. 4555 // Load the JavaScript builtin function from the builtins object.
4556 ld(target, FieldMemOperand(target, 4556 ld(target, FieldMemOperand(target,
4557 JSBuiltinsObject::OffsetOfFunctionWithId(id))); 4557 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4558 } 4558 }
4559 4559
4560 4560
4561 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { 4561 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4562 ASSERT(!target.is(a1)); 4562 DCHECK(!target.is(a1));
4563 GetBuiltinFunction(a1, id); 4563 GetBuiltinFunction(a1, id);
4564 // Load the code entry point from the builtins object. 4564 // Load the code entry point from the builtins object.
4565 ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 4565 ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4566 } 4566 }
4567 4567
4568 4568
4569 void MacroAssembler::SetCounter(StatsCounter* counter, int value, 4569 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4570 Register scratch1, Register scratch2) { 4570 Register scratch1, Register scratch2) {
4571 if (FLAG_native_code_counters && counter->Enabled()) { 4571 if (FLAG_native_code_counters && counter->Enabled()) {
4572 li(scratch1, Operand(value)); 4572 li(scratch1, Operand(value));
4573 li(scratch2, Operand(ExternalReference(counter))); 4573 li(scratch2, Operand(ExternalReference(counter)));
4574 sd(scratch1, MemOperand(scratch2)); 4574 sd(scratch1, MemOperand(scratch2));
4575 } 4575 }
4576 } 4576 }
4577 4577
4578 4578
4579 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, 4579 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4580 Register scratch1, Register scratch2) { 4580 Register scratch1, Register scratch2) {
4581 ASSERT(value > 0); 4581 DCHECK(value > 0);
4582 if (FLAG_native_code_counters && counter->Enabled()) { 4582 if (FLAG_native_code_counters && counter->Enabled()) {
4583 li(scratch2, Operand(ExternalReference(counter))); 4583 li(scratch2, Operand(ExternalReference(counter)));
4584 ld(scratch1, MemOperand(scratch2)); 4584 ld(scratch1, MemOperand(scratch2));
4585 Daddu(scratch1, scratch1, Operand(value)); 4585 Daddu(scratch1, scratch1, Operand(value));
4586 sd(scratch1, MemOperand(scratch2)); 4586 sd(scratch1, MemOperand(scratch2));
4587 } 4587 }
4588 } 4588 }
4589 4589
4590 4590
4591 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, 4591 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4592 Register scratch1, Register scratch2) { 4592 Register scratch1, Register scratch2) {
4593 ASSERT(value > 0); 4593 DCHECK(value > 0);
4594 if (FLAG_native_code_counters && counter->Enabled()) { 4594 if (FLAG_native_code_counters && counter->Enabled()) {
4595 li(scratch2, Operand(ExternalReference(counter))); 4595 li(scratch2, Operand(ExternalReference(counter)));
4596 ld(scratch1, MemOperand(scratch2)); 4596 ld(scratch1, MemOperand(scratch2));
4597 Dsubu(scratch1, scratch1, Operand(value)); 4597 Dsubu(scratch1, scratch1, Operand(value));
4598 sd(scratch1, MemOperand(scratch2)); 4598 sd(scratch1, MemOperand(scratch2));
4599 } 4599 }
4600 } 4600 }
4601 4601
4602 4602
4603 // ----------------------------------------------------------------------------- 4603 // -----------------------------------------------------------------------------
4604 // Debugging. 4604 // Debugging.
4605 4605
4606 void MacroAssembler::Assert(Condition cc, BailoutReason reason, 4606 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4607 Register rs, Operand rt) { 4607 Register rs, Operand rt) {
4608 if (emit_debug_code()) 4608 if (emit_debug_code())
4609 Check(cc, reason, rs, rt); 4609 Check(cc, reason, rs, rt);
4610 } 4610 }
4611 4611
4612 4612
4613 void MacroAssembler::AssertFastElements(Register elements) { 4613 void MacroAssembler::AssertFastElements(Register elements) {
4614 if (emit_debug_code()) { 4614 if (emit_debug_code()) {
4615 ASSERT(!elements.is(at)); 4615 DCHECK(!elements.is(at));
4616 Label ok; 4616 Label ok;
4617 push(elements); 4617 push(elements);
4618 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); 4618 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4619 LoadRoot(at, Heap::kFixedArrayMapRootIndex); 4619 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4620 Branch(&ok, eq, elements, Operand(at)); 4620 Branch(&ok, eq, elements, Operand(at));
4621 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); 4621 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4622 Branch(&ok, eq, elements, Operand(at)); 4622 Branch(&ok, eq, elements, Operand(at));
4623 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); 4623 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4624 Branch(&ok, eq, elements, Operand(at)); 4624 Branch(&ok, eq, elements, Operand(at));
4625 Abort(kJSObjectWithFastElementsMapHasSlowElements); 4625 Abort(kJSObjectWithFastElementsMapHasSlowElements);
(...skipping 42 matching lines...)
4668 } 4668 }
4669 // Will not return here. 4669 // Will not return here.
4670 if (is_trampoline_pool_blocked()) { 4670 if (is_trampoline_pool_blocked()) {
4671 // If the calling code cares about the exact number of 4671 // If the calling code cares about the exact number of
4672 // instructions generated, we insert padding here to keep the size 4672 // instructions generated, we insert padding here to keep the size
4673 // of the Abort macro constant. 4673 // of the Abort macro constant.
4674 // Currently in debug mode with debug_code enabled the number of 4674 // Currently in debug mode with debug_code enabled the number of
4675 // generated instructions is 10, so we use this as a maximum value. 4675 // generated instructions is 10, so we use this as a maximum value.
4676 static const int kExpectedAbortInstructions = 10; 4676 static const int kExpectedAbortInstructions = 10;
4677 int abort_instructions = InstructionsGeneratedSince(&abort_start); 4677 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4678 ASSERT(abort_instructions <= kExpectedAbortInstructions); 4678 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4679 while (abort_instructions++ < kExpectedAbortInstructions) { 4679 while (abort_instructions++ < kExpectedAbortInstructions) {
4680 nop(); 4680 nop();
4681 } 4681 }
4682 } 4682 }
4683 } 4683 }
4684 4684
4685 4685
4686 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { 4686 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4687 if (context_chain_length > 0) { 4687 if (context_chain_length > 0) {
4688 // Move up the chain of contexts to the context containing the slot. 4688 // Move up the chain of contexts to the context containing the slot.
(...skipping 170 matching lines...)
4859 // Remember: we only need to save every 2nd double FPU value. 4859 // Remember: we only need to save every 2nd double FPU value.
4860 for (int i = 0; i < kNumOfSavedRegisters; i++) { 4860 for (int i = 0; i < kNumOfSavedRegisters; i++) {
4861 FPURegister reg = FPURegister::from_code(2 * i); 4861 FPURegister reg = FPURegister::from_code(2 * i);
4862 sdc1(reg, MemOperand(sp, i * kDoubleSize)); 4862 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4863 } 4863 }
4864 } 4864 }
4865 4865
4866 // Reserve place for the return address, stack space and an optional slot 4866 // Reserve place for the return address, stack space and an optional slot
4867 // (used by the DirectCEntryStub to hold the return value if a struct is 4867 // (used by the DirectCEntryStub to hold the return value if a struct is
4868 // returned) and align the frame preparing for calling the runtime function. 4868 // returned) and align the frame preparing for calling the runtime function.
4869 ASSERT(stack_space >= 0); 4869 DCHECK(stack_space >= 0);
4870 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize)); 4870 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4871 if (frame_alignment > 0) { 4871 if (frame_alignment > 0) {
4872 ASSERT(IsPowerOf2(frame_alignment)); 4872 DCHECK(IsPowerOf2(frame_alignment));
4873 And(sp, sp, Operand(-frame_alignment)); // Align stack. 4873 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4874 } 4874 }
4875 4875
4876 // Set the exit frame sp value to point just before the return address 4876 // Set the exit frame sp value to point just before the return address
4877 // location. 4877 // location.
4878 daddiu(at, sp, kPointerSize); 4878 daddiu(at, sp, kPointerSize);
4879 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); 4879 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4880 } 4880 }
4881 4881
4882 4882
(...skipping 77 matching lines...)
4960 } 4960 }
4961 4961
4962 4962
4963 void MacroAssembler::AssertStackIsAligned() { 4963 void MacroAssembler::AssertStackIsAligned() {
4964 if (emit_debug_code()) { 4964 if (emit_debug_code()) {
4965 const int frame_alignment = ActivationFrameAlignment(); 4965 const int frame_alignment = ActivationFrameAlignment();
4966 const int frame_alignment_mask = frame_alignment - 1; 4966 const int frame_alignment_mask = frame_alignment - 1;
4967 4967
4968 if (frame_alignment > kPointerSize) { 4968 if (frame_alignment > kPointerSize) {
4969 Label alignment_as_expected; 4969 Label alignment_as_expected;
4970 ASSERT(IsPowerOf2(frame_alignment)); 4970 DCHECK(IsPowerOf2(frame_alignment));
4971 andi(at, sp, frame_alignment_mask); 4971 andi(at, sp, frame_alignment_mask);
4972 Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); 4972 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4973 // Don't use Check here, as it will call Runtime_Abort re-entering here. 4973 // Don't use Check here, as it will call Runtime_Abort re-entering here.
4974 stop("Unexpected stack alignment"); 4974 stop("Unexpected stack alignment");
4975 bind(&alignment_as_expected); 4975 bind(&alignment_as_expected);
4976 } 4976 }
4977 } 4977 }
4978 } 4978 }
4979 4979
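The andi against frame_alignment_mask above is the usual power-of-two alignment test: with frame_alignment a power of two, the low bits of an aligned pointer must all be zero. A tiny illustrative C++ equivalent (function name ours):

    #include <cstdint>

    // True iff sp is aligned to 'alignment', assuming alignment is a power of two.
    bool StackIsAligned(uint64_t sp, uint64_t alignment) {
      uint64_t mask = alignment - 1;  // e.g. 16 -> 0xF
      return (sp & mask) == 0;
    }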
4980 4980
4981 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( 4981 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4982 Register reg, 4982 Register reg,
4983 Register scratch, 4983 Register scratch,
4984 Label* not_power_of_two_or_zero) { 4984 Label* not_power_of_two_or_zero) {
4985 Dsubu(scratch, reg, Operand(1)); 4985 Dsubu(scratch, reg, Operand(1));
4986 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt, 4986 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4987 scratch, Operand(zero_reg)); 4987 scratch, Operand(zero_reg));
4988 and_(at, scratch, reg); // In the delay slot. 4988 and_(at, scratch, reg); // In the delay slot.
4989 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg)); 4989 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4990 } 4990 }
4991 4991
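JumpIfNotPowerOfTwoOrZero uses the classic clear-lowest-set-bit trick: (reg - 1) & reg is zero exactly when reg has at most one bit set. The assembler rejects zero via the signed "reg - 1 < 0" branch; the sketch below checks it explicitly instead:

    #include <cstdint>

    // True iff x is a power of two (zero is rejected explicitly here).
    bool IsPowerOfTwo(uint64_t x) {
      return x != 0 && ((x - 1) & x) == 0;
    }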
4992 4992
4993 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { 4993 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4994 ASSERT(!reg.is(overflow)); 4994 DCHECK(!reg.is(overflow));
4995 mov(overflow, reg); // Save original value. 4995 mov(overflow, reg); // Save original value.
4996 SmiTag(reg); 4996 SmiTag(reg);
4997 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0. 4997 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
4998 } 4998 }
4999 4999
5000 5000
5001 void MacroAssembler::SmiTagCheckOverflow(Register dst, 5001 void MacroAssembler::SmiTagCheckOverflow(Register dst,
5002 Register src, 5002 Register src,
5003 Register overflow) { 5003 Register overflow) {
5004 if (dst.is(src)) { 5004 if (dst.is(src)) {
5005 // Fall back to slower case. 5005 // Fall back to slower case.
5006 SmiTagCheckOverflow(dst, overflow); 5006 SmiTagCheckOverflow(dst, overflow);
5007 } else { 5007 } else {
5008 ASSERT(!dst.is(src)); 5008 DCHECK(!dst.is(src));
5009 ASSERT(!dst.is(overflow)); 5009 DCHECK(!dst.is(overflow));
5010 ASSERT(!src.is(overflow)); 5010 DCHECK(!src.is(overflow));
5011 SmiTag(dst, src); 5011 SmiTag(dst, src);
5012 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0. 5012 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
5013 } 5013 }
5014 } 5014 }
5015 5015
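The "(value ^ 2 * value) < 0" comment describes the overflow test for a one-bit smi tag: tagging doubles the value, and doubling flips the sign bit exactly when the value does not fit in a smi. A hedged C++ sketch of that classic case (31-bit smi values in a 32-bit word; on mips64 with 32-bit smi values the shift is wider but the sign-flip idea is the same):

    #include <cstdint>

    // Tags 'value' by shifting left one bit; reports overflow when the
    // original and tagged values disagree in their sign bit.
    bool SmiTagOverflows(int32_t value, int32_t* tagged) {
      int32_t t = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      *tagged = t;
      return (value ^ t) < 0;  // Sign changed -> value did not fit.
    }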
5016 5016
5017 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) { 5017 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
5018 if (SmiValuesAre32Bits()) { 5018 if (SmiValuesAre32Bits()) {
5019 lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); 5019 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5020 } else { 5020 } else {
5021 lw(dst, src); 5021 lw(dst, src);
5022 SmiUntag(dst); 5022 SmiUntag(dst);
5023 } 5023 }
5024 } 5024 }
5025 5025
5026 5026
5027 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) { 5027 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
5028 if (SmiValuesAre32Bits()) { 5028 if (SmiValuesAre32Bits()) {
 5029 // TODO(plind): not clear if lw or ld is faster here; need a micro-benchmark. 5029 // TODO(plind): not clear if lw or ld is faster here; need a micro-benchmark.
5030 lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); 5030 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5031 dsll(dst, dst, scale); 5031 dsll(dst, dst, scale);
5032 } else { 5032 } else {
5033 lw(dst, src); 5033 lw(dst, src);
5034 ASSERT(scale >= kSmiTagSize); 5034 DCHECK(scale >= kSmiTagSize);
5035 sll(dst, dst, scale - kSmiTagSize); 5035 sll(dst, dst, scale - kSmiTagSize);
5036 } 5036 }
5037 } 5037 }
5038 5038
5039 5039
5040 // Returns 2 values: the Smi and a scaled version of the int within the Smi. 5040 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
5041 void MacroAssembler::SmiLoadWithScale(Register d_smi, 5041 void MacroAssembler::SmiLoadWithScale(Register d_smi,
5042 Register d_scaled, 5042 Register d_scaled,
5043 MemOperand src, 5043 MemOperand src,
5044 int scale) { 5044 int scale) {
5045 if (SmiValuesAre32Bits()) { 5045 if (SmiValuesAre32Bits()) {
5046 ld(d_smi, src); 5046 ld(d_smi, src);
5047 dsra(d_scaled, d_smi, kSmiShift - scale); 5047 dsra(d_scaled, d_smi, kSmiShift - scale);
5048 } else { 5048 } else {
5049 lw(d_smi, src); 5049 lw(d_smi, src);
5050 ASSERT(scale >= kSmiTagSize); 5050 DCHECK(scale >= kSmiTagSize);
5051 sll(d_scaled, d_smi, scale - kSmiTagSize); 5051 sll(d_scaled, d_smi, scale - kSmiTagSize);
5052 } 5052 }
5053 } 5053 }
5054 5054
5055 5055
5056 // Returns 2 values: the untagged Smi (int32) and scaled version of that int. 5056 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
5057 void MacroAssembler::SmiLoadUntagWithScale(Register d_int, 5057 void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
5058 Register d_scaled, 5058 Register d_scaled,
5059 MemOperand src, 5059 MemOperand src,
5060 int scale) { 5060 int scale) {
5061 if (SmiValuesAre32Bits()) { 5061 if (SmiValuesAre32Bits()) {
5062 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset())); 5062 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
5063 dsll(d_scaled, d_int, scale); 5063 dsll(d_scaled, d_int, scale);
5064 } else { 5064 } else {
5065 lw(d_int, src); 5065 lw(d_int, src);
 5066 // Need both the int and the scaled int, so use two instructions. 5066 // Need both the int and the scaled int, so use two instructions.
5067 SmiUntag(d_int); 5067 SmiUntag(d_int);
5068 sll(d_scaled, d_int, scale); 5068 sll(d_scaled, d_int, scale);
5069 } 5069 }
5070 } 5070 }
5071 5071
5072 5072
5073 void MacroAssembler::UntagAndJumpIfSmi(Register dst, 5073 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5074 Register src, 5074 Register src,
5075 Label* smi_case) { 5075 Label* smi_case) {
5076 // ASSERT(!dst.is(src)); 5076 // DCHECK(!dst.is(src));
5077 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT); 5077 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5078 SmiUntag(dst, src); 5078 SmiUntag(dst, src);
5079 } 5079 }
5080 5080
5081 5081
5082 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, 5082 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
5083 Register src, 5083 Register src,
5084 Label* non_smi_case) { 5084 Label* non_smi_case) {
5085 // ASSERT(!dst.is(src)); 5085 // DCHECK(!dst.is(src));
5086 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT); 5086 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
5087 SmiUntag(dst, src); 5087 SmiUntag(dst, src);
5088 } 5088 }
5089 5089
5090 void MacroAssembler::JumpIfSmi(Register value, 5090 void MacroAssembler::JumpIfSmi(Register value,
5091 Label* smi_label, 5091 Label* smi_label,
5092 Register scratch, 5092 Register scratch,
5093 BranchDelaySlot bd) { 5093 BranchDelaySlot bd) {
5094 ASSERT_EQ(0, kSmiTag); 5094 DCHECK_EQ(0, kSmiTag);
5095 andi(scratch, value, kSmiTagMask); 5095 andi(scratch, value, kSmiTagMask);
5096 Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); 5096 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5097 } 5097 }
5098 5098
5099 void MacroAssembler::JumpIfNotSmi(Register value, 5099 void MacroAssembler::JumpIfNotSmi(Register value,
5100 Label* not_smi_label, 5100 Label* not_smi_label,
5101 Register scratch, 5101 Register scratch,
5102 BranchDelaySlot bd) { 5102 BranchDelaySlot bd) {
5103 ASSERT_EQ(0, kSmiTag); 5103 DCHECK_EQ(0, kSmiTag);
5104 andi(scratch, value, kSmiTagMask); 5104 andi(scratch, value, kSmiTagMask);
5105 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); 5105 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5106 } 5106 }
5107 5107
5108 5108
5109 void MacroAssembler::JumpIfNotBothSmi(Register reg1, 5109 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5110 Register reg2, 5110 Register reg2,
5111 Label* on_not_both_smi) { 5111 Label* on_not_both_smi) {
5112 STATIC_ASSERT(kSmiTag == 0); 5112 STATIC_ASSERT(kSmiTag == 0);
 5113 // TODO(plind): Find some better way to fix this assert issue. 5113 // TODO(plind): Find some better way to fix this assert issue.
5114 #if defined(__APPLE__) 5114 #if defined(__APPLE__)
5115 ASSERT_EQ(1, kSmiTagMask); 5115 DCHECK_EQ(1, kSmiTagMask);
5116 #else 5116 #else
5117 ASSERT_EQ((uint64_t)1, kSmiTagMask); 5117 DCHECK_EQ((uint64_t)1, kSmiTagMask);
5118 #endif 5118 #endif
5119 or_(at, reg1, reg2); 5119 or_(at, reg1, reg2);
5120 JumpIfNotSmi(at, on_not_both_smi); 5120 JumpIfNotSmi(at, on_not_both_smi);
5121 } 5121 }
5122 5122
5123 5123
5124 void MacroAssembler::JumpIfEitherSmi(Register reg1, 5124 void MacroAssembler::JumpIfEitherSmi(Register reg1,
5125 Register reg2, 5125 Register reg2,
5126 Label* on_either_smi) { 5126 Label* on_either_smi) {
5127 STATIC_ASSERT(kSmiTag == 0); 5127 STATIC_ASSERT(kSmiTag == 0);
 5128 // TODO(plind): Find some better way to fix this assert issue. 5128 // TODO(plind): Find some better way to fix this assert issue.
5129 #if defined(__APPLE__) 5129 #if defined(__APPLE__)
5130 ASSERT_EQ(1, kSmiTagMask); 5130 DCHECK_EQ(1, kSmiTagMask);
5131 #else 5131 #else
5132 ASSERT_EQ((uint64_t)1, kSmiTagMask); 5132 DCHECK_EQ((uint64_t)1, kSmiTagMask);
5133 #endif 5133 #endif
5134 // Both Smi tags must be 1 (not Smi). 5134 // Both Smi tags must be 1 (not Smi).
5135 and_(at, reg1, reg2); 5135 and_(at, reg1, reg2);
5136 JumpIfSmi(at, on_either_smi); 5136 JumpIfSmi(at, on_either_smi);
5137 } 5137 }
5138 5138
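JumpIfNotBothSmi and JumpIfEitherSmi fold two tag tests into one: with kSmiTag == 0 and kSmiTagMask == 1 (as asserted above), OR-ing two words keeps the tag bit clear only if both are smis, while AND-ing keeps it clear if at least one is. A small C++ sketch of the same bit logic (helper names ours):

    #include <cstdint>

    constexpr uint64_t kSmiTagMask = 1;  // Low bit: 0 = smi, 1 = heap object.

    bool IsSmi(uint64_t word)              { return (word & kSmiTagMask) == 0; }
    bool BothSmi(uint64_t a, uint64_t b)   { return ((a | b) & kSmiTagMask) == 0; }
    bool EitherSmi(uint64_t a, uint64_t b) { return ((a & b) & kSmiTagMask) == 0; }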
5139 5139
5140 void MacroAssembler::AssertNotSmi(Register object) { 5140 void MacroAssembler::AssertNotSmi(Register object) {
5141 if (emit_debug_code()) { 5141 if (emit_debug_code()) {
5142 STATIC_ASSERT(kSmiTag == 0); 5142 STATIC_ASSERT(kSmiTag == 0);
(...skipping 52 matching lines...)
5195 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex); 5195 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
5196 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch)); 5196 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
5197 pop(object); 5197 pop(object);
5198 bind(&done_checking); 5198 bind(&done_checking);
5199 } 5199 }
5200 } 5200 }
5201 5201
5202 5202
5203 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { 5203 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
5204 if (emit_debug_code()) { 5204 if (emit_debug_code()) {
5205 ASSERT(!reg.is(at)); 5205 DCHECK(!reg.is(at));
5206 LoadRoot(at, index); 5206 LoadRoot(at, index);
5207 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); 5207 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
5208 } 5208 }
5209 } 5209 }
5210 5210
5211 5211
5212 void MacroAssembler::JumpIfNotHeapNumber(Register object, 5212 void MacroAssembler::JumpIfNotHeapNumber(Register object,
5213 Register heap_number_map, 5213 Register heap_number_map,
5214 Register scratch, 5214 Register scratch,
5215 Label* on_not_heap_number) { 5215 Label* on_not_heap_number) {
(...skipping 126 matching lines...)
5342 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( 5342 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
5343 Register first, 5343 Register first,
5344 Register second, 5344 Register second,
5345 Register scratch1, 5345 Register scratch1,
5346 Register scratch2, 5346 Register scratch2,
5347 Label* failure) { 5347 Label* failure) {
5348 const int kFlatAsciiStringMask = 5348 const int kFlatAsciiStringMask =
5349 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 5349 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5350 const int kFlatAsciiStringTag = 5350 const int kFlatAsciiStringTag =
5351 kStringTag | kOneByteStringTag | kSeqStringTag; 5351 kStringTag | kOneByteStringTag | kSeqStringTag;
5352 ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed. 5352 DCHECK(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5353 andi(scratch1, first, kFlatAsciiStringMask); 5353 andi(scratch1, first, kFlatAsciiStringMask);
5354 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag)); 5354 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
5355 andi(scratch2, second, kFlatAsciiStringMask); 5355 andi(scratch2, second, kFlatAsciiStringMask);
5356 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag)); 5356 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
5357 } 5357 }
5358 5358
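The check above folds three independent tests (is a string, one-byte encoding, sequential representation) into one mask-and-compare per register. A sketch of the equivalent C++ predicate; the mask values below are assumptions for illustration (the encoding and not-string values match the DCHECKs elsewhere in this file, the representation mask is our stand-in):

    #include <cstdint>

    // Illustrative stand-ins for v8's instance-type encoding constants.
    constexpr uint32_t kIsNotStringMask          = 0x80;
    constexpr uint32_t kStringEncodingMask       = 0x04;
    constexpr uint32_t kStringRepresentationMask = 0x03;
    constexpr uint32_t kFlatOneByteStringTag     = 0x04;  // string | one-byte | seq.

    bool IsFlatOneByteString(uint32_t instance_type) {
      const uint32_t mask = kIsNotStringMask | kStringEncodingMask |
                            kStringRepresentationMask;
      return (instance_type & mask) == kFlatOneByteStringTag;
    }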
5359 5359
5360 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, 5360 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
5361 Register scratch, 5361 Register scratch,
5362 Label* failure) { 5362 Label* failure) {
(...skipping 37 matching lines...)
5400 5400
5401 andi(at, at, kStringRepresentationMask | kStringEncodingMask); 5401 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5402 li(scratch, Operand(encoding_mask)); 5402 li(scratch, Operand(encoding_mask));
5403 Check(eq, kUnexpectedStringType, at, Operand(scratch)); 5403 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5404 5404
5405 // TODO(plind): requires Smi size check code for mips32. 5405 // TODO(plind): requires Smi size check code for mips32.
5406 5406
5407 ld(at, FieldMemOperand(string, String::kLengthOffset)); 5407 ld(at, FieldMemOperand(string, String::kLengthOffset));
5408 Check(lt, kIndexIsTooLarge, index, Operand(at)); 5408 Check(lt, kIndexIsTooLarge, index, Operand(at));
5409 5409
5410 ASSERT(Smi::FromInt(0) == 0); 5410 DCHECK(Smi::FromInt(0) == 0);
5411 Check(ge, kIndexIsNegative, index, Operand(zero_reg)); 5411 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5412 } 5412 }
5413 5413
5414 5414
5415 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, 5415 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5416 int num_double_arguments, 5416 int num_double_arguments,
5417 Register scratch) { 5417 Register scratch) {
5418 int frame_alignment = ActivationFrameAlignment(); 5418 int frame_alignment = ActivationFrameAlignment();
5419 5419
5420 // n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots. 5420 // n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots.
5421 // O32: Up to four simple arguments are passed in registers a0..a3. 5421 // O32: Up to four simple arguments are passed in registers a0..a3.
5422 // Those four arguments must have reserved argument slots on the stack for 5422 // Those four arguments must have reserved argument slots on the stack for
5423 // mips, even though those argument slots are not normally used. 5423 // mips, even though those argument slots are not normally used.
5424 // Both ABIs: Remaining arguments are pushed on the stack, above (higher 5424 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
5425 // address than) the (O32) argument slots. (arg slot calculation handled by 5425 // address than) the (O32) argument slots. (arg slot calculation handled by
5426 // CalculateStackPassedWords()). 5426 // CalculateStackPassedWords()).
5427 int stack_passed_arguments = CalculateStackPassedWords( 5427 int stack_passed_arguments = CalculateStackPassedWords(
5428 num_reg_arguments, num_double_arguments); 5428 num_reg_arguments, num_double_arguments);
5429 if (frame_alignment > kPointerSize) { 5429 if (frame_alignment > kPointerSize) {
5430 // Make stack end at alignment and make room for num_arguments - 4 words 5430 // Make stack end at alignment and make room for num_arguments - 4 words
5431 // and the original value of sp. 5431 // and the original value of sp.
5432 mov(scratch, sp); 5432 mov(scratch, sp);
5433 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); 5433 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
 5434 ASSERT(IsPowerOf2(frame_alignment)); 5434 DCHECK(IsPowerOf2(frame_alignment));
5435 And(sp, sp, Operand(-frame_alignment)); 5435 And(sp, sp, Operand(-frame_alignment));
5436 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); 5436 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5437 } else { 5437 } else {
5438 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); 5438 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5439 } 5439 }
5440 } 5440 }
5441 5441
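The "And(sp, sp, Operand(-frame_alignment))" above rounds the stack pointer down to the alignment boundary: for a power-of-two alignment, -alignment is a mask with all bits set above the low zero bits. A one-function C++ sketch (name ours):

    #include <cstdint>

    // Round sp down to a power-of-two boundary, as PrepareCallCFunction does.
    uint64_t AlignDown(uint64_t sp, uint64_t alignment) {
      return sp & ~(alignment - 1);  // Equivalent to sp & -alignment.
    }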
5442 5442
5443 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, 5443 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5444 Register scratch) { 5444 Register scratch) {
(...skipping 24 matching lines...)
5469 5469
5470 void MacroAssembler::CallCFunction(Register function, 5470 void MacroAssembler::CallCFunction(Register function,
5471 int num_arguments) { 5471 int num_arguments) {
5472 CallCFunction(function, num_arguments, 0); 5472 CallCFunction(function, num_arguments, 0);
5473 } 5473 }
5474 5474
5475 5475
5476 void MacroAssembler::CallCFunctionHelper(Register function, 5476 void MacroAssembler::CallCFunctionHelper(Register function,
5477 int num_reg_arguments, 5477 int num_reg_arguments,
5478 int num_double_arguments) { 5478 int num_double_arguments) {
5479 ASSERT(has_frame()); 5479 DCHECK(has_frame());
5480 // Make sure that the stack is aligned before calling a C function unless 5480 // Make sure that the stack is aligned before calling a C function unless
5481 // running in the simulator. The simulator has its own alignment check which 5481 // running in the simulator. The simulator has its own alignment check which
5482 // provides more information. 5482 // provides more information.
 5483 // The argument slots are presumed to have been set up by 5483 // The argument slots are presumed to have been set up by
5484 // PrepareCallCFunction. The C function must be called via t9, for mips ABI. 5484 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5485 5485
5486 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 5486 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5487 if (emit_debug_code()) { 5487 if (emit_debug_code()) {
5488 int frame_alignment = base::OS::ActivationFrameAlignment(); 5488 int frame_alignment = base::OS::ActivationFrameAlignment();
5489 int frame_alignment_mask = frame_alignment - 1; 5489 int frame_alignment_mask = frame_alignment - 1;
5490 if (frame_alignment > kPointerSize) { 5490 if (frame_alignment > kPointerSize) {
5491 ASSERT(IsPowerOf2(frame_alignment)); 5491 DCHECK(IsPowerOf2(frame_alignment));
5492 Label alignment_as_expected; 5492 Label alignment_as_expected;
5493 And(at, sp, Operand(frame_alignment_mask)); 5493 And(at, sp, Operand(frame_alignment_mask));
5494 Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); 5494 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5495 // Don't use Check here, as it will call Runtime_Abort possibly 5495 // Don't use Check here, as it will call Runtime_Abort possibly
5496 // re-entering here. 5496 // re-entering here.
5497 stop("Unexpected alignment in CallCFunction"); 5497 stop("Unexpected alignment in CallCFunction");
5498 bind(&alignment_as_expected); 5498 bind(&alignment_as_expected);
5499 } 5499 }
5500 } 5500 }
5501 #endif // V8_HOST_ARCH_MIPS 5501 #endif // V8_HOST_ARCH_MIPS
(...skipping 134 matching lines...)
5636 Branch(if_deprecated, ne, scratch, Operand(zero_reg)); 5636 Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5637 } 5637 }
5638 } 5638 }
5639 5639
5640 5640
5641 void MacroAssembler::JumpIfBlack(Register object, 5641 void MacroAssembler::JumpIfBlack(Register object,
5642 Register scratch0, 5642 Register scratch0,
5643 Register scratch1, 5643 Register scratch1,
5644 Label* on_black) { 5644 Label* on_black) {
5645 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. 5645 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5646 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 5646 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5647 } 5647 }
5648 5648
5649 5649
5650 void MacroAssembler::HasColor(Register object, 5650 void MacroAssembler::HasColor(Register object,
5651 Register bitmap_scratch, 5651 Register bitmap_scratch,
5652 Register mask_scratch, 5652 Register mask_scratch,
5653 Label* has_color, 5653 Label* has_color,
5654 int first_bit, 5654 int first_bit,
5655 int second_bit) { 5655 int second_bit) {
5656 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); 5656 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5657 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); 5657 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5658 5658
5659 GetMarkBits(object, bitmap_scratch, mask_scratch); 5659 GetMarkBits(object, bitmap_scratch, mask_scratch);
5660 5660
5661 Label other_color; 5661 Label other_color;
5662 // Note that we are using a 4-byte aligned 8-byte load. 5662 // Note that we are using a 4-byte aligned 8-byte load.
5663 Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5663 Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5664 And(t8, t9, Operand(mask_scratch)); 5664 And(t8, t9, Operand(mask_scratch));
5665 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg)); 5665 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5666 // Shift left 1 by adding. 5666 // Shift left 1 by adding.
5667 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch)); 5667 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
5668 And(t8, t9, Operand(mask_scratch)); 5668 And(t8, t9, Operand(mask_scratch));
5669 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg)); 5669 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5670 5670
5671 bind(&other_color); 5671 bind(&other_color);
5672 } 5672 }
5673 5673
5674 5674
5675 // Detect some, but not all, common pointer-free objects. This is used by the 5675 // Detect some, but not all, common pointer-free objects. This is used by the
5676 // incremental write barrier which doesn't care about oddballs (they are always 5676 // incremental write barrier which doesn't care about oddballs (they are always
5677 // marked black immediately so this code is not hit). 5677 // marked black immediately so this code is not hit).
5678 void MacroAssembler::JumpIfDataObject(Register value, 5678 void MacroAssembler::JumpIfDataObject(Register value,
5679 Register scratch, 5679 Register scratch,
5680 Label* not_data_object) { 5680 Label* not_data_object) {
5681 ASSERT(!AreAliased(value, scratch, t8, no_reg)); 5681 DCHECK(!AreAliased(value, scratch, t8, no_reg));
5682 Label is_data_object; 5682 Label is_data_object;
5683 ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); 5683 ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5684 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); 5684 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5685 Branch(&is_data_object, eq, t8, Operand(scratch)); 5685 Branch(&is_data_object, eq, t8, Operand(scratch));
5686 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); 5686 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5687 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); 5687 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5688 // If it's a string and it's not a cons string then it's an object containing 5688 // If it's a string and it's not a cons string then it's an object containing
5689 // no GC pointers. 5689 // no GC pointers.
5690 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 5690 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5691 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); 5691 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5692 Branch(not_data_object, ne, t8, Operand(zero_reg)); 5692 Branch(not_data_object, ne, t8, Operand(zero_reg));
5693 bind(&is_data_object); 5693 bind(&is_data_object);
5694 } 5694 }
5695 5695
5696 5696
5697 void MacroAssembler::GetMarkBits(Register addr_reg, 5697 void MacroAssembler::GetMarkBits(Register addr_reg,
5698 Register bitmap_reg, 5698 Register bitmap_reg,
5699 Register mask_reg) { 5699 Register mask_reg) {
5700 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); 5700 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5701 // addr_reg is divided into fields: 5701 // addr_reg is divided into fields:
5702 // |63 page base 20|19 high 8|7 shift 3|2 0| 5702 // |63 page base 20|19 high 8|7 shift 3|2 0|
5703 // 'high' gives the index of the cell holding color bits for the object. 5703 // 'high' gives the index of the cell holding color bits for the object.
5704 // 'shift' gives the offset in the cell for this object's color. 5704 // 'shift' gives the offset in the cell for this object's color.
5705 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); 5705 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5706 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); 5706 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5707 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; 5707 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5708 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits); 5708 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5709 dsll(t8, t8, Bitmap::kBytesPerCellLog2); 5709 dsll(t8, t8, Bitmap::kBytesPerCellLog2);
5710 Daddu(bitmap_reg, bitmap_reg, t8); 5710 Daddu(bitmap_reg, bitmap_reg, t8);
5711 li(t8, Operand(1)); 5711 li(t8, Operand(1));
5712 dsllv(mask_reg, t8, mask_reg); 5712 dsllv(mask_reg, t8, mask_reg);
5713 } 5713 }
5714 5714
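The field diagram in GetMarkBits splits the object's address into a page base, a bitmap-cell index, and a bit position within that cell. A rough standalone C++ sketch of the same arithmetic; the constants are illustrative stand-ins (real values come from Page, Bitmap and MemoryChunk), and the MemoryChunk header offset is ignored:

    #include <cstdint>

    constexpr uint64_t kPageSize        = uint64_t{1} << 20;  // Example page size.
    constexpr int      kPointerSizeLog2 = 3;                  // 8-byte pointers.
    constexpr int      kBitsPerCellLog2 = 5;                  // 32 mark bits per cell.

    struct MarkBitLocation {
      uint64_t page_base;   // Start of the page holding the object.
      uint64_t cell_index;  // Which bitmap cell holds this object's bits.
      uint64_t mask;        // Single bit selecting the object's first mark bit.
    };

    MarkBitLocation ComputeMarkBits(uint64_t addr) {
      MarkBitLocation loc;
      loc.page_base  = addr & ~(kPageSize - 1);
      uint64_t index = (addr & (kPageSize - 1)) >> kPointerSizeLog2;
      loc.cell_index = index >> kBitsPerCellLog2;
      loc.mask       = uint64_t{1} << (index & ((1u << kBitsPerCellLog2) - 1));
      return loc;
    }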
5715 5715
5716 void MacroAssembler::EnsureNotWhite( 5716 void MacroAssembler::EnsureNotWhite(
5717 Register value, 5717 Register value,
5718 Register bitmap_scratch, 5718 Register bitmap_scratch,
5719 Register mask_scratch, 5719 Register mask_scratch,
5720 Register load_scratch, 5720 Register load_scratch,
5721 Label* value_is_white_and_not_data) { 5721 Label* value_is_white_and_not_data) {
5722 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); 5722 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5723 GetMarkBits(value, bitmap_scratch, mask_scratch); 5723 GetMarkBits(value, bitmap_scratch, mask_scratch);
5724 5724
5725 // If the value is black or grey we don't need to do anything. 5725 // If the value is black or grey we don't need to do anything.
5726 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); 5726 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5727 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 5727 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5728 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); 5728 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5729 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); 5729 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5730 5730
5731 Label done; 5731 Label done;
5732 5732
5733 // Since both black and grey have a 1 in the first position and white does 5733 // Since both black and grey have a 1 in the first position and white does
5734 // not have a 1 there we only need to check one bit. 5734 // not have a 1 there we only need to check one bit.
5735 // Note that we are using a 4-byte aligned 8-byte load. 5735 // Note that we are using a 4-byte aligned 8-byte load.
5736 Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5736 Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5737 And(t8, mask_scratch, load_scratch); 5737 And(t8, mask_scratch, load_scratch);
5738 Branch(&done, ne, t8, Operand(zero_reg)); 5738 Branch(&done, ne, t8, Operand(zero_reg));
5739 5739
(...skipping 19 matching lines...)
5759 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); 5759 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5760 { 5760 {
5761 Label skip; 5761 Label skip;
5762 Branch(&skip, ne, t8, Operand(map)); 5762 Branch(&skip, ne, t8, Operand(map));
5763 li(length, HeapNumber::kSize); 5763 li(length, HeapNumber::kSize);
5764 Branch(&is_data_object); 5764 Branch(&is_data_object);
5765 bind(&skip); 5765 bind(&skip);
5766 } 5766 }
5767 5767
5768 // Check for strings. 5768 // Check for strings.
5769 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); 5769 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5770 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); 5770 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5771 // If it's a string and it's not a cons string then it's an object containing 5771 // If it's a string and it's not a cons string then it's an object containing
5772 // no GC pointers. 5772 // no GC pointers.
5773 Register instance_type = load_scratch; 5773 Register instance_type = load_scratch;
5774 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); 5774 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5775 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); 5775 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5776 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg)); 5776 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5777 // It's a non-indirect (non-cons and non-slice) string. 5777 // It's a non-indirect (non-cons and non-slice) string.
5778 // If it's external, the length is just ExternalString::kSize. 5778 // If it's external, the length is just ExternalString::kSize.
5779 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). 5779 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5780 // External strings are the only ones with the kExternalStringTag bit 5780 // External strings are the only ones with the kExternalStringTag bit
5781 // set. 5781 // set.
5782 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); 5782 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5783 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); 5783 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5784 And(t8, instance_type, Operand(kExternalStringTag)); 5784 And(t8, instance_type, Operand(kExternalStringTag));
5785 { 5785 {
5786 Label skip; 5786 Label skip;
5787 Branch(&skip, eq, t8, Operand(zero_reg)); 5787 Branch(&skip, eq, t8, Operand(zero_reg));
5788 li(length, ExternalString::kSize); 5788 li(length, ExternalString::kSize);
5789 Branch(&is_data_object); 5789 Branch(&is_data_object);
5790 bind(&skip); 5790 bind(&skip);
5791 } 5791 }
5792 5792
5793 // Sequential string, either ASCII or UC16. 5793 // Sequential string, either ASCII or UC16.
5794 // For ASCII (char-size of 1) we shift the smi tag away to get the length. 5794 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5795 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby 5795 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5796 // getting the length multiplied by 2. 5796 // getting the length multiplied by 2.
5797 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); 5797 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5798 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 5798 DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5799 lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset)); 5799 lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
5800 And(t8, instance_type, Operand(kStringEncodingMask)); 5800 And(t8, instance_type, Operand(kStringEncodingMask));
5801 { 5801 {
5802 Label skip; 5802 Label skip;
5803 Branch(&skip, ne, t8, Operand(zero_reg)); 5803 Branch(&skip, ne, t8, Operand(zero_reg));
5804 // Adjust length for UC16. 5804 // Adjust length for UC16.
5805 dsll(t9, t9, 1); 5805 dsll(t9, t9, 1);
5806 bind(&skip); 5806 bind(&skip);
5807 } 5807 }
5808 Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); 5808 Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5809 ASSERT(!length.is(t8)); 5809 DCHECK(!length.is(t8));
5810 And(length, length, Operand(~kObjectAlignmentMask)); 5810 And(length, length, Operand(~kObjectAlignmentMask));
5811 5811
5812 bind(&is_data_object); 5812 bind(&is_data_object);
5813 // Value is a data object, and it is white. Mark it black. Since we know 5813 // Value is a data object, and it is white. Mark it black. Since we know
5814 // that the object is white we can make it black by flipping one bit. 5814 // that the object is white we can make it black by flipping one bit.
5815 Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5815 Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5816 Or(t8, t8, Operand(mask_scratch)); 5816 Or(t8, t8, Operand(mask_scratch));
5817 Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5817 Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5818 5818
5819 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); 5819 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
(...skipping 60 matching lines...)
5880 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex); 5880 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5881 Branch(call_runtime, ne, a2, Operand(at)); 5881 Branch(call_runtime, ne, a2, Operand(at));
5882 5882
5883 bind(&no_elements); 5883 bind(&no_elements);
5884 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset)); 5884 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5885 Branch(&next, ne, a2, Operand(null_value)); 5885 Branch(&next, ne, a2, Operand(null_value));
5886 } 5886 }
5887 5887
5888 5888
5889 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { 5889 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5890 ASSERT(!output_reg.is(input_reg)); 5890 DCHECK(!output_reg.is(input_reg));
5891 Label done; 5891 Label done;
5892 li(output_reg, Operand(255)); 5892 li(output_reg, Operand(255));
5893 // Normal branch: nop in delay slot. 5893 // Normal branch: nop in delay slot.
5894 Branch(&done, gt, input_reg, Operand(output_reg)); 5894 Branch(&done, gt, input_reg, Operand(output_reg));
5895 // Use delay slot in this branch. 5895 // Use delay slot in this branch.
5896 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg)); 5896 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5897 mov(output_reg, zero_reg); // In delay slot. 5897 mov(output_reg, zero_reg); // In delay slot.
5898 mov(output_reg, input_reg); // Value is in range 0..255. 5898 mov(output_reg, input_reg); // Value is in range 0..255.
5899 bind(&done); 5899 bind(&done);
5900 } 5900 }
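ClampUint8 is written with branch-delay-slot tricks, but its semantics are a plain saturating clamp to 0..255. A minimal C++ equivalent for reference (function name ours):

    #include <cstdint>

    // Saturate a signed value into the 0..255 range.
    uint8_t ClampUint8(int32_t value) {
      if (value < 0) return 0;
      if (value > 255) return 255;
      return static_cast<uint8_t>(value);
    }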
(...skipping 74 matching lines...)
5975 UNREACHABLE(); 5975 UNREACHABLE();
5976 return no_reg; 5976 return no_reg;
5977 } 5977 }
5978 5978
5979 5979
5980 void MacroAssembler::JumpIfDictionaryInPrototypeChain( 5980 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5981 Register object, 5981 Register object,
5982 Register scratch0, 5982 Register scratch0,
5983 Register scratch1, 5983 Register scratch1,
5984 Label* found) { 5984 Label* found) {
5985 ASSERT(!scratch1.is(scratch0)); 5985 DCHECK(!scratch1.is(scratch0));
5986 Factory* factory = isolate()->factory(); 5986 Factory* factory = isolate()->factory();
5987 Register current = scratch0; 5987 Register current = scratch0;
5988 Label loop_again; 5988 Label loop_again;
5989 5989
5990 // Scratch contained elements pointer. 5990 // Scratch contained elements pointer.
5991 Move(current, object); 5991 Move(current, object);
5992 5992
5993 // Loop based on the map going up the prototype chain. 5993 // Loop based on the map going up the prototype chain.
5994 bind(&loop_again); 5994 bind(&loop_again);
5995 ld(current, FieldMemOperand(current, HeapObject::kMapOffset)); 5995 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
(...skipping 35 matching lines...)
6031 CodePatcher::CodePatcher(byte* address, 6031 CodePatcher::CodePatcher(byte* address,
6032 int instructions, 6032 int instructions,
6033 FlushICache flush_cache) 6033 FlushICache flush_cache)
6034 : address_(address), 6034 : address_(address),
6035 size_(instructions * Assembler::kInstrSize), 6035 size_(instructions * Assembler::kInstrSize),
6036 masm_(NULL, address, size_ + Assembler::kGap), 6036 masm_(NULL, address, size_ + Assembler::kGap),
6037 flush_cache_(flush_cache) { 6037 flush_cache_(flush_cache) {
6038 // Create a new macro assembler pointing to the address of the code to patch. 6038 // Create a new macro assembler pointing to the address of the code to patch.
 6039 // The size is adjusted with kGap in order for the assembler to generate size 6039 // The size is adjusted with kGap in order for the assembler to generate size
6040 // bytes of instructions without failing with buffer size constraints. 6040 // bytes of instructions without failing with buffer size constraints.
6041 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 6041 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6042 } 6042 }
6043 6043
6044 6044
6045 CodePatcher::~CodePatcher() { 6045 CodePatcher::~CodePatcher() {
6046 // Indicate that code has changed. 6046 // Indicate that code has changed.
6047 if (flush_cache_ == FLUSH) { 6047 if (flush_cache_ == FLUSH) {
6048 CpuFeatures::FlushICache(address_, size_); 6048 CpuFeatures::FlushICache(address_, size_);
6049 } 6049 }
6050 // Check that the code was patched as expected. 6050 // Check that the code was patched as expected.
6051 ASSERT(masm_.pc_ == address_ + size_); 6051 DCHECK(masm_.pc_ == address_ + size_);
6052 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 6052 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6053 } 6053 }
6054 6054
6055 6055
6056 void CodePatcher::Emit(Instr instr) { 6056 void CodePatcher::Emit(Instr instr) {
6057 masm()->emit(instr); 6057 masm()->emit(instr);
6058 } 6058 }
6059 6059
6060 6060
6061 void CodePatcher::Emit(Address addr) { 6061 void CodePatcher::Emit(Address addr) {
6062 // masm()->emit(reinterpret_cast<Instr>(addr)); 6062 // masm()->emit(reinterpret_cast<Instr>(addr));
6063 } 6063 }
6064 6064
6065 6065
6066 void CodePatcher::ChangeBranchCondition(Condition cond) { 6066 void CodePatcher::ChangeBranchCondition(Condition cond) {
6067 Instr instr = Assembler::instr_at(masm_.pc_); 6067 Instr instr = Assembler::instr_at(masm_.pc_);
6068 ASSERT(Assembler::IsBranch(instr)); 6068 DCHECK(Assembler::IsBranch(instr));
6069 uint32_t opcode = Assembler::GetOpcodeField(instr); 6069 uint32_t opcode = Assembler::GetOpcodeField(instr);
6070 // Currently only the 'eq' and 'ne' cond values are supported and the simple 6070 // Currently only the 'eq' and 'ne' cond values are supported and the simple
6071 // branch instructions (with opcode being the branch type). 6071 // branch instructions (with opcode being the branch type).
6072 // There are some special cases (see Assembler::IsBranch()) so extending this 6072 // There are some special cases (see Assembler::IsBranch()) so extending this
6073 // would be tricky. 6073 // would be tricky.
6074 ASSERT(opcode == BEQ || 6074 DCHECK(opcode == BEQ ||
6075 opcode == BNE || 6075 opcode == BNE ||
6076 opcode == BLEZ || 6076 opcode == BLEZ ||
6077 opcode == BGTZ || 6077 opcode == BGTZ ||
6078 opcode == BEQL || 6078 opcode == BEQL ||
6079 opcode == BNEL || 6079 opcode == BNEL ||
6080 opcode == BLEZL || 6080 opcode == BLEZL ||
6081 opcode == BGTZL); 6081 opcode == BGTZL);
6082 opcode = (cond == eq) ? BEQ : BNE; 6082 opcode = (cond == eq) ? BEQ : BNE;
6083 instr = (instr & ~kOpcodeMask) | opcode; 6083 instr = (instr & ~kOpcodeMask) | opcode;
6084 masm_.emit(instr); 6084 masm_.emit(instr);
6085 } 6085 }
6086 6086
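ChangeBranchCondition only has to rewrite the opcode field: MIPS encodes the major opcode in the top six bits of the instruction word, so swapping beq and bne is a mask-and-or. A hedged C++ sketch of that bit surgery (the field layout and the BEQ/BNE values are the architectural ones; the helper name is ours):

    #include <cstdint>

    constexpr uint32_t kOpcodeShift = 26;
    constexpr uint32_t kOpcodeMask  = 0x3Fu << kOpcodeShift;
    constexpr uint32_t BEQ          = 0x04u << kOpcodeShift;
    constexpr uint32_t BNE          = 0x05u << kOpcodeShift;

    // Replace the opcode field of an encoded branch with BEQ or BNE.
    uint32_t SetBranchCondition(uint32_t instr, bool equal) {
      return (instr & ~kOpcodeMask) | (equal ? BEQ : BNE);
    }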
6087 6087
6088 void MacroAssembler::TruncatingDiv(Register result, 6088 void MacroAssembler::TruncatingDiv(Register result,
6089 Register dividend, 6089 Register dividend,
6090 int32_t divisor) { 6090 int32_t divisor) {
6091 ASSERT(!dividend.is(result)); 6091 DCHECK(!dividend.is(result));
6092 ASSERT(!dividend.is(at)); 6092 DCHECK(!dividend.is(at));
6093 ASSERT(!result.is(at)); 6093 DCHECK(!result.is(at));
6094 MultiplierAndShift ms(divisor); 6094 MultiplierAndShift ms(divisor);
6095 li(at, Operand(ms.multiplier())); 6095 li(at, Operand(ms.multiplier()));
6096 Mulh(result, dividend, Operand(at)); 6096 Mulh(result, dividend, Operand(at));
6097 if (divisor > 0 && ms.multiplier() < 0) { 6097 if (divisor > 0 && ms.multiplier() < 0) {
6098 Addu(result, result, Operand(dividend)); 6098 Addu(result, result, Operand(dividend));
6099 } 6099 }
6100 if (divisor < 0 && ms.multiplier() > 0) { 6100 if (divisor < 0 && ms.multiplier() > 0) {
6101 Subu(result, result, Operand(dividend)); 6101 Subu(result, result, Operand(dividend));
6102 } 6102 }
6103 if (ms.shift() > 0) sra(result, result, ms.shift()); 6103 if (ms.shift() > 0) sra(result, result, ms.shift());
6104 srl(at, dividend, 31); 6104 srl(at, dividend, 31);
6105 Addu(result, result, Operand(at)); 6105 Addu(result, result, Operand(at));
6106 } 6106 }
6107 6107
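TruncatingDiv divides by a compile-time constant without a divide instruction: multiply by a precomputed fixed-point reciprocal, keep the high 32 bits, shift, and add the dividend's sign bit so the quotient truncates toward zero. A self-contained sketch hard-wired to divisor == 3 (the multiplier is the standard magic constant for /3; the correction add/sub of the dividend that the macro performs for negative multipliers is not needed in this case):

    #include <cstdint>

    // Truncating division by 3 via multiply-high + shift, mirroring
    // MacroAssembler::TruncatingDiv with MultiplierAndShift(3).
    int32_t DivideBy3(int32_t dividend) {
      const int32_t multiplier = 0x55555556;  // floor((2^32 + 2) / 3).
      const int shift = 0;                    // No extra shift needed for /3.
      int32_t result =
          static_cast<int32_t>((int64_t{dividend} * multiplier) >> 32);
      if (shift > 0) result >>= shift;
      // Add 1 for negative dividends so the result rounds toward zero.
      result += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
      return result;
    }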
6108 6108
6109 } } // namespace v8::internal 6109 } } // namespace v8::internal
6110 6110
6111 #endif // V8_TARGET_ARCH_MIPS64 6111 #endif // V8_TARGET_ARCH_MIPS64