Chromium Code Reviews

Side by Side Diff: src/mips/macro-assembler-mips.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes Created 6 years, 4 months ago
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <limits.h> // For LONG_MIN, LONG_MAX. 5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6 6
7 #include "src/v8.h" 7 #include "src/v8.h"
8 8
9 #if V8_TARGET_ARCH_MIPS 9 #if V8_TARGET_ARCH_MIPS
10 10
(...skipping 14 matching lines...)
25 if (isolate() != NULL) { 25 if (isolate() != NULL) {
26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), 26 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
27 isolate()); 27 isolate());
28 } 28 }
29 } 29 }
30 30
31 31
32 void MacroAssembler::Load(Register dst, 32 void MacroAssembler::Load(Register dst,
33 const MemOperand& src, 33 const MemOperand& src,
34 Representation r) { 34 Representation r) {
35 ASSERT(!r.IsDouble()); 35 DCHECK(!r.IsDouble());
36 if (r.IsInteger8()) { 36 if (r.IsInteger8()) {
37 lb(dst, src); 37 lb(dst, src);
38 } else if (r.IsUInteger8()) { 38 } else if (r.IsUInteger8()) {
39 lbu(dst, src); 39 lbu(dst, src);
40 } else if (r.IsInteger16()) { 40 } else if (r.IsInteger16()) {
41 lh(dst, src); 41 lh(dst, src);
42 } else if (r.IsUInteger16()) { 42 } else if (r.IsUInteger16()) {
43 lhu(dst, src); 43 lhu(dst, src);
44 } else { 44 } else {
45 lw(dst, src); 45 lw(dst, src);
46 } 46 }
47 } 47 }
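An aside on the dispatch above (illustrative only, not part of this patch): the MIPS load variants differ in width and sign behavior, which maps directly onto C++ integral conversions. A minimal sketch:

    #include <stdint.h>

    // Each helper mirrors one branch of MacroAssembler::Load above.
    int32_t load_i8(const void* p)  { return *static_cast<const int8_t*>(p); }    // lb: sign-extends
    int32_t load_u8(const void* p)  { return *static_cast<const uint8_t*>(p); }   // lbu: zero-extends
    int32_t load_i16(const void* p) { return *static_cast<const int16_t*>(p); }   // lh: sign-extends
    int32_t load_u16(const void* p) { return *static_cast<const uint16_t*>(p); }  // lhu: zero-extends
    int32_t load_i32(const void* p) { return *static_cast<const int32_t*>(p); }   // lw: full word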
48 48
49 49
50 void MacroAssembler::Store(Register src, 50 void MacroAssembler::Store(Register src,
51 const MemOperand& dst, 51 const MemOperand& dst,
52 Representation r) { 52 Representation r) {
53 ASSERT(!r.IsDouble()); 53 DCHECK(!r.IsDouble());
54 if (r.IsInteger8() || r.IsUInteger8()) { 54 if (r.IsInteger8() || r.IsUInteger8()) {
55 sb(src, dst); 55 sb(src, dst);
56 } else if (r.IsInteger16() || r.IsUInteger16()) { 56 } else if (r.IsInteger16() || r.IsUInteger16()) {
57 sh(src, dst); 57 sh(src, dst);
58 } else { 58 } else {
59 if (r.IsHeapObject()) { 59 if (r.IsHeapObject()) {
60 AssertNotSmi(src); 60 AssertNotSmi(src);
61 } else if (r.IsSmi()) { 61 } else if (r.IsSmi()) {
62 AssertSmi(src); 62 AssertSmi(src);
63 } 63 }
(...skipping 30 matching lines...)
94 Branch(2, NegateCondition(cond), src1, src2); 94 Branch(2, NegateCondition(cond), src1, src2);
95 sw(source, MemOperand(s6, index << kPointerSizeLog2)); 95 sw(source, MemOperand(s6, index << kPointerSizeLog2));
96 } 96 }
97 97
98 98
99 // Push and pop all registers that can hold pointers. 99 // Push and pop all registers that can hold pointers.
100 void MacroAssembler::PushSafepointRegisters() { 100 void MacroAssembler::PushSafepointRegisters() {
101 // Safepoints expect a block of kNumSafepointRegisters values on the 101 // Safepoints expect a block of kNumSafepointRegisters values on the
102 // stack, so adjust the stack for unsaved registers. 102 // stack, so adjust the stack for unsaved registers.
103 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 103 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
104 ASSERT(num_unsaved >= 0); 104 DCHECK(num_unsaved >= 0);
105 if (num_unsaved > 0) { 105 if (num_unsaved > 0) {
106 Subu(sp, sp, Operand(num_unsaved * kPointerSize)); 106 Subu(sp, sp, Operand(num_unsaved * kPointerSize));
107 } 107 }
108 MultiPush(kSafepointSavedRegisters); 108 MultiPush(kSafepointSavedRegisters);
109 } 109 }
110 110
111 111
112 void MacroAssembler::PopSafepointRegisters() { 112 void MacroAssembler::PopSafepointRegisters() {
113 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 113 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
114 MultiPop(kSafepointSavedRegisters); 114 MultiPop(kSafepointSavedRegisters);
(...skipping 31 matching lines...)
146 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize; 146 int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
147 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; 147 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
148 return MemOperand(sp, doubles_size + register_offset); 148 return MemOperand(sp, doubles_size + register_offset);
149 } 149 }
150 150
151 151
152 void MacroAssembler::InNewSpace(Register object, 152 void MacroAssembler::InNewSpace(Register object,
153 Register scratch, 153 Register scratch,
154 Condition cc, 154 Condition cc,
155 Label* branch) { 155 Label* branch) {
156 ASSERT(cc == eq || cc == ne); 156 DCHECK(cc == eq || cc == ne);
157 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); 157 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
158 Branch(branch, cc, scratch, 158 Branch(branch, cc, scratch,
159 Operand(ExternalReference::new_space_start(isolate()))); 159 Operand(ExternalReference::new_space_start(isolate())));
160 } 160 }
161 161
162 162
163 void MacroAssembler::RecordWriteField( 163 void MacroAssembler::RecordWriteField(
164 Register object, 164 Register object,
165 int offset, 165 int offset,
166 Register value, 166 Register value,
167 Register dst, 167 Register dst,
168 RAStatus ra_status, 168 RAStatus ra_status,
169 SaveFPRegsMode save_fp, 169 SaveFPRegsMode save_fp,
170 RememberedSetAction remembered_set_action, 170 RememberedSetAction remembered_set_action,
171 SmiCheck smi_check, 171 SmiCheck smi_check,
172 PointersToHereCheck pointers_to_here_check_for_value) { 172 PointersToHereCheck pointers_to_here_check_for_value) {
173 ASSERT(!AreAliased(value, dst, t8, object)); 173 DCHECK(!AreAliased(value, dst, t8, object));
174 // First, check if a write barrier is even needed. The tests below 174 // First, check if a write barrier is even needed. The tests below
175 // catch stores of Smis. 175 // catch stores of Smis.
176 Label done; 176 Label done;
177 177
178 // Skip barrier if writing a smi. 178 // Skip barrier if writing a smi.
179 if (smi_check == INLINE_SMI_CHECK) { 179 if (smi_check == INLINE_SMI_CHECK) {
180 JumpIfSmi(value, &done); 180 JumpIfSmi(value, &done);
181 } 181 }
182 182
183 // Although the object register is tagged, the offset is relative to the start 183 // Although the object register is tagged, the offset is relative to the start
184 // of the object, so the offset must be a multiple of kPointerSize. 184 // of the object, so the offset must be a multiple of kPointerSize.
185 ASSERT(IsAligned(offset, kPointerSize)); 185 DCHECK(IsAligned(offset, kPointerSize));
186 186
187 Addu(dst, object, Operand(offset - kHeapObjectTag)); 187 Addu(dst, object, Operand(offset - kHeapObjectTag));
188 if (emit_debug_code()) { 188 if (emit_debug_code()) {
189 Label ok; 189 Label ok;
190 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1)); 190 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
191 Branch(&ok, eq, t8, Operand(zero_reg)); 191 Branch(&ok, eq, t8, Operand(zero_reg));
192 stop("Unaligned cell in write barrier"); 192 stop("Unaligned cell in write barrier");
193 bind(&ok); 193 bind(&ok);
194 } 194 }
195 195
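A note on the emit_debug_code() block above: it verifies pointer alignment by masking the low address bits of dst. In C++ terms, a minimal sketch (illustrative, not part of this patch; assumes kPointerSizeLog2 == 2, i.e. 4-byte pointers on MIPS32):

    #include <stdint.h>

    // t8 = dst & ((1 << kPointerSizeLog2) - 1); the barrier stops unless zero.
    bool IsPointerAligned(uintptr_t dst) {
      const int kPointerSizeLog2 = 2;  // assumed: 4-byte pointers
      return (dst & ((1u << kPointerSizeLog2) - 1)) == 0;
    }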
(...skipping 18 matching lines...)
214 214
215 215
216 // Will clobber 4 registers: object, map, dst, ip. The 216 // Will clobber 4 registers: object, map, dst, ip. The
217 // register 'object' contains a heap object pointer. 217 // register 'object' contains a heap object pointer.
218 void MacroAssembler::RecordWriteForMap(Register object, 218 void MacroAssembler::RecordWriteForMap(Register object,
219 Register map, 219 Register map,
220 Register dst, 220 Register dst,
221 RAStatus ra_status, 221 RAStatus ra_status,
222 SaveFPRegsMode fp_mode) { 222 SaveFPRegsMode fp_mode) {
223 if (emit_debug_code()) { 223 if (emit_debug_code()) {
224 ASSERT(!dst.is(at)); 224 DCHECK(!dst.is(at));
225 lw(dst, FieldMemOperand(map, HeapObject::kMapOffset)); 225 lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
226 Check(eq, 226 Check(eq,
227 kWrongAddressOrValuePassedToRecordWrite, 227 kWrongAddressOrValuePassedToRecordWrite,
228 dst, 228 dst,
229 Operand(isolate()->factory()->meta_map())); 229 Operand(isolate()->factory()->meta_map()));
230 } 230 }
231 231
232 if (!FLAG_incremental_marking) { 232 if (!FLAG_incremental_marking) {
233 return; 233 return;
234 } 234 }
(...skipping 58 matching lines...)
293 // tag is shifted away. 293 // tag is shifted away.
294 void MacroAssembler::RecordWrite( 294 void MacroAssembler::RecordWrite(
295 Register object, 295 Register object,
296 Register address, 296 Register address,
297 Register value, 297 Register value,
298 RAStatus ra_status, 298 RAStatus ra_status,
299 SaveFPRegsMode fp_mode, 299 SaveFPRegsMode fp_mode,
300 RememberedSetAction remembered_set_action, 300 RememberedSetAction remembered_set_action,
301 SmiCheck smi_check, 301 SmiCheck smi_check,
302 PointersToHereCheck pointers_to_here_check_for_value) { 302 PointersToHereCheck pointers_to_here_check_for_value) {
303 ASSERT(!AreAliased(object, address, value, t8)); 303 DCHECK(!AreAliased(object, address, value, t8));
304 ASSERT(!AreAliased(object, address, value, t9)); 304 DCHECK(!AreAliased(object, address, value, t9));
305 305
306 if (emit_debug_code()) { 306 if (emit_debug_code()) {
307 lw(at, MemOperand(address)); 307 lw(at, MemOperand(address));
308 Assert( 308 Assert(
309 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value)); 309 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
310 } 310 }
311 311
312 if (remembered_set_action == OMIT_REMEMBERED_SET && 312 if (remembered_set_action == OMIT_REMEMBERED_SET &&
313 !FLAG_incremental_marking) { 313 !FLAG_incremental_marking) {
314 return; 314 return;
315 } 315 }
316 316
317 // First, check if a write barrier is even needed. The tests below 317 // First, check if a write barrier is even needed. The tests below
318 // catch stores of smis and stores into the young generation. 318 // catch stores of smis and stores into the young generation.
319 Label done; 319 Label done;
320 320
321 if (smi_check == INLINE_SMI_CHECK) { 321 if (smi_check == INLINE_SMI_CHECK) {
322 ASSERT_EQ(0, kSmiTag); 322 DCHECK_EQ(0, kSmiTag);
323 JumpIfSmi(value, &done); 323 JumpIfSmi(value, &done);
324 } 324 }
325 325
326 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { 326 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
327 CheckPageFlag(value, 327 CheckPageFlag(value,
328 value, // Used as scratch. 328 value, // Used as scratch.
329 MemoryChunk::kPointersToHereAreInterestingMask, 329 MemoryChunk::kPointersToHereAreInterestingMask,
330 eq, 330 eq,
331 &done); 331 &done);
332 } 332 }
(...skipping 51 matching lines...)
384 sw(address, MemOperand(scratch)); 384 sw(address, MemOperand(scratch));
385 Addu(scratch, scratch, kPointerSize); 385 Addu(scratch, scratch, kPointerSize);
386 // Write back new top of buffer. 386 // Write back new top of buffer.
387 sw(scratch, MemOperand(t8)); 387 sw(scratch, MemOperand(t8));
388 // Call stub on end of buffer. 388 // Call stub on end of buffer.
389 // Check for end of buffer. 389 // Check for end of buffer.
390 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); 390 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
391 if (and_then == kFallThroughAtEnd) { 391 if (and_then == kFallThroughAtEnd) {
392 Branch(&done, eq, t8, Operand(zero_reg)); 392 Branch(&done, eq, t8, Operand(zero_reg));
393 } else { 393 } else {
394 ASSERT(and_then == kReturnAtEnd); 394 DCHECK(and_then == kReturnAtEnd);
395 Ret(eq, t8, Operand(zero_reg)); 395 Ret(eq, t8, Operand(zero_reg));
396 } 396 }
397 push(ra); 397 push(ra);
398 StoreBufferOverflowStub store_buffer_overflow = 398 StoreBufferOverflowStub store_buffer_overflow =
399 StoreBufferOverflowStub(isolate(), fp_mode); 399 StoreBufferOverflowStub(isolate(), fp_mode);
400 CallStub(&store_buffer_overflow); 400 CallStub(&store_buffer_overflow);
401 pop(ra); 401 pop(ra);
402 bind(&done); 402 bind(&done);
403 if (and_then == kReturnAtEnd) { 403 if (and_then == kReturnAtEnd) {
404 Ret(); 404 Ret();
405 } 405 }
406 } 406 }
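For orientation, the store-buffer append sequence above can be sketched in C++ as follows. The overflow mechanics are an assumption of the sketch: it presumes kStoreBufferOverflowBit reads as set in the top pointer once the top crosses the buffer limit.

    #include <stdint.h>

    // Hypothetical sketch: append 'address' to the store buffer, bump the
    // top pointer, and test the overflow bit of the new top.
    bool StoreBufferAppend(uintptr_t** top_addr, uintptr_t address,
                           uintptr_t overflow_bit) {
      uintptr_t* top = *top_addr;   // load current top
      *top++ = address;             // sw(address, MemOperand(scratch))
      *top_addr = top;              // write back new top of buffer
      return (reinterpret_cast<uintptr_t>(top) & overflow_bit) != 0;  // And(t8, ...)
    }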
407 407
408 408
409 // ----------------------------------------------------------------------------- 409 // -----------------------------------------------------------------------------
410 // Allocation support. 410 // Allocation support.
411 411
412 412
413 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, 413 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
414 Register scratch, 414 Register scratch,
415 Label* miss) { 415 Label* miss) {
416 Label same_contexts; 416 Label same_contexts;
417 417
418 ASSERT(!holder_reg.is(scratch)); 418 DCHECK(!holder_reg.is(scratch));
419 ASSERT(!holder_reg.is(at)); 419 DCHECK(!holder_reg.is(at));
420 ASSERT(!scratch.is(at)); 420 DCHECK(!scratch.is(at));
421 421
422 // Load current lexical context from the stack frame. 422 // Load current lexical context from the stack frame.
423 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); 423 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
424 // In debug mode, make sure the lexical context is set. 424 // In debug mode, make sure the lexical context is set.
425 #ifdef DEBUG 425 #ifdef DEBUG
426 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext, 426 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
427 scratch, Operand(zero_reg)); 427 scratch, Operand(zero_reg));
428 #endif 428 #endif
429 429
430 // Load the native context of the current context. 430 // Load the native context of the current context.
(...skipping 134 matching lines...)
565 for (int i = 0; i < kNumberDictionaryProbes; i++) { 565 for (int i = 0; i < kNumberDictionaryProbes; i++) {
566 // Use reg2 for index calculations and keep the hash intact in reg0. 566 // Use reg2 for index calculations and keep the hash intact in reg0.
567 mov(reg2, reg0); 567 mov(reg2, reg0);
568 // Compute the masked index: (hash + i + i * i) & mask. 568 // Compute the masked index: (hash + i + i * i) & mask.
569 if (i > 0) { 569 if (i > 0) {
570 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i))); 570 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
571 } 571 }
572 and_(reg2, reg2, reg1); 572 and_(reg2, reg2, reg1);
573 573
574 // Scale the index by multiplying by the element size. 574 // Scale the index by multiplying by the element size.
575 ASSERT(SeededNumberDictionary::kEntrySize == 3); 575 DCHECK(SeededNumberDictionary::kEntrySize == 3);
576 sll(at, reg2, 1); // 2x. 576 sll(at, reg2, 1); // 2x.
577 addu(reg2, reg2, at); // reg2 = reg2 * 3. 577 addu(reg2, reg2, at); // reg2 = reg2 * 3.
578 578
579 // Check if the key is identical to the name. 579 // Check if the key is identical to the name.
580 sll(at, reg2, kPointerSizeLog2); 580 sll(at, reg2, kPointerSizeLog2);
581 addu(reg2, elements, at); 581 addu(reg2, elements, at);
582 582
583 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset)); 583 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
584 if (i != kNumberDictionaryProbes - 1) { 584 if (i != kNumberDictionaryProbes - 1) {
585 Branch(&done, eq, key, Operand(at)); 585 Branch(&done, eq, key, Operand(at));
(...skipping 22 matching lines...)
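Per the comments above, the lookup uses quadratic probing, and each dictionary entry spans kEntrySize == 3 words, so the entry index is scaled by 3 with a shift-and-add rather than a multiply. A sketch based only on those comments (the exact formula inside GetProbeOffset is not shown here):

    #include <stdint.h>

    // Masked probe index and its *3 scaling, mirroring the sll/addu pair above.
    uint32_t ScaledProbeIndex(uint32_t hash, uint32_t probe_offset, uint32_t mask) {
      uint32_t index = (hash + probe_offset) & mask;  // quadratic probing
      return (index << 1) + index;                    // sll by 1, then addu: index * 3
    }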
608 // Instruction macros. 608 // Instruction macros.
609 609
610 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { 610 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
611 if (rt.is_reg()) { 611 if (rt.is_reg()) {
612 addu(rd, rs, rt.rm()); 612 addu(rd, rs, rt.rm());
613 } else { 613 } else {
614 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 614 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
615 addiu(rd, rs, rt.imm32_); 615 addiu(rd, rs, rt.imm32_);
616 } else { 616 } else {
617 // li handles the relocation. 617 // li handles the relocation.
618 ASSERT(!rs.is(at)); 618 DCHECK(!rs.is(at));
619 li(at, rt); 619 li(at, rt);
620 addu(rd, rs, at); 620 addu(rd, rs, at);
621 } 621 }
622 } 622 }
623 } 623 }
624 624
625 625
626 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { 626 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
627 if (rt.is_reg()) { 627 if (rt.is_reg()) {
628 subu(rd, rs, rt.rm()); 628 subu(rd, rs, rt.rm());
629 } else { 629 } else {
630 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 630 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
631 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm). 631 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
632 } else { 632 } else {
633 // li handles the relocation. 633 // li handles the relocation.
634 ASSERT(!rs.is(at)); 634 DCHECK(!rs.is(at));
635 li(at, rt); 635 li(at, rt);
636 subu(rd, rs, at); 636 subu(rd, rs, at);
637 } 637 }
638 } 638 }
639 } 639 }
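The comment above names the trick: MIPS has no subtract-immediate, so Subu adds the negated immediate instead. A sketch (illustrative only; valid while -imm still fits the 16-bit addiu field):

    #include <stdint.h>

    // rd = rs - imm is encoded as addiu(rd, rs, -imm).
    int32_t SubImmediate(int32_t rs, int16_t imm) {
      return rs + (-static_cast<int32_t>(imm));
    }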
640 640
641 641
642 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { 642 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
643 if (rt.is_reg()) { 643 if (rt.is_reg()) {
644 if (kArchVariant == kLoongson) { 644 if (kArchVariant == kLoongson) {
645 mult(rs, rt.rm()); 645 mult(rs, rt.rm());
646 mflo(rd); 646 mflo(rd);
647 } else { 647 } else {
648 mul(rd, rs, rt.rm()); 648 mul(rd, rs, rt.rm());
649 } 649 }
650 } else { 650 } else {
651 // li handles the relocation. 651 // li handles the relocation.
652 ASSERT(!rs.is(at)); 652 DCHECK(!rs.is(at));
653 li(at, rt); 653 li(at, rt);
654 if (kArchVariant == kLoongson) { 654 if (kArchVariant == kLoongson) {
655 mult(rs, at); 655 mult(rs, at);
656 mflo(rd); 656 mflo(rd);
657 } else { 657 } else {
658 mul(rd, rs, at); 658 mul(rd, rs, at);
659 } 659 }
660 } 660 }
661 } 661 }
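On Loongson the three-operand mul is unavailable, so the product goes through the HI/LO register pair; its effect, sketched (not part of this patch):

    #include <stdint.h>

    // mult writes the 64-bit product to HI/LO; mflo keeps the low word.
    uint32_t MulLow(uint32_t rs, uint32_t rt) {
      uint64_t product = static_cast<uint64_t>(rs) * rt;  // mult rs, rt
      return static_cast<uint32_t>(product);              // mflo rd
    }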
662 662
663 663
664 void MacroAssembler::Mult(Register rs, const Operand& rt) { 664 void MacroAssembler::Mult(Register rs, const Operand& rt) {
665 if (rt.is_reg()) { 665 if (rt.is_reg()) {
666 mult(rs, rt.rm()); 666 mult(rs, rt.rm());
667 } else { 667 } else {
668 // li handles the relocation. 668 // li handles the relocation.
669 ASSERT(!rs.is(at)); 669 DCHECK(!rs.is(at));
670 li(at, rt); 670 li(at, rt);
671 mult(rs, at); 671 mult(rs, at);
672 } 672 }
673 } 673 }
674 674
675 675
676 void MacroAssembler::Multu(Register rs, const Operand& rt) { 676 void MacroAssembler::Multu(Register rs, const Operand& rt) {
677 if (rt.is_reg()) { 677 if (rt.is_reg()) {
678 multu(rs, rt.rm()); 678 multu(rs, rt.rm());
679 } else { 679 } else {
680 // li handles the relocation. 680 // li handles the relocation.
681 ASSERT(!rs.is(at)); 681 DCHECK(!rs.is(at));
682 li(at, rt); 682 li(at, rt);
683 multu(rs, at); 683 multu(rs, at);
684 } 684 }
685 } 685 }
686 686
687 687
688 void MacroAssembler::Div(Register rs, const Operand& rt) { 688 void MacroAssembler::Div(Register rs, const Operand& rt) {
689 if (rt.is_reg()) { 689 if (rt.is_reg()) {
690 div(rs, rt.rm()); 690 div(rs, rt.rm());
691 } else { 691 } else {
692 // li handles the relocation. 692 // li handles the relocation.
693 ASSERT(!rs.is(at)); 693 DCHECK(!rs.is(at));
694 li(at, rt); 694 li(at, rt);
695 div(rs, at); 695 div(rs, at);
696 } 696 }
697 } 697 }
698 698
699 699
700 void MacroAssembler::Divu(Register rs, const Operand& rt) { 700 void MacroAssembler::Divu(Register rs, const Operand& rt) {
701 if (rt.is_reg()) { 701 if (rt.is_reg()) {
702 divu(rs, rt.rm()); 702 divu(rs, rt.rm());
703 } else { 703 } else {
704 // li handles the relocation. 704 // li handles the relocation.
705 ASSERT(!rs.is(at)); 705 DCHECK(!rs.is(at));
706 li(at, rt); 706 li(at, rt);
707 divu(rs, at); 707 divu(rs, at);
708 } 708 }
709 } 709 }
710 710
711 711
712 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { 712 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
713 if (rt.is_reg()) { 713 if (rt.is_reg()) {
714 and_(rd, rs, rt.rm()); 714 and_(rd, rs, rt.rm());
715 } else { 715 } else {
716 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 716 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
717 andi(rd, rs, rt.imm32_); 717 andi(rd, rs, rt.imm32_);
718 } else { 718 } else {
719 // li handles the relocation. 719 // li handles the relocation.
720 ASSERT(!rs.is(at)); 720 DCHECK(!rs.is(at));
721 li(at, rt); 721 li(at, rt);
722 and_(rd, rs, at); 722 and_(rd, rs, at);
723 } 723 }
724 } 724 }
725 } 725 }
726 726
727 727
728 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { 728 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
729 if (rt.is_reg()) { 729 if (rt.is_reg()) {
730 or_(rd, rs, rt.rm()); 730 or_(rd, rs, rt.rm());
731 } else { 731 } else {
732 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 732 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
733 ori(rd, rs, rt.imm32_); 733 ori(rd, rs, rt.imm32_);
734 } else { 734 } else {
735 // li handles the relocation. 735 // li handles the relocation.
736 ASSERT(!rs.is(at)); 736 DCHECK(!rs.is(at));
737 li(at, rt); 737 li(at, rt);
738 or_(rd, rs, at); 738 or_(rd, rs, at);
739 } 739 }
740 } 740 }
741 } 741 }
742 742
743 743
744 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { 744 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
745 if (rt.is_reg()) { 745 if (rt.is_reg()) {
746 xor_(rd, rs, rt.rm()); 746 xor_(rd, rs, rt.rm());
747 } else { 747 } else {
748 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 748 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
749 xori(rd, rs, rt.imm32_); 749 xori(rd, rs, rt.imm32_);
750 } else { 750 } else {
751 // li handles the relocation. 751 // li handles the relocation.
752 ASSERT(!rs.is(at)); 752 DCHECK(!rs.is(at));
753 li(at, rt); 753 li(at, rt);
754 xor_(rd, rs, at); 754 xor_(rd, rs, at);
755 } 755 }
756 } 756 }
757 } 757 }
758 758
759 759
760 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { 760 void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
761 if (rt.is_reg()) { 761 if (rt.is_reg()) {
762 nor(rd, rs, rt.rm()); 762 nor(rd, rs, rt.rm());
763 } else { 763 } else {
764 // li handles the relocation. 764 // li handles the relocation.
765 ASSERT(!rs.is(at)); 765 DCHECK(!rs.is(at));
766 li(at, rt); 766 li(at, rt);
767 nor(rd, rs, at); 767 nor(rd, rs, at);
768 } 768 }
769 } 769 }
770 770
771 771
772 void MacroAssembler::Neg(Register rs, const Operand& rt) { 772 void MacroAssembler::Neg(Register rs, const Operand& rt) {
773 ASSERT(rt.is_reg()); 773 DCHECK(rt.is_reg());
774 ASSERT(!at.is(rs)); 774 DCHECK(!at.is(rs));
775 ASSERT(!at.is(rt.rm())); 775 DCHECK(!at.is(rt.rm()));
776 li(at, -1); 776 li(at, -1);
777 xor_(rs, rt.rm(), at); 777 xor_(rs, rt.rm(), at);
778 } 778 }
779 779
780 780
781 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { 781 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
782 if (rt.is_reg()) { 782 if (rt.is_reg()) {
783 slt(rd, rs, rt.rm()); 783 slt(rd, rs, rt.rm());
784 } else { 784 } else {
785 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 785 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
786 slti(rd, rs, rt.imm32_); 786 slti(rd, rs, rt.imm32_);
787 } else { 787 } else {
788 // li handles the relocation. 788 // li handles the relocation.
789 ASSERT(!rs.is(at)); 789 DCHECK(!rs.is(at));
790 li(at, rt); 790 li(at, rt);
791 slt(rd, rs, at); 791 slt(rd, rs, at);
792 } 792 }
793 } 793 }
794 } 794 }
795 795
796 796
797 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { 797 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
798 if (rt.is_reg()) { 798 if (rt.is_reg()) {
799 sltu(rd, rs, rt.rm()); 799 sltu(rd, rs, rt.rm());
800 } else { 800 } else {
801 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 801 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
802 sltiu(rd, rs, rt.imm32_); 802 sltiu(rd, rs, rt.imm32_);
803 } else { 803 } else {
804 // li handles the relocation. 804 // li handles the relocation.
805 ASSERT(!rs.is(at)); 805 DCHECK(!rs.is(at));
806 li(at, rt); 806 li(at, rt);
807 sltu(rd, rs, at); 807 sltu(rd, rs, at);
808 } 808 }
809 } 809 }
810 } 810 }
811 811
812 812
813 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { 813 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
814 if (kArchVariant == kMips32r2) { 814 if (kArchVariant == kMips32r2) {
815 if (rt.is_reg()) { 815 if (rt.is_reg()) {
(...skipping 41 matching lines...)
857 swr(rd, rs); 857 swr(rd, rs);
858 swl(rd, MemOperand(rs.rm(), rs.offset() + 3)); 858 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
859 } 859 }
860 860
861 861
862 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { 862 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
863 AllowDeferredHandleDereference smi_check; 863 AllowDeferredHandleDereference smi_check;
864 if (value->IsSmi()) { 864 if (value->IsSmi()) {
865 li(dst, Operand(value), mode); 865 li(dst, Operand(value), mode);
866 } else { 866 } else {
867 ASSERT(value->IsHeapObject()); 867 DCHECK(value->IsHeapObject());
868 if (isolate()->heap()->InNewSpace(*value)) { 868 if (isolate()->heap()->InNewSpace(*value)) {
869 Handle<Cell> cell = isolate()->factory()->NewCell(value); 869 Handle<Cell> cell = isolate()->factory()->NewCell(value);
870 li(dst, Operand(cell)); 870 li(dst, Operand(cell));
871 lw(dst, FieldMemOperand(dst, Cell::kValueOffset)); 871 lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
872 } else { 872 } else {
873 li(dst, Operand(value)); 873 li(dst, Operand(value));
874 } 874 }
875 } 875 }
876 } 876 }
877 877
878 878
879 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { 879 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
880 ASSERT(!j.is_reg()); 880 DCHECK(!j.is_reg());
881 BlockTrampolinePoolScope block_trampoline_pool(this); 881 BlockTrampolinePoolScope block_trampoline_pool(this);
882 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { 882 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
883 // Normal load of an immediate value which does not need Relocation Info. 883 // Normal load of an immediate value which does not need Relocation Info.
884 if (is_int16(j.imm32_)) { 884 if (is_int16(j.imm32_)) {
885 addiu(rd, zero_reg, j.imm32_); 885 addiu(rd, zero_reg, j.imm32_);
886 } else if (!(j.imm32_ & kHiMask)) { 886 } else if (!(j.imm32_ & kHiMask)) {
887 ori(rd, zero_reg, j.imm32_); 887 ori(rd, zero_reg, j.imm32_);
888 } else if (!(j.imm32_ & kImm16Mask)) { 888 } else if (!(j.imm32_ & kImm16Mask)) {
889 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask); 889 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
890 } else { 890 } else {
(...skipping 132 matching lines...)
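When none of the three single-instruction cases above apply, li (in the elided lines) splits the immediate across lui and ori. A sketch of that general split, assuming kLuiShift == 16 and kImm16Mask == 0xFFFF as in the surrounding code:

    #include <stdint.h>

    // General 32-bit immediate load as a lui/ori pair.
    uint32_t LoadImm32(uint32_t imm) {
      uint32_t rd = ((imm >> 16) & 0xFFFF) << 16;  // lui rd, upper halfword
      rd |= imm & 0xFFFF;                          // ori rd, rd, lower halfword
      return rd;
    }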
1023 li(a1, instructions * kInstrSize); 1023 li(a1, instructions * kInstrSize);
1024 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2); 1024 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1025 MultiPop(saved_regs); 1025 MultiPop(saved_regs);
1026 } 1026 }
1027 1027
1028 1028
1029 void MacroAssembler::Ext(Register rt, 1029 void MacroAssembler::Ext(Register rt,
1030 Register rs, 1030 Register rs,
1031 uint16_t pos, 1031 uint16_t pos,
1032 uint16_t size) { 1032 uint16_t size) {
1033 ASSERT(pos < 32); 1033 DCHECK(pos < 32);
1034 ASSERT(pos + size < 33); 1034 DCHECK(pos + size < 33);
1035 1035
1036 if (kArchVariant == kMips32r2) { 1036 if (kArchVariant == kMips32r2) {
1037 ext_(rt, rs, pos, size); 1037 ext_(rt, rs, pos, size);
1038 } else { 1038 } else {
1039 // Move rs to rt and shift it left then right to get the 1039 // Move rs to rt and shift it left then right to get the
1040 // desired bitfield on the right side and zeroes on the left. 1040 // desired bitfield on the right side and zeroes on the left.
1041 int shift_left = 32 - (pos + size); 1041 int shift_left = 32 - (pos + size);
1042 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0. 1042 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1043 1043
1044 int shift_right = 32 - size; 1044 int shift_right = 32 - size;
1045 if (shift_right > 0) { 1045 if (shift_right > 0) {
1046 srl(rt, rt, shift_right); 1046 srl(rt, rt, shift_right);
1047 } 1047 }
1048 } 1048 }
1049 } 1049 }
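The pre-r2 fallback above isolates the bitfield with two shifts: left to drop the bits above the field, then a logical right shift to drop the bits below it while zero-filling. Under the DCHECKed preconditions, a sketch:

    #include <stdint.h>

    // Extract 'size' bits of rs starting at 'pos' (assumes 0 < size,
    // pos < 32, pos + size <= 32, matching the DCHECKs above).
    uint32_t ExtractBits(uint32_t rs, int pos, int size) {
      uint32_t rt = rs << (32 - (pos + size));  // acts as a move when the shift is 0
      int shift_right = 32 - size;
      if (shift_right > 0) rt >>= shift_right;  // logical shift zero-fills
      return rt;
    }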
1050 1050
1051 1051
1052 void MacroAssembler::Ins(Register rt, 1052 void MacroAssembler::Ins(Register rt,
1053 Register rs, 1053 Register rs,
1054 uint16_t pos, 1054 uint16_t pos,
1055 uint16_t size) { 1055 uint16_t size) {
1056 ASSERT(pos < 32); 1056 DCHECK(pos < 32);
1057 ASSERT(pos + size <= 32); 1057 DCHECK(pos + size <= 32);
1058 ASSERT(size != 0); 1058 DCHECK(size != 0);
1059 1059
1060 if (kArchVariant == kMips32r2) { 1060 if (kArchVariant == kMips32r2) {
1061 ins_(rt, rs, pos, size); 1061 ins_(rt, rs, pos, size);
1062 } else { 1062 } else {
1063 ASSERT(!rt.is(t8) && !rs.is(t8)); 1063 DCHECK(!rt.is(t8) && !rs.is(t8));
1064 Subu(at, zero_reg, Operand(1)); 1064 Subu(at, zero_reg, Operand(1));
1065 srl(at, at, 32 - size); 1065 srl(at, at, 32 - size);
1066 and_(t8, rs, at); 1066 and_(t8, rs, at);
1067 sll(t8, t8, pos); 1067 sll(t8, t8, pos);
1068 sll(at, at, pos); 1068 sll(at, at, pos);
1069 nor(at, at, zero_reg); 1069 nor(at, at, zero_reg);
1070 and_(at, rt, at); 1070 and_(at, rt, at);
1071 or_(rt, t8, at); 1071 or_(rt, t8, at);
1072 } 1072 }
1073 } 1073 }
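Likewise, the ins_ fallback builds a field mask in at and merges the shifted source bits (held in t8) with the masked-out destination. A sketch under the same preconditions (size != 0, pos + size <= 32):

    #include <stdint.h>

    // Insert the low 'size' bits of rs into rt at bit 'pos'.
    uint32_t InsertBits(uint32_t rt, uint32_t rs, int pos, int size) {
      uint32_t mask = 0xFFFFFFFFu >> (32 - size);  // Subu/srl: 'size' ones
      uint32_t field = (rs & mask) << pos;         // and_/sll into t8
      mask <<= pos;                                // sll(at, at, pos)
      return field | (rt & ~mask);                 // nor, and_, or_
    }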
1074 1074
1075 1075
1076 void MacroAssembler::Cvt_d_uw(FPURegister fd, 1076 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1077 FPURegister fs, 1077 FPURegister fs,
1078 FPURegister scratch) { 1078 FPURegister scratch) {
1079 // Move the data from fs to t8. 1079 // Move the data from fs to t8.
1080 mfc1(t8, fs); 1080 mfc1(t8, fs);
1081 Cvt_d_uw(fd, t8, scratch); 1081 Cvt_d_uw(fd, t8, scratch);
1082 } 1082 }
1083 1083
1084 1084
1085 void MacroAssembler::Cvt_d_uw(FPURegister fd, 1085 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1086 Register rs, 1086 Register rs,
1087 FPURegister scratch) { 1087 FPURegister scratch) {
1088 // Convert rs to a FP value in fd (and fd + 1). 1088 // Convert rs to a FP value in fd (and fd + 1).
1089 // We do this by converting rs with its MSB cleared to avoid sign conversion, 1089 // We do this by converting rs with its MSB cleared to avoid sign conversion,
1090 // then adding 2^31 to the result (if needed). 1090 // then adding 2^31 to the result (if needed).
1091 1091
1092 ASSERT(!fd.is(scratch)); 1092 DCHECK(!fd.is(scratch));
1093 ASSERT(!rs.is(t9)); 1093 DCHECK(!rs.is(t9));
1094 ASSERT(!rs.is(at)); 1094 DCHECK(!rs.is(at));
1095 1095
1096 // Save rs's MSB to t9. 1096 // Save rs's MSB to t9.
1097 Ext(t9, rs, 31, 1); 1097 Ext(t9, rs, 31, 1);
1098 // Remove rs's MSB. 1098 // Remove rs's MSB.
1099 Ext(at, rs, 0, 31); 1099 Ext(at, rs, 0, 31);
1100 // Move the result to fd. 1100 // Move the result to fd.
1101 mtc1(at, fd); 1101 mtc1(at, fd);
1102 1102
1103 // Convert fd to a real FP value. 1103 // Convert fd to a real FP value.
1104 cvt_d_w(fd, fd); 1104 cvt_d_w(fd, fd);
(...skipping 63 matching lines...)
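Since cvt_d_w only converts signed words, the sequence above clears the MSB first and (in the elided lines) adds 2^31 back when that bit was set. The arithmetic, as a sketch:

    #include <stdint.h>

    // What Cvt_d_uw computes, per its comment above.
    double Uint32ToDouble(uint32_t u) {
      uint32_t msb = u >> 31;                      // Ext(t9, rs, 31, 1)
      double d = static_cast<double>(
          static_cast<int32_t>(u & 0x7FFFFFFFu));  // Ext(at, rs, 0, 31) + cvt_d_w
      if (msb) d += 2147483648.0;                  // add 2^31 if needed (elided)
      return d;
    }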
1168 mtc1(t8, FPURegister::from_code(fs.code() + 1)); 1168 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1169 } else { 1169 } else {
1170 ceil_w_d(fd, fs); 1170 ceil_w_d(fd, fs);
1171 } 1171 }
1172 } 1172 }
1173 1173
1174 1174
1175 void MacroAssembler::Trunc_uw_d(FPURegister fd, 1175 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1176 Register rs, 1176 Register rs,
1177 FPURegister scratch) { 1177 FPURegister scratch) {
1178 ASSERT(!fd.is(scratch)); 1178 DCHECK(!fd.is(scratch));
1179 ASSERT(!rs.is(at)); 1179 DCHECK(!rs.is(at));
1180 1180
1181 // Load 2^31 into scratch as its float representation. 1181 // Load 2^31 into scratch as its float representation.
1182 li(at, 0x41E00000); 1182 li(at, 0x41E00000);
1183 mtc1(at, FPURegister::from_code(scratch.code() + 1)); 1183 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1184 mtc1(zero_reg, scratch); 1184 mtc1(zero_reg, scratch);
1185 // Test if scratch > fd. 1185 // Test if scratch > fd.
1186 // If fd < 2^31 we can convert it normally. 1186 // If fd < 2^31 we can convert it normally.
1187 Label simple_convert; 1187 Label simple_convert;
1188 BranchF(&simple_convert, NULL, lt, fd, scratch); 1188 BranchF(&simple_convert, NULL, lt, fd, scratch);
1189 1189
(...skipping 20 matching lines...)
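The constant 0x41E00000 above is the high word of the IEEE-754 double 2^31; the low word is zeroed via mtc1(zero_reg, scratch). A quick sketch verifying the encoding:

    #include <stdint.h>
    #include <string.h>

    // High word 0x41E00000 plus a zero low word encodes 2^31 as a double.
    double TwoPow31() {
      uint64_t bits = 0x41E0000000000000ull;
      double d;
      memcpy(&d, &bits, sizeof d);
      return d;  // 2147483648.0
    }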
1210 Condition cc, 1210 Condition cc,
1211 FPURegister cmp1, 1211 FPURegister cmp1,
1212 FPURegister cmp2, 1212 FPURegister cmp2,
1213 BranchDelaySlot bd) { 1213 BranchDelaySlot bd) {
1214 BlockTrampolinePoolScope block_trampoline_pool(this); 1214 BlockTrampolinePoolScope block_trampoline_pool(this);
1215 if (cc == al) { 1215 if (cc == al) {
1216 Branch(bd, target); 1216 Branch(bd, target);
1217 return; 1217 return;
1218 } 1218 }
1219 1219
1220 ASSERT(nan || target); 1220 DCHECK(nan || target);
1221 // Check for unordered (NaN) cases. 1221 // Check for unordered (NaN) cases.
1222 if (nan) { 1222 if (nan) {
1223 c(UN, D, cmp1, cmp2); 1223 c(UN, D, cmp1, cmp2);
1224 bc1t(nan); 1224 bc1t(nan);
1225 } 1225 }
1226 1226
1227 if (target) { 1227 if (target) {
1228 // Here NaN cases were either handled by this function or are assumed to 1228 // Here NaN cases were either handled by this function or are assumed to
1229 // have been handled by the caller. 1229 // have been handled by the caller.
1230 // Unsigned conditions are treated as their signed counterpart. 1230 // Unsigned conditions are treated as their signed counterpart.
(...skipping 95 matching lines...)
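The c(UN, D)/bc1t pair above implements the IEEE "unordered" test: branch to nan when either operand is NaN. Equivalent C++, as a sketch:

    #include <cmath>

    // True when cmp1 and cmp2 compare unordered, i.e. either is NaN.
    bool Unordered(double cmp1, double cmp2) {
      return std::isnan(cmp1) || std::isnan(cmp2);
    }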
1326 } else { 1326 } else {
1327 movn(rd, rs, rt); 1327 movn(rd, rs, rt);
1328 } 1328 }
1329 } 1329 }
1330 1330
1331 1331
1332 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) { 1332 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1333 if (kArchVariant == kLoongson) { 1333 if (kArchVariant == kLoongson) {
1334 // Tests an FP condition code and then conditionally moves rs to rd. 1334 // Tests an FP condition code and then conditionally moves rs to rd.
1335 // We do not currently use any FPU cc bit other than bit 0. 1335 // We do not currently use any FPU cc bit other than bit 0.
1336 ASSERT(cc == 0); 1336 DCHECK(cc == 0);
1337 ASSERT(!(rs.is(t8) || rd.is(t8))); 1337 DCHECK(!(rs.is(t8) || rd.is(t8)));
1338 Label done; 1338 Label done;
1339 Register scratch = t8; 1339 Register scratch = t8;
1340 // For testing purposes we need to fetch content of the FCSR register and 1340 // For testing purposes we need to fetch content of the FCSR register and
1341 // than test its cc (floating point condition code) bit (for cc = 0, it is 1341 // than test its cc (floating point condition code) bit (for cc = 0, it is
1342 // 24. bit of the FCSR). 1342 // 24. bit of the FCSR).
1343 cfc1(scratch, FCSR); 1343 cfc1(scratch, FCSR);
1344 // For the MIPS I, II and III architectures, the contents of scratch is 1344 // For the MIPS I, II and III architectures, the contents of scratch is
1345 // UNPREDICTABLE for the instruction immediately following CFC1. 1345 // UNPREDICTABLE for the instruction immediately following CFC1.
1346 nop(); 1346 nop();
1347 srl(scratch, scratch, 16); 1347 srl(scratch, scratch, 16);
1348 andi(scratch, scratch, 0x0080); 1348 andi(scratch, scratch, 0x0080);
1349 Branch(&done, eq, scratch, Operand(zero_reg)); 1349 Branch(&done, eq, scratch, Operand(zero_reg));
1350 mov(rd, rs); 1350 mov(rd, rs);
1351 bind(&done); 1351 bind(&done);
1352 } else { 1352 } else {
1353 movt(rd, rs, cc); 1353 movt(rd, rs, cc);
1354 } 1354 }
1355 } 1355 }
1356 1356
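The srl/andi pair above isolates FCSR condition bit 0, which lives at bit position 23 (the 24th bit of the register). A sketch:

    #include <stdint.h>

    // Mirrors the scratch computation in Movt/Movf: shift right 16 and test
    // bit 7 of the result, i.e. FCSR bit 23 (condition code 0).
    bool FcsrConditionBit0(uint32_t fcsr) {
      return ((fcsr >> 16) & 0x0080) != 0;
    }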
1357 1357
1358 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { 1358 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1359 if (kArchVariant == kLoongson) { 1359 if (kArchVariant == kLoongson) {
1360 // Tests an FP condition code and then conditionally moves rs to rd. 1360 // Tests an FP condition code and then conditionally moves rs to rd.
1361 // We do not currently use any FPU cc bit other than bit 0. 1361 // We do not currently use any FPU cc bit other than bit 0.
1362 ASSERT(cc == 0); 1362 DCHECK(cc == 0);
1363 ASSERT(!(rs.is(t8) || rd.is(t8))); 1363 DCHECK(!(rs.is(t8) || rd.is(t8)));
1364 Label done; 1364 Label done;
1365 Register scratch = t8; 1365 Register scratch = t8;
1366 // For testing purposes we need to fetch the content of the FCSR register and 1366 // For testing purposes we need to fetch the content of the FCSR register and
1367 // then test its cc (floating point condition code) bit (for cc = 0, it is 1367 // then test its cc (floating point condition code) bit (for cc = 0, it is
1368 // the 24th bit of the FCSR). 1368 // the 24th bit of the FCSR).
1369 cfc1(scratch, FCSR); 1369 cfc1(scratch, FCSR);
1370 // For the MIPS I, II and III architectures, the contents of scratch are 1370 // For the MIPS I, II and III architectures, the contents of scratch are
1371 // UNPREDICTABLE for the instruction immediately following CFC1. 1371 // UNPREDICTABLE for the instruction immediately following CFC1.
1372 nop(); 1372 nop();
1373 srl(scratch, scratch, 16); 1373 srl(scratch, scratch, 16);
1374 andi(scratch, scratch, 0x0080); 1374 andi(scratch, scratch, 0x0080);
1375 Branch(&done, ne, scratch, Operand(zero_reg)); 1375 Branch(&done, ne, scratch, Operand(zero_reg));
1376 mov(rd, rs); 1376 mov(rd, rs);
1377 bind(&done); 1377 bind(&done);
1378 } else { 1378 } else {
1379 movf(rd, rs, cc); 1379 movf(rd, rs, cc);
1380 } 1380 }
1381 } 1381 }
1382 1382
1383 1383
1384 void MacroAssembler::Clz(Register rd, Register rs) { 1384 void MacroAssembler::Clz(Register rd, Register rs) {
1385 if (kArchVariant == kLoongson) { 1385 if (kArchVariant == kLoongson) {
1386 ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9))); 1386 DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1387 Register mask = t8; 1387 Register mask = t8;
1388 Register scratch = t9; 1388 Register scratch = t9;
1389 Label loop, end; 1389 Label loop, end;
1390 mov(at, rs); 1390 mov(at, rs);
1391 mov(rd, zero_reg); 1391 mov(rd, zero_reg);
1392 lui(mask, 0x8000); 1392 lui(mask, 0x8000);
1393 bind(&loop); 1393 bind(&loop);
1394 and_(scratch, at, mask); 1394 and_(scratch, at, mask);
1395 Branch(&end, ne, scratch, Operand(zero_reg)); 1395 Branch(&end, ne, scratch, Operand(zero_reg));
1396 addiu(rd, rd, 1); 1396 addiu(rd, rd, 1);
1397 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT); 1397 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1398 srl(mask, mask, 1); 1398 srl(mask, mask, 1);
1399 bind(&end); 1399 bind(&end);
1400 } else { 1400 } else {
1401 clz(rd, rs); 1401 clz(rd, rs);
1402 } 1402 }
1403 } 1403 }
1404 1404
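The Loongson fallback above counts leading zeros by walking a one-bit mask down from the MSB (with the srl placed in the branch delay slot). Its effect, as a sketch:

    #include <stdint.h>

    // Software count-leading-zeros; returns 32 for x == 0.
    int CountLeadingZeros32(uint32_t x) {
      int n = 0;
      for (uint32_t mask = 0x80000000u; mask != 0 && (x & mask) == 0; mask >>= 1) {
        ++n;
      }
      return n;
    }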
1405 1405
1406 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, 1406 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1407 Register result, 1407 Register result,
1408 DoubleRegister double_input, 1408 DoubleRegister double_input,
1409 Register scratch, 1409 Register scratch,
1410 DoubleRegister double_scratch, 1410 DoubleRegister double_scratch,
1411 Register except_flag, 1411 Register except_flag,
1412 CheckForInexactConversion check_inexact) { 1412 CheckForInexactConversion check_inexact) {
1413 ASSERT(!result.is(scratch)); 1413 DCHECK(!result.is(scratch));
1414 ASSERT(!double_input.is(double_scratch)); 1414 DCHECK(!double_input.is(double_scratch));
1415 ASSERT(!except_flag.is(scratch)); 1415 DCHECK(!except_flag.is(scratch));
1416 1416
1417 Label done; 1417 Label done;
1418 1418
1419 // Clear the except flag (0 = no exception) 1419 // Clear the except flag (0 = no exception)
1420 mov(except_flag, zero_reg); 1420 mov(except_flag, zero_reg);
1421 1421
1422 // Test for values that can be exactly represented as a signed 32-bit integer. 1422 // Test for values that can be exactly represented as a signed 32-bit integer.
1423 cvt_w_d(double_scratch, double_input); 1423 cvt_w_d(double_scratch, double_input);
1424 mfc1(result, double_scratch); 1424 mfc1(result, double_scratch);
1425 cvt_d_w(double_scratch, double_scratch); 1425 cvt_d_w(double_scratch, double_scratch);
(...skipping 83 matching lines...)
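The cvt_w_d/cvt_d_w round-trip above is the usual exactness test: if converting to int32 and back reproduces the input, the double was exactly representable. A sketch using C++'s truncating conversion (the real code honors the requested FPURoundingMode and inspects FCSR exception flags instead):

    #include <stdint.h>

    // Assumes 'input' is within int32 range; out-of-range values are caught
    // via the exception flags in the actual macro.
    bool IsExactInt32(double input) {
      int32_t truncated = static_cast<int32_t>(input);  // cvt_w_d
      return static_cast<double>(truncated) == input;   // cvt_d_w + compare
    }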
1509 Addu(sp, sp, Operand(kDoubleSize)); 1509 Addu(sp, sp, Operand(kDoubleSize));
1510 pop(ra); 1510 pop(ra);
1511 1511
1512 bind(&done); 1512 bind(&done);
1513 } 1513 }
1514 1514
1515 1515
1516 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { 1516 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1517 Label done; 1517 Label done;
1518 DoubleRegister double_scratch = f12; 1518 DoubleRegister double_scratch = f12;
1519 ASSERT(!result.is(object)); 1519 DCHECK(!result.is(object));
1520 1520
1521 ldc1(double_scratch, 1521 ldc1(double_scratch,
1522 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); 1522 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1523 TryInlineTruncateDoubleToI(result, double_scratch, &done); 1523 TryInlineTruncateDoubleToI(result, double_scratch, &done);
1524 1524
1525 // If we fell through then the inline version didn't succeed - call stub instead. 1525 // If we fell through then the inline version didn't succeed - call stub instead.
1526 push(ra); 1526 push(ra);
1527 DoubleToIStub stub(isolate(), 1527 DoubleToIStub stub(isolate(),
1528 object, 1528 object,
1529 result, 1529 result,
1530 HeapNumber::kValueOffset - kHeapObjectTag, 1530 HeapNumber::kValueOffset - kHeapObjectTag,
1531 true, 1531 true,
1532 true); 1532 true);
1533 CallStub(&stub); 1533 CallStub(&stub);
1534 pop(ra); 1534 pop(ra);
1535 1535
1536 bind(&done); 1536 bind(&done);
1537 } 1537 }
1538 1538
1539 1539
1540 void MacroAssembler::TruncateNumberToI(Register object, 1540 void MacroAssembler::TruncateNumberToI(Register object,
1541 Register result, 1541 Register result,
1542 Register heap_number_map, 1542 Register heap_number_map,
1543 Register scratch, 1543 Register scratch,
1544 Label* not_number) { 1544 Label* not_number) {
1545 Label done; 1545 Label done;
1546 ASSERT(!result.is(object)); 1546 DCHECK(!result.is(object));
1547 1547
1548 UntagAndJumpIfSmi(result, object, &done); 1548 UntagAndJumpIfSmi(result, object, &done);
1549 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); 1549 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1550 TruncateHeapNumberToI(result, object); 1550 TruncateHeapNumberToI(result, object);
1551 1551
1552 bind(&done); 1552 bind(&done);
1553 } 1553 }
1554 1554
1555 1555
1556 void MacroAssembler::GetLeastBitsFromSmi(Register dst, 1556 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1557 Register src, 1557 Register src,
1558 int num_least_bits) { 1558 int num_least_bits) {
1559 Ext(dst, src, kSmiTagSize, num_least_bits); 1559 Ext(dst, src, kSmiTagSize, num_least_bits);
1560 } 1560 }
1561 1561
1562 1562
1563 void MacroAssembler::GetLeastBitsFromInt32(Register dst, 1563 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1564 Register src, 1564 Register src,
1565 int num_least_bits) { 1565 int num_least_bits) {
1566 And(dst, src, Operand((1 << num_least_bits) - 1)); 1566 And(dst, src, Operand((1 << num_least_bits) - 1));
1567 } 1567 }
1568 1568
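Both helpers above extract the n low bits, but the Smi variant starts above the one-bit Smi tag (kSmiTagSize == 1 on 32-bit V8). Sketched:

    #include <stdint.h>

    // GetLeastBitsFromInt32: plain low-bit mask (assumes n < 32).
    uint32_t LeastBitsFromInt32(uint32_t src, int n) {
      return src & ((1u << n) - 1);
    }

    // GetLeastBitsFromSmi: same, but skipping the Smi tag bit first.
    uint32_t LeastBitsFromSmi(uint32_t smi, int n) {
      return (smi >> 1) & ((1u << n) - 1);  // Ext(dst, src, kSmiTagSize, n)
    }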
1569 1569
1570 // Emulated conditional branches do not emit a nop in the branch delay slot. 1570 // Emulated conditional branches do not emit a nop in the branch delay slot.
1571 // 1571 //
1572 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. 1572 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1573 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ 1573 #define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
1574 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ 1574 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1575 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) 1575 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1576 1576
1577 1577
1578 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { 1578 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1579 BranchShort(offset, bdslot); 1579 BranchShort(offset, bdslot);
1580 } 1580 }
1581 1581
1582 1582
1583 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs, 1583 void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
(...skipping 71 matching lines...)
1655 // Emit a nop in the branch delay slot if required. 1655 // Emit a nop in the branch delay slot if required.
1656 if (bdslot == PROTECT) 1656 if (bdslot == PROTECT)
1657 nop(); 1657 nop();
1658 } 1658 }
1659 1659
1660 1660
1661 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, 1661 void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1662 const Operand& rt, 1662 const Operand& rt,
1663 BranchDelaySlot bdslot) { 1663 BranchDelaySlot bdslot) {
1664 BRANCH_ARGS_CHECK(cond, rs, rt); 1664 BRANCH_ARGS_CHECK(cond, rs, rt);
1665 ASSERT(!rs.is(zero_reg)); 1665 DCHECK(!rs.is(zero_reg));
1666 Register r2 = no_reg; 1666 Register r2 = no_reg;
1667 Register scratch = at; 1667 Register scratch = at;
1668 1668
1669 if (rt.is_reg()) { 1669 if (rt.is_reg()) {
1670 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or 1670 // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1671 // rt. 1671 // rt.
1672 BlockTrampolinePoolScope block_trampoline_pool(this); 1672 BlockTrampolinePoolScope block_trampoline_pool(this);
1673 r2 = rt.rm_; 1673 r2 = rt.rm_;
1674 switch (cond) { 1674 switch (cond) {
1675 case cc_always: 1675 case cc_always:
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after
1755 // Be careful to always use shifted_branch_offset only just before the 1755 // Be careful to always use shifted_branch_offset only just before the
1756 // branch instruction, as the location will be remembered for patching the 1756 // branch instruction, as the location will be remembered for patching the
1757 // target. 1757 // target.
1758 BlockTrampolinePoolScope block_trampoline_pool(this); 1758 BlockTrampolinePoolScope block_trampoline_pool(this);
1759 switch (cond) { 1759 switch (cond) {
1760 case cc_always: 1760 case cc_always:
1761 b(offset); 1761 b(offset);
1762 break; 1762 break;
1763 case eq: 1763 case eq:
1764 // We don't want any other register but scratch clobbered. 1764 // We don't want any other register but scratch clobbered.
1765 ASSERT(!scratch.is(rs)); 1765 DCHECK(!scratch.is(rs));
1766 r2 = scratch; 1766 r2 = scratch;
1767 li(r2, rt); 1767 li(r2, rt);
1768 beq(rs, r2, offset); 1768 beq(rs, r2, offset);
1769 break; 1769 break;
1770 case ne: 1770 case ne:
1771 // We don't want any other register but scratch clobbered. 1771 // We don't want any other register but scratch clobbered.
1772 ASSERT(!scratch.is(rs)); 1772 DCHECK(!scratch.is(rs));
1773 r2 = scratch; 1773 r2 = scratch;
1774 li(r2, rt); 1774 li(r2, rt);
1775 bne(rs, r2, offset); 1775 bne(rs, r2, offset);
1776 break; 1776 break;
1777 // Signed comparison. 1777 // Signed comparison.
1778 case greater: 1778 case greater:
1779 if (rt.imm32_ == 0) { 1779 if (rt.imm32_ == 0) {
1780 bgtz(rs, offset); 1780 bgtz(rs, offset);
1781 } else { 1781 } else {
1782 r2 = scratch; 1782 r2 = scratch;
(...skipping 224 matching lines...)
2007 // Be careful to always use shifted_branch_offset only just before the 2007 // Be careful to always use shifted_branch_offset only just before the
2008 // branch instruction, as the location will be remembered for patching the 2008 // branch instruction, as the location will be remembered for patching the
2009 // target. 2009 // target.
2010 BlockTrampolinePoolScope block_trampoline_pool(this); 2010 BlockTrampolinePoolScope block_trampoline_pool(this);
2011 switch (cond) { 2011 switch (cond) {
2012 case cc_always: 2012 case cc_always:
2013 offset = shifted_branch_offset(L, false); 2013 offset = shifted_branch_offset(L, false);
2014 b(offset); 2014 b(offset);
2015 break; 2015 break;
2016 case eq: 2016 case eq:
2017 ASSERT(!scratch.is(rs)); 2017 DCHECK(!scratch.is(rs));
2018 r2 = scratch; 2018 r2 = scratch;
2019 li(r2, rt); 2019 li(r2, rt);
2020 offset = shifted_branch_offset(L, false); 2020 offset = shifted_branch_offset(L, false);
2021 beq(rs, r2, offset); 2021 beq(rs, r2, offset);
2022 break; 2022 break;
2023 case ne: 2023 case ne:
2024 ASSERT(!scratch.is(rs)); 2024 DCHECK(!scratch.is(rs));
2025 r2 = scratch; 2025 r2 = scratch;
2026 li(r2, rt); 2026 li(r2, rt);
2027 offset = shifted_branch_offset(L, false); 2027 offset = shifted_branch_offset(L, false);
2028 bne(rs, r2, offset); 2028 bne(rs, r2, offset);
2029 break; 2029 break;
2030 // Signed comparison. 2030 // Signed comparison.
2031 case greater: 2031 case greater:
2032 if (rt.imm32_ == 0) { 2032 if (rt.imm32_ == 0) {
2033 offset = shifted_branch_offset(L, false); 2033 offset = shifted_branch_offset(L, false);
2034 bgtz(rs, offset); 2034 bgtz(rs, offset);
2035 } else { 2035 } else {
2036 ASSERT(!scratch.is(rs)); 2036 DCHECK(!scratch.is(rs));
2037 r2 = scratch; 2037 r2 = scratch;
2038 li(r2, rt); 2038 li(r2, rt);
2039 slt(scratch, r2, rs); 2039 slt(scratch, r2, rs);
2040 offset = shifted_branch_offset(L, false); 2040 offset = shifted_branch_offset(L, false);
2041 bne(scratch, zero_reg, offset); 2041 bne(scratch, zero_reg, offset);
2042 } 2042 }
2043 break; 2043 break;
2044 case greater_equal: 2044 case greater_equal:
2045 if (rt.imm32_ == 0) { 2045 if (rt.imm32_ == 0) {
2046 offset = shifted_branch_offset(L, false); 2046 offset = shifted_branch_offset(L, false);
2047 bgez(rs, offset); 2047 bgez(rs, offset);
2048 } else if (is_int16(rt.imm32_)) { 2048 } else if (is_int16(rt.imm32_)) {
2049 slti(scratch, rs, rt.imm32_); 2049 slti(scratch, rs, rt.imm32_);
2050 offset = shifted_branch_offset(L, false); 2050 offset = shifted_branch_offset(L, false);
2051 beq(scratch, zero_reg, offset); 2051 beq(scratch, zero_reg, offset);
2052 } else { 2052 } else {
2053 ASSERT(!scratch.is(rs)); 2053 DCHECK(!scratch.is(rs));
2054 r2 = scratch; 2054 r2 = scratch;
2055 li(r2, rt); 2055 li(r2, rt);
2056 slt(scratch, rs, r2); 2056 slt(scratch, rs, r2);
2057 offset = shifted_branch_offset(L, false); 2057 offset = shifted_branch_offset(L, false);
2058 beq(scratch, zero_reg, offset); 2058 beq(scratch, zero_reg, offset);
2059 } 2059 }
2060 break; 2060 break;
2061 case less: 2061 case less:
2062 if (rt.imm32_ == 0) { 2062 if (rt.imm32_ == 0) {
2063 offset = shifted_branch_offset(L, false); 2063 offset = shifted_branch_offset(L, false);
2064 bltz(rs, offset); 2064 bltz(rs, offset);
2065 } else if (is_int16(rt.imm32_)) { 2065 } else if (is_int16(rt.imm32_)) {
2066 slti(scratch, rs, rt.imm32_); 2066 slti(scratch, rs, rt.imm32_);
2067 offset = shifted_branch_offset(L, false); 2067 offset = shifted_branch_offset(L, false);
2068 bne(scratch, zero_reg, offset); 2068 bne(scratch, zero_reg, offset);
2069 } else { 2069 } else {
2070 ASSERT(!scratch.is(rs)); 2070 DCHECK(!scratch.is(rs));
2071 r2 = scratch; 2071 r2 = scratch;
2072 li(r2, rt); 2072 li(r2, rt);
2073 slt(scratch, rs, r2); 2073 slt(scratch, rs, r2);
2074 offset = shifted_branch_offset(L, false); 2074 offset = shifted_branch_offset(L, false);
2075 bne(scratch, zero_reg, offset); 2075 bne(scratch, zero_reg, offset);
2076 } 2076 }
2077 break; 2077 break;
2078 case less_equal: 2078 case less_equal:
2079 if (rt.imm32_ == 0) { 2079 if (rt.imm32_ == 0) {
2080 offset = shifted_branch_offset(L, false); 2080 offset = shifted_branch_offset(L, false);
2081 blez(rs, offset); 2081 blez(rs, offset);
2082 } else { 2082 } else {
2083 ASSERT(!scratch.is(rs)); 2083 DCHECK(!scratch.is(rs));
2084 r2 = scratch; 2084 r2 = scratch;
2085 li(r2, rt); 2085 li(r2, rt);
2086 slt(scratch, r2, rs); 2086 slt(scratch, r2, rs);
2087 offset = shifted_branch_offset(L, false); 2087 offset = shifted_branch_offset(L, false);
2088 beq(scratch, zero_reg, offset); 2088 beq(scratch, zero_reg, offset);
2089 } 2089 }
2090 break; 2090 break;
2091 // Unsigned comparison. 2091 // Unsigned comparison.
2092 case Ugreater: 2092 case Ugreater:
2093 if (rt.imm32_ == 0) { 2093 if (rt.imm32_ == 0) {
2094 offset = shifted_branch_offset(L, false); 2094 offset = shifted_branch_offset(L, false);
2095 bne(rs, zero_reg, offset); 2095 bne(rs, zero_reg, offset);
2096 } else { 2096 } else {
2097 ASSERT(!scratch.is(rs)); 2097 DCHECK(!scratch.is(rs));
2098 r2 = scratch; 2098 r2 = scratch;
2099 li(r2, rt); 2099 li(r2, rt);
2100 sltu(scratch, r2, rs); 2100 sltu(scratch, r2, rs);
2101 offset = shifted_branch_offset(L, false); 2101 offset = shifted_branch_offset(L, false);
2102 bne(scratch, zero_reg, offset); 2102 bne(scratch, zero_reg, offset);
2103 } 2103 }
2104 break; 2104 break;
2105 case Ugreater_equal: 2105 case Ugreater_equal:
2106 if (rt.imm32_ == 0) { 2106 if (rt.imm32_ == 0) {
2107 offset = shifted_branch_offset(L, false); 2107 offset = shifted_branch_offset(L, false);
2108 bgez(rs, offset); 2108 bgez(rs, offset);
2109 } else if (is_int16(rt.imm32_)) { 2109 } else if (is_int16(rt.imm32_)) {
2110 sltiu(scratch, rs, rt.imm32_); 2110 sltiu(scratch, rs, rt.imm32_);
2111 offset = shifted_branch_offset(L, false); 2111 offset = shifted_branch_offset(L, false);
2112 beq(scratch, zero_reg, offset); 2112 beq(scratch, zero_reg, offset);
2113 } else { 2113 } else {
2114 ASSERT(!scratch.is(rs)); 2114 DCHECK(!scratch.is(rs));
2115 r2 = scratch; 2115 r2 = scratch;
2116 li(r2, rt); 2116 li(r2, rt);
2117 sltu(scratch, rs, r2); 2117 sltu(scratch, rs, r2);
2118 offset = shifted_branch_offset(L, false); 2118 offset = shifted_branch_offset(L, false);
2119 beq(scratch, zero_reg, offset); 2119 beq(scratch, zero_reg, offset);
2120 } 2120 }
2121 break; 2121 break;
2122 case Uless: 2122 case Uless:
2123 if (rt.imm32_ == 0) { 2123 if (rt.imm32_ == 0) {
2124 // No code needs to be emitted. 2124 // No code needs to be emitted.
2125 return; 2125 return;
2126 } else if (is_int16(rt.imm32_)) { 2126 } else if (is_int16(rt.imm32_)) {
2127 sltiu(scratch, rs, rt.imm32_); 2127 sltiu(scratch, rs, rt.imm32_);
2128 offset = shifted_branch_offset(L, false); 2128 offset = shifted_branch_offset(L, false);
2129 bne(scratch, zero_reg, offset); 2129 bne(scratch, zero_reg, offset);
2130 } else { 2130 } else {
2131 ASSERT(!scratch.is(rs)); 2131 DCHECK(!scratch.is(rs));
2132 r2 = scratch; 2132 r2 = scratch;
2133 li(r2, rt); 2133 li(r2, rt);
2134 sltu(scratch, rs, r2); 2134 sltu(scratch, rs, r2);
2135 offset = shifted_branch_offset(L, false); 2135 offset = shifted_branch_offset(L, false);
2136 bne(scratch, zero_reg, offset); 2136 bne(scratch, zero_reg, offset);
2137 } 2137 }
2138 break; 2138 break;
2139 case Uless_equal: 2139 case Uless_equal:
2140 if (rt.imm32_ == 0) { 2140 if (rt.imm32_ == 0) {
2141 offset = shifted_branch_offset(L, false); 2141 offset = shifted_branch_offset(L, false);
2142 beq(rs, zero_reg, offset); 2142 beq(rs, zero_reg, offset);
2143 } else { 2143 } else {
2144 ASSERT(!scratch.is(rs)); 2144 DCHECK(!scratch.is(rs));
2145 r2 = scratch; 2145 r2 = scratch;
2146 li(r2, rt); 2146 li(r2, rt);
2147 sltu(scratch, r2, rs); 2147 sltu(scratch, r2, rs);
2148 offset = shifted_branch_offset(L, false); 2148 offset = shifted_branch_offset(L, false);
2149 beq(scratch, zero_reg, offset); 2149 beq(scratch, zero_reg, offset);
2150 } 2150 }
2151 break; 2151 break;
2152 default: 2152 default:
2153 UNREACHABLE(); 2153 UNREACHABLE();
2154 } 2154 }
2155 } 2155 }
2156 // Check that offset actually fits in an int16_t. 2156 // Check that offset actually fits in an int16_t.
2157 ASSERT(is_int16(offset)); 2157 DCHECK(is_int16(offset));
2158 // Emit a nop in the branch delay slot if required. 2158 // Emit a nop in the branch delay slot if required.
2159 if (bdslot == PROTECT) 2159 if (bdslot == PROTECT)
2160 nop(); 2160 nop();
2161 } 2161 }
2162 2162
2163 2163
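The DCHECK(is_int16(offset)) above guards an architectural limit: MIPS conditional branches encode a signed 16-bit instruction offset. A minimal standalone sketch of that range check (FitsInt16 is an illustrative stand-in for V8's is_int16 helper):

#include <cassert>
#include <cstdint>

// True when v fits in a signed 16-bit field, as a MIPS branch offset must.
static bool FitsInt16(int32_t v) { return v >= INT16_MIN && v <= INT16_MAX; }

int main() {
  assert(FitsInt16(32767));    // largest encodable offset
  assert(!FitsInt16(32768));   // one past the limit; needs a long branch
  return 0;
}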
2164 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) { 2164 void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2165 BranchAndLinkShort(offset, bdslot); 2165 BranchAndLinkShort(offset, bdslot);
2166 } 2166 }
2167 2167
(...skipping 241 matching lines...)
2409 addiu(scratch, scratch, -1); 2409 addiu(scratch, scratch, -1);
2410 offset = shifted_branch_offset(L, false); 2410 offset = shifted_branch_offset(L, false);
2411 bltzal(scratch, offset); 2411 bltzal(scratch, offset);
2412 break; 2412 break;
2413 2413
2414 default: 2414 default:
2415 UNREACHABLE(); 2415 UNREACHABLE();
2416 } 2416 }
2417 } 2417 }
2418 // Check that the offset actually fits in an int16_t. 2418 // Check that the offset actually fits in an int16_t.
2419 ASSERT(is_int16(offset)); 2419 DCHECK(is_int16(offset));
2420 2420
2421 // Emit a nop in the branch delay slot if required. 2421 // Emit a nop in the branch delay slot if required.
2422 if (bdslot == PROTECT) 2422 if (bdslot == PROTECT)
2423 nop(); 2423 nop();
2424 } 2424 }
2425 2425
2426 2426
2427 void MacroAssembler::Jump(Register target, 2427 void MacroAssembler::Jump(Register target,
2428 Condition cond, 2428 Condition cond,
2429 Register rs, 2429 Register rs,
(...skipping 30 matching lines...)
2460 bind(&skip); 2460 bind(&skip);
2461 } 2461 }
2462 2462
2463 2463
2464 void MacroAssembler::Jump(Address target, 2464 void MacroAssembler::Jump(Address target,
2465 RelocInfo::Mode rmode, 2465 RelocInfo::Mode rmode,
2466 Condition cond, 2466 Condition cond,
2467 Register rs, 2467 Register rs,
2468 const Operand& rt, 2468 const Operand& rt,
2469 BranchDelaySlot bd) { 2469 BranchDelaySlot bd) {
2470 ASSERT(!RelocInfo::IsCodeTarget(rmode)); 2470 DCHECK(!RelocInfo::IsCodeTarget(rmode));
2471 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd); 2471 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2472 } 2472 }
2473 2473
2474 2474
2475 void MacroAssembler::Jump(Handle<Code> code, 2475 void MacroAssembler::Jump(Handle<Code> code,
2476 RelocInfo::Mode rmode, 2476 RelocInfo::Mode rmode,
2477 Condition cond, 2477 Condition cond,
2478 Register rs, 2478 Register rs,
2479 const Operand& rt, 2479 const Operand& rt,
2480 BranchDelaySlot bd) { 2480 BranchDelaySlot bd) {
2481 ASSERT(RelocInfo::IsCodeTarget(rmode)); 2481 DCHECK(RelocInfo::IsCodeTarget(rmode));
2482 AllowDeferredHandleDereference embedding_raw_address; 2482 AllowDeferredHandleDereference embedding_raw_address;
2483 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd); 2483 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2484 } 2484 }
2485 2485
2486 2486
2487 int MacroAssembler::CallSize(Register target, 2487 int MacroAssembler::CallSize(Register target,
2488 Condition cond, 2488 Condition cond,
2489 Register rs, 2489 Register rs,
2490 const Operand& rt, 2490 const Operand& rt,
2491 BranchDelaySlot bd) { 2491 BranchDelaySlot bd) {
(...skipping 25 matching lines...)
2517 jalr(target); 2517 jalr(target);
2518 } else { 2518 } else {
2519 BRANCH_ARGS_CHECK(cond, rs, rt); 2519 BRANCH_ARGS_CHECK(cond, rs, rt);
2520 Branch(2, NegateCondition(cond), rs, rt); 2520 Branch(2, NegateCondition(cond), rs, rt);
2521 jalr(target); 2521 jalr(target);
2522 } 2522 }
2523 // Emit a nop in the branch delay slot if required. 2523 // Emit a nop in the branch delay slot if required.
2524 if (bd == PROTECT) 2524 if (bd == PROTECT)
2525 nop(); 2525 nop();
2526 2526
2527 ASSERT_EQ(CallSize(target, cond, rs, rt, bd), 2527 DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
2528 SizeOfCodeGeneratedSince(&start)); 2528 SizeOfCodeGeneratedSince(&start));
2529 } 2529 }
2530 2530
2531 2531
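The DCHECK_EQ closing Call() above pins the macro's emitted size to CallSize()'s prediction. A toy sketch of that predict-emit-verify pattern; TinyBuffer and the instruction words are illustrative, not V8's Assembler API:

#include <cassert>
#include <cstdint>
#include <vector>

struct TinyBuffer {
  std::vector<uint8_t> bytes;
  int pc_offset() const { return static_cast<int>(bytes.size()); }
  void Emit(uint32_t instr) {  // append one 4-byte instruction word
    for (int i = 0; i < 4; i++) bytes.push_back((instr >> (8 * i)) & 0xff);
  }
};

int main() {
  TinyBuffer buf;
  int start = buf.pc_offset();
  const int kPredictedSize = 2 * 4;  // jalr + delay-slot nop, 4 bytes each
  buf.Emit(0x0320f809);              // jalr t9 (illustrative encoding)
  buf.Emit(0x00000000);              // nop protecting the delay slot
  assert(buf.pc_offset() - start == kPredictedSize);  // the DCHECK_EQ idea
  return 0;
}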
2532 int MacroAssembler::CallSize(Address target, 2532 int MacroAssembler::CallSize(Address target,
2533 RelocInfo::Mode rmode, 2533 RelocInfo::Mode rmode,
2534 Condition cond, 2534 Condition cond,
2535 Register rs, 2535 Register rs,
2536 const Operand& rt, 2536 const Operand& rt,
2537 BranchDelaySlot bd) { 2537 BranchDelaySlot bd) {
(...skipping 10 matching lines...)
2548 BranchDelaySlot bd) { 2548 BranchDelaySlot bd) {
2549 BlockTrampolinePoolScope block_trampoline_pool(this); 2549 BlockTrampolinePoolScope block_trampoline_pool(this);
2550 Label start; 2550 Label start;
2551 bind(&start); 2551 bind(&start);
2552 int32_t target_int = reinterpret_cast<int32_t>(target); 2552 int32_t target_int = reinterpret_cast<int32_t>(target);
2553 // Must record previous source positions before the 2553 // Must record previous source positions before the
2554 // li() generates a new code target. 2554 // li() generates a new code target.
2555 positions_recorder()->WriteRecordedPositions(); 2555 positions_recorder()->WriteRecordedPositions();
2556 li(t9, Operand(target_int, rmode), CONSTANT_SIZE); 2556 li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2557 Call(t9, cond, rs, rt, bd); 2557 Call(t9, cond, rs, rt, bd);
2558 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd), 2558 DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2559 SizeOfCodeGeneratedSince(&start)); 2559 SizeOfCodeGeneratedSince(&start));
2560 } 2560 }
2561 2561
2562 2562
2563 int MacroAssembler::CallSize(Handle<Code> code, 2563 int MacroAssembler::CallSize(Handle<Code> code,
2564 RelocInfo::Mode rmode, 2564 RelocInfo::Mode rmode,
2565 TypeFeedbackId ast_id, 2565 TypeFeedbackId ast_id,
2566 Condition cond, 2566 Condition cond,
2567 Register rs, 2567 Register rs,
2568 const Operand& rt, 2568 const Operand& rt,
2569 BranchDelaySlot bd) { 2569 BranchDelaySlot bd) {
2570 AllowDeferredHandleDereference using_raw_address; 2570 AllowDeferredHandleDereference using_raw_address;
2571 return CallSize(reinterpret_cast<Address>(code.location()), 2571 return CallSize(reinterpret_cast<Address>(code.location()),
2572 rmode, cond, rs, rt, bd); 2572 rmode, cond, rs, rt, bd);
2573 } 2573 }
2574 2574
2575 2575
2576 void MacroAssembler::Call(Handle<Code> code, 2576 void MacroAssembler::Call(Handle<Code> code,
2577 RelocInfo::Mode rmode, 2577 RelocInfo::Mode rmode,
2578 TypeFeedbackId ast_id, 2578 TypeFeedbackId ast_id,
2579 Condition cond, 2579 Condition cond,
2580 Register rs, 2580 Register rs,
2581 const Operand& rt, 2581 const Operand& rt,
2582 BranchDelaySlot bd) { 2582 BranchDelaySlot bd) {
2583 BlockTrampolinePoolScope block_trampoline_pool(this); 2583 BlockTrampolinePoolScope block_trampoline_pool(this);
2584 Label start; 2584 Label start;
2585 bind(&start); 2585 bind(&start);
2586 ASSERT(RelocInfo::IsCodeTarget(rmode)); 2586 DCHECK(RelocInfo::IsCodeTarget(rmode));
2587 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { 2587 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2588 SetRecordedAstId(ast_id); 2588 SetRecordedAstId(ast_id);
2589 rmode = RelocInfo::CODE_TARGET_WITH_ID; 2589 rmode = RelocInfo::CODE_TARGET_WITH_ID;
2590 } 2590 }
2591 AllowDeferredHandleDereference embedding_raw_address; 2591 AllowDeferredHandleDereference embedding_raw_address;
2592 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd); 2592 Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2593 ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd), 2593 DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2594 SizeOfCodeGeneratedSince(&start)); 2594 SizeOfCodeGeneratedSince(&start));
2595 } 2595 }
2596 2596
2597 2597
2598 void MacroAssembler::Ret(Condition cond, 2598 void MacroAssembler::Ret(Condition cond,
2599 Register rs, 2599 Register rs,
2600 const Operand& rt, 2600 const Operand& rt,
2601 BranchDelaySlot bd) { 2601 BranchDelaySlot bd) {
2602 Jump(ra, cond, rs, rt, bd); 2602 Jump(ra, cond, rs, rt, bd);
2603 } 2603 }
(...skipping 127 matching lines...)
2731 void MacroAssembler::Push(Handle<Object> handle) { 2731 void MacroAssembler::Push(Handle<Object> handle) {
2732 li(at, Operand(handle)); 2732 li(at, Operand(handle));
2733 push(at); 2733 push(at);
2734 } 2734 }
2735 2735
2736 2736
2737 void MacroAssembler::DebugBreak() { 2737 void MacroAssembler::DebugBreak() {
2738 PrepareCEntryArgs(0); 2738 PrepareCEntryArgs(0);
2739 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate())); 2739 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2740 CEntryStub ces(isolate(), 1); 2740 CEntryStub ces(isolate(), 1);
2741 ASSERT(AllowThisStubCall(&ces)); 2741 DCHECK(AllowThisStubCall(&ces));
2742 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 2742 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2743 } 2743 }
2744 2744
2745 2745
2746 // --------------------------------------------------------------------------- 2746 // ---------------------------------------------------------------------------
2747 // Exception handling. 2747 // Exception handling.
2748 2748
2749 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, 2749 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2750 int handler_index) { 2750 int handler_index) {
2751 // Adjust this code if not the case. 2751 // Adjust this code if not the case.
2752 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); 2752 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2753 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); 2753 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2754 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); 2754 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2755 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); 2755 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2756 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); 2756 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2757 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); 2757 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2758 2758
2759 // For the JSEntry handler, we must preserve a0-a3 and s0. 2759 // For the JSEntry handler, we must preserve a0-a3 and s0.
2760 // t1-t3 are available. We will build up the handler from the bottom by 2760 // t1-t3 are available. We will build up the handler from the bottom by
2761 // pushing on the stack. 2761 // pushing on the stack.
2762 // Set up the code object (t1) and the state (t2) for pushing. 2762 // Set up the code object (t1) and the state (t2) for pushing.
2763 unsigned state = 2763 unsigned state =
2764 StackHandler::IndexField::encode(handler_index) | 2764 StackHandler::IndexField::encode(handler_index) |
2765 StackHandler::KindField::encode(kind); 2765 StackHandler::KindField::encode(kind);
2766 li(t1, Operand(CodeObject()), CONSTANT_SIZE); 2766 li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2767 li(t2, Operand(state)); 2767 li(t2, Operand(state));
2768 2768
2769 // Push the frame pointer, context, state, and code object. 2769 // Push the frame pointer, context, state, and code object.
2770 if (kind == StackHandler::JS_ENTRY) { 2770 if (kind == StackHandler::JS_ENTRY) {
2771 ASSERT_EQ(Smi::FromInt(0), 0); 2771 DCHECK_EQ(Smi::FromInt(0), 0);
2772 // The second zero_reg indicates no context. 2772 // The second zero_reg indicates no context.
2773 // The first zero_reg is the NULL frame pointer. 2773 // The first zero_reg is the NULL frame pointer.
2774 // The operands are reversed to match the order of MultiPush/Pop. 2774 // The operands are reversed to match the order of MultiPush/Pop.
2775 Push(zero_reg, zero_reg, t2, t1); 2775 Push(zero_reg, zero_reg, t2, t1);
2776 } else { 2776 } else {
2777 MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit()); 2777 MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2778 } 2778 }
2779 2779
2780 // Link the current handler as the next handler. 2780 // Link the current handler as the next handler.
2781 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); 2781 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
(...skipping 107 matching lines...)
2889 JumpToHandlerEntry(); 2889 JumpToHandlerEntry();
2890 } 2890 }
2891 2891
2892 2892
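PushTryHandler above packs the handler index and kind into a single state word via V8's BitField encoders. A self-contained sketch of that scheme; the shifts and widths here are assumptions, not StackHandler's actual layout:

#include <cassert>
#include <cstdint>

// Minimal stand-in for V8's BitField encode/decode pair.
template <int kShift, int kBits>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kBits) - 1) << kShift;
  static uint32_t encode(uint32_t v) { return (v << kShift) & kMask; }
  static uint32_t decode(uint32_t word) { return (word & kMask) >> kShift; }
};

using KindField = BitFieldSketch<0, 2>;    // assumed 2-bit kind field
using IndexField = BitFieldSketch<2, 16>;  // assumed 16-bit index field

int main() {
  uint32_t state = IndexField::encode(7) | KindField::encode(1);
  assert(IndexField::decode(state) == 7);
  assert(KindField::decode(state) == 1);
  return 0;
}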
2893 void MacroAssembler::Allocate(int object_size, 2893 void MacroAssembler::Allocate(int object_size,
2894 Register result, 2894 Register result,
2895 Register scratch1, 2895 Register scratch1,
2896 Register scratch2, 2896 Register scratch2,
2897 Label* gc_required, 2897 Label* gc_required,
2898 AllocationFlags flags) { 2898 AllocationFlags flags) {
2899 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); 2899 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2900 if (!FLAG_inline_new) { 2900 if (!FLAG_inline_new) {
2901 if (emit_debug_code()) { 2901 if (emit_debug_code()) {
2902 // Trash the registers to simulate an allocation failure. 2902 // Trash the registers to simulate an allocation failure.
2903 li(result, 0x7091); 2903 li(result, 0x7091);
2904 li(scratch1, 0x7191); 2904 li(scratch1, 0x7191);
2905 li(scratch2, 0x7291); 2905 li(scratch2, 0x7291);
2906 } 2906 }
2907 jmp(gc_required); 2907 jmp(gc_required);
2908 return; 2908 return;
2909 } 2909 }
2910 2910
2911 ASSERT(!result.is(scratch1)); 2911 DCHECK(!result.is(scratch1));
2912 ASSERT(!result.is(scratch2)); 2912 DCHECK(!result.is(scratch2));
2913 ASSERT(!scratch1.is(scratch2)); 2913 DCHECK(!scratch1.is(scratch2));
2914 ASSERT(!scratch1.is(t9)); 2914 DCHECK(!scratch1.is(t9));
2915 ASSERT(!scratch2.is(t9)); 2915 DCHECK(!scratch2.is(t9));
2916 ASSERT(!result.is(t9)); 2916 DCHECK(!result.is(t9));
2917 2917
2918 // Make object size into bytes. 2918 // Make object size into bytes.
2919 if ((flags & SIZE_IN_WORDS) != 0) { 2919 if ((flags & SIZE_IN_WORDS) != 0) {
2920 object_size *= kPointerSize; 2920 object_size *= kPointerSize;
2921 } 2921 }
2922 ASSERT_EQ(0, object_size & kObjectAlignmentMask); 2922 DCHECK_EQ(0, object_size & kObjectAlignmentMask);
2923 2923
2924 // Check relative positions of allocation top and limit addresses. 2924 // Check relative positions of allocation top and limit addresses.
2925 // ARM adds additional checks to make sure the ldm instruction can be 2925 // ARM adds additional checks to make sure the ldm instruction can be
2926 // used. On MIPS we don't have ldm so we don't need additional checks either. 2926 // used. On MIPS we don't have ldm so we don't need additional checks either.
2927 ExternalReference allocation_top = 2927 ExternalReference allocation_top =
2928 AllocationUtils::GetAllocationTopReference(isolate(), flags); 2928 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2929 ExternalReference allocation_limit = 2929 ExternalReference allocation_limit =
2930 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 2930 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2931 2931
2932 intptr_t top = 2932 intptr_t top =
2933 reinterpret_cast<intptr_t>(allocation_top.address()); 2933 reinterpret_cast<intptr_t>(allocation_top.address());
2934 intptr_t limit = 2934 intptr_t limit =
2935 reinterpret_cast<intptr_t>(allocation_limit.address()); 2935 reinterpret_cast<intptr_t>(allocation_limit.address());
2936 ASSERT((limit - top) == kPointerSize); 2936 DCHECK((limit - top) == kPointerSize);
2937 2937
2938 // Set up allocation top address and object size registers. 2938 // Set up allocation top address and object size registers.
2939 Register topaddr = scratch1; 2939 Register topaddr = scratch1;
2940 li(topaddr, Operand(allocation_top)); 2940 li(topaddr, Operand(allocation_top));
2941 2941
2942 // This code stores a temporary value in t9. 2942 // This code stores a temporary value in t9.
2943 if ((flags & RESULT_CONTAINS_TOP) == 0) { 2943 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2944 // Load allocation top into result and allocation limit into t9. 2944 // Load allocation top into result and allocation limit into t9.
2945 lw(result, MemOperand(topaddr)); 2945 lw(result, MemOperand(topaddr));
2946 lw(t9, MemOperand(topaddr, kPointerSize)); 2946 lw(t9, MemOperand(topaddr, kPointerSize));
2947 } else { 2947 } else {
2948 if (emit_debug_code()) { 2948 if (emit_debug_code()) {
2949 // Assert that result actually contains top on entry. t9 is used 2949 // Assert that result actually contains top on entry. t9 is used
2950 // immediately below, so this use of t9 does not cause a difference in 2950 // immediately below, so this use of t9 does not cause a difference in
2951 // register content between debug and release modes. 2951 // register content between debug and release modes.
2952 lw(t9, MemOperand(topaddr)); 2952 lw(t9, MemOperand(topaddr));
2953 Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); 2953 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2954 } 2954 }
2955 // Load allocation limit into t9. Result already contains allocation top. 2955 // Load allocation limit into t9. Result already contains allocation top.
2956 lw(t9, MemOperand(topaddr, limit - top)); 2956 lw(t9, MemOperand(topaddr, limit - top));
2957 } 2957 }
2958 2958
2959 if ((flags & DOUBLE_ALIGNMENT) != 0) { 2959 if ((flags & DOUBLE_ALIGNMENT) != 0) {
2960 // Align the next allocation. Storing the filler map without checking top is 2960 // Align the next allocation. Storing the filler map without checking top is
2961 // safe in new-space because the limit of the heap is aligned there. 2961 // safe in new-space because the limit of the heap is aligned there.
2962 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); 2962 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2963 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); 2963 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
2964 And(scratch2, result, Operand(kDoubleAlignmentMask)); 2964 And(scratch2, result, Operand(kDoubleAlignmentMask));
2965 Label aligned; 2965 Label aligned;
2966 Branch(&aligned, eq, scratch2, Operand(zero_reg)); 2966 Branch(&aligned, eq, scratch2, Operand(zero_reg));
2967 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { 2967 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2968 Branch(gc_required, Ugreater_equal, result, Operand(t9)); 2968 Branch(gc_required, Ugreater_equal, result, Operand(t9));
2969 } 2969 }
2970 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); 2970 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2971 sw(scratch2, MemOperand(result)); 2971 sw(scratch2, MemOperand(result));
2972 Addu(result, result, Operand(kDoubleSize / 2)); 2972 Addu(result, result, Operand(kDoubleSize / 2));
2973 bind(&aligned); 2973 bind(&aligned);
(...skipping 22 matching lines...)
2996 if (emit_debug_code()) { 2996 if (emit_debug_code()) {
2997 // Trash the registers to simulate an allocation failure. 2997 // Trash the registers to simulate an allocation failure.
2998 li(result, 0x7091); 2998 li(result, 0x7091);
2999 li(scratch1, 0x7191); 2999 li(scratch1, 0x7191);
3000 li(scratch2, 0x7291); 3000 li(scratch2, 0x7291);
3001 } 3001 }
3002 jmp(gc_required); 3002 jmp(gc_required);
3003 return; 3003 return;
3004 } 3004 }
3005 3005
3006 ASSERT(!result.is(scratch1)); 3006 DCHECK(!result.is(scratch1));
3007 ASSERT(!result.is(scratch2)); 3007 DCHECK(!result.is(scratch2));
3008 ASSERT(!scratch1.is(scratch2)); 3008 DCHECK(!scratch1.is(scratch2));
3009 ASSERT(!object_size.is(t9)); 3009 DCHECK(!object_size.is(t9));
3010 ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9)); 3010 DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
3011 3011
3012 // Check relative positions of allocation top and limit addresses. 3012 // Check relative positions of allocation top and limit addresses.
3013 // ARM adds additional checks to make sure the ldm instruction can be 3013 // ARM adds additional checks to make sure the ldm instruction can be
3014 // used. On MIPS we don't have ldm so we don't need additional checks either. 3014 // used. On MIPS we don't have ldm so we don't need additional checks either.
3015 ExternalReference allocation_top = 3015 ExternalReference allocation_top =
3016 AllocationUtils::GetAllocationTopReference(isolate(), flags); 3016 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3017 ExternalReference allocation_limit = 3017 ExternalReference allocation_limit =
3018 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 3018 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3019 intptr_t top = 3019 intptr_t top =
3020 reinterpret_cast<intptr_t>(allocation_top.address()); 3020 reinterpret_cast<intptr_t>(allocation_top.address());
3021 intptr_t limit = 3021 intptr_t limit =
3022 reinterpret_cast<intptr_t>(allocation_limit.address()); 3022 reinterpret_cast<intptr_t>(allocation_limit.address());
3023 ASSERT((limit - top) == kPointerSize); 3023 DCHECK((limit - top) == kPointerSize);
3024 3024
3025 // Set up allocation top address and object size registers. 3025 // Set up allocation top address and object size registers.
3026 Register topaddr = scratch1; 3026 Register topaddr = scratch1;
3027 li(topaddr, Operand(allocation_top)); 3027 li(topaddr, Operand(allocation_top));
3028 3028
3029 // This code stores a temporary value in t9. 3029 // This code stores a temporary value in t9.
3030 if ((flags & RESULT_CONTAINS_TOP) == 0) { 3030 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3031 // Load allocation top into result and allocation limit into t9. 3031 // Load allocation top into result and allocation limit into t9.
3032 lw(result, MemOperand(topaddr)); 3032 lw(result, MemOperand(topaddr));
3033 lw(t9, MemOperand(topaddr, kPointerSize)); 3033 lw(t9, MemOperand(topaddr, kPointerSize));
3034 } else { 3034 } else {
3035 if (emit_debug_code()) { 3035 if (emit_debug_code()) {
3036 // Assert that result actually contains top on entry. t9 is used 3036 // Assert that result actually contains top on entry. t9 is used
3037 // immediately below, so this use of t9 does not cause a difference in 3037 // immediately below, so this use of t9 does not cause a difference in
3038 // register content between debug and release modes. 3038 // register content between debug and release modes.
3039 lw(t9, MemOperand(topaddr)); 3039 lw(t9, MemOperand(topaddr));
3040 Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); 3040 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3041 } 3041 }
3042 // Load allocation limit into t9. Result already contains allocation top. 3042 // Load allocation limit into t9. Result already contains allocation top.
3043 lw(t9, MemOperand(topaddr, limit - top)); 3043 lw(t9, MemOperand(topaddr, limit - top));
3044 } 3044 }
3045 3045
3046 if ((flags & DOUBLE_ALIGNMENT) != 0) { 3046 if ((flags & DOUBLE_ALIGNMENT) != 0) {
3047 // Align the next allocation. Storing the filler map without checking top is 3047 // Align the next allocation. Storing the filler map without checking top is
3048 // safe in new-space because the limit of the heap is aligned there. 3048 // safe in new-space because the limit of the heap is aligned there.
3049 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); 3049 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
3050 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); 3050 DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
3051 And(scratch2, result, Operand(kDoubleAlignmentMask)); 3051 And(scratch2, result, Operand(kDoubleAlignmentMask));
3052 Label aligned; 3052 Label aligned;
3053 Branch(&aligned, eq, scratch2, Operand(zero_reg)); 3053 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3054 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { 3054 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3055 Branch(gc_required, Ugreater_equal, result, Operand(t9)); 3055 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3056 } 3056 }
3057 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); 3057 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3058 sw(scratch2, MemOperand(result)); 3058 sw(scratch2, MemOperand(result));
3059 Addu(result, result, Operand(kDoubleSize / 2)); 3059 Addu(result, result, Operand(kDoubleSize / 2));
3060 bind(&aligned); 3060 bind(&aligned);
(...skipping 45 matching lines...)
3106 3106
3107 3107
3108 void MacroAssembler::AllocateTwoByteString(Register result, 3108 void MacroAssembler::AllocateTwoByteString(Register result,
3109 Register length, 3109 Register length,
3110 Register scratch1, 3110 Register scratch1,
3111 Register scratch2, 3111 Register scratch2,
3112 Register scratch3, 3112 Register scratch3,
3113 Label* gc_required) { 3113 Label* gc_required) {
3114 // Calculate the number of bytes needed for the characters in the string while 3114 // Calculate the number of bytes needed for the characters in the string while
3115 // observing object alignment. 3115 // observing object alignment.
3116 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3116 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3117 sll(scratch1, length, 1); // Length in bytes, not chars. 3117 sll(scratch1, length, 1); // Length in bytes, not chars.
3118 addiu(scratch1, scratch1, 3118 addiu(scratch1, scratch1,
3119 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); 3119 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3120 And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); 3120 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3121 3121
3122 // Allocate two-byte string in new space. 3122 // Allocate two-byte string in new space.
3123 Allocate(scratch1, 3123 Allocate(scratch1,
3124 result, 3124 result,
3125 scratch2, 3125 scratch2,
3126 scratch3, 3126 scratch3,
(...skipping 10 matching lines...)
3137 3137
3138 3138
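The size computation at the top of AllocateTwoByteString above is the usual round-up-to-alignment formula: double the length, add the header plus the alignment mask, then clear the low bits. A worked sketch with assumed constants (8-byte object alignment and a 12-byte header, not SeqTwoByteString's real values):

#include <cassert>

int main() {
  const int kObjectAlignmentMask = 8 - 1;  // assumed 8-byte alignment
  const int kHeaderSize = 12;              // assumed header size
  int length = 5;                          // characters, 2 bytes each
  int size = (2 * length + kObjectAlignmentMask + kHeaderSize) &
             ~kObjectAlignmentMask;
  assert(size == 24);  // 10 payload + 12 header = 22, rounded up to 24
  return 0;
}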
3139 void MacroAssembler::AllocateAsciiString(Register result, 3139 void MacroAssembler::AllocateAsciiString(Register result,
3140 Register length, 3140 Register length,
3141 Register scratch1, 3141 Register scratch1,
3142 Register scratch2, 3142 Register scratch2,
3143 Register scratch3, 3143 Register scratch3,
3144 Label* gc_required) { 3144 Label* gc_required) {
3145 // Calculate the number of bytes needed for the characters in the string 3145 // Calculate the number of bytes needed for the characters in the string
3146 // while observing object alignment. 3146 // while observing object alignment.
3147 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3147 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3148 ASSERT(kCharSize == 1); 3148 DCHECK(kCharSize == 1);
3149 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); 3149 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3150 And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); 3150 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3151 3151
3152 // Allocate ASCII string in new space. 3152 // Allocate ASCII string in new space.
3153 Allocate(scratch1, 3153 Allocate(scratch1,
3154 result, 3154 result,
3155 scratch2, 3155 scratch2,
3156 scratch3, 3156 scratch3,
3157 gc_required, 3157 gc_required,
3158 TAG_OBJECT); 3158 TAG_OBJECT);
(...skipping 123 matching lines...)
3282 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required); 3282 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3283 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); 3283 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3284 } 3284 }
3285 3285
3286 3286
3287 // Copies a fixed number of fields of heap objects from src to dst. 3287 // Copies a fixed number of fields of heap objects from src to dst.
3288 void MacroAssembler::CopyFields(Register dst, 3288 void MacroAssembler::CopyFields(Register dst,
3289 Register src, 3289 Register src,
3290 RegList temps, 3290 RegList temps,
3291 int field_count) { 3291 int field_count) {
3292 ASSERT((temps & dst.bit()) == 0); 3292 DCHECK((temps & dst.bit()) == 0);
3293 ASSERT((temps & src.bit()) == 0); 3293 DCHECK((temps & src.bit()) == 0);
3294 // Primitive implementation using only one temporary register. 3294 // Primitive implementation using only one temporary register.
3295 3295
3296 Register tmp = no_reg; 3296 Register tmp = no_reg;
3297 // Find a temp register in temps list. 3297 // Find a temp register in temps list.
3298 for (int i = 0; i < kNumRegisters; i++) { 3298 for (int i = 0; i < kNumRegisters; i++) {
3299 if ((temps & (1 << i)) != 0) { 3299 if ((temps & (1 << i)) != 0) {
3300 tmp.code_ = i; 3300 tmp.code_ = i;
3301 break; 3301 break;
3302 } 3302 }
3303 } 3303 }
3304 ASSERT(!tmp.is(no_reg)); 3304 DCHECK(!tmp.is(no_reg));
3305 3305
3306 for (int i = 0; i < field_count; i++) { 3306 for (int i = 0; i < field_count; i++) {
3307 lw(tmp, FieldMemOperand(src, i * kPointerSize)); 3307 lw(tmp, FieldMemOperand(src, i * kPointerSize));
3308 sw(tmp, FieldMemOperand(dst, i * kPointerSize)); 3308 sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3309 } 3309 }
3310 } 3310 }
3311 3311
3312 3312
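CopyFields above scans its RegList bitmask for a usable temporary register. A sketch of that scan, modeling RegList as a plain 32-bit mask over register codes:

#include <cassert>
#include <cstdint>

// Returns the code of the lowest register present in the list, or -1
// (the analogue of no_reg) when the list is empty.
static int LowestRegCode(uint32_t reglist) {
  for (int i = 0; i < 32; i++) {
    if (reglist & (1u << i)) return i;
  }
  return -1;
}

int main() {
  assert(LowestRegCode((1u << 9) | (1u << 12)) == 9);  // picks t1 before t4
  assert(LowestRegCode(0) == -1);                      // empty temps list
  return 0;
}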
3313 void MacroAssembler::CopyBytes(Register src, 3313 void MacroAssembler::CopyBytes(Register src,
3314 Register dst, 3314 Register dst,
(...skipping 298 matching lines...)
3613 Move(v1, v0, src); 3613 Move(v1, v0, src);
3614 } 3614 }
3615 } 3615 }
3616 } 3616 }
3617 3617
3618 3618
3619 void MacroAssembler::MovToFloatParameters(DoubleRegister src1, 3619 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3620 DoubleRegister src2) { 3620 DoubleRegister src2) {
3621 if (!IsMipsSoftFloatABI) { 3621 if (!IsMipsSoftFloatABI) {
3622 if (src2.is(f12)) { 3622 if (src2.is(f12)) {
3623 ASSERT(!src1.is(f14)); 3623 DCHECK(!src1.is(f14));
3624 Move(f14, src2); 3624 Move(f14, src2);
3625 Move(f12, src1); 3625 Move(f12, src1);
3626 } else { 3626 } else {
3627 Move(f12, src1); 3627 Move(f12, src1);
3628 Move(f14, src2); 3628 Move(f14, src2);
3629 } 3629 }
3630 } else { 3630 } else {
3631 if (kArchEndian == kLittle) { 3631 if (kArchEndian == kLittle) {
3632 Move(a0, a1, src1); 3632 Move(a0, a1, src1);
3633 Move(a2, a3, src2); 3633 Move(a2, a3, src2);
(...skipping 22 matching lines...)
3656 3656
3657 // Check whether the expected and actual arguments count match. If not, 3657 // Check whether the expected and actual arguments count match. If not,
3658 // set up registers according to the contract with ArgumentsAdaptorTrampoline: 3658 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
3659 // a0: actual arguments count 3659 // a0: actual arguments count
3660 // a1: function (passed through to callee) 3660 // a1: function (passed through to callee)
3661 // a2: expected arguments count 3661 // a2: expected arguments count
3662 3662
3663 // The code below is made a lot easier because the calling code already sets 3663 // The code below is made a lot easier because the calling code already sets
3664 // up actual and expected registers according to the contract if values are 3664 // up actual and expected registers according to the contract if values are
3665 // passed in registers. 3665 // passed in registers.
3666 ASSERT(actual.is_immediate() || actual.reg().is(a0)); 3666 DCHECK(actual.is_immediate() || actual.reg().is(a0));
3667 ASSERT(expected.is_immediate() || expected.reg().is(a2)); 3667 DCHECK(expected.is_immediate() || expected.reg().is(a2));
3668 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); 3668 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3669 3669
3670 if (expected.is_immediate()) { 3670 if (expected.is_immediate()) {
3671 ASSERT(actual.is_immediate()); 3671 DCHECK(actual.is_immediate());
3672 if (expected.immediate() == actual.immediate()) { 3672 if (expected.immediate() == actual.immediate()) {
3673 definitely_matches = true; 3673 definitely_matches = true;
3674 } else { 3674 } else {
3675 li(a0, Operand(actual.immediate())); 3675 li(a0, Operand(actual.immediate()));
3676 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3676 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3677 if (expected.immediate() == sentinel) { 3677 if (expected.immediate() == sentinel) {
3678 // Don't worry about adapting arguments for builtins that 3678 // Don't worry about adapting arguments for builtins that
3679 // don't want that done. Skip adaption code by making it look 3679 // don't want that done. Skip adaption code by making it look
3680 // like we have a match between expected and actual number of 3680 // like we have a match between expected and actual number of
3681 // arguments. 3681 // arguments.
(...skipping 32 matching lines...)
3714 } 3714 }
3715 } 3715 }
3716 3716
3717 3717
3718 void MacroAssembler::InvokeCode(Register code, 3718 void MacroAssembler::InvokeCode(Register code,
3719 const ParameterCount& expected, 3719 const ParameterCount& expected,
3720 const ParameterCount& actual, 3720 const ParameterCount& actual,
3721 InvokeFlag flag, 3721 InvokeFlag flag,
3722 const CallWrapper& call_wrapper) { 3722 const CallWrapper& call_wrapper) {
3723 // You can't call a function without a valid frame. 3723 // You can't call a function without a valid frame.
3724 ASSERT(flag == JUMP_FUNCTION || has_frame()); 3724 DCHECK(flag == JUMP_FUNCTION || has_frame());
3725 3725
3726 Label done; 3726 Label done;
3727 3727
3728 bool definitely_mismatches = false; 3728 bool definitely_mismatches = false;
3729 InvokePrologue(expected, actual, Handle<Code>::null(), code, 3729 InvokePrologue(expected, actual, Handle<Code>::null(), code,
3730 &done, &definitely_mismatches, flag, 3730 &done, &definitely_mismatches, flag,
3731 call_wrapper); 3731 call_wrapper);
3732 if (!definitely_mismatches) { 3732 if (!definitely_mismatches) {
3733 if (flag == CALL_FUNCTION) { 3733 if (flag == CALL_FUNCTION) {
3734 call_wrapper.BeforeCall(CallSize(code)); 3734 call_wrapper.BeforeCall(CallSize(code));
3735 Call(code); 3735 Call(code);
3736 call_wrapper.AfterCall(); 3736 call_wrapper.AfterCall();
3737 } else { 3737 } else {
3738 ASSERT(flag == JUMP_FUNCTION); 3738 DCHECK(flag == JUMP_FUNCTION);
3739 Jump(code); 3739 Jump(code);
3740 } 3740 }
3741 // Continue here if InvokePrologue does handle the invocation due to 3741 // Continue here if InvokePrologue does handle the invocation due to
3742 // mismatched parameter counts. 3742 // mismatched parameter counts.
3743 bind(&done); 3743 bind(&done);
3744 } 3744 }
3745 } 3745 }
3746 3746
3747 3747
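InvokePrologue, partially visible above, treats SharedFunctionInfo::kDontAdaptArgumentsSentinel as a wildcard: builtins flagged with it skip argument adaptation even when counts differ. A tiny sketch of that decision, with the sentinel value chosen for illustration only:

#include <cassert>

int main() {
  const int kDontAdaptArgumentsSentinel = -1;  // illustrative value only
  auto definitely_matches = [&](int expected, int actual) {
    return expected == actual || expected == kDontAdaptArgumentsSentinel;
  };
  assert(definitely_matches(2, 2));                            // exact match
  assert(definitely_matches(kDontAdaptArgumentsSentinel, 5));  // no adaptation
  assert(!definitely_matches(2, 3));  // would fall through to the adaptor
  return 0;
}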
3748 void MacroAssembler::InvokeFunction(Register function, 3748 void MacroAssembler::InvokeFunction(Register function,
3749 const ParameterCount& actual, 3749 const ParameterCount& actual,
3750 InvokeFlag flag, 3750 InvokeFlag flag,
3751 const CallWrapper& call_wrapper) { 3751 const CallWrapper& call_wrapper) {
3752 // You can't call a function without a valid frame. 3752 // You can't call a function without a valid frame.
3753 ASSERT(flag == JUMP_FUNCTION || has_frame()); 3753 DCHECK(flag == JUMP_FUNCTION || has_frame());
3754 3754
3755 // Contract with called JS functions requires that function is passed in a1. 3755 // Contract with called JS functions requires that function is passed in a1.
3756 ASSERT(function.is(a1)); 3756 DCHECK(function.is(a1));
3757 Register expected_reg = a2; 3757 Register expected_reg = a2;
3758 Register code_reg = a3; 3758 Register code_reg = a3;
3759 3759
3760 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); 3760 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3761 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 3761 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3762 lw(expected_reg, 3762 lw(expected_reg,
3763 FieldMemOperand(code_reg, 3763 FieldMemOperand(code_reg,
3764 SharedFunctionInfo::kFormalParameterCountOffset)); 3764 SharedFunctionInfo::kFormalParameterCountOffset));
3765 sra(expected_reg, expected_reg, kSmiTagSize); 3765 sra(expected_reg, expected_reg, kSmiTagSize);
3766 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 3766 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3767 3767
3768 ParameterCount expected(expected_reg); 3768 ParameterCount expected(expected_reg);
3769 InvokeCode(code_reg, expected, actual, flag, call_wrapper); 3769 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
3770 } 3770 }
3771 3771
3772 3772
3773 void MacroAssembler::InvokeFunction(Register function, 3773 void MacroAssembler::InvokeFunction(Register function,
3774 const ParameterCount& expected, 3774 const ParameterCount& expected,
3775 const ParameterCount& actual, 3775 const ParameterCount& actual,
3776 InvokeFlag flag, 3776 InvokeFlag flag,
3777 const CallWrapper& call_wrapper) { 3777 const CallWrapper& call_wrapper) {
3778 // You can't call a function without a valid frame. 3778 // You can't call a function without a valid frame.
3779 ASSERT(flag == JUMP_FUNCTION || has_frame()); 3779 DCHECK(flag == JUMP_FUNCTION || has_frame());
3780 3780
3781 // Contract with called JS functions requires that function is passed in a1. 3781 // Contract with called JS functions requires that function is passed in a1.
3782 ASSERT(function.is(a1)); 3782 DCHECK(function.is(a1));
3783 3783
3784 // Get the function and set up the context. 3784 // Get the function and set up the context.
3785 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 3785 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3786 3786
3787 // We call indirectly through the code field in the function to 3787 // We call indirectly through the code field in the function to
3788 // allow recompilation to take effect without changing any of the 3788 // allow recompilation to take effect without changing any of the
3789 // call sites. 3789 // call sites.
3790 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 3790 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3791 InvokeCode(a3, expected, actual, flag, call_wrapper); 3791 InvokeCode(a3, expected, actual, flag, call_wrapper);
3792 } 3792 }
(...skipping 23 matching lines...)
3816 Label* fail) { 3816 Label* fail) {
3817 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); 3817 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3818 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 3818 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3819 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); 3819 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3820 } 3820 }
3821 3821
3822 3822
3823 void MacroAssembler::IsObjectJSStringType(Register object, 3823 void MacroAssembler::IsObjectJSStringType(Register object,
3824 Register scratch, 3824 Register scratch,
3825 Label* fail) { 3825 Label* fail) {
3826 ASSERT(kNotStringTag != 0); 3826 DCHECK(kNotStringTag != 0);
3827 3827
3828 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 3828 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3829 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 3829 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3830 And(scratch, scratch, Operand(kIsNotStringMask)); 3830 And(scratch, scratch, Operand(kIsNotStringMask));
3831 Branch(fail, ne, scratch, Operand(zero_reg)); 3831 Branch(fail, ne, scratch, Operand(zero_reg));
3832 } 3832 }
3833 3833
3834 3834
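IsObjectJSStringType above leans on the instance-type encoding: string types keep the kIsNotStringMask bit clear, so a single AND plus a zero test classifies the object. A sketch with assumed encoding values (0x80 matches the conventional V8 layout, but treat it as illustrative):

#include <cassert>

int main() {
  const int kIsNotStringMask = 0x80;  // assumed mask bit
  int seq_string_type = 0x04;         // some string instance type
  int js_object_type = 0x80 | 0x21;   // some non-string instance type
  assert((seq_string_type & kIsNotStringMask) == 0);  // branch not taken
  assert((js_object_type & kIsNotStringMask) != 0);   // jumps to fail
  return 0;
}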
3835 void MacroAssembler::IsObjectNameType(Register object, 3835 void MacroAssembler::IsObjectNameType(Register object,
3836 Register scratch, 3836 Register scratch,
(...skipping 78 matching lines...)
3915 3915
3916 // ----------------------------------------------------------------------------- 3916 // -----------------------------------------------------------------------------
3917 // Runtime calls. 3917 // Runtime calls.
3918 3918
3919 void MacroAssembler::CallStub(CodeStub* stub, 3919 void MacroAssembler::CallStub(CodeStub* stub,
3920 TypeFeedbackId ast_id, 3920 TypeFeedbackId ast_id,
3921 Condition cond, 3921 Condition cond,
3922 Register r1, 3922 Register r1,
3923 const Operand& r2, 3923 const Operand& r2,
3924 BranchDelaySlot bd) { 3924 BranchDelaySlot bd) {
3925 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 3925 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3926 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, 3926 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
3927 cond, r1, r2, bd); 3927 cond, r1, r2, bd);
3928 } 3928 }
3929 3929
3930 3930
3931 void MacroAssembler::TailCallStub(CodeStub* stub, 3931 void MacroAssembler::TailCallStub(CodeStub* stub,
3932 Condition cond, 3932 Condition cond,
3933 Register r1, 3933 Register r1,
3934 const Operand& r2, 3934 const Operand& r2,
3935 BranchDelaySlot bd) { 3935 BranchDelaySlot bd) {
(...skipping 15 matching lines...)
3951 ExternalReference next_address = 3951 ExternalReference next_address =
3952 ExternalReference::handle_scope_next_address(isolate()); 3952 ExternalReference::handle_scope_next_address(isolate());
3953 const int kNextOffset = 0; 3953 const int kNextOffset = 0;
3954 const int kLimitOffset = AddressOffset( 3954 const int kLimitOffset = AddressOffset(
3955 ExternalReference::handle_scope_limit_address(isolate()), 3955 ExternalReference::handle_scope_limit_address(isolate()),
3956 next_address); 3956 next_address);
3957 const int kLevelOffset = AddressOffset( 3957 const int kLevelOffset = AddressOffset(
3958 ExternalReference::handle_scope_level_address(isolate()), 3958 ExternalReference::handle_scope_level_address(isolate()),
3959 next_address); 3959 next_address);
3960 3960
3961 ASSERT(function_address.is(a1) || function_address.is(a2)); 3961 DCHECK(function_address.is(a1) || function_address.is(a2));
3962 3962
3963 Label profiler_disabled; 3963 Label profiler_disabled;
3964 Label end_profiler_check; 3964 Label end_profiler_check;
3965 li(t9, Operand(ExternalReference::is_profiling_address(isolate()))); 3965 li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
3966 lb(t9, MemOperand(t9, 0)); 3966 lb(t9, MemOperand(t9, 0));
3967 Branch(&profiler_disabled, eq, t9, Operand(zero_reg)); 3967 Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
3968 3968
3969 // Additional parameter is the address of the actual callback. 3969 // Additional parameter is the address of the actual callback.
3970 li(t9, Operand(thunk_ref)); 3970 li(t9, Operand(thunk_ref));
3971 jmp(&end_profiler_check); 3971 jmp(&end_profiler_check);
(...skipping 97 matching lines...)
4069 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { 4069 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4070 return has_frame_ || !stub->SometimesSetsUpAFrame(); 4070 return has_frame_ || !stub->SometimesSetsUpAFrame();
4071 } 4071 }
4072 4072
4073 4073
4074 void MacroAssembler::IndexFromHash(Register hash, Register index) { 4074 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4075 // If the hash field contains an array index, pick it out. The assert checks 4075 // If the hash field contains an array index, pick it out. The assert checks
4076 // that the constants for the maximum number of digits for an array index 4076 // that the constants for the maximum number of digits for an array index
4077 // cached in the hash field and the number of bits reserved for it do not 4077 // cached in the hash field and the number of bits reserved for it do not
4078 // conflict. 4078 // conflict.
4079 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < 4079 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4080 (1 << String::kArrayIndexValueBits)); 4080 (1 << String::kArrayIndexValueBits));
4081 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); 4081 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
4082 } 4082 }
4083 4083
4084 4084
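The DCHECK in IndexFromHash above verifies that the largest cacheable array index fits in the hash field's value bits. The arithmetic, with V8's customary constants treated here as assumptions:

#include <cassert>
#include <cstdint>

int main() {
  const int kMaxCachedArrayIndexLength = 7;  // assumed max digit count
  const int kArrayIndexValueBits = 24;       // assumed field width
  uint32_t ten_to_the = 1;
  for (int i = 0; i < kMaxCachedArrayIndexLength; i++) ten_to_the *= 10;
  assert(ten_to_the < (1u << kArrayIndexValueBits));  // 10^7 < 2^24
  return 0;
}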
4085 void MacroAssembler::ObjectToDoubleFPURegister(Register object, 4085 void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4086 FPURegister result, 4086 FPURegister result,
4087 Register scratch1, 4087 Register scratch1,
4088 Register scratch2, 4088 Register scratch2,
4089 Register heap_number_map, 4089 Register heap_number_map,
(...skipping 36 matching lines...)
4126 mtc1(scratch1, value); 4126 mtc1(scratch1, value);
4127 cvt_d_w(value, value); 4127 cvt_d_w(value, value);
4128 } 4128 }
4129 4129
4130 4130
4131 void MacroAssembler::AdduAndCheckForOverflow(Register dst, 4131 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4132 Register left, 4132 Register left,
4133 Register right, 4133 Register right,
4134 Register overflow_dst, 4134 Register overflow_dst,
4135 Register scratch) { 4135 Register scratch) {
4136 ASSERT(!dst.is(overflow_dst)); 4136 DCHECK(!dst.is(overflow_dst));
4137 ASSERT(!dst.is(scratch)); 4137 DCHECK(!dst.is(scratch));
4138 ASSERT(!overflow_dst.is(scratch)); 4138 DCHECK(!overflow_dst.is(scratch));
4139 ASSERT(!overflow_dst.is(left)); 4139 DCHECK(!overflow_dst.is(left));
4140 ASSERT(!overflow_dst.is(right)); 4140 DCHECK(!overflow_dst.is(right));
4141 4141
4142 if (left.is(right) && dst.is(left)) { 4142 if (left.is(right) && dst.is(left)) {
4143 ASSERT(!dst.is(t9)); 4143 DCHECK(!dst.is(t9));
4144 ASSERT(!scratch.is(t9)); 4144 DCHECK(!scratch.is(t9));
4145 ASSERT(!left.is(t9)); 4145 DCHECK(!left.is(t9));
4146 ASSERT(!right.is(t9)); 4146 DCHECK(!right.is(t9));
4147 ASSERT(!overflow_dst.is(t9)); 4147 DCHECK(!overflow_dst.is(t9));
4148 mov(t9, right); 4148 mov(t9, right);
4149 right = t9; 4149 right = t9;
4150 } 4150 }
4151 4151
4152 if (dst.is(left)) { 4152 if (dst.is(left)) {
4153 mov(scratch, left); // Preserve left. 4153 mov(scratch, left); // Preserve left.
4154 addu(dst, left, right); // Left is overwritten. 4154 addu(dst, left, right); // Left is overwritten.
4155 xor_(scratch, dst, scratch); // Original left. 4155 xor_(scratch, dst, scratch); // Original left.
4156 xor_(overflow_dst, dst, right); 4156 xor_(overflow_dst, dst, right);
4157 and_(overflow_dst, overflow_dst, scratch); 4157 and_(overflow_dst, overflow_dst, scratch);
(...skipping 10 matching lines...)
4168 and_(overflow_dst, scratch, overflow_dst); 4168 and_(overflow_dst, scratch, overflow_dst);
4169 } 4169 }
4170 } 4170 }
4171 4171
4172 4172
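AdduAndCheckForOverflow above computes the overflow flag with the classic XOR trick: for dst = left + right, signed overflow happened exactly when both operands share a sign that the result does not, i.e. the sign bit of (dst ^ left) & (dst ^ right) is set. A worked sketch:

#include <cassert>
#include <cstdint>

static bool AddOverflows(int32_t left, int32_t right) {
  uint32_t ul = static_cast<uint32_t>(left);
  uint32_t ur = static_cast<uint32_t>(right);
  uint32_t dst = ul + ur;  // wraps around, matching the addu instruction
  return (((dst ^ ul) & (dst ^ ur)) >> 31) != 0;  // sign bit of the AND
}

int main() {
  assert(AddOverflows(INT32_MAX, 1));    // wraps to INT32_MIN
  assert(AddOverflows(INT32_MIN, -1));   // wraps to INT32_MAX
  assert(!AddOverflows(INT32_MAX, -1));  // mixed signs never overflow
  return 0;
}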
4173 void MacroAssembler::SubuAndCheckForOverflow(Register dst, 4173 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4174 Register left, 4174 Register left,
4175 Register right, 4175 Register right,
4176 Register overflow_dst, 4176 Register overflow_dst,
4177 Register scratch) { 4177 Register scratch) {
4178 ASSERT(!dst.is(overflow_dst)); 4178 DCHECK(!dst.is(overflow_dst));
4179 ASSERT(!dst.is(scratch)); 4179 DCHECK(!dst.is(scratch));
4180 ASSERT(!overflow_dst.is(scratch)); 4180 DCHECK(!overflow_dst.is(scratch));
4181 ASSERT(!overflow_dst.is(left)); 4181 DCHECK(!overflow_dst.is(left));
4182 ASSERT(!overflow_dst.is(right)); 4182 DCHECK(!overflow_dst.is(right));
4183 ASSERT(!scratch.is(left)); 4183 DCHECK(!scratch.is(left));
4184 ASSERT(!scratch.is(right)); 4184 DCHECK(!scratch.is(right));
4185 4185
4186 // This happens with some crankshaft code. Since Subu works fine if 4186 // This happens with some crankshaft code. Since Subu works fine if
4187 // left == right, let's not make that restriction here. 4187 // left == right, let's not make that restriction here.
4188 if (left.is(right)) { 4188 if (left.is(right)) {
4189 mov(dst, zero_reg); 4189 mov(dst, zero_reg);
4190 mov(overflow_dst, zero_reg); 4190 mov(overflow_dst, zero_reg);
4191 return; 4191 return;
4192 } 4192 }
4193 4193
4194 if (dst.is(left)) { 4194 if (dst.is(left)) {
(...skipping 80 matching lines...)
4275 zero_reg, 4275 zero_reg,
4276 Operand(zero_reg), 4276 Operand(zero_reg),
4277 bd); 4277 bd);
4278 } 4278 }
4279 4279
4280 4280
4281 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 4281 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4282 InvokeFlag flag, 4282 InvokeFlag flag,
4283 const CallWrapper& call_wrapper) { 4283 const CallWrapper& call_wrapper) {
4284 // You can't call a builtin without a valid frame. 4284 // You can't call a builtin without a valid frame.
4285 ASSERT(flag == JUMP_FUNCTION || has_frame()); 4285 DCHECK(flag == JUMP_FUNCTION || has_frame());
4286 4286
4287 GetBuiltinEntry(t9, id); 4287 GetBuiltinEntry(t9, id);
4288 if (flag == CALL_FUNCTION) { 4288 if (flag == CALL_FUNCTION) {
4289 call_wrapper.BeforeCall(CallSize(t9)); 4289 call_wrapper.BeforeCall(CallSize(t9));
4290 Call(t9); 4290 Call(t9);
4291 call_wrapper.AfterCall(); 4291 call_wrapper.AfterCall();
4292 } else { 4292 } else {
4293 ASSERT(flag == JUMP_FUNCTION); 4293 DCHECK(flag == JUMP_FUNCTION);
4294 Jump(t9); 4294 Jump(t9);
4295 } 4295 }
4296 } 4296 }
4297 4297
4298 4298
4299 void MacroAssembler::GetBuiltinFunction(Register target, 4299 void MacroAssembler::GetBuiltinFunction(Register target,
4300 Builtins::JavaScript id) { 4300 Builtins::JavaScript id) {
4301 // Load the builtins object into target register. 4301 // Load the builtins object into target register.
4302 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 4302 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4303 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); 4303 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4304 // Load the JavaScript builtin function from the builtins object. 4304 // Load the JavaScript builtin function from the builtins object.
4305 lw(target, FieldMemOperand(target, 4305 lw(target, FieldMemOperand(target,
4306 JSBuiltinsObject::OffsetOfFunctionWithId(id))); 4306 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4307 } 4307 }
4308 4308
4309 4309
4310 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { 4310 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4311 ASSERT(!target.is(a1)); 4311 DCHECK(!target.is(a1));
4312 GetBuiltinFunction(a1, id); 4312 GetBuiltinFunction(a1, id);
4313 // Load the code entry point from the builtins object. 4313 // Load the code entry point from the builtins object.
4314 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 4314 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4315 } 4315 }
4316 4316
4317 4317
4318 void MacroAssembler::SetCounter(StatsCounter* counter, int value, 4318 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4319 Register scratch1, Register scratch2) { 4319 Register scratch1, Register scratch2) {
4320 if (FLAG_native_code_counters && counter->Enabled()) { 4320 if (FLAG_native_code_counters && counter->Enabled()) {
4321 li(scratch1, Operand(value)); 4321 li(scratch1, Operand(value));
4322 li(scratch2, Operand(ExternalReference(counter))); 4322 li(scratch2, Operand(ExternalReference(counter)));
4323 sw(scratch1, MemOperand(scratch2)); 4323 sw(scratch1, MemOperand(scratch2));
4324 } 4324 }
4325 } 4325 }
4326 4326
4327 4327
4328 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, 4328 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4329 Register scratch1, Register scratch2) { 4329 Register scratch1, Register scratch2) {
4330 ASSERT(value > 0); 4330 DCHECK(value > 0);
4331 if (FLAG_native_code_counters && counter->Enabled()) { 4331 if (FLAG_native_code_counters && counter->Enabled()) {
4332 li(scratch2, Operand(ExternalReference(counter))); 4332 li(scratch2, Operand(ExternalReference(counter)));
4333 lw(scratch1, MemOperand(scratch2)); 4333 lw(scratch1, MemOperand(scratch2));
4334 Addu(scratch1, scratch1, Operand(value)); 4334 Addu(scratch1, scratch1, Operand(value));
4335 sw(scratch1, MemOperand(scratch2)); 4335 sw(scratch1, MemOperand(scratch2));
4336 } 4336 }
4337 } 4337 }
4338 4338
4339 4339
4340 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, 4340 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4341 Register scratch1, Register scratch2) { 4341 Register scratch1, Register scratch2) {
4342 ASSERT(value > 0); 4342 DCHECK(value > 0);
4343 if (FLAG_native_code_counters && counter->Enabled()) { 4343 if (FLAG_native_code_counters && counter->Enabled()) {
4344 li(scratch2, Operand(ExternalReference(counter))); 4344 li(scratch2, Operand(ExternalReference(counter)));
4345 lw(scratch1, MemOperand(scratch2)); 4345 lw(scratch1, MemOperand(scratch2));
4346 Subu(scratch1, scratch1, Operand(value)); 4346 Subu(scratch1, scratch1, Operand(value));
4347 sw(scratch1, MemOperand(scratch2)); 4347 sw(scratch1, MemOperand(scratch2));
4348 } 4348 }
4349 } 4349 }
4350 4350
4351 4351
4352 // ----------------------------------------------------------------------------- 4352 // -----------------------------------------------------------------------------
4353 // Debugging. 4353 // Debugging.
4354 4354
4355 void MacroAssembler::Assert(Condition cc, BailoutReason reason, 4355 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4356 Register rs, Operand rt) { 4356 Register rs, Operand rt) {
4357 if (emit_debug_code()) 4357 if (emit_debug_code())
4358 Check(cc, reason, rs, rt); 4358 Check(cc, reason, rs, rt);
4359 } 4359 }
4360 4360
4361 4361
4362 void MacroAssembler::AssertFastElements(Register elements) { 4362 void MacroAssembler::AssertFastElements(Register elements) {
4363 if (emit_debug_code()) { 4363 if (emit_debug_code()) {
4364 ASSERT(!elements.is(at)); 4364 DCHECK(!elements.is(at));
4365 Label ok; 4365 Label ok;
4366 push(elements); 4366 push(elements);
4367 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); 4367 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4368 LoadRoot(at, Heap::kFixedArrayMapRootIndex); 4368 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4369 Branch(&ok, eq, elements, Operand(at)); 4369 Branch(&ok, eq, elements, Operand(at));
4370 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); 4370 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4371 Branch(&ok, eq, elements, Operand(at)); 4371 Branch(&ok, eq, elements, Operand(at));
4372 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); 4372 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4373 Branch(&ok, eq, elements, Operand(at)); 4373 Branch(&ok, eq, elements, Operand(at));
4374 Abort(kJSObjectWithFastElementsMapHasSlowElements); 4374 Abort(kJSObjectWithFastElementsMapHasSlowElements);
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
4417 } 4417 }
4418 // Will not return here. 4418 // Will not return here.
4419 if (is_trampoline_pool_blocked()) { 4419 if (is_trampoline_pool_blocked()) {
4420 // If the calling code cares about the exact number of 4420 // If the calling code cares about the exact number of
4421 // instructions generated, we insert padding here to keep the size 4421 // instructions generated, we insert padding here to keep the size
4422 // of the Abort macro constant. 4422 // of the Abort macro constant.
4423 // Currently in debug mode with debug_code enabled the number of 4423 // Currently in debug mode with debug_code enabled the number of
4424 // generated instructions is 10, so we use this as a maximum value. 4424 // generated instructions is 10, so we use this as a maximum value.
4425 static const int kExpectedAbortInstructions = 10; 4425 static const int kExpectedAbortInstructions = 10;
4426 int abort_instructions = InstructionsGeneratedSince(&abort_start); 4426 int abort_instructions = InstructionsGeneratedSince(&abort_start);
4427 ASSERT(abort_instructions <= kExpectedAbortInstructions); 4427 DCHECK(abort_instructions <= kExpectedAbortInstructions);
4428 while (abort_instructions++ < kExpectedAbortInstructions) { 4428 while (abort_instructions++ < kExpectedAbortInstructions) {
4429 nop(); 4429 nop();
4430 } 4430 }
4431 } 4431 }
4432 } 4432 }
4433 4433
4434 4434
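The tail of Abort() above pads with nops up to a fixed instruction count so that callers which depend on the macro's exact size still line up. A sketch of that constant-size padding pattern:

#include <cassert>
#include <vector>

int main() {
  std::vector<unsigned> code;
  code.push_back(0xDEAD0001);  // stand-in for the emitted abort sequence
  code.push_back(0xDEAD0002);
  const int kExpectedInstructions = 10;  // fixed size callers rely on
  int emitted = static_cast<int>(code.size());
  assert(emitted <= kExpectedInstructions);
  while (emitted++ < kExpectedInstructions) {
    code.push_back(0x00000000);  // nop
  }
  assert(static_cast<int>(code.size()) == kExpectedInstructions);
  return 0;
}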
4435 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { 4435 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4436 if (context_chain_length > 0) { 4436 if (context_chain_length > 0) {
4437 // Move up the chain of contexts to the context containing the slot. 4437 // Move up the chain of contexts to the context containing the slot.
(...skipping 155 matching lines...)
4593 4593
4594 // Save the frame pointer and the context in top. 4594 // Save the frame pointer and the context in top.
4595 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); 4595 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4596 sw(fp, MemOperand(t8)); 4596 sw(fp, MemOperand(t8));
4597 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); 4597 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4598 sw(cp, MemOperand(t8)); 4598 sw(cp, MemOperand(t8));
4599 4599
4600 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); 4600 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4601 if (save_doubles) { 4601 if (save_doubles) {
4602 // The stack must be aligned to 0 modulo 8 for stores with sdc1. 4602 // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4603 ASSERT(kDoubleSize == frame_alignment); 4603 DCHECK(kDoubleSize == frame_alignment);
4604 if (frame_alignment > 0) { 4604 if (frame_alignment > 0) {
4605 ASSERT(IsPowerOf2(frame_alignment)); 4605 DCHECK(IsPowerOf2(frame_alignment));
4606 And(sp, sp, Operand(-frame_alignment)); // Align stack. 4606 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4607 } 4607 }
4608 int space = FPURegister::kMaxNumRegisters * kDoubleSize; 4608 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4609 Subu(sp, sp, Operand(space)); 4609 Subu(sp, sp, Operand(space));
4610 // Remember: we only need to save every 2nd double FPU value. 4610 // Remember: we only need to save every 2nd double FPU value.
4611 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { 4611 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4612 FPURegister reg = FPURegister::from_code(i); 4612 FPURegister reg = FPURegister::from_code(i);
4613 sdc1(reg, MemOperand(sp, i * kDoubleSize)); 4613 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4614 } 4614 }
4615 } 4615 }
4616 4616
4617 // Reserve space for the return address, stack space, and an optional slot 4617 // Reserve space for the return address, stack space, and an optional slot
4618 // (used by the DirectCEntryStub to hold the return value if a struct is 4618 // (used by the DirectCEntryStub to hold the return value if a struct is
4619 // returned), and align the frame in preparation for the runtime call. 4619 // returned), and align the frame in preparation for the runtime call.
4620 ASSERT(stack_space >= 0); 4620 DCHECK(stack_space >= 0);
4621 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize)); 4621 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4622 if (frame_alignment > 0) { 4622 if (frame_alignment > 0) {
4623 ASSERT(IsPowerOf2(frame_alignment)); 4623 DCHECK(IsPowerOf2(frame_alignment));
4624 And(sp, sp, Operand(-frame_alignment)); // Align stack. 4624 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4625 } 4625 }
4626 4626
4627 // Set the exit frame sp value to point just before the return address 4627 // Set the exit frame sp value to point just before the return address
4628 // location. 4628 // location.
4629 addiu(at, sp, kPointerSize); 4629 addiu(at, sp, kPointerSize);
4630 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); 4630 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4631 } 4631 }
4632 4632
4633 4633
(...skipping 74 matching lines...)
4708 } 4708 }
4709 4709
4710 4710
4711 void MacroAssembler::AssertStackIsAligned() { 4711 void MacroAssembler::AssertStackIsAligned() {
4712 if (emit_debug_code()) { 4712 if (emit_debug_code()) {
4713 const int frame_alignment = ActivationFrameAlignment(); 4713 const int frame_alignment = ActivationFrameAlignment();
4714 const int frame_alignment_mask = frame_alignment - 1; 4714 const int frame_alignment_mask = frame_alignment - 1;
4715 4715
4716 if (frame_alignment > kPointerSize) { 4716 if (frame_alignment > kPointerSize) {
4717 Label alignment_as_expected; 4717 Label alignment_as_expected;
4718 ASSERT(IsPowerOf2(frame_alignment)); 4718 DCHECK(IsPowerOf2(frame_alignment));
4719 andi(at, sp, frame_alignment_mask); 4719 andi(at, sp, frame_alignment_mask);
4720 Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); 4720 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4721 // Don't use Check here, as it will call Runtime_Abort re-entering here. 4721 // Don't use Check here, as it will call Runtime_Abort re-entering here.
4722 stop("Unexpected stack alignment"); 4722 stop("Unexpected stack alignment");
4723 bind(&alignment_as_expected); 4723 bind(&alignment_as_expected);
4724 } 4724 }
4725 } 4725 }
4726 } 4726 }
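
AssertStackIsAligned only needs a mask test because the frame alignment is a power of two. A one-line host-side equivalent of the andi + branch-on-zero pair, as a sketch:

    #include <cstdint>

    // sp is aligned iff its low bits under the power-of-two alignment are zero.
    bool StackIsAligned(uintptr_t sp, uintptr_t frame_alignment) {
      return (sp & (frame_alignment - 1)) == 0;
    }
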
4727 4727
4728 4728
4729 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( 4729 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4730 Register reg, 4730 Register reg,
4731 Register scratch, 4731 Register scratch,
4732 Label* not_power_of_two_or_zero) { 4732 Label* not_power_of_two_or_zero) {
4733 Subu(scratch, reg, Operand(1)); 4733 Subu(scratch, reg, Operand(1));
4734 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt, 4734 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4735 scratch, Operand(zero_reg)); 4735 scratch, Operand(zero_reg));
4736 and_(at, scratch, reg); // In the delay slot. 4736 and_(at, scratch, reg); // In the delay slot.
4737 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg)); 4737 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4738 } 4738 }
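
JumpIfNotPowerOfTwoOrZero is the classic x & (x - 1) test: subtracting one borrows through the lowest set bit, and the preceding branch on (reg - 1) < 0 also catches zero, since 0 - 1 wraps to -1. A plain C++ rendering of the same predicate (the name is illustrative):

    #include <cstdint>

    bool IsPowerOfTwoAndNonZero(int32_t x) {
      if (x <= 0) return false;       // matches the lt branch on (x - 1)
      return (x & (x - 1)) == 0;      // matches the and_ + ne branch
    }
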
4739 4739
4740 4740
4741 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { 4741 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4742 ASSERT(!reg.is(overflow)); 4742 DCHECK(!reg.is(overflow));
4743 mov(overflow, reg); // Save original value. 4743 mov(overflow, reg); // Save original value.
4744 SmiTag(reg); 4744 SmiTag(reg);
4745 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0. 4745 xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
4746 } 4746 }
4747 4747
4748 4748
4749 void MacroAssembler::SmiTagCheckOverflow(Register dst, 4749 void MacroAssembler::SmiTagCheckOverflow(Register dst,
4750 Register src, 4750 Register src,
4751 Register overflow) { 4751 Register overflow) {
4752 if (dst.is(src)) { 4752 if (dst.is(src)) {
4753 // Fall back to slower case. 4753 // Fall back to slower case.
4754 SmiTagCheckOverflow(dst, overflow); 4754 SmiTagCheckOverflow(dst, overflow);
4755 } else { 4755 } else {
4756 ASSERT(!dst.is(src)); 4756 DCHECK(!dst.is(src));
4757 ASSERT(!dst.is(overflow)); 4757 DCHECK(!dst.is(overflow));
4758 ASSERT(!src.is(overflow)); 4758 DCHECK(!src.is(overflow));
4759 SmiTag(dst, src); 4759 SmiTag(dst, src);
4760 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0. 4760 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
4761 } 4761 }
4762 } 4762 }
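
Both overloads detect tagging overflow with a single xor: on 32-bit V8 the Smi tag is one bit (kSmiTag == 0, kSmiTagSize == 1, per the DCHECKs elsewhere in this file), so tagging doubles the value, and the shift overflows exactly when bits 31 and 30 of the input differ. A hedged sketch:

    #include <cstdint>

    // The tagged value is value << 1; the shift overflows iff the sign bit of
    // (value ^ tagged) is set, i.e. the top two bits of value differed.
    bool SmiTagOverflows(int32_t value) {
      int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      return (value ^ tagged) < 0;
    }
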
4763 4763
4764 4764
4765 void MacroAssembler::UntagAndJumpIfSmi(Register dst, 4765 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4766 Register src, 4766 Register src,
4767 Label* smi_case) { 4767 Label* smi_case) {
4768 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT); 4768 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4769 SmiUntag(dst, src); 4769 SmiUntag(dst, src);
4770 } 4770 }
4771 4771
4772 4772
4773 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, 4773 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4774 Register src, 4774 Register src,
4775 Label* non_smi_case) { 4775 Label* non_smi_case) {
4776 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT); 4776 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4777 SmiUntag(dst, src); 4777 SmiUntag(dst, src);
4778 } 4778 }
4779 4779
4780 void MacroAssembler::JumpIfSmi(Register value, 4780 void MacroAssembler::JumpIfSmi(Register value,
4781 Label* smi_label, 4781 Label* smi_label,
4782 Register scratch, 4782 Register scratch,
4783 BranchDelaySlot bd) { 4783 BranchDelaySlot bd) {
4784 ASSERT_EQ(0, kSmiTag); 4784 DCHECK_EQ(0, kSmiTag);
4785 andi(scratch, value, kSmiTagMask); 4785 andi(scratch, value, kSmiTagMask);
4786 Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); 4786 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4787 } 4787 }
4788 4788
4789 void MacroAssembler::JumpIfNotSmi(Register value, 4789 void MacroAssembler::JumpIfNotSmi(Register value,
4790 Label* not_smi_label, 4790 Label* not_smi_label,
4791 Register scratch, 4791 Register scratch,
4792 BranchDelaySlot bd) { 4792 BranchDelaySlot bd) {
4793 ASSERT_EQ(0, kSmiTag); 4793 DCHECK_EQ(0, kSmiTag);
4794 andi(scratch, value, kSmiTagMask); 4794 andi(scratch, value, kSmiTagMask);
4795 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); 4795 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4796 } 4796 }
4797 4797
4798 4798
4799 void MacroAssembler::JumpIfNotBothSmi(Register reg1, 4799 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4800 Register reg2, 4800 Register reg2,
4801 Label* on_not_both_smi) { 4801 Label* on_not_both_smi) {
4802 STATIC_ASSERT(kSmiTag == 0); 4802 STATIC_ASSERT(kSmiTag == 0);
4803 ASSERT_EQ(1, kSmiTagMask); 4803 DCHECK_EQ(1, kSmiTagMask);
4804 or_(at, reg1, reg2); 4804 or_(at, reg1, reg2);
4805 JumpIfNotSmi(at, on_not_both_smi); 4805 JumpIfNotSmi(at, on_not_both_smi);
4806 } 4806 }
4807 4807
4808 4808
4809 void MacroAssembler::JumpIfEitherSmi(Register reg1, 4809 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4810 Register reg2, 4810 Register reg2,
4811 Label* on_either_smi) { 4811 Label* on_either_smi) {
4812 STATIC_ASSERT(kSmiTag == 0); 4812 STATIC_ASSERT(kSmiTag == 0);
4813 ASSERT_EQ(1, kSmiTagMask); 4813 DCHECK_EQ(1, kSmiTagMask);
4814 // The AND has its tag bit set only if both inputs are non-Smis. 4814 // The AND has its tag bit set only if both inputs are non-Smis.
4815 and_(at, reg1, reg2); 4815 and_(at, reg1, reg2);
4816 JumpIfSmi(at, on_either_smi); 4816 JumpIfSmi(at, on_either_smi);
4817 } 4817 }
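
With kSmiTag == 0 and a one-bit tag mask, a Smi has its low bit clear, so one OR or AND folds two tag checks into a single test. A hedged C++ restatement of the two helpers above:

    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // matches the DCHECK_EQ(1, kSmiTagMask)

    // The OR carries a set tag bit if either input is a non-Smi.
    bool BothAreSmis(uint32_t a, uint32_t b) {
      return ((a | b) & kSmiTagMask) == 0;
    }

    // The AND carries a set tag bit only if both inputs are non-Smis.
    bool EitherIsSmi(uint32_t a, uint32_t b) {
      return ((a & b) & kSmiTagMask) == 0;
    }
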
4818 4818
4819 4819
4820 void MacroAssembler::AssertNotSmi(Register object) { 4820 void MacroAssembler::AssertNotSmi(Register object) {
4821 if (emit_debug_code()) { 4821 if (emit_debug_code()) {
4822 STATIC_ASSERT(kSmiTag == 0); 4822 STATIC_ASSERT(kSmiTag == 0);
4823 andi(at, object, kSmiTagMask); 4823 andi(at, object, kSmiTagMask);
(...skipping 51 matching lines...)
4875 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex); 4875 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
4876 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch)); 4876 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
4877 pop(object); 4877 pop(object);
4878 bind(&done_checking); 4878 bind(&done_checking);
4879 } 4879 }
4880 } 4880 }
4881 4881
4882 4882
4883 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { 4883 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
4884 if (emit_debug_code()) { 4884 if (emit_debug_code()) {
4885 ASSERT(!reg.is(at)); 4885 DCHECK(!reg.is(at));
4886 LoadRoot(at, index); 4886 LoadRoot(at, index);
4887 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); 4887 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
4888 } 4888 }
4889 } 4889 }
4890 4890
4891 4891
4892 void MacroAssembler::JumpIfNotHeapNumber(Register object, 4892 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4893 Register heap_number_map, 4893 Register heap_number_map,
4894 Register scratch, 4894 Register scratch,
4895 Label* on_not_heap_number) { 4895 Label* on_not_heap_number) {
(...skipping 124 matching lines...)
5020 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( 5020 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
5021 Register first, 5021 Register first,
5022 Register second, 5022 Register second,
5023 Register scratch1, 5023 Register scratch1,
5024 Register scratch2, 5024 Register scratch2,
5025 Label* failure) { 5025 Label* failure) {
5026 const int kFlatAsciiStringMask = 5026 const int kFlatAsciiStringMask =
5027 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 5027 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5028 const int kFlatAsciiStringTag = 5028 const int kFlatAsciiStringTag =
5029 kStringTag | kOneByteStringTag | kSeqStringTag; 5029 kStringTag | kOneByteStringTag | kSeqStringTag;
5030 ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed. 5030 DCHECK(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
5031 andi(scratch1, first, kFlatAsciiStringMask); 5031 andi(scratch1, first, kFlatAsciiStringMask);
5032 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag)); 5032 Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
5033 andi(scratch2, second, kFlatAsciiStringMask); 5033 andi(scratch2, second, kFlatAsciiStringMask);
5034 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag)); 5034 Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
5035 } 5035 }
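
The two mask-and-compare branches work because string-ness, encoding, and representation occupy disjoint bit fields of the instance type, so one AND checks all three at once. A sketch with the constant values treated as assumptions (0x80 and 4 are confirmed by DCHECKs later in this file; the representation mask is assumed to be the low two bits):

    #include <cstdint>

    const uint32_t kIsNotStringMask          = 0x80;
    const uint32_t kStringEncodingMask       = 0x04;  // one-byte bit
    const uint32_t kStringRepresentationMask = 0x03;  // assumed
    const uint32_t kFlatAsciiStringTag       = 0x04;  // string|one-byte|seq

    bool IsFlatAsciiInstanceType(uint32_t instance_type) {
      const uint32_t mask = kIsNotStringMask | kStringEncodingMask |
                            kStringRepresentationMask;
      return (instance_type & mask) == kFlatAsciiStringTag;
    }
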
5036 5036
5037 5037
5038 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, 5038 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
5039 Register scratch, 5039 Register scratch,
5040 Label* failure) { 5040 Label* failure) {
(...skipping 44 matching lines...)
5085 Label index_tag_ok, index_tag_bad; 5085 Label index_tag_ok, index_tag_bad;
5086 TrySmiTag(index, scratch, &index_tag_bad); 5086 TrySmiTag(index, scratch, &index_tag_bad);
5087 Branch(&index_tag_ok); 5087 Branch(&index_tag_ok);
5088 bind(&index_tag_bad); 5088 bind(&index_tag_bad);
5089 Abort(kIndexIsTooLarge); 5089 Abort(kIndexIsTooLarge);
5090 bind(&index_tag_ok); 5090 bind(&index_tag_ok);
5091 5091
5092 lw(at, FieldMemOperand(string, String::kLengthOffset)); 5092 lw(at, FieldMemOperand(string, String::kLengthOffset));
5093 Check(lt, kIndexIsTooLarge, index, Operand(at)); 5093 Check(lt, kIndexIsTooLarge, index, Operand(at));
5094 5094
5095 ASSERT(Smi::FromInt(0) == 0); 5095 DCHECK(Smi::FromInt(0) == 0);
5096 Check(ge, kIndexIsNegative, index, Operand(zero_reg)); 5096 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5097 5097
5098 SmiUntag(index, index); 5098 SmiUntag(index, index);
5099 } 5099 }
5100 5100
5101 5101
5102 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, 5102 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5103 int num_double_arguments, 5103 int num_double_arguments,
5104 Register scratch) { 5104 Register scratch) {
5105 int frame_alignment = ActivationFrameAlignment(); 5105 int frame_alignment = ActivationFrameAlignment();
5106 5106
5107 // Up to four simple arguments are passed in registers a0..a3. 5107 // Up to four simple arguments are passed in registers a0..a3.
5108 // Those four arguments must have reserved argument slots on the stack for 5108 // Those four arguments must have reserved argument slots on the stack for
5109 // mips, even though those argument slots are not normally used. 5109 // mips, even though those argument slots are not normally used.
5110 // Remaining arguments are pushed on the stack, above (higher address than) 5110 // Remaining arguments are pushed on the stack, above (higher address than)
5111 // the argument slots. 5111 // the argument slots.
5112 int stack_passed_arguments = CalculateStackPassedWords( 5112 int stack_passed_arguments = CalculateStackPassedWords(
5113 num_reg_arguments, num_double_arguments); 5113 num_reg_arguments, num_double_arguments);
5114 if (frame_alignment > kPointerSize) { 5114 if (frame_alignment > kPointerSize) {
5115 // Align the stack and make room for the stack-passed words (num_arguments 5115 // Align the stack and make room for the stack-passed words (num_arguments
5116 // minus the 4 register arguments) plus the original value of sp. 5116 // minus the 4 register arguments) plus the original value of sp.
5117 mov(scratch, sp); 5117 mov(scratch, sp);
5118 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); 5118 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5119 ASSERT(IsPowerOf2(frame_alignment)); 5119 DCHECK(IsPowerOf2(frame_alignment));
5120 And(sp, sp, Operand(-frame_alignment)); 5120 And(sp, sp, Operand(-frame_alignment));
5121 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); 5121 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5122 } else { 5122 } else {
5123 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); 5123 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5124 } 5124 }
5125 } 5125 }
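
When the ABI alignment exceeds the pointer size, the code reserves the outgoing words plus one extra slot, rounds sp down, and stores the caller's sp in the topmost reserved slot so it can be restored after the call. A host-side sketch of that arithmetic, assuming 4-byte pointers as on MIPS:

    #include <cstdint>

    // Returns the new sp; *saved_sp receives the value the real code stores
    // at sp + stack_passed_words * kPointerSize.
    uintptr_t PrepareAlignedFrame(uintptr_t sp, int stack_passed_words,
                                  uintptr_t frame_alignment,
                                  uintptr_t* saved_sp) {
      *saved_sp = sp;                                            // mov(scratch, sp)
      sp -= static_cast<uintptr_t>(stack_passed_words + 1) * 4;  // Subu
      sp &= ~(frame_alignment - 1);                              // And with -align
      return sp;
    }
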
5126 5126
5127 5127
5128 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, 5128 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5129 Register scratch) { 5129 Register scratch) {
(...skipping 24 matching lines...)
5154 5154
5155 void MacroAssembler::CallCFunction(Register function, 5155 void MacroAssembler::CallCFunction(Register function,
5156 int num_arguments) { 5156 int num_arguments) {
5157 CallCFunction(function, num_arguments, 0); 5157 CallCFunction(function, num_arguments, 0);
5158 } 5158 }
5159 5159
5160 5160
5161 void MacroAssembler::CallCFunctionHelper(Register function, 5161 void MacroAssembler::CallCFunctionHelper(Register function,
5162 int num_reg_arguments, 5162 int num_reg_arguments,
5163 int num_double_arguments) { 5163 int num_double_arguments) {
5164 ASSERT(has_frame()); 5164 DCHECK(has_frame());
5165 // Make sure that the stack is aligned before calling a C function unless 5165 // Make sure that the stack is aligned before calling a C function unless
5166 // running in the simulator. The simulator has its own alignment check which 5166 // running in the simulator. The simulator has its own alignment check which
5167 // provides more information. 5167 // provides more information.
5168 // The argument slots are presumed to have been set up by 5168 // The argument slots are presumed to have been set up by
5169 // PrepareCallCFunction. The C function must be called via t9, for mips ABI. 5169 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5170 5170
5171 #if V8_HOST_ARCH_MIPS 5171 #if V8_HOST_ARCH_MIPS
5172 if (emit_debug_code()) { 5172 if (emit_debug_code()) {
5173 int frame_alignment = base::OS::ActivationFrameAlignment(); 5173 int frame_alignment = base::OS::ActivationFrameAlignment();
5174 int frame_alignment_mask = frame_alignment - 1; 5174 int frame_alignment_mask = frame_alignment - 1;
5175 if (frame_alignment > kPointerSize) { 5175 if (frame_alignment > kPointerSize) {
5176 ASSERT(IsPowerOf2(frame_alignment)); 5176 DCHECK(IsPowerOf2(frame_alignment));
5177 Label alignment_as_expected; 5177 Label alignment_as_expected;
5178 And(at, sp, Operand(frame_alignment_mask)); 5178 And(at, sp, Operand(frame_alignment_mask));
5179 Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); 5179 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5180 // Don't use Check here, as it will call Runtime_Abort possibly 5180 // Don't use Check here, as it will call Runtime_Abort possibly
5181 // re-entering here. 5181 // re-entering here.
5182 stop("Unexpected alignment in CallCFunction"); 5182 stop("Unexpected alignment in CallCFunction");
5183 bind(&alignment_as_expected); 5183 bind(&alignment_as_expected);
5184 } 5184 }
5185 } 5185 }
5186 #endif // V8_HOST_ARCH_MIPS 5186 #endif // V8_HOST_ARCH_MIPS
(...skipping 105 matching lines...)
5292 Branch(if_deprecated, ne, scratch, Operand(zero_reg)); 5292 Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5293 } 5293 }
5294 } 5294 }
5295 5295
5296 5296
5297 void MacroAssembler::JumpIfBlack(Register object, 5297 void MacroAssembler::JumpIfBlack(Register object,
5298 Register scratch0, 5298 Register scratch0,
5299 Register scratch1, 5299 Register scratch1,
5300 Label* on_black) { 5300 Label* on_black) {
5301 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. 5301 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5302 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 5302 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5303 } 5303 }
5304 5304
5305 5305
5306 void MacroAssembler::HasColor(Register object, 5306 void MacroAssembler::HasColor(Register object,
5307 Register bitmap_scratch, 5307 Register bitmap_scratch,
5308 Register mask_scratch, 5308 Register mask_scratch,
5309 Label* has_color, 5309 Label* has_color,
5310 int first_bit, 5310 int first_bit,
5311 int second_bit) { 5311 int second_bit) {
5312 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); 5312 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5313 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); 5313 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5314 5314
5315 GetMarkBits(object, bitmap_scratch, mask_scratch); 5315 GetMarkBits(object, bitmap_scratch, mask_scratch);
5316 5316
5317 Label other_color, word_boundary; 5317 Label other_color, word_boundary;
5318 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5318 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5319 And(t8, t9, Operand(mask_scratch)); 5319 And(t8, t9, Operand(mask_scratch));
5320 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg)); 5320 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5321 // Shift left 1 by adding. 5321 // Shift left 1 by adding.
5322 Addu(mask_scratch, mask_scratch, Operand(mask_scratch)); 5322 Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5323 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg)); 5323 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5324 And(t8, t9, Operand(mask_scratch)); 5324 And(t8, t9, Operand(mask_scratch));
5325 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg)); 5325 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5326 jmp(&other_color); 5326 jmp(&other_color);
5327 5327
5328 bind(&word_boundary); 5328 bind(&word_boundary);
5329 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); 5329 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5330 And(t9, t9, Operand(1)); 5330 And(t9, t9, Operand(1));
5331 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg)); 5331 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5332 bind(&other_color); 5332 bind(&other_color);
5333 } 5333 }
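
Every object owns two adjacent bits in its page's marking bitmap, and the DCHECKed patterns fix the encoding: white "00", black "10", grey "11". HasColor tests the first bit, doubles the mask (a shift by addition), and tests the second, with a separate path when the pair straddles a cell boundary. A sketch of the non-straddling case:

    #include <cstdint>

    // bits is the bitmap cell; mask selects the first of the object's two
    // mark bits. Cell-boundary straddling is handled separately in the asm.
    bool HasColor(uint32_t bits, uint32_t mask, int first_bit, int second_bit) {
      bool first = (bits & mask) != 0;
      bool second = (bits & (mask << 1)) != 0;  // mask + mask in the asm
      return first == (first_bit == 1) && second == (second_bit == 1);
    }
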
5334 5334
5335 5335
5336 // Detect some, but not all, common pointer-free objects. This is used by the 5336 // Detect some, but not all, common pointer-free objects. This is used by the
5337 // incremental write barrier which doesn't care about oddballs (they are always 5337 // incremental write barrier which doesn't care about oddballs (they are always
5338 // marked black immediately so this code is not hit). 5338 // marked black immediately so this code is not hit).
5339 void MacroAssembler::JumpIfDataObject(Register value, 5339 void MacroAssembler::JumpIfDataObject(Register value,
5340 Register scratch, 5340 Register scratch,
5341 Label* not_data_object) { 5341 Label* not_data_object) {
5342 ASSERT(!AreAliased(value, scratch, t8, no_reg)); 5342 DCHECK(!AreAliased(value, scratch, t8, no_reg));
5343 Label is_data_object; 5343 Label is_data_object;
5344 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); 5344 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5345 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); 5345 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5346 Branch(&is_data_object, eq, t8, Operand(scratch)); 5346 Branch(&is_data_object, eq, t8, Operand(scratch));
5347 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); 5347 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5348 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); 5348 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5349 // If it's a string and it's not a cons string then it's an object containing 5349 // If it's a string and it's not a cons string then it's an object containing
5350 // no GC pointers. 5350 // no GC pointers.
5351 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 5351 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5352 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); 5352 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5353 Branch(not_data_object, ne, t8, Operand(zero_reg)); 5353 Branch(not_data_object, ne, t8, Operand(zero_reg));
5354 bind(&is_data_object); 5354 bind(&is_data_object);
5355 } 5355 }
5356 5356
5357 5357
5358 void MacroAssembler::GetMarkBits(Register addr_reg, 5358 void MacroAssembler::GetMarkBits(Register addr_reg,
5359 Register bitmap_reg, 5359 Register bitmap_reg,
5360 Register mask_reg) { 5360 Register mask_reg) {
5361 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); 5361 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5362 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); 5362 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5363 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); 5363 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5364 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; 5364 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5365 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits); 5365 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5366 sll(t8, t8, kPointerSizeLog2); 5366 sll(t8, t8, kPointerSizeLog2);
5367 Addu(bitmap_reg, bitmap_reg, t8); 5367 Addu(bitmap_reg, bitmap_reg, t8);
5368 li(t8, Operand(1)); 5368 li(t8, Operand(1));
5369 sllv(mask_reg, t8, mask_reg); 5369 sllv(mask_reg, t8, mask_reg);
5370 } 5370 }
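
GetMarkBits splits the address into three fields: the page base, a cell index into the page's marking bitmap, and a bit index within that 32-bit cell. A sketch with MIPS-flavoured constants treated as assumptions (4-byte pointers, 32-bit cells, 1 MB pages); the MemoryChunk header offset is added at the load site in the real code:

    #include <cstdint>

    const int kPointerSizeLog2 = 2;                       // 4-byte pointers
    const int kBitsPerCellLog2 = 5;                       // 32 bits per cell
    const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed 1 MB pages

    void GetMarkBits(uintptr_t addr, uintptr_t* cell_addr, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;
      uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
      uintptr_t cell = (addr & kPageAlignmentMask) >>
                       (kPointerSizeLog2 + kBitsPerCellLog2);
      *cell_addr = page + (cell << kPointerSizeLog2);     // cell * 4 bytes
      *mask = 1u << bit;
    }
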
5371 5371
5372 5372
5373 void MacroAssembler::EnsureNotWhite( 5373 void MacroAssembler::EnsureNotWhite(
5374 Register value, 5374 Register value,
5375 Register bitmap_scratch, 5375 Register bitmap_scratch,
5376 Register mask_scratch, 5376 Register mask_scratch,
5377 Register load_scratch, 5377 Register load_scratch,
5378 Label* value_is_white_and_not_data) { 5378 Label* value_is_white_and_not_data) {
5379 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); 5379 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5380 GetMarkBits(value, bitmap_scratch, mask_scratch); 5380 GetMarkBits(value, bitmap_scratch, mask_scratch);
5381 5381
5382 // If the value is black or grey we don't need to do anything. 5382 // If the value is black or grey we don't need to do anything.
5383 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); 5383 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5384 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 5384 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
5385 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); 5385 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
5386 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); 5386 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5387 5387
5388 Label done; 5388 Label done;
5389 5389
5390 // Since both black and grey have a 1 in the first position and white does 5390 // Since both black and grey have a 1 in the first position and white does
5391 // not have a 1 there we only need to check one bit. 5391 // not have a 1 there we only need to check one bit.
5392 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5392 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5393 And(t8, mask_scratch, load_scratch); 5393 And(t8, mask_scratch, load_scratch);
5394 Branch(&done, ne, t8, Operand(zero_reg)); 5394 Branch(&done, ne, t8, Operand(zero_reg));
5395 5395
5396 if (emit_debug_code()) { 5396 if (emit_debug_code()) {
(...skipping 18 matching lines...)
5415 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); 5415 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5416 { 5416 {
5417 Label skip; 5417 Label skip;
5418 Branch(&skip, ne, t8, Operand(map)); 5418 Branch(&skip, ne, t8, Operand(map));
5419 li(length, HeapNumber::kSize); 5419 li(length, HeapNumber::kSize);
5420 Branch(&is_data_object); 5420 Branch(&is_data_object);
5421 bind(&skip); 5421 bind(&skip);
5422 } 5422 }
5423 5423
5424 // Check for strings. 5424 // Check for strings.
5425 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); 5425 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5426 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); 5426 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5427 // If it's a string and it's not a cons string then it's an object containing 5427 // If it's a string and it's not a cons string then it's an object containing
5428 // no GC pointers. 5428 // no GC pointers.
5429 Register instance_type = load_scratch; 5429 Register instance_type = load_scratch;
5430 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); 5430 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5431 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); 5431 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5432 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg)); 5432 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5433 // It's a non-indirect (non-cons and non-slice) string. 5433 // It's a non-indirect (non-cons and non-slice) string.
5434 // If it's external, the length is just ExternalString::kSize. 5434 // If it's external, the length is just ExternalString::kSize.
5435 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). 5435 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5436 // External strings are the only ones with the kExternalStringTag bit 5436 // External strings are the only ones with the kExternalStringTag bit
5437 // set. 5437 // set.
5438 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); 5438 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
5439 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); 5439 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
5440 And(t8, instance_type, Operand(kExternalStringTag)); 5440 And(t8, instance_type, Operand(kExternalStringTag));
5441 { 5441 {
5442 Label skip; 5442 Label skip;
5443 Branch(&skip, eq, t8, Operand(zero_reg)); 5443 Branch(&skip, eq, t8, Operand(zero_reg));
5444 li(length, ExternalString::kSize); 5444 li(length, ExternalString::kSize);
5445 Branch(&is_data_object); 5445 Branch(&is_data_object);
5446 bind(&skip); 5446 bind(&skip);
5447 } 5447 }
5448 5448
5449 // Sequential string, either ASCII or UC16. 5449 // Sequential string, either ASCII or UC16.
5450 // For ASCII (char-size of 1) we shift the smi tag away to get the length. 5450 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5451 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby 5451 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5452 // getting the length multiplied by 2. 5452 // getting the length multiplied by 2.
5453 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); 5453 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5454 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 5454 DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
5455 lw(t9, FieldMemOperand(value, String::kLengthOffset)); 5455 lw(t9, FieldMemOperand(value, String::kLengthOffset));
5456 And(t8, instance_type, Operand(kStringEncodingMask)); 5456 And(t8, instance_type, Operand(kStringEncodingMask));
5457 { 5457 {
5458 Label skip; 5458 Label skip;
5459 Branch(&skip, eq, t8, Operand(zero_reg)); 5459 Branch(&skip, eq, t8, Operand(zero_reg));
5460 srl(t9, t9, 1); 5460 srl(t9, t9, 1);
5461 bind(&skip); 5461 bind(&skip);
5462 } 5462 }
5463 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); 5463 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5464 And(length, length, Operand(~kObjectAlignmentMask)); 5464 And(length, length, Operand(~kObjectAlignmentMask));
(...skipping 69 matching lines...)
5534 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex); 5534 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5535 Branch(call_runtime, ne, a2, Operand(at)); 5535 Branch(call_runtime, ne, a2, Operand(at));
5536 5536
5537 bind(&no_elements); 5537 bind(&no_elements);
5538 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset)); 5538 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5539 Branch(&next, ne, a2, Operand(null_value)); 5539 Branch(&next, ne, a2, Operand(null_value));
5540 } 5540 }
5541 5541
5542 5542
5543 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { 5543 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5544 ASSERT(!output_reg.is(input_reg)); 5544 DCHECK(!output_reg.is(input_reg));
5545 Label done; 5545 Label done;
5546 li(output_reg, Operand(255)); 5546 li(output_reg, Operand(255));
5547 // Normal branch: nop in delay slot. 5547 // Normal branch: nop in delay slot.
5548 Branch(&done, gt, input_reg, Operand(output_reg)); 5548 Branch(&done, gt, input_reg, Operand(output_reg));
5549 // Use delay slot in this branch. 5549 // Use delay slot in this branch.
5550 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg)); 5550 Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5551 mov(output_reg, zero_reg); // In delay slot. 5551 mov(output_reg, zero_reg); // In delay slot.
5552 mov(output_reg, input_reg); // Value is in range 0..255. 5552 mov(output_reg, input_reg); // Value is in range 0..255.
5553 bind(&done); 5553 bind(&done);
5554 } 5554 }
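
The branch-and-delay-slot sequence above is a saturating clamp; the delay-slot move to zero executes on both paths but only survives on the negative one. A plain equivalent:

    #include <cstdint>

    int32_t ClampUint8(int32_t v) {
      if (v > 255) return 255;  // the li(output_reg, 255) path
      if (v < 0) return 0;      // the delay-slot mov(output_reg, zero_reg) path
      return v;                 // value already in range 0..255
    }
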
(...skipping 74 matching lines...)
5629 UNREACHABLE(); 5629 UNREACHABLE();
5630 return no_reg; 5630 return no_reg;
5631 } 5631 }
5632 5632
5633 5633
5634 void MacroAssembler::JumpIfDictionaryInPrototypeChain( 5634 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5635 Register object, 5635 Register object,
5636 Register scratch0, 5636 Register scratch0,
5637 Register scratch1, 5637 Register scratch1,
5638 Label* found) { 5638 Label* found) {
5639 ASSERT(!scratch1.is(scratch0)); 5639 DCHECK(!scratch1.is(scratch0));
5640 Factory* factory = isolate()->factory(); 5640 Factory* factory = isolate()->factory();
5641 Register current = scratch0; 5641 Register current = scratch0;
5642 Label loop_again; 5642 Label loop_again;
5643 5643
5644 // current starts at the object itself and walks up the prototype chain. 5644 // current starts at the object itself and walks up the prototype chain.
5645 Move(current, object); 5645 Move(current, object);
5646 5646
5647 // Loop based on the map going up the prototype chain. 5647 // Loop based on the map going up the prototype chain.
5648 bind(&loop_again); 5648 bind(&loop_again);
5649 lw(current, FieldMemOperand(current, HeapObject::kMapOffset)); 5649 lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
(...skipping 35 matching lines...)
5685 CodePatcher::CodePatcher(byte* address, 5685 CodePatcher::CodePatcher(byte* address,
5686 int instructions, 5686 int instructions,
5687 FlushICache flush_cache) 5687 FlushICache flush_cache)
5688 : address_(address), 5688 : address_(address),
5689 size_(instructions * Assembler::kInstrSize), 5689 size_(instructions * Assembler::kInstrSize),
5690 masm_(NULL, address, size_ + Assembler::kGap), 5690 masm_(NULL, address, size_ + Assembler::kGap),
5691 flush_cache_(flush_cache) { 5691 flush_cache_(flush_cache) {
5692 // Create a new macro assembler pointing to the address of the code to patch. 5692 // Create a new macro assembler pointing to the address of the code to patch.
5693 // The size is adjusted by kGap in order for the assembler to generate size 5693 // The size is adjusted by kGap in order for the assembler to generate size
5694 // bytes of instructions without violating buffer size constraints. 5694 // bytes of instructions without violating buffer size constraints.
5695 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 5695 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5696 } 5696 }
5697 5697
5698 5698
5699 CodePatcher::~CodePatcher() { 5699 CodePatcher::~CodePatcher() {
5700 // Indicate that code has changed. 5700 // Indicate that code has changed.
5701 if (flush_cache_ == FLUSH) { 5701 if (flush_cache_ == FLUSH) {
5702 CpuFeatures::FlushICache(address_, size_); 5702 CpuFeatures::FlushICache(address_, size_);
5703 } 5703 }
5704 5704
5705 // Check that the code was patched as expected. 5705 // Check that the code was patched as expected.
5706 ASSERT(masm_.pc_ == address_ + size_); 5706 DCHECK(masm_.pc_ == address_ + size_);
5707 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 5707 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5708 } 5708 }
5709 5709
5710 5710
5711 void CodePatcher::Emit(Instr instr) { 5711 void CodePatcher::Emit(Instr instr) {
5712 masm()->emit(instr); 5712 masm()->emit(instr);
5713 } 5713 }
5714 5714
5715 5715
5716 void CodePatcher::Emit(Address addr) { 5716 void CodePatcher::Emit(Address addr) {
5717 masm()->emit(reinterpret_cast<Instr>(addr)); 5717 masm()->emit(reinterpret_cast<Instr>(addr));
5718 } 5718 }
5719 5719
5720 5720
5721 void CodePatcher::ChangeBranchCondition(Condition cond) { 5721 void CodePatcher::ChangeBranchCondition(Condition cond) {
5722 Instr instr = Assembler::instr_at(masm_.pc_); 5722 Instr instr = Assembler::instr_at(masm_.pc_);
5723 ASSERT(Assembler::IsBranch(instr)); 5723 DCHECK(Assembler::IsBranch(instr));
5724 uint32_t opcode = Assembler::GetOpcodeField(instr); 5724 uint32_t opcode = Assembler::GetOpcodeField(instr);
5725 // Currently only the 'eq' and 'ne' cond values and the simple branch 5725 // Currently only the 'eq' and 'ne' cond values and the simple branch
5726 // instructions (whose opcode encodes the branch type) are supported. 5726 // instructions (whose opcode encodes the branch type) are supported.
5727 // There are some special cases (see Assembler::IsBranch()) so extending this 5727 // There are some special cases (see Assembler::IsBranch()) so extending this
5728 // would be tricky. 5728 // would be tricky.
5729 ASSERT(opcode == BEQ || 5729 DCHECK(opcode == BEQ ||
5730 opcode == BNE || 5730 opcode == BNE ||
5731 opcode == BLEZ || 5731 opcode == BLEZ ||
5732 opcode == BGTZ || 5732 opcode == BGTZ ||
5733 opcode == BEQL || 5733 opcode == BEQL ||
5734 opcode == BNEL || 5734 opcode == BNEL ||
5735 opcode == BLEZL || 5735 opcode == BLEZL ||
5736 opcode == BGTZL); 5736 opcode == BGTZL);
5737 opcode = (cond == eq) ? BEQ : BNE; 5737 opcode = (cond == eq) ? BEQ : BNE;
5738 instr = (instr & ~kOpcodeMask) | opcode; 5738 instr = (instr & ~kOpcodeMask) | opcode;
5739 masm_.emit(instr); 5739 masm_.emit(instr);
5740 } 5740 }
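
MIPS keeps the 6-bit opcode in the top bits of every instruction word, so flipping a branch between beq and bne is one mask-and-or; V8 defines the opcode constants pre-shifted, which the code above relies on. A sketch using the MIPS32 encodings (BEQ = 4, BNE = 5):

    #include <cstdint>

    typedef uint32_t Instr;
    const int kOpcodeShift = 26;                      // opcode = bits 31..26
    const Instr kOpcodeMask = 0x3Fu << kOpcodeShift;
    const Instr BEQ = 4u << kOpcodeShift;
    const Instr BNE = 5u << kOpcodeShift;

    Instr SetBranchCondition(Instr instr, bool equal) {
      return (instr & ~kOpcodeMask) | (equal ? BEQ : BNE);
    }
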
5741 5741
5742 5742
5743 void MacroAssembler::TruncatingDiv(Register result, 5743 void MacroAssembler::TruncatingDiv(Register result,
5744 Register dividend, 5744 Register dividend,
5745 int32_t divisor) { 5745 int32_t divisor) {
5746 ASSERT(!dividend.is(result)); 5746 DCHECK(!dividend.is(result));
5747 ASSERT(!dividend.is(at)); 5747 DCHECK(!dividend.is(at));
5748 ASSERT(!result.is(at)); 5748 DCHECK(!result.is(at));
5749 MultiplierAndShift ms(divisor); 5749 MultiplierAndShift ms(divisor);
5750 li(at, Operand(ms.multiplier())); 5750 li(at, Operand(ms.multiplier()));
5751 Mult(dividend, Operand(at)); 5751 Mult(dividend, Operand(at));
5752 mfhi(result); 5752 mfhi(result);
5753 if (divisor > 0 && ms.multiplier() < 0) { 5753 if (divisor > 0 && ms.multiplier() < 0) {
5754 Addu(result, result, Operand(dividend)); 5754 Addu(result, result, Operand(dividend));
5755 } 5755 }
5756 if (divisor < 0 && ms.multiplier() > 0) { 5756 if (divisor < 0 && ms.multiplier() > 0) {
5757 Subu(result, result, Operand(dividend)); 5757 Subu(result, result, Operand(dividend));
5758 } 5758 }
5759 if (ms.shift() > 0) sra(result, result, ms.shift()); 5759 if (ms.shift() > 0) sra(result, result, ms.shift());
5760 srl(at, dividend, 31); 5760 srl(at, dividend, 31);
5761 Addu(result, result, Operand(at)); 5761 Addu(result, result, Operand(at));
5762 } 5762 }
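
TruncatingDiv divides by a compile-time constant with a multiply and shifts in the Granlund-Montgomery style: take the high word of dividend * multiplier, correct for the multiplier's sign, arithmetic-shift, then add the dividend's sign bit so the result rounds toward zero. A host-side sketch, assuming multiplier and shift come from a helper equivalent to MultiplierAndShift(divisor):

    #include <cstdint>

    int32_t TruncatingDiv(int32_t dividend, int32_t divisor,
                          int32_t multiplier, int shift) {
      int64_t product = static_cast<int64_t>(dividend) * multiplier;
      int32_t result = static_cast<int32_t>(product >> 32);   // mfhi
      if (divisor > 0 && multiplier < 0) result += dividend;  // Addu fixup
      if (divisor < 0 && multiplier > 0) result -= dividend;  // Subu fixup
      if (shift > 0) result >>= shift;                        // sra
      return result + static_cast<int32_t>(
          static_cast<uint32_t>(dividend) >> 31);             // srl + Addu
    }
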
5763 5763
5764 5764
5765 } } // namespace v8::internal 5765 } } // namespace v8::internal
5766 5766
5767 #endif // V8_TARGET_ARCH_MIPS 5767 #endif // V8_TARGET_ARCH_MIPS