| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ | 5 #ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ |
| 6 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ | 6 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ |
| 7 | 7 |
| 8 #include "src/assembler.h" | 8 #include "src/assembler.h" |
| 9 #include "src/globals.h" | 9 #include "src/globals.h" |
| 10 #include "src/mips/assembler-mips.h" | 10 #include "src/mips64/assembler-mips64.h" |
| 11 | 11 |
| 12 namespace v8 { | 12 namespace v8 { |
| 13 namespace internal { | 13 namespace internal { |
| 14 | 14 |
| 15 // Forward declaration. | 15 // Forward declaration. |
| 16 class JumpTarget; | 16 class JumpTarget; |
| 17 | 17 |
| 18 // Reserved Register Usage Summary. | 18 // Reserved Register Usage Summary. |
| 19 // | 19 // |
| 20 // Registers t8, t9, and at are reserved for use by the MacroAssembler. | 20 // Registers t8, t9, and at are reserved for use by the MacroAssembler. |
| (...skipping 33 matching lines...) |
| 54 | 54 |
| 55 // Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls. | 55 // Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls. |
| 56 enum BranchDelaySlot { | 56 enum BranchDelaySlot { |
| 57 USE_DELAY_SLOT, | 57 USE_DELAY_SLOT, |
| 58 PROTECT | 58 PROTECT |
| 59 }; | 59 }; |
| 60 | 60 |
| 61 // Flags used for the li macro-assembler function. | 61 // Flags used for the li macro-assembler function. |
| 62 enum LiFlags { | 62 enum LiFlags { |
| 63 // If the constant value can be represented in just 16 bits, then | 63 // If the constant value can be represented in just 16 bits, then |
| 64 // optimize the li to use a single instruction, rather than lui/ori pair. | 64 // optimize the li to use a single instruction, rather than lui/ori/dsll |
| 65 // sequence. |
| 65 OPTIMIZE_SIZE = 0, | 66 OPTIMIZE_SIZE = 0, |
| 66 // Always use 2 instructions (lui/ori pair), even if the constant could | 67 // Always use 6 instructions (lui/ori/dsll sequence), even if the constant |
| 67 // be loaded with just one, so that this value is patchable later. | 68 // could be loaded with just one, so that this value is patchable later. |
| 68 CONSTANT_SIZE = 1 | 69 CONSTANT_SIZE = 1, |
| 70 // For address loads only 4 instructions are required. Used to mark a |
| 71 // constant load that will be used as an address without relocation |
| 72 // information. It ensures predictable code size, so specific sites |
| 73 // in code are patchable. |
| 74 ADDRESS_LOAD = 2 |
| 69 }; | 75 }; |
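[Editor's aside, illustrative only and not part of the patch] A minimal sketch of how the three LiFlags modes might be requested at a call site, assuming the usual `#define __ ACCESS_MASM(masm)` shorthand from the .cc files; the registers and constant values are placeholders.

    // Smallest encoding the value allows; instruction count varies with the value.
    __ li(t0, Operand(0x1234), OPTIMIZE_SIZE);
    // Fixed 6-instruction lui/ori/dsll sequence, so the value stays patchable.
    __ li(t1, Operand(0x12345678), CONSTANT_SIZE);
    // Fixed 4-instruction sequence for an address loaded without reloc info.
    __ li(t2, Operand(0x7FFF0000), ADDRESS_LOAD);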
| 70 | 76 |
| 71 | 77 |
| 72 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; | 78 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; |
| 73 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; | 79 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; |
| 74 enum PointersToHereCheck { | 80 enum PointersToHereCheck { |
| 75 kPointersToHereMaybeInteresting, | 81 kPointersToHereMaybeInteresting, |
| 76 kPointersToHereAreAlwaysInteresting | 82 kPointersToHereAreAlwaysInteresting |
| 77 }; | 83 }; |
| 78 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; | 84 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; |
| (...skipping 20 matching lines...) |
| 99 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX); | 105 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX); |
| 100 } | 106 } |
| 101 | 107 |
| 102 | 108 |
| 103 // Generate a MemOperand for loading a field from an object. | 109 // Generate a MemOperand for loading a field from an object. |
| 104 inline MemOperand FieldMemOperand(Register object, int offset) { | 110 inline MemOperand FieldMemOperand(Register object, int offset) { |
| 105 return MemOperand(object, offset - kHeapObjectTag); | 111 return MemOperand(object, offset - kHeapObjectTag); |
| 106 } | 112 } |
| 107 | 113 |
| 108 | 114 |
| 115 inline MemOperand UntagSmiMemOperand(Register rm, int offset) { |
| 116 // Assumes that Smis are shifted by 32 bits and stored little-endian. |
| 117 STATIC_ASSERT(kSmiShift == 32); |
| 118 return MemOperand(rm, offset + (kSmiShift / kBitsPerByte)); |
| 119 } |
| 120 |
| 121 |
| 122 inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) { |
| 123 return UntagSmiMemOperand(rm, offset - kHeapObjectTag); |
| 124 } |
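[Editor's note, illustrative] Because the 32-bit Smi payload sits in the upper word on mips64, these helpers let a plain 32-bit load fetch the untagged value directly; a sketch, assuming a tagged string in a0 (the field chosen is just an example of a Smi-valued field):

    // One lw instead of ld followed by dsra32: loads the untagged int32
    // length of the string in a0.
    __ lw(t0, UntagSmiFieldMemOperand(a0, String::kLengthOffset));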
| 125 |
| 126 |
| 109 // Generate a MemOperand for storing arguments 5..N on the stack | 127 // Generate a MemOperand for storing arguments 5..N on the stack |
| 110 // when calling CallCFunction(). | 128 // when calling CallCFunction(). |
| 129 // TODO(plind): Currently ONLY used for O32. Should be fixed for |
| 130 // n64, and used in RegExp code, and other places |
| 131 // with more than 8 arguments. |
| 111 inline MemOperand CFunctionArgumentOperand(int index) { | 132 inline MemOperand CFunctionArgumentOperand(int index) { |
| 112 ASSERT(index > kCArgSlotCount); | 133 ASSERT(index > kCArgSlotCount); |
| 113 // Argument 5 takes the slot just past the four Arg-slots. | 134 // Argument 5 takes the slot just past the four Arg-slots. |
| 114 int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; | 135 int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; |
| 115 return MemOperand(sp, offset); | 136 return MemOperand(sp, offset); |
| 116 } | 137 } |
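[Editor's sketch, not part of the patch] Mirroring the O32 convention the TODO above refers to: arguments 1..4 travel in a0..a3 and argument 5 is spilled to its stack slot before the call; `ext_ref` is a placeholder ExternalReference and the argument registers are illustrative.

    __ PrepareCallCFunction(5, t8);           // reserve arg slots, align sp.
    // ... place arguments 1..4 in a0..a3 ...
    __ sw(a4, CFunctionArgumentOperand(5));   // argument 5 goes on the stack.
    __ CallCFunction(ext_ref, 5);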
| 117 | 138 |
| 118 | 139 |
| 119 // MacroAssembler implements a collection of frequently used macros. | 140 // MacroAssembler implements a collection of frequently used macros. |
| 120 class MacroAssembler: public Assembler { | 141 class MacroAssembler: public Assembler { |
| (...skipping 99 matching lines...) |
| 220 } | 241 } |
| 221 | 242 |
| 222 inline void Move(FPURegister dst, FPURegister src) { | 243 inline void Move(FPURegister dst, FPURegister src) { |
| 223 if (!dst.is(src)) { | 244 if (!dst.is(src)) { |
| 224 mov_d(dst, src); | 245 mov_d(dst, src); |
| 225 } | 246 } |
| 226 } | 247 } |
| 227 | 248 |
| 228 inline void Move(Register dst_low, Register dst_high, FPURegister src) { | 249 inline void Move(Register dst_low, Register dst_high, FPURegister src) { |
| 229 mfc1(dst_low, src); | 250 mfc1(dst_low, src); |
| 230 mfc1(dst_high, FPURegister::from_code(src.code() + 1)); | 251 mfhc1(dst_high, src); |
| 231 } | 252 } |
| 232 | 253 |
| 233 inline void FmoveHigh(Register dst_high, FPURegister src) { | 254 inline void FmoveHigh(Register dst_high, FPURegister src) { |
| 234 mfc1(dst_high, FPURegister::from_code(src.code() + 1)); | 255 mfhc1(dst_high, src); |
| 235 } | 256 } |
| 236 | 257 |
| 237 inline void FmoveLow(Register dst_low, FPURegister src) { | 258 inline void FmoveLow(Register dst_low, FPURegister src) { |
| 238 mfc1(dst_low, src); | 259 mfc1(dst_low, src); |
| 239 } | 260 } |
| 240 | 261 |
| 241 inline void Move(FPURegister dst, Register src_low, Register src_high) { | 262 inline void Move(FPURegister dst, Register src_low, Register src_high) { |
| 242 mtc1(src_low, dst); | 263 mtc1(src_low, dst); |
| 243 mtc1(src_high, FPURegister::from_code(dst.code() + 1)); | 264 mthc1(src_high, dst); |
| 244 } | 265 } |
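[Editor's illustration] The GPR/FPU pair moves now use mfhc1/mthc1 on the same register instead of the odd-numbered companion; a round-trip sketch with placeholder registers:

    __ Move(t0, t1, f0);   // t0 = low 32 bits of f0, t1 = high 32 bits (mfc1/mfhc1).
    __ Move(f2, t0, t1);   // rebuild the double in f2 (mtc1/mthc1).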
| 245 | 266 |
| 246 // Conditional move. | 267 // Conditional move. |
| 247 void Move(FPURegister dst, double imm); | 268 void Move(FPURegister dst, double imm); |
| 248 void Movz(Register rd, Register rs, Register rt); | 269 void Movz(Register rd, Register rs, Register rt); |
| 249 void Movn(Register rd, Register rs, Register rt); | 270 void Movn(Register rd, Register rs, Register rt); |
| 250 void Movt(Register rd, Register rs, uint16_t cc = 0); | 271 void Movt(Register rd, Register rs, uint16_t cc = 0); |
| 251 void Movf(Register rd, Register rs, uint16_t cc = 0); | 272 void Movf(Register rd, Register rs, uint16_t cc = 0); |
| 252 | 273 |
| 253 void Clz(Register rd, Register rs); | 274 void Clz(Register rd, Register rs); |
| (...skipping 285 matching lines...) |
| 539 Label* gc_required); | 560 Label* gc_required); |
| 540 | 561 |
| 541 // Allocates a heap number or jumps to the gc_required label if the young | 562 // Allocates a heap number or jumps to the gc_required label if the young |
| 542 // space is full and a scavenge is needed. All registers are clobbered also | 563 // space is full and a scavenge is needed. All registers are clobbered also |
| 543 // when control continues at the gc_required label. | 564 // when control continues at the gc_required label. |
| 544 void AllocateHeapNumber(Register result, | 565 void AllocateHeapNumber(Register result, |
| 545 Register scratch1, | 566 Register scratch1, |
| 546 Register scratch2, | 567 Register scratch2, |
| 547 Register heap_number_map, | 568 Register heap_number_map, |
| 548 Label* gc_required, | 569 Label* gc_required, |
| 549 TaggingMode tagging_mode = TAG_RESULT, | 570 TaggingMode tagging_mode = TAG_RESULT); |
| 550 MutableMode mode = IMMUTABLE); | |
| 551 void AllocateHeapNumberWithValue(Register result, | 571 void AllocateHeapNumberWithValue(Register result, |
| 552 FPURegister value, | 572 FPURegister value, |
| 553 Register scratch1, | 573 Register scratch1, |
| 554 Register scratch2, | 574 Register scratch2, |
| 555 Label* gc_required); | 575 Label* gc_required); |
| 556 | 576 |
| 557 // --------------------------------------------------------------------------- | 577 // --------------------------------------------------------------------------- |
| 558 // Instruction macros. | 578 // Instruction macros. |
| 559 | 579 |
| 560 #define DEFINE_INSTRUCTION(instr) \ | 580 #define DEFINE_INSTRUCTION(instr) \ |
| 561 void instr(Register rd, Register rs, const Operand& rt); \ | 581 void instr(Register rd, Register rs, const Operand& rt); \ |
| 562 void instr(Register rd, Register rs, Register rt) { \ | 582 void instr(Register rd, Register rs, Register rt) { \ |
| 563 instr(rd, rs, Operand(rt)); \ | 583 instr(rd, rs, Operand(rt)); \ |
| 564 } \ | 584 } \ |
| 565 void instr(Register rs, Register rt, int32_t j) { \ | 585 void instr(Register rs, Register rt, int32_t j) { \ |
| 566 instr(rs, rt, Operand(j)); \ | 586 instr(rs, rt, Operand(j)); \ |
| 567 } | 587 } |
| 568 | 588 |
| 569 #define DEFINE_INSTRUCTION2(instr) \ | 589 #define DEFINE_INSTRUCTION2(instr) \ |
| 570 void instr(Register rs, const Operand& rt); \ | 590 void instr(Register rs, const Operand& rt); \ |
| 571 void instr(Register rs, Register rt) { \ | 591 void instr(Register rs, Register rt) { \ |
| 572 instr(rs, Operand(rt)); \ | 592 instr(rs, Operand(rt)); \ |
| 573 } \ | 593 } \ |
| 574 void instr(Register rs, int32_t j) { \ | 594 void instr(Register rs, int32_t j) { \ |
| 575 instr(rs, Operand(j)); \ | 595 instr(rs, Operand(j)); \ |
| 576 } | 596 } |
| 577 | 597 |
| 578 DEFINE_INSTRUCTION(Addu); | 598 DEFINE_INSTRUCTION(Addu); |
| 599 DEFINE_INSTRUCTION(Daddu); |
| 579 DEFINE_INSTRUCTION(Subu); | 600 DEFINE_INSTRUCTION(Subu); |
| 601 DEFINE_INSTRUCTION(Dsubu); |
| 580 DEFINE_INSTRUCTION(Mul); | 602 DEFINE_INSTRUCTION(Mul); |
| 603 DEFINE_INSTRUCTION(Dmul); |
| 581 DEFINE_INSTRUCTION2(Mult); | 604 DEFINE_INSTRUCTION2(Mult); |
| 605 DEFINE_INSTRUCTION2(Dmult); |
| 582 DEFINE_INSTRUCTION2(Multu); | 606 DEFINE_INSTRUCTION2(Multu); |
| 607 DEFINE_INSTRUCTION2(Dmultu); |
| 583 DEFINE_INSTRUCTION2(Div); | 608 DEFINE_INSTRUCTION2(Div); |
| 609 DEFINE_INSTRUCTION2(Ddiv); |
| 584 DEFINE_INSTRUCTION2(Divu); | 610 DEFINE_INSTRUCTION2(Divu); |
| 611 DEFINE_INSTRUCTION2(Ddivu); |
| 585 | 612 |
| 586 DEFINE_INSTRUCTION(And); | 613 DEFINE_INSTRUCTION(And); |
| 587 DEFINE_INSTRUCTION(Or); | 614 DEFINE_INSTRUCTION(Or); |
| 588 DEFINE_INSTRUCTION(Xor); | 615 DEFINE_INSTRUCTION(Xor); |
| 589 DEFINE_INSTRUCTION(Nor); | 616 DEFINE_INSTRUCTION(Nor); |
| 590 DEFINE_INSTRUCTION2(Neg); | 617 DEFINE_INSTRUCTION2(Neg); |
| 591 | 618 |
| 592 DEFINE_INSTRUCTION(Slt); | 619 DEFINE_INSTRUCTION(Slt); |
| 593 DEFINE_INSTRUCTION(Sltu); | 620 DEFINE_INSTRUCTION(Sltu); |
| 594 | 621 |
| 595 // MIPS32 R2 instruction macro. | 622 // MIPS32 R2 instruction macro. |
| 596 DEFINE_INSTRUCTION(Ror); | 623 DEFINE_INSTRUCTION(Ror); |
| 624 DEFINE_INSTRUCTION(Dror); |
| 597 | 625 |
| 598 #undef DEFINE_INSTRUCTION | 626 #undef DEFINE_INSTRUCTION |
| 599 #undef DEFINE_INSTRUCTION2 | 627 #undef DEFINE_INSTRUCTION2 |
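[For orientation, editor's note] Each name above expands to three overloads, so a call site can pass a register, an explicit Operand, or a plain immediate; for example:

    __ Daddu(v0, a0, a1);                  // Register right-hand side.
    __ Daddu(v0, a0, Operand(8));          // Explicit Operand.
    __ Dsubu(sp, sp, 2 * kPointerSize);    // Immediate, wrapped into an Operand.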
| 600 | 628 |
| 601 void Pref(int32_t hint, const MemOperand& rs); | 629 void Pref(int32_t hint, const MemOperand& rs); |
| 602 | 630 |
| 603 | 631 |
| 604 // --------------------------------------------------------------------------- | 632 // --------------------------------------------------------------------------- |
| 605 // Pseudo-instructions. | 633 // Pseudo-instructions. |
| 606 | 634 |
| 607 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); } | 635 void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); } |
| 608 | 636 |
| 609 void Ulw(Register rd, const MemOperand& rs); | 637 void Ulw(Register rd, const MemOperand& rs); |
| 610 void Usw(Register rd, const MemOperand& rs); | 638 void Usw(Register rd, const MemOperand& rs); |
| 639 void Uld(Register rd, const MemOperand& rs, Register scratch = at); |
| 640 void Usd(Register rd, const MemOperand& rs, Register scratch = at); |
| 611 | 641 |
| 612 // Load int32 in the rd register. | 642 // Load int32 in the rd register. |
| 613 void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE); | 643 void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE); |
| 614 inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) { | 644 inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) { |
| 615 li(rd, Operand(j), mode); | 645 li(rd, Operand(j), mode); |
| 616 } | 646 } |
| 617 void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE); | 647 void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE); |
| 618 | 648 |
| 619 // Push multiple registers on the stack. | 649 // Push multiple registers on the stack. |
| 620 // Registers are saved in numerical order, with higher numbered registers | 650 // Registers are saved in numerical order, with higher numbered registers |
| 621 // saved in higher memory addresses. | 651 // saved in higher memory addresses. |
| 622 void MultiPush(RegList regs); | 652 void MultiPush(RegList regs); |
| 623 void MultiPushReversed(RegList regs); | 653 void MultiPushReversed(RegList regs); |
| 624 | 654 |
| 625 void MultiPushFPU(RegList regs); | 655 void MultiPushFPU(RegList regs); |
| 626 void MultiPushReversedFPU(RegList regs); | 656 void MultiPushReversedFPU(RegList regs); |
| 627 | 657 |
| 628 void push(Register src) { | 658 void push(Register src) { |
| 629 Addu(sp, sp, Operand(-kPointerSize)); | 659 Daddu(sp, sp, Operand(-kPointerSize)); |
| 630 sw(src, MemOperand(sp, 0)); | 660 sd(src, MemOperand(sp, 0)); |
| 631 } | 661 } |
| 632 void Push(Register src) { push(src); } | 662 void Push(Register src) { push(src); } |
| 633 | 663 |
| 634 // Push a handle. | 664 // Push a handle. |
| 635 void Push(Handle<Object> handle); | 665 void Push(Handle<Object> handle); |
| 636 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } | 666 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } |
| 637 | 667 |
| 638 // Push two registers. Pushes leftmost register first (to highest address). | 668 // Push two registers. Pushes leftmost register first (to highest address). |
| 639 void Push(Register src1, Register src2) { | 669 void Push(Register src1, Register src2) { |
| 640 Subu(sp, sp, Operand(2 * kPointerSize)); | 670 Dsubu(sp, sp, Operand(2 * kPointerSize)); |
| 641 sw(src1, MemOperand(sp, 1 * kPointerSize)); | 671 sd(src1, MemOperand(sp, 1 * kPointerSize)); |
| 642 sw(src2, MemOperand(sp, 0 * kPointerSize)); | 672 sd(src2, MemOperand(sp, 0 * kPointerSize)); |
| 643 } | 673 } |
| 644 | 674 |
| 645 // Push three registers. Pushes leftmost register first (to highest address). | 675 // Push three registers. Pushes leftmost register first (to highest address). |
| 646 void Push(Register src1, Register src2, Register src3) { | 676 void Push(Register src1, Register src2, Register src3) { |
| 647 Subu(sp, sp, Operand(3 * kPointerSize)); | 677 Dsubu(sp, sp, Operand(3 * kPointerSize)); |
| 648 sw(src1, MemOperand(sp, 2 * kPointerSize)); | 678 sd(src1, MemOperand(sp, 2 * kPointerSize)); |
| 649 sw(src2, MemOperand(sp, 1 * kPointerSize)); | 679 sd(src2, MemOperand(sp, 1 * kPointerSize)); |
| 650 sw(src3, MemOperand(sp, 0 * kPointerSize)); | 680 sd(src3, MemOperand(sp, 0 * kPointerSize)); |
| 651 } | 681 } |
| 652 | 682 |
| 653 // Push four registers. Pushes leftmost register first (to highest address). | 683 // Push four registers. Pushes leftmost register first (to highest address). |
| 654 void Push(Register src1, Register src2, Register src3, Register src4) { | 684 void Push(Register src1, Register src2, Register src3, Register src4) { |
| 655 Subu(sp, sp, Operand(4 * kPointerSize)); | 685 Dsubu(sp, sp, Operand(4 * kPointerSize)); |
| 656 sw(src1, MemOperand(sp, 3 * kPointerSize)); | 686 sd(src1, MemOperand(sp, 3 * kPointerSize)); |
| 657 sw(src2, MemOperand(sp, 2 * kPointerSize)); | 687 sd(src2, MemOperand(sp, 2 * kPointerSize)); |
| 658 sw(src3, MemOperand(sp, 1 * kPointerSize)); | 688 sd(src3, MemOperand(sp, 1 * kPointerSize)); |
| 659 sw(src4, MemOperand(sp, 0 * kPointerSize)); | 689 sd(src4, MemOperand(sp, 0 * kPointerSize)); |
| 660 } | 690 } |
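[Editor's aside] With kPointerSize == 8 these now store doublewords, and the leftmost register always lands at the highest address, so a matching Pop restores the same order; illustrative:

    __ Push(a0, a1);   // sp+8 <- a0, sp+0 <- a1.
    // ...
    __ Pop(a0, a1);    // a1 <- sp+0, a0 <- sp+8, then sp advances by 16.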
| 661 | 691 |
| 662 void Push(Register src, Condition cond, Register tst1, Register tst2) { | 692 void Push(Register src, Condition cond, Register tst1, Register tst2) { |
| 663 // Since we don't have conditional execution we use a Branch. | 693 // Since we don't have conditional execution we use a Branch. |
| 664 Branch(3, cond, tst1, Operand(tst2)); | 694 Branch(3, cond, tst1, Operand(tst2)); |
| 665 Subu(sp, sp, Operand(kPointerSize)); | 695 Dsubu(sp, sp, Operand(kPointerSize)); |
| 666 sw(src, MemOperand(sp, 0)); | 696 sd(src, MemOperand(sp, 0)); |
| 667 } | 697 } |
| 668 | 698 |
| 699 void PushRegisterAsTwoSmis(Register src, Register scratch = at); |
| 700 void PopRegisterAsTwoSmis(Register dst, Register scratch = at); |
| 701 |
| 669 // Pops multiple values from the stack and loads them into the | 702 // Pops multiple values from the stack and loads them into the |
| 670 // registers specified in regs. Pop order is the opposite as in MultiPush. | 703 // registers specified in regs. Pop order is the opposite as in MultiPush. |
| 671 void MultiPop(RegList regs); | 704 void MultiPop(RegList regs); |
| 672 void MultiPopReversed(RegList regs); | 705 void MultiPopReversed(RegList regs); |
| 673 | 706 |
| 674 void MultiPopFPU(RegList regs); | 707 void MultiPopFPU(RegList regs); |
| 675 void MultiPopReversedFPU(RegList regs); | 708 void MultiPopReversedFPU(RegList regs); |
| 676 | 709 |
| 677 void pop(Register dst) { | 710 void pop(Register dst) { |
| 678 lw(dst, MemOperand(sp, 0)); | 711 ld(dst, MemOperand(sp, 0)); |
| 679 Addu(sp, sp, Operand(kPointerSize)); | 712 Daddu(sp, sp, Operand(kPointerSize)); |
| 680 } | 713 } |
| 681 void Pop(Register dst) { pop(dst); } | 714 void Pop(Register dst) { pop(dst); } |
| 682 | 715 |
| 683 // Pop two registers. Pops rightmost register first (from lower address). | 716 // Pop two registers. Pops rightmost register first (from lower address). |
| 684 void Pop(Register src1, Register src2) { | 717 void Pop(Register src1, Register src2) { |
| 685 ASSERT(!src1.is(src2)); | 718 ASSERT(!src1.is(src2)); |
| 686 lw(src2, MemOperand(sp, 0 * kPointerSize)); | 719 ld(src2, MemOperand(sp, 0 * kPointerSize)); |
| 687 lw(src1, MemOperand(sp, 1 * kPointerSize)); | 720 ld(src1, MemOperand(sp, 1 * kPointerSize)); |
| 688 Addu(sp, sp, 2 * kPointerSize); | 721 Daddu(sp, sp, 2 * kPointerSize); |
| 689 } | 722 } |
| 690 | 723 |
| 691 // Pop three registers. Pops rightmost register first (from lower address). | 724 // Pop three registers. Pops rightmost register first (from lower address). |
| 692 void Pop(Register src1, Register src2, Register src3) { | 725 void Pop(Register src1, Register src2, Register src3) { |
| 693 lw(src3, MemOperand(sp, 0 * kPointerSize)); | 726 ld(src3, MemOperand(sp, 0 * kPointerSize)); |
| 694 lw(src2, MemOperand(sp, 1 * kPointerSize)); | 727 ld(src2, MemOperand(sp, 1 * kPointerSize)); |
| 695 lw(src1, MemOperand(sp, 2 * kPointerSize)); | 728 ld(src1, MemOperand(sp, 2 * kPointerSize)); |
| 696 Addu(sp, sp, 3 * kPointerSize); | 729 Daddu(sp, sp, 3 * kPointerSize); |
| 697 } | 730 } |
| 698 | 731 |
| 699 void Pop(uint32_t count = 1) { | 732 void Pop(uint32_t count = 1) { |
| 700 Addu(sp, sp, Operand(count * kPointerSize)); | 733 Daddu(sp, sp, Operand(count * kPointerSize)); |
| 701 } | 734 } |
| 702 | 735 |
| 703 // Push and pop the registers that can hold pointers, as defined by the | 736 // Push and pop the registers that can hold pointers, as defined by the |
| 704 // RegList constant kSafepointSavedRegisters. | 737 // RegList constant kSafepointSavedRegisters. |
| 705 void PushSafepointRegisters(); | 738 void PushSafepointRegisters(); |
| 706 void PopSafepointRegisters(); | 739 void PopSafepointRegisters(); |
| 707 void PushSafepointRegistersAndDoubles(); | 740 void PushSafepointRegistersAndDoubles(); |
| 708 void PopSafepointRegistersAndDoubles(); | 741 void PopSafepointRegistersAndDoubles(); |
| 709 // Store value in register src in the safepoint stack slot for | 742 // Store value in register src in the safepoint stack slot for |
| 710 // register dst. | 743 // register dst. |
| 711 void StoreToSafepointRegisterSlot(Register src, Register dst); | 744 void StoreToSafepointRegisterSlot(Register src, Register dst); |
| 712 void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst); | 745 void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst); |
| 713 // Load the value of the src register from its safepoint stack slot | 746 // Load the value of the src register from its safepoint stack slot |
| 714 // into register dst. | 747 // into register dst. |
| 715 void LoadFromSafepointRegisterSlot(Register dst, Register src); | 748 void LoadFromSafepointRegisterSlot(Register dst, Register src); |
| 716 | 749 |
| 717 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache | 750 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache |
| 718 // from C. | 751 // from C. |
| 719 // Does not handle errors. | 752 // Does not handle errors. |
| 720 void FlushICache(Register address, unsigned instructions); | 753 void FlushICache(Register address, unsigned instructions); |
| 721 | 754 |
| 722 // MIPS32 R2 instruction macro. | 755 // MIPS64 R2 instruction macro. |
| 723 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); | 756 void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); |
| 724 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); | 757 void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); |
| 725 | 758 |
| 726 // --------------------------------------------------------------------------- | 759 // --------------------------------------------------------------------------- |
| 727 // FPU macros. These do not handle special cases like NaN or +- inf. | 760 // FPU macros. These do not handle special cases like NaN or +- inf. |
| 728 | 761 |
| 729 // Convert unsigned word to double. | 762 // Convert unsigned word to double. |
| 730 void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch); | 763 void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch); |
| 731 void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch); | 764 void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch); |
| 732 | 765 |
| 766 // Convert double to unsigned long. |
| 767 void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch); |
| 768 |
| 769 void Trunc_l_d(FPURegister fd, FPURegister fs); |
| 770 void Round_l_d(FPURegister fd, FPURegister fs); |
| 771 void Floor_l_d(FPURegister fd, FPURegister fs); |
| 772 void Ceil_l_d(FPURegister fd, FPURegister fs); |
| 773 |
| 733 // Convert double to unsigned word. | 774 // Convert double to unsigned word. |
| 734 void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch); | 775 void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch); |
| 735 void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch); | 776 void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch); |
| 736 | 777 |
| 737 void Trunc_w_d(FPURegister fd, FPURegister fs); | 778 void Trunc_w_d(FPURegister fd, FPURegister fs); |
| 738 void Round_w_d(FPURegister fd, FPURegister fs); | 779 void Round_w_d(FPURegister fd, FPURegister fs); |
| 739 void Floor_w_d(FPURegister fd, FPURegister fs); | 780 void Floor_w_d(FPURegister fd, FPURegister fs); |
| 740 void Ceil_w_d(FPURegister fd, FPURegister fs); | 781 void Ceil_w_d(FPURegister fd, FPURegister fs); |
| 782 |
| 783 void Madd_d(FPURegister fd, |
| 784 FPURegister fr, |
| 785 FPURegister fs, |
| 786 FPURegister ft, |
| 787 FPURegister scratch); |
| 788 |
| 741 // Wrapper function for the different cmp/branch types. | 789 // Wrapper function for the different cmp/branch types. |
| 742 void BranchF(Label* target, | 790 void BranchF(Label* target, |
| 743 Label* nan, | 791 Label* nan, |
| 744 Condition cc, | 792 Condition cc, |
| 745 FPURegister cmp1, | 793 FPURegister cmp1, |
| 746 FPURegister cmp2, | 794 FPURegister cmp2, |
| 747 BranchDelaySlot bd = PROTECT); | 795 BranchDelaySlot bd = PROTECT); |
| 748 | 796 |
| 749 // Alternate (inline) version for better readability with USE_DELAY_SLOT. | 797 // Alternate (inline) version for better readability with USE_DELAY_SLOT. |
| 750 inline void BranchF(BranchDelaySlot bd, | 798 inline void BranchF(BranchDelaySlot bd, |
| (...skipping 311 matching lines...) |
| 1062 Handle<Code> success, | 1110 Handle<Code> success, |
| 1063 SmiCheckType smi_check_type); | 1111 SmiCheckType smi_check_type); |
| 1064 | 1112 |
| 1065 | 1113 |
| 1066 // Load and check the instance type of an object for being a string. | 1114 // Load and check the instance type of an object for being a string. |
| 1067 // Loads the type into the second argument register. | 1115 // Loads the type into the second argument register. |
| 1068 // Returns a condition that will be enabled if the object was a string. | 1116 // Returns a condition that will be enabled if the object was a string. |
| 1069 Condition IsObjectStringType(Register obj, | 1117 Condition IsObjectStringType(Register obj, |
| 1070 Register type, | 1118 Register type, |
| 1071 Register result) { | 1119 Register result) { |
| 1072 lw(type, FieldMemOperand(obj, HeapObject::kMapOffset)); | 1120 ld(type, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 1073 lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); | 1121 lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); |
| 1074 And(type, type, Operand(kIsNotStringMask)); | 1122 And(type, type, Operand(kIsNotStringMask)); |
| 1075 ASSERT_EQ(0, kStringTag); | 1123 ASSERT_EQ(0, kStringTag); |
| 1076 return eq; | 1124 return eq; |
| 1077 } | 1125 } |
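[Editor's usage sketch, registers and label are placeholders] The caller branches on the clobbered type register together with the returned condition:

    Label is_string;
    Condition cond = masm->IsObjectStringType(a0, t0, t1);
    // t0 now holds (instance_type & kIsNotStringMask); the branch is taken when it is zero.
    masm->Branch(&is_string, cond, t0, Operand(zero_reg));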
| 1078 | 1126 |
| 1079 | 1127 |
| 1080 // Picks out an array index from the hash field. | 1128 // Picks out an array index from the hash field. |
| 1081 // Register use: | 1129 // Register use: |
| 1082 // hash - holds the index's hash. Clobbered. | 1130 // hash - holds the index's hash. Clobbered. |
| (...skipping 134 matching lines...) |
| 1217 // Needs a scratch register to do some arithmetic. This register will be | 1265 // Needs a scratch register to do some arithmetic. This register will be |
| 1218 // trashed. | 1266 // trashed. |
| 1219 void PrepareCallCFunction(int num_reg_arguments, | 1267 void PrepareCallCFunction(int num_reg_arguments, |
| 1220 int num_double_registers, | 1268 int num_double_registers, |
| 1221 Register scratch); | 1269 Register scratch); |
| 1222 void PrepareCallCFunction(int num_reg_arguments, | 1270 void PrepareCallCFunction(int num_reg_arguments, |
| 1223 Register scratch); | 1271 Register scratch); |
| 1224 | 1272 |
| 1225 // Arguments 1-4 are placed in registers a0 thru a3 respectively. | 1273 // Arguments 1-4 are placed in registers a0 thru a3 respectively. |
| 1226 // Arguments 5..n are stored to stack using following: | 1274 // Arguments 5..n are stored to stack using following: |
| 1227 // sw(t0, CFunctionArgumentOperand(5)); | 1275 // sw(a4, CFunctionArgumentOperand(5)); |
| 1228 | 1276 |
| 1229 // Calls a C function and cleans up the space for arguments allocated | 1277 // Calls a C function and cleans up the space for arguments allocated |
| 1230 // by PrepareCallCFunction. The called function is not allowed to trigger a | 1278 // by PrepareCallCFunction. The called function is not allowed to trigger a |
| 1231 // garbage collection, since that might move the code and invalidate the | 1279 // garbage collection, since that might move the code and invalidate the |
| 1232 // return address (unless this is somehow accounted for by the called | 1280 // return address (unless this is somehow accounted for by the called |
| 1233 // function). | 1281 // function). |
| 1234 void CallCFunction(ExternalReference function, int num_arguments); | 1282 void CallCFunction(ExternalReference function, int num_arguments); |
| 1235 void CallCFunction(Register function, int num_arguments); | 1283 void CallCFunction(Register function, int num_arguments); |
| 1236 void CallCFunction(ExternalReference function, | 1284 void CallCFunction(ExternalReference function, |
| 1237 int num_reg_arguments, | 1285 int num_reg_arguments, |
| (...skipping 93 matching lines...) |
| 1331 // control continues at the label not_power_of_two. If reg is a power of two | 1379 // control continues at the label not_power_of_two. If reg is a power of two |
| 1332 // the register scratch contains the value of (reg - 1) when control falls | 1380 // the register scratch contains the value of (reg - 1) when control falls |
| 1333 // through. | 1381 // through. |
| 1334 void JumpIfNotPowerOfTwoOrZero(Register reg, | 1382 void JumpIfNotPowerOfTwoOrZero(Register reg, |
| 1335 Register scratch, | 1383 Register scratch, |
| 1336 Label* not_power_of_two_or_zero); | 1384 Label* not_power_of_two_or_zero); |
| 1337 | 1385 |
| 1338 // ------------------------------------------------------------------------- | 1386 // ------------------------------------------------------------------------- |
| 1339 // Smi utilities. | 1387 // Smi utilities. |
| 1340 | 1388 |
| 1341 void SmiTag(Register reg) { | |
| 1342 Addu(reg, reg, reg); | |
| 1343 } | |
| 1344 | |
| 1345 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow(). | 1389 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow(). |
| 1346 void SmiTagCheckOverflow(Register reg, Register overflow); | 1390 void SmiTagCheckOverflow(Register reg, Register overflow); |
| 1347 void SmiTagCheckOverflow(Register dst, Register src, Register overflow); | 1391 void SmiTagCheckOverflow(Register dst, Register src, Register overflow); |
| 1348 | 1392 |
| 1349 void SmiTag(Register dst, Register src) { | 1393 void SmiTag(Register dst, Register src) { |
| 1350 Addu(dst, src, src); | 1394 STATIC_ASSERT(kSmiTag == 0); |
| 1395 if (SmiValuesAre32Bits()) { |
| 1396 STATIC_ASSERT(kSmiShift == 32); |
| 1397 dsll32(dst, src, 0); |
| 1398 } else { |
| 1399 Addu(dst, src, src); |
| 1400 } |
| 1401 } |
| 1402 |
| 1403 void SmiTag(Register reg) { |
| 1404 SmiTag(reg, reg); |
| 1351 } | 1405 } |
| 1352 | 1406 |
| 1353 // Try to convert int32 to smi. If the value is too large, preserve | 1407 // Try to convert int32 to smi. If the value is too large, preserve |
| 1354 // the original value and jump to not_a_smi. Destroys scratch and | 1408 // the original value and jump to not_a_smi. Destroys scratch and |
| 1355 // sets flags. | 1409 // sets flags. |
| 1356 void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) { | 1410 void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) { |
| 1357 TrySmiTag(reg, reg, scratch, not_a_smi); | 1411 TrySmiTag(reg, reg, scratch, not_a_smi); |
| 1358 } | 1412 } |
| 1413 |
| 1359 void TrySmiTag(Register dst, | 1414 void TrySmiTag(Register dst, |
| 1360 Register src, | 1415 Register src, |
| 1361 Register scratch, | 1416 Register scratch, |
| 1362 Label* not_a_smi) { | 1417 Label* not_a_smi) { |
| 1363 SmiTagCheckOverflow(at, src, scratch); | 1418 if (SmiValuesAre32Bits()) { |
| 1364 BranchOnOverflow(not_a_smi, scratch); | 1419 SmiTag(dst, src); |
| 1365 mov(dst, at); | 1420 } else { |
| 1421 SmiTagCheckOverflow(at, src, scratch); |
| 1422 BranchOnOverflow(not_a_smi, scratch); |
| 1423 mov(dst, at); |
| 1424 } |
| 1425 } |
| 1426 |
| 1427 void SmiUntag(Register dst, Register src) { |
| 1428 if (SmiValuesAre32Bits()) { |
| 1429 STATIC_ASSERT(kSmiShift == 32); |
| 1430 dsra32(dst, src, 0); |
| 1431 } else { |
| 1432 sra(dst, src, kSmiTagSize); |
| 1433 } |
| 1366 } | 1434 } |
| 1367 | 1435 |
| 1368 void SmiUntag(Register reg) { | 1436 void SmiUntag(Register reg) { |
| 1369 sra(reg, reg, kSmiTagSize); | 1437 SmiUntag(reg, reg); |
| 1370 } | 1438 } |
| 1371 | 1439 |
| 1372 void SmiUntag(Register dst, Register src) { | 1440 // Left-shifted from int32 equivalent of Smi. |
| 1373 sra(dst, src, kSmiTagSize); | 1441 void SmiScale(Register dst, Register src, int scale) { |
| 1442 if (SmiValuesAre32Bits()) { |
| 1443 // The int portion is the upper 32 bits of the 64-bit word. |
| 1444 dsra(dst, src, kSmiShift - scale); |
| 1445 } else { |
| 1446 ASSERT(scale >= kSmiTagSize); |
| 1447 sll(dst, src, scale - kSmiTagSize); |
| 1448 } |
| 1374 } | 1449 } |
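[Editor's worked example] With 32-bit Smis the payload occupies the upper word, so scaling a Smi index to a byte offset is a single arithmetic shift: for pointer-sized elements, dsra by (32 - 3) yields index * 8. The base register below is a placeholder.

    __ SmiScale(t0, a1, kPointerSizeLog2);  // t0 = untagged index * 8.
    __ Daddu(t0, a2, t0);                   // a2 stands in for an elements base.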
| 1375 | 1450 |
| 1451 // Combine load with untagging or scaling. |
| 1452 void SmiLoadUntag(Register dst, MemOperand src); |
| 1453 |
| 1454 void SmiLoadScale(Register dst, MemOperand src, int scale); |
| 1455 |
| 1456 // Returns 2 values: the Smi and a scaled version of the int within the Smi. |
| 1457 void SmiLoadWithScale(Register d_smi, |
| 1458 Register d_scaled, |
| 1459 MemOperand src, |
| 1460 int scale); |
| 1461 |
| 1462 // Returns 2 values: the untagged Smi (int32) and scaled version of that int. |
| 1463 void SmiLoadUntagWithScale(Register d_int, |
| 1464 Register d_scaled, |
| 1465 MemOperand src, |
| 1466 int scale); |
| 1467 |
| 1468 |
| 1376 // Test if the register contains a smi. | 1469 // Test if the register contains a smi. |
| 1377 inline void SmiTst(Register value, Register scratch) { | 1470 inline void SmiTst(Register value, Register scratch) { |
| 1378 And(scratch, value, Operand(kSmiTagMask)); | 1471 And(scratch, value, Operand(kSmiTagMask)); |
| 1379 } | 1472 } |
| 1380 inline void NonNegativeSmiTst(Register value, Register scratch) { | 1473 inline void NonNegativeSmiTst(Register value, Register scratch) { |
| 1381 And(scratch, value, Operand(kSmiTagMask | kSmiSignMask)); | 1474 And(scratch, value, Operand(kSmiTagMask | kSmiSignMask)); |
| 1382 } | 1475 } |
| 1383 | 1476 |
| 1384 // Untag the source value into destination and jump if source is a smi. | 1477 // Untag the source value into destination and jump if source is a smi. |
| 1385 // Souce and destination can be the same register. | 1478 // Source and destination can be the same register. |
| 1386 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case); | 1479 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case); |
| 1387 | 1480 |
| 1388 // Untag the source value into destination and jump if source is not a smi. | 1481 // Untag the source value into destination and jump if source is not a smi. |
| 1389 // Souce and destination can be the same register. | 1482 // Source and destination can be the same register. |
| 1390 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); | 1483 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); |
| 1391 | 1484 |
| 1392 // Jump if the register contains a smi. | 1485 // Jump if the register contains a smi. |
| 1393 void JumpIfSmi(Register value, | 1486 void JumpIfSmi(Register value, |
| 1394 Label* smi_label, | 1487 Label* smi_label, |
| 1395 Register scratch = at, | 1488 Register scratch = at, |
| 1396 BranchDelaySlot bd = PROTECT); | 1489 BranchDelaySlot bd = PROTECT); |
| 1397 | 1490 |
| 1398 // Jump if the register contains a non-smi. | 1491 // Jump if the register contains a non-smi. |
| 1399 void JumpIfNotSmi(Register value, | 1492 void JumpIfNotSmi(Register value, |
| (...skipping 103 matching lines...) |
| 1503 } | 1596 } |
| 1504 | 1597 |
| 1505 template<typename Field> | 1598 template<typename Field> |
| 1506 void DecodeField(Register reg) { | 1599 void DecodeField(Register reg) { |
| 1507 DecodeField<Field>(reg, reg); | 1600 DecodeField<Field>(reg, reg); |
| 1508 } | 1601 } |
| 1509 | 1602 |
| 1510 template<typename Field> | 1603 template<typename Field> |
| 1511 void DecodeFieldToSmi(Register dst, Register src) { | 1604 void DecodeFieldToSmi(Register dst, Register src) { |
| 1512 static const int shift = Field::kShift; | 1605 static const int shift = Field::kShift; |
| 1513 static const int mask = Field::kMask >> shift << kSmiTagSize; | 1606 static const int mask = Field::kMask >> shift; |
| 1514 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); | 1607 dsrl(dst, src, shift); |
| 1515 STATIC_ASSERT(kSmiTag == 0); | 1608 And(dst, dst, Operand(mask)); |
| 1516 if (shift < kSmiTagSize) { | 1609 dsll32(dst, dst, 0); |
| 1517 sll(dst, src, kSmiTagSize - shift); | |
| 1518 And(dst, dst, Operand(mask)); | |
| 1519 } else if (shift > kSmiTagSize) { | |
| 1520 srl(dst, src, shift - kSmiTagSize); | |
| 1521 And(dst, dst, Operand(mask)); | |
| 1522 } else { | |
| 1523 And(dst, src, Operand(mask)); | |
| 1524 } | |
| 1525 } | 1610 } |
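[Editor's sketch] The 64-bit version extracts the field with a logical shift and mask, then re-tags it as a Smi with dsll32; the field class below is only an example.

    // v0 = Smi::FromInt((bit_field >> Field::kShift) & mask), bit field word in a0.
    masm->DecodeFieldToSmi<Map::ElementsKindBits>(v0, a0);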
| 1526 | 1611 |
| 1527 template<typename Field> | 1612 template<typename Field> |
| 1528 void DecodeFieldToSmi(Register reg) { | 1613 void DecodeFieldToSmi(Register reg) { |
| 1529 DecodeFieldToSmi<Field>(reg, reg); | 1614 DecodeFieldToSmi<Field>(reg, reg); |
| 1530 } | 1615 } |
| 1531 | |
| 1532 // Generates function and stub prologue code. | 1616 // Generates function and stub prologue code. |
| 1533 void StubPrologue(); | 1617 void StubPrologue(); |
| 1534 void Prologue(bool code_pre_aging); | 1618 void Prologue(bool code_pre_aging); |
| 1535 | 1619 |
| 1536 // Activation support. | 1620 // Activation support. |
| 1537 void EnterFrame(StackFrame::Type type); | 1621 void EnterFrame(StackFrame::Type type); |
| 1538 void LeaveFrame(StackFrame::Type type); | 1622 void LeaveFrame(StackFrame::Type type); |
| 1539 | 1623 |
| 1540 // Patch the relocated value (lui/ori pair). | 1624 // Patch the relocated value (lui/ori pair). |
| 1541 void PatchRelocatedValue(Register li_location, | 1625 void PatchRelocatedValue(Register li_location, |
| (...skipping 148 matching lines...) |
| 1690 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) | 1774 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) |
| 1691 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) | 1775 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) |
| 1692 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> | 1776 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> |
| 1693 #else | 1777 #else |
| 1694 #define ACCESS_MASM(masm) masm-> | 1778 #define ACCESS_MASM(masm) masm-> |
| 1695 #endif | 1779 #endif |
| 1696 | 1780 |
| 1697 } } // namespace v8::internal | 1781 } } // namespace v8::internal |
| 1698 | 1782 |
| 1699 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ | 1783 #endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_ |