Index: src/mips64/macro-assembler-mips64.cc
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index 466906ae2daa907036524ac296c929c352fa3f90..f4e77236196f804eebc430308f9f55462ab0f58f 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -22,7 +22,8 @@ namespace internal {
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      has_frame_(false) {
+      has_frame_(false),
+      has_double_zero_reg_set_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
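
The new has_double_zero_reg_set_ flag tracks whether kDoubleRegZero currently
holds 0.0, so that Move(FPURegister, double) (see the two hunks for that
function further down) can reuse the register instead of re-materializing the
constant. A minimal sketch of the intended effect, assuming the usual
`#define __ masm->` shorthand (register choices illustrative, not from the
patch):

    __ Move(kDoubleRegZero, 0.0);  // full load; records the register as set
    __ Move(f4, 0.0);              // afterwards: one mov_d(f4, kDoubleRegZero)
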
@@ -711,6 +712,28 @@ void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
 }


+void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      multu(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      muhu(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      multu(rs, at);
+      mfhi(rd);
+    } else {
+      muhu(rd, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     if (kArchVariant == kMips64r6) {
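
Mulhu returns the high 32 bits of an unsigned 32x32-bit multiply. Pre-r6 the
product lands in the HI/LO register pair (multu + mfhi); r6 drops HI/LO and
encodes the destination directly (muhu). A hedged usage sketch (register and
value choices are illustrative):

    __ li(a0, Operand(0x80000000));
    __ li(a1, Operand(4));
    __ Mulhu(v0, a0, Operand(a1));  // v0 = 2: 0x80000000 * 4 = 0x2'00000000
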
@@ -815,6 +838,72 @@ void MacroAssembler::Div(Register rs, const Operand& rt) {
 }


+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      div(rs, rt.rm());
+      mflo(res);
+    } else {
+      div(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      div(rs, at);
+      mflo(res);
+    } else {
+      div(res, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      div(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      mod(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      div(rs, at);
+      mfhi(rd);
+    } else {
+      mod(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      divu(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      modu(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      divu(rs, at);
+      mfhi(rd);
+    } else {
+      modu(rd, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     ddiv(rs, rt.rm());
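
These three-operand helpers paper over the same ISA split for 32-bit signed
division: pre-r6 emits div plus mflo/mfhi, r6 uses the true three-operand
div/mod encodings. Division truncates toward zero, so the remainder keeps the
dividend's sign. Illustrative sketch:

    __ li(a0, Operand(-7));
    __ li(a1, Operand(2));
    __ Div(v0, a0, Operand(a1));  // v0 = -3
    __ Mod(v1, a0, Operand(a1));  // v1 = -1
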
@@ -864,6 +953,28 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) {
 }


+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      divu(rs, rt.rm());
+      mflo(res);
+    } else {
+      divu(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      divu(rs, at);
+      mflo(res);
+    } else {
+      divu(res, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     ddivu(rs, rt.rm());
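
Divu is the unsigned counterpart; the same bit pattern divides very differently
once treated as unsigned. Sketch (illustrative):

    __ li(a0, Operand(-4));       // low word 0xFFFFFFFC
    __ Div(v0, a0, Operand(2));   // signed:   v0 = -2
    __ Divu(v1, a0, Operand(2));  // unsigned: v1 = 0x7FFFFFFE
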
@@ -876,6 +987,28 @@ void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
 }


+void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      ddivu(rs, rt.rm());
+      mflo(res);
+    } else {
+      ddivu(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      ddivu(rs, at);
+      mflo(res);
+    } else {
+      ddivu(res, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
   if (kArchVariant != kMips64r6) {
     if (rt.is_reg()) {
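
Ddivu extends the pattern to full 64-bit unsigned division, which matters
whenever the operands are genuine 64-bit quantities rather than sign-extended
words. Sketch (illustrative):

    __ li(a0, Operand(-2));        // as unsigned: 2^64 - 2
    __ Ddivu(v0, a0, Operand(2));  // v0 = 2^63 - 1 = 0x7FFFFFFFFFFFFFFF
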
@@ -901,6 +1034,31 @@ void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
 }


+void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
+  if (kArchVariant != kMips64r6) {
+    if (rt.is_reg()) {
+      ddivu(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      ddivu(rs, at);
+      mfhi(rd);
+    }
+  } else {
+    if (rt.is_reg()) {
+      dmodu(rd, rs, rt.rm());
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      dmodu(rd, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     and_(rd, rs, rt.rm());
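
Dmodu completes the set with the 64-bit unsigned remainder. Note it nests the
checks the other way around (architecture variant first, operand kind second)
from Mulhu/Div above; the two shapes are behaviorally equivalent. Sketch:

    __ li(a0, Operand(-1));         // as unsigned: 2^64 - 1
    __ Dmodu(v0, a0, Operand(10));  // v0 = 5, since (2^64 - 1) % 10 == 5
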
@@ -1003,27 +1161,10 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {


 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
-  if (kArchVariant == kMips64r2) {
-    if (rt.is_reg()) {
-      rotrv(rd, rs, rt.rm());
-    } else {
-      rotr(rd, rs, rt.imm64_);
-    }
+  if (rt.is_reg()) {
+    rotrv(rd, rs, rt.rm());
   } else {
-    if (rt.is_reg()) {
-      subu(at, zero_reg, rt.rm());
-      sllv(at, rs, at);
-      srlv(rd, rs, rt.rm());
-      or_(rd, rd, at);
-    } else {
-      if (rt.imm64_ == 0) {
-        srl(rd, rs, 0);
-      } else {
-        srl(at, rs, rt.imm64_);
-        sll(rd, rs, (0x20 - rt.imm64_) & 0x1f);
-        or_(rd, rd, at);
-      }
-    }
+    rotr(rd, rs, rt.imm64_);
   }
 }

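
On MIPS64, kArchVariant is always kMips64r2 or kMips64r6, and both provide the
rotate instructions, so the shift-and-or fallback for rotate-less CPUs was
unreachable and is dropped. Ror is a 32-bit (word) rotate. Sketch:

    __ li(a0, Operand(0x12345678));
    __ Ror(v0, a0, Operand(8));  // v0 = 0x78123456
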
@@ -1281,6 +1422,14 @@ void MacroAssembler::Ext(Register rt,
 }


+void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
+                          uint16_t size) {
+  DCHECK(pos < 32);
+  DCHECK(pos + size < 33);
+  dext_(rt, rs, pos, size);
+}
+
+
 void MacroAssembler::Ins(Register rt,
                          Register rs,
                          uint16_t pos,
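
Dext wraps the doubleword bit-field extract. The DCHECKs limit it to fields the
base dext encoding can express (pos in 0..31, pos + size <= 32); wider or
higher-placed fields would need dextm/dextu, which this helper deliberately
does not emit. Sketch:

    __ li(a0, Operand(0x12345678));
    __ Dext(v0, a0, 8, 16);  // v0 = 0x3456 (bits 8..23, zero-extended)
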
@@ -1572,10 +1721,9 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
   static const DoubleRepresentation zero(0.0);
   DoubleRepresentation value_rep(imm);
   // Handle special values first.
-  bool force_load = dst.is(kDoubleRegZero);
-  if (value_rep == zero && !force_load) {
+  if (value_rep == zero && has_double_zero_reg_set_) {
     mov_d(dst, kDoubleRegZero);
-  } else if (value_rep == minus_zero && !force_load) {
+  } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
     neg_d(dst, kDoubleRegZero);
   } else {
     uint32_t lo, hi;
@@ -1596,6 +1744,7 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
     } else {
       mthc1(zero_reg, dst);
     }
+    if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
   }
 }

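
Previously Move() forced a full load whenever dst was kDoubleRegZero itself,
and otherwise simply assumed kDoubleRegZero already held 0.0. With the flag,
the first load into kDoubleRegZero records that the register is valid, and only
then do later zero moves take the short path. Sketch of the resulting behavior
(illustrative registers):

    __ Move(kDoubleRegZero, 0.0);  // full load; sets has_double_zero_reg_set_
    __ Move(f6, 0.0);              // mov_d(f6, kDoubleRegZero)
    __ Move(f8, -0.0);             // neg_d(f8, kDoubleRegZero)
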
@@ -2000,18 +2149,26 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         b(offset);
         break;
       case eq:
-        // We don't want any other register but scratch clobbered.
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        beq(rs, r2, offset);
+        if (rt.imm64_ == 0) {
+          beq(rs, zero_reg, offset);
+        } else {
+          // We don't want any other register but scratch clobbered.
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          beq(rs, r2, offset);
+        }
         break;
       case ne:
-        // We don't want any other register but scratch clobbered.
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        bne(rs, r2, offset);
+        if (rt.imm64_ == 0) {
+          bne(rs, zero_reg, offset);
+        } else {
+          // We don't want any other register but scratch clobbered.
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          bne(rs, r2, offset);
+        }
         break;
       // Signed comparison.
       case greater:
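
Comparing against an immediate zero is among the most common branches in
generated code, and MIPS compares against the hard-wired zero_reg for free, so
the li-into-scratch step can be skipped. Sketch (hypothetical label):

    Label done;
    __ Branch(&done, eq, a0, Operand(0));  // now one beq a0, zero_reg
                                           // instead of li at, 0; beq a0, at
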
@@ -2253,18 +2410,28 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
         b(offset);
         break;
       case eq:
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        offset = shifted_branch_offset(L, false);
-        beq(rs, r2, offset);
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          beq(rs, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          offset = shifted_branch_offset(L, false);
+          beq(rs, r2, offset);
+        }
         break;
       case ne:
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        offset = shifted_branch_offset(L, false);
-        bne(rs, r2, offset);
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bne(rs, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          offset = shifted_branch_offset(L, false);
+          bne(rs, r2, offset);
+        }
         break;
       // Signed comparison.
       case greater:
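
The same peephole, for the label form of BranchShort. Note that
shifted_branch_offset is still computed only immediately before the branch is
emitted: li may expand to several instructions, and the offset recorded for
later patching must be taken at the branch's own pc. Sketch (hypothetical
label):

    Label done;
    __ Branch(&done, ne, v0, Operand(0));  // single bne v0, zero_reg, done
    __ bind(&done);
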
@@ -4367,6 +4534,33 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
 }


+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
+                                             const Operand& right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  if (right.is_reg()) {
+    AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+  } else {
+    if (dst.is(left)) {
+      mov(scratch, left);  // Preserve left.
+      daddiu(dst, left, right.immediate());  // Left is overwritten.
+      xor_(scratch, dst, scratch);  // Original left.
+      // Load right since xori takes uint16 as immediate.
+      daddiu(t9, zero_reg, right.immediate());
+      xor_(overflow_dst, dst, t9);
+      and_(overflow_dst, overflow_dst, scratch);
+    } else {
+      daddiu(dst, left, right.immediate());
+      xor_(overflow_dst, dst, left);
+      // Load right since xori takes uint16 as immediate.
+      daddiu(t9, zero_reg, right.immediate());
+      xor_(scratch, dst, t9);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+  }
+}
+
+
 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
                                              Register left,
                                              Register right,
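
The overflow check is the standard sign-bit identity for two's-complement
addition: dst = left + right overflows iff left and right share a sign and dst
has the opposite one, i.e. ((dst ^ left) & (dst ^ right)) has its sign bit set.
Callers branch on the sign of overflow_dst. Note the immediate form clobbers t9
and assumes the value fits daddiu's signed 16-bit field. Sketch (hypothetical
label, illustrative registers):

    Label overflow;
    __ AdduAndCheckForOverflow(v0, a0, Operand(1), t0, t1);
    __ Branch(&overflow, lt, t0, Operand(zero_reg));  // negative => overflow
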
@@ -4409,6 +4603,33 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
 }


+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
+                                             const Operand& right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  if (right.is_reg()) {
+    SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+  } else {
+    if (dst.is(left)) {
+      mov(scratch, left);  // Preserve left.
+      daddiu(dst, left, -(right.immediate()));  // Left is overwritten.
+      xor_(overflow_dst, dst, scratch);  // scratch is original left.
+      // Load right since xori takes uint16 as immediate.
+      daddiu(t9, zero_reg, right.immediate());
+      xor_(scratch, scratch, t9);  // scratch is original left.
+      and_(overflow_dst, scratch, overflow_dst);
+    } else {
+      daddiu(dst, left, -(right.immediate()));
+      xor_(overflow_dst, dst, left);
+      // Load right since xori takes uint16 as immediate.
+      daddiu(t9, zero_reg, right.immediate());
+      xor_(scratch, left, t9);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+  }
+}
+
+
 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
                                              Register left,
                                              Register right,
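
For subtraction the identity flips: dst = left - right overflows iff left and
right have opposite signs and dst's sign differs from left's, i.e.
((left ^ right) & (dst ^ left)) has its sign bit set, which is exactly what the
xor_/and_ sequence computes. The same caveats as the Addu variant apply (t9
clobbered, 16-bit signed immediate); usage mirrors the sketch above, branching
on the sign of overflow_dst.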