Index: runtime/vm/assembler_mips.h
===================================================================
--- runtime/vm/assembler_mips.h (revision 42479)
+++ runtime/vm/assembler_mips.h (working copy)
@@ -934,6 +934,184 @@
     }
   }
+  void OrImmediate(Register rd, Register rs, int32_t imm) {
+    ASSERT(!in_delay_slot_);
+    if (imm == 0) {
+      mov(rd, rs);
+      return;
+    }
+
+    if (Utils::IsUint(kImmBits, imm)) {
+      ori(rd, rs, Immediate(imm));
+    } else {
+      LoadImmediate(TMP, imm);
+      or_(rd, rs, TMP);
+    }
+  }
+
+  void XorImmediate(Register rd, Register rs, int32_t imm) {
+    ASSERT(!in_delay_slot_);
+    if (imm == 0) {
+      mov(rd, rs);
+      return;
+    }
+
+    if (Utils::IsUint(kImmBits, imm)) {
+      xori(rd, rs, Immediate(imm));
+    } else {
+      LoadImmediate(TMP, imm);
+      xor_(rd, rs, TMP);
+    }
+  }
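Side note, not part of the patch: OrImmediate/XorImmediate can use the single-instruction form only when the constant fits the zero-extended 16-bit immediate field of ori/xori, which is what the Utils::IsUint(kImmBits, imm) test captures. A minimal standalone sketch of that selection rule (plain C++ with a hand-rolled IsUint16 instead of the VM's Utils API):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the idea of Utils::IsUint(16, v): representable as a
    // zero-extended 16-bit immediate, i.e. directly encodable in ori/xori.
    static bool IsUint16(int32_t v) { return v >= 0 && v <= 0xFFFF; }

    int main() {
      for (int32_t imm : {0, 0x1234, 0xFFFF, 0x10000, -1}) {
        if (imm == 0) {
          std::printf("%#x: plain mov, no or/xor needed\n", imm);
        } else if (IsUint16(imm)) {
          std::printf("%#x: single ori/xori\n", imm);
        } else {
          std::printf("%#x: LoadImmediate into TMP, then or_/xor_\n", imm);
        }
      }
      return 0;
    }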
+
+  // Branch to label if condition is true.
+  void BranchOnCondition(Condition cond, Label* l) {
+    ASSERT(!in_delay_slot_);
+    Register left = cond.left();
+    Register right = cond.right();
+    RelationOperator rel_op = cond.rel_op();
+    switch (rel_op) {
+      case NV: return;
+      case AL: b(l); return;
+      case EQ:  // fall through.
+      case NE: {
+        if (left == IMM) {
+          addiu(AT, ZR, Immediate(cond.imm()));
zra (2014/12/19 17:49:47): Why not LoadImmediate?
regis (2014/12/22 20:17:34): I like to see exactly what instruction we generate
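Side note on the exchange above, outside the patch: addiu rt, ZR, imm materializes exactly the sign-extended 16-bit range in one instruction, whereas a LoadImmediate-style helper accepts any 32-bit value but may expand to a lui/ori pair; presumably cond.imm() is already known to fit that range here. A hedged sketch of the range itself (plain C++, the name FitsAddiuImmediate is made up for illustration):

    #include <cstdint>
    // Range a single "addiu AT, ZR, imm" can produce (sign-extended imm16).
    // Anything wider would need the lui/ori expansion.
    inline bool FitsAddiuImmediate(int32_t v) { return v >= -32768 && v <= 32767; }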
+          left = AT;
+        } else if (right == IMM) {
+          addiu(AT, ZR, Immediate(cond.imm()));
+          right = AT;
+        }
+        if (rel_op == EQ) {
+          beq(left, right, l);
+        } else {
+          bne(left, right, l);
+        }
+        break;
+      }
+      case GT: {
+        if (left == ZR) {
+          bltz(right, l);
+        } else if (right == ZR) {
+          bgtz(left, l);
+        } else if (left == IMM) {
+          slti(AT, right, Immediate(cond.imm()));
+          bne(AT, ZR, l);
+        } else if (right == IMM) {
+          slti(AT, left, Immediate(cond.imm() + 1));
+          beq(AT, ZR, l);
+        } else {
+          slt(AT, right, left);
+          bne(AT, ZR, l);
+        }
+        break;
+      }
+      case GE: {
+        if (left == ZR) {
+          blez(right, l);
+        } else if (right == ZR) {
+          bgez(left, l);
+        } else if (left == IMM) {
+          slti(AT, right, Immediate(cond.imm() + 1));
+          bne(AT, ZR, l);
+        } else if (right == IMM) {
+          slti(AT, left, Immediate(cond.imm()));
+          beq(AT, ZR, l);
+        } else {
+          slt(AT, left, right);
+          beq(AT, ZR, l);
+        }
+        break;
+      }
+      case LT: {
+        if (left == ZR) {
+          bgtz(right, l);
+        } else if (right == ZR) {
+          bltz(left, l);
+        } else if (left == IMM) {
+          slti(AT, right, Immediate(cond.imm() + 1));
+          beq(AT, ZR, l);
+        } else if (right == IMM) {
+          slti(AT, left, Immediate(cond.imm()));
+          bne(AT, ZR, l);
+        } else {
+          slt(AT, left, right);
+          bne(AT, ZR, l);
+        }
+        break;
+      }
+      case LE: {
+        if (left == ZR) {
+          bgez(right, l);
+        } else if (right == ZR) {
+          blez(left, l);
+        } else if (left == IMM) {
+          slti(AT, right, Immediate(cond.imm()));
+          beq(AT, ZR, l);
+        } else if (right == IMM) {
+          slti(AT, left, Immediate(cond.imm() + 1));
+          bne(AT, ZR, l);
+        } else {
+          slt(AT, right, left);
+          beq(AT, ZR, l);
+        }
+        break;
+      }
+      case UGT: {
+        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
zra (2014/12/19 17:49:47): What do you do if you want to branch on an unsigne
regis (2014/12/22 20:17:34): This does not come up. The compiler will always al
+        if (left == ZR) {
+          // NV: Never branch. Fall through.
+        } else if (right == ZR) {
+          bne(left, ZR, l);
+        } else {
+          sltu(AT, right, left);
+          bne(AT, ZR, l);
+        }
+        break;
+      }
+      case UGE: {
+        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
+        if (left == ZR) {
+          beq(right, ZR, l);
+        } else if (right == ZR) {
+          // AL: Always branch to l.
+          beq(ZR, ZR, l);
+        } else {
+          sltu(AT, left, right);
+          beq(AT, ZR, l);
+        }
+        break;
+      }
+      case ULT: {
+        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
+        if (left == ZR) {
+          bne(right, ZR, l);
+        } else if (right == ZR) {
+          // NV: Never branch. Fall through.
+        } else {
+          sltu(AT, left, right);
+          bne(AT, ZR, l);
+        }
+        break;
+      }
+      case ULE: {
+        ASSERT((left != IMM) && (right != IMM));  // No unsigned constants used.
+        if (left == ZR) {
+          // AL: Always branch to l.
+          beq(ZR, ZR, l);
+        } else if (right == ZR) {
+          beq(left, ZR, l);
+        } else {
+          sltu(AT, right, left);
+          beq(AT, ZR, l);
+        }
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+  }
+
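Aside, not part of the patch: the GT/GE/LT/LE immediate cases above, and the slti/sltiu rewrites in the hunks below, all rely on the same identity: x > imm is equivalent to !(x < imm + 1), provided imm + 1 does not overflow the comparison domain, which is what the Utils::IsInt(kImmBits, imm.value() + 1) and imm.value() != -1 guards further down protect against. A small standalone brute-force check of the identity (illustrative C++ only):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Signed form used by the slti-based branches: x > imm  <=>  !(x < imm + 1).
      for (int32_t x = -40; x <= 40; ++x) {
        for (int32_t imm = -40; imm <= 40; ++imm) {
          assert((x > imm) == !(x < imm + 1));
        }
      }
      // Unsigned form used by the sltiu-based branches, valid as long as
      // imm + 1 does not wrap around (imm != 0xFFFFFFFF).
      for (uint32_t x = 0; x <= 80; ++x) {
        for (uint32_t imm = 0; imm <= 80; ++imm) {
          assert((x > imm) == !(x < imm + 1));
        }
      }
      std::printf("identity holds on the sampled ranges\n");
      return 0;
    }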
   void BranchEqual(Register rd, Register rn, Label* l) {
     beq(rd, rn, l);
   }
@@ -989,9 +1167,14 @@
     if (imm.value() == 0) {
       bgtz(rd, l);
     } else {
-      ASSERT(rd != CMPRES2);
-      LoadImmediate(CMPRES2, imm.value());
-      BranchSignedGreater(rd, CMPRES2, l);
+      if (Utils::IsInt(kImmBits, imm.value() + 1)) {
+        slti(CMPRES2, rd, Immediate(imm.value() + 1));
+        beq(CMPRES2, ZR, l);
+      } else {
+        ASSERT(rd != CMPRES2);
+        LoadImmediate(CMPRES2, imm.value());
+        BranchSignedGreater(rd, CMPRES2, l);
+      }
     }
   }
@@ -1006,9 +1189,14 @@
     if (imm.value() == 0) {
       BranchNotEqual(rd, Immediate(0), l);
     } else {
-      ASSERT(rd != CMPRES2);
-      LoadImmediate(CMPRES2, imm.value());
-      BranchUnsignedGreater(rd, CMPRES2, l);
+      if ((imm.value() != -1) && Utils::IsInt(kImmBits, imm.value() + 1)) {
+        sltiu(CMPRES2, rd, Immediate(imm.value() + 1));
+        beq(CMPRES2, ZR, l);
+      } else {
+        ASSERT(rd != CMPRES2);
+        LoadImmediate(CMPRES2, imm.value());
+        BranchUnsignedGreater(rd, CMPRES2, l);
+      }
     }
   }
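Aside on the (imm.value() != -1) guard above, not part of the patch: for an unsigned compare, rd > 0xFFFFFFFF is never true, but imm.value() + 1 wraps to 0, and the sltiu/beq rewrite would then branch unconditionally, since no unsigned value is below 0. The guard sends that case to the register path instead. A standalone C++ sketch of the wraparound:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t imm = 0xFFFFFFFFu;    // the imm.value() == -1 case
      const uint32_t x = 123u;             // any register value
      bool want = x > imm;                 // never true
      bool rewritten = !(x < (imm + 1u));  // imm + 1 wraps to 0, so always true
      std::printf("want=%d rewritten=%d\n", want, rewritten);
      return 0;
    }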
@@ -1045,7 +1233,7 @@
     if (imm.value() == 0) {
       b(l);
     } else {
-      if (Utils::IsUint(kImmBits, imm.value())) {
+      if (Utils::IsInt(kImmBits, imm.value())) {
         sltiu(CMPRES2, rd, imm);
         beq(CMPRES2, ZR, l);
       } else {
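Aside, not part of the patch: the IsUint -> IsInt change above matches how sltiu is encoded on MIPS: the 16-bit immediate is sign-extended to 32 bits before the unsigned comparison. So the constants usable in immediate form are exactly those whose 32-bit pattern is a sign-extended imm16, which is what Utils::IsInt(kImmBits, ...) tests; a value such as 0x8000 passes the unsigned check but would presumably end up compared as 0xFFFF8000. A small standalone illustration (plain C++):

    #include <cstdint>
    #include <cstdio>

    // What the hardware does with sltiu's immediate: sign-extend the 16-bit
    // field, then compare unsigned against the 32-bit register value.
    static uint32_t SignExtend16(uint16_t imm16) {
      uint32_t v = imm16;
      return (v & 0x8000u) ? (v | 0xFFFF0000u) : v;
    }

    int main() {
      // 32768 (0x8000) passes an IsUint(16, ...) check, but the comparand the
      // CPU would actually use is 0xFFFF8000, not 0x00008000.
      std::printf("imm16 0x8000 -> comparand 0x%08X\n", SignExtend16(0x8000));
      // Values accepted by IsInt(16, ...) round-trip exactly, e.g. -1 encodes
      // as 0xFFFF and comes back as 0xFFFFFFFF.
      std::printf("imm16 0xFFFF -> comparand 0x%08X\n", SignExtend16(0xFFFF));
      return 0;
    }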
@@ -1084,14 +1272,17 @@
   void BranchUnsignedLess(Register rd, const Immediate& imm, Label* l) {
     ASSERT(!in_delay_slot_);
-    ASSERT(imm.value() != 0);
-    if (Utils::IsInt(kImmBits, imm.value())) {
-      sltiu(CMPRES2, rd, imm);
-      bne(CMPRES2, ZR, l);
+    if (imm.value() == 0) {
+      // Never branch. Fall through.
     } else {
-      ASSERT(rd != CMPRES2);
-      LoadImmediate(CMPRES2, imm.value());
-      BranchUnsignedGreater(CMPRES2, rd, l);
+      if (Utils::IsInt(kImmBits, imm.value())) {
+        sltiu(CMPRES2, rd, imm);
+        bne(CMPRES2, ZR, l);
+      } else {
+        ASSERT(rd != CMPRES2);
+        LoadImmediate(CMPRES2, imm.value());
+        BranchUnsignedGreater(CMPRES2, rd, l);
+      }
     }
   }
@@ -1105,9 +1296,14 @@
     if (imm.value() == 0) {
       blez(rd, l);
     } else {
-      ASSERT(rd != CMPRES2);
-      LoadImmediate(CMPRES2, imm.value());
-      BranchSignedGreaterEqual(CMPRES2, rd, l);
+      if (Utils::IsInt(kImmBits, imm.value() + 1)) {
+        slti(CMPRES2, rd, Immediate(imm.value() + 1));
+        bne(CMPRES2, ZR, l);
+      } else {
+        ASSERT(rd != CMPRES2);
+        LoadImmediate(CMPRES2, imm.value());
+        BranchSignedGreaterEqual(CMPRES2, rd, l);
+      }
     }
   }
@@ -1118,9 +1314,18 @@
   void BranchUnsignedLessEqual(Register rd, const Immediate& imm, Label* l) {
     ASSERT(!in_delay_slot_);
-    ASSERT(rd != CMPRES2);
-    LoadImmediate(CMPRES2, imm.value());
-    BranchUnsignedGreaterEqual(CMPRES2, rd, l);
+    if (imm.value() == 0) {
+      beq(rd, ZR, l);
+    } else {
+      if ((imm.value() != -1) && Utils::IsInt(kImmBits, imm.value() + 1)) {
+        sltiu(CMPRES2, rd, Immediate(imm.value() + 1));
+        bne(CMPRES2, ZR, l);
+      } else {
+        ASSERT(rd != CMPRES2);
+        LoadImmediate(CMPRES2, imm.value());
+        BranchUnsignedGreaterEqual(CMPRES2, rd, l);
+      }
+    }
+  }
   void Push(Register rt) {
@@ -1218,12 +1423,6 @@
   void LoadObject(Register rd, const Object& object);
   void PushObject(const Object& object);
-  // Compares rn with the object. Returns results in rd1 and rd2.
-  // rd1 is 1 if rn < object. rd2 is 1 if object < rn. Since both cannot be
-  // 1, rd1 == rd2 (== 0) iff rn == object.
-  void CompareObject(Register rd1, Register rd2,
-                     Register rn, const Object& object);
-
   void LoadIsolate(Register result);
   void LoadClassId(Register result, Register object);