Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(781)

Unified Diff: src/mips/macro-assembler-mips.cc

Issue 1320006: Updates and fixes for MIPS support. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: src/mips/macro-assembler-mips.cc
===================================================================
--- src/mips/macro-assembler-mips.cc (revision 4259)
+++ src/mips/macro-assembler-mips.cc (working copy)
@@ -46,64 +46,90 @@
}
+// Arguments macros
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
-void MacroAssembler::Jump(Register target, Condition cond,
- Register r1, const Operand& r2) {
- Jump(Operand(target), cond, r1, r2);
+#define REGISTER_TARGET_BODY(Name) \
+void MacroAssembler::Name(Register target, \
+ bool ProtectBranchDelaySlot) { \
Søren Thygesen Gjesse 2010/05/25 09:00:56 ProtectBranchDelaySlot -> protect_branch_delay_slot
+ Name(Operand(target), ProtectBranchDelaySlot); \
+} \
+void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
+ bool ProtectBranchDelaySlot) { \
+ Name(Operand(target), COND_ARGS, ProtectBranchDelaySlot); \
}
-
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(target, rmode), cond, r1, r2);
+#define INT_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
+ bool ProtectBranchDelaySlot) { \
+ Name(Operand(target, rmode), ProtectBranchDelaySlot); \
+} \
+void MacroAssembler::Name(intptr_t target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ bool ProtectBranchDelaySlot) { \
+ Name(Operand(target, rmode), COND_ARGS, ProtectBranchDelaySlot); \
}
-
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+#define BYTE_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
+ bool ProtectBranchDelaySlot) { \
+ Name(reinterpret_cast<intptr_t>(target), rmode, ProtectBranchDelaySlot); \
+} \
+void MacroAssembler::Name(byte* target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ bool ProtectBranchDelaySlot) { \
+ Name(reinterpret_cast<intptr_t>(target), \
+ rmode, \
+ COND_ARGS, \
+ ProtectBranchDelaySlot); \
}
-
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+#define CODE_TARGET_BODY(Name) \
+void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
+ bool ProtectBranchDelaySlot) { \
+ Name(reinterpret_cast<intptr_t>(target.location()), \
+ rmode, ProtectBranchDelaySlot); \
+} \
+void MacroAssembler::Name(Handle<Code> target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ bool ProtectBranchDelaySlot) { \
+ Name(reinterpret_cast<intptr_t>(target.location()), \
+ rmode, \
+ COND_ARGS, \
+ ProtectBranchDelaySlot); \
}
+REGISTER_TARGET_BODY(Jump)
+REGISTER_TARGET_BODY(Call)
+INT_PTR_TARGET_BODY(Jump)
+INT_PTR_TARGET_BODY(Call)
+BYTE_PTR_TARGET_BODY(Jump)
+BYTE_PTR_TARGET_BODY(Call)
+CODE_TARGET_BODY(Jump)
+CODE_TARGET_BODY(Call)
-void MacroAssembler::Call(Register target,
- Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target), cond, r1, r2);
-}
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef REGISTER_TARGET_BODY
+#undef BYTE_PTR_TARGET_BODY
+#undef CODE_TARGET_BODY
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target, rmode), cond, r1, r2);
+void MacroAssembler::Ret(bool ProtectBranchDelaySlot) {
+ Jump(Operand(ra), ProtectBranchDelaySlot);
}
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
+ bool ProtectBranchDelaySlot) {
+ Jump(Operand(ra), cond, r1, r2, ProtectBranchDelaySlot);
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
-}
-
-
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(ra), cond, r1, r2);
-}
-
-
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
@@ -113,12 +139,13 @@
Heap::RootListIndex index,
Condition cond,
Register src1, const Operand& src2) {
- Branch(NegateCondition(cond), 2, src1, src2);
+ Branch(2, NegateCondition(cond), src1, src2);
Søren Thygesen Gjesse 2010/05/25 09:00:56 Assert that lw generates 2 instructions? Or use a
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
-void MacroAssembler::RecordWrite(Register object, Register offset,
+void MacroAssembler::RecordWrite(Register object,
+ Register offset,
Register scratch) {
UNIMPLEMENTED_MIPS();
}
@@ -127,33 +154,33 @@
// ---------------------------------------------------------------------------
// Instruction macros
-void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- add(rd, rs, rt.rm());
+ addu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
- addi(rd, rs, rt.imm32_);
+ addiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- add(rd, rs, at);
+ addu(rd, rs, at);
}
}
}
-void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- addu(rd, rs, rt.rm());
+ subu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
- addiu(rd, rs, rt.imm32_);
+ addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- addu(rd, rs, at);
+ subu(rd, rs, at);
}
}
}
@@ -223,7 +250,7 @@
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
andi(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -239,7 +266,7 @@
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
ori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -255,7 +282,7 @@
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
xori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -299,7 +326,7 @@
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
sltiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -313,12 +340,6 @@
//------------Pseudo-instructions-------------
-void MacroAssembler::movn(Register rd, Register rt) {
- addiu(at, zero_reg, -1); // Fill at with ones.
- xor_(rd, rt, at);
-}
-
-
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
ASSERT(!j.is_reg());
@@ -416,11 +437,29 @@
// Emulated conditional branches do not emit a nop in the branch delay slot.
+//
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
+ (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
+ (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-// Trashes the at register if no scratch register is provided.
-void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::Branch(int16_t offset,
+ bool ProtectBranchDelaySlot) {
+ b(offset);
+
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
+}
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ bool ProtectBranchDelaySlot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ ASSERT(!rs.is(zero_reg));
Register r2 = no_reg;
+ Register scratch = at;
if (rt.is_reg()) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
@@ -482,14 +521,33 @@
default:
UNREACHABLE();
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
Søren Thygesen Gjesse 2010/05/25 09:00:56 Either use {}'s or place the nop() on the same line.
+ nop();
}
-void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::Branch(Label* L,
+ bool ProtectBranchDelaySlot) {
+ // We use branch_offset as an argument for the branch instructions to be sure
+ // it is called just before generating the branch instruction, as needed.
+
+ b(shifted_branch_offset(L, false));
+
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
+}
+
+
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ bool ProtectBranchDelaySlot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset;
Register r2 = no_reg;
+ Register scratch = at;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -497,71 +555,99 @@
li(r2, rt);
}
- // We use branch_offset as an argument for the branch instructions to be sure
- // it is called just before generating the branch instruction, as needed.
+ // Be careful to always use shifted_branch_offset only just before the branch
+ // instruction, as the location will be remembered for patching the target.
Søren Thygesen Gjesse 2010/05/25 09:00:56 remember -> remembered
switch (cond) {
case cc_always:
- b(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ b(offset);
break;
case eq:
- beq(rs, r2, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
break;
case ne:
- bne(rs, r2, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
break;
// Signed comparison
case greater:
slt(scratch, r2, rs);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
break;
case greater_equal:
slt(scratch, rs, r2);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
break;
case less:
slt(scratch, rs, r2);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
break;
case less_equal:
slt(scratch, r2, rs);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
break;
case Uless:
sltu(scratch, rs, r2);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
break;
case Uless_equal:
sltu(scratch, r2, rs);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
break;
default:
UNREACHABLE();
}
- // Emit a nop in the branch delay slot.
- nop();
+
+ // Check that the offset fits in an int16_t.
+ ASSERT(is_int16(offset));
+
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
}
-// Trashes the at register if no scratch register is provided.
// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::BranchAndLink(int16_t offset,
+ bool ProtectBranchDelaySlot) {
+ bal(offset);
+
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ bool ProtectBranchDelaySlot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
Register r2 = no_reg;
+ Register scratch = at;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -631,14 +717,30 @@
default:
UNREACHABLE();
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
}
-void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::BranchAndLink(Label* L,
+ bool ProtectBranchDelaySlot) {
+ bal(shifted_branch_offset(L, false));
+
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ bool ProtectBranchDelaySlot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset;
Register r2 = no_reg;
+ Register scratch = at;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -648,78 +750,114 @@
switch (cond) {
case cc_always:
- bal(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
break;
case eq:
bne(rs, r2, 2);
nop();
- bal(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
break;
case ne:
beq(rs, r2, 2);
nop();
- bal(shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
break;
// Signed comparison
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case greater_equal:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
case less:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case less_equal:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
case Uless:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
break;
case Uless_equal:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
break;
default:
UNREACHABLE();
}
- // Emit a nop in the branch delay slot.
- nop();
+
+ // Check that the offset fits in an int16_t.
+ ASSERT(is_int16(offset));
+
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
}
void MacroAssembler::Jump(const Operand& target,
- Condition cond, Register rs, const Operand& rt) {
+ bool ProtectBranchDelaySlot) {
if (target.is_reg()) {
+ jr(target.rm());
+ } else { // !target.is_reg()
+ if (!MustUseAt(target.rmode_)) {
+ j(target.imm32_);
+ } else { // MustUseAt(target)
+ li(at, target);
+ jr(at);
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
+}
+
+
+void MacroAssembler::Jump(const Operand& target,
+ Condition cond, Register rs, const Operand& rt,
+ bool ProtectBranchDelaySlot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ if (target.is_reg()) {
if (cond == cc_always) {
jr(target.rm());
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
Søren Thygesen Gjesse 2010/05/25 09:00:56 Please use either labels to jump or assert the
jr(target.rm());
}
} else { // !target.is_reg()
@@ -727,7 +865,7 @@
if (cond == cc_always) {
j(target.imm32_);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
j(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseAt(target)
@@ -735,23 +873,44 @@
if (cond == cc_always) {
jr(at);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jr(at); // Will generate only one instruction.
}
}
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
}
void MacroAssembler::Call(const Operand& target,
- Condition cond, Register rs, const Operand& rt) {
+ bool ProtectBranchDelaySlot) {
if (target.is_reg()) {
+ jalr(target.rm());
+ } else { // !target.is_reg()
+ if (!MustUseAt(target.rmode_)) {
+ jal(target.imm32_);
+ } else { // MustUseAt(target)
+ li(at, target);
+ jalr(at);
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
+}
+
+
+void MacroAssembler::Call(const Operand& target,
+ Condition cond, Register rs, const Operand& rt,
+ bool ProtectBranchDelaySlot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ if (target.is_reg()) {
if (cond == cc_always) {
jalr(target.rm());
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jalr(target.rm());
}
} else { // !target.is_reg()
@@ -759,7 +918,7 @@
if (cond == cc_always) {
jal(target.imm32_);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jal(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseAt(target)
@@ -767,15 +926,17 @@
if (cond == cc_always) {
jalr(at);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jalr(at); // Will generate only one instruction.
}
}
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (ProtectBranchDelaySlot)
+ nop();
}
+
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
UNIMPLEMENTED_MIPS();
}
@@ -878,15 +1039,15 @@
// We check for args and receiver size on the stack, all of them word sized.
// We add one for sp, that we also want to store on the stack.
if (((arg_count + 1) % kPointerSizeLog2) == 0) {
- Branch(ne, &extra_push, at, Operand(zero_reg));
+ Branch(&extra_push, ne, scratch, Operand(zero_reg));
} else { // ((arg_count + 1) % 2) == 1
- Branch(eq, &extra_push, at, Operand(zero_reg));
+ Branch(&extra_push, eq, scratch, Operand(zero_reg));
}
// Save sp on the stack.
mov(scratch, sp);
Push(scratch);
- b(&end);
+ jmp(&end);
// Align before saving sp on the stack.
bind(&extra_push);
@@ -948,10 +1109,10 @@
}
}
} else if (actual.is_immediate()) {
- Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
+ Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
li(a0, Operand(actual.immediate()));
} else {
- Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
+ Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
}
if (!definitely_matches) {
@@ -963,8 +1124,7 @@
ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
if (flag == CALL_FUNCTION) {
CallBuiltin(adaptor);
- b(done);
- nop();
+ jmp(done);
} else {
JumpToBuiltin(adaptor);
}
@@ -1051,8 +1211,8 @@
// Call and allocate arguments slots.
jalr(t9);
// Use the branch delay slot to allocate argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+ addiu(sp, sp, -StandardFrameConstants::kBArgsSlotsSize);
Søren Thygesen Gjesse 2010/05/25 09:00:56 Please add a comment that the second addiu will be
+ addiu(sp, sp, StandardFrameConstants::kBArgsSlotsSize);
}
@@ -1061,8 +1221,8 @@
// Call and allocate arguments slots.
jalr(target);
// Use the branch delay slot to allocate argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+ addiu(sp, sp, -StandardFrameConstants::kBArgsSlotsSize);
Søren Thygesen Gjesse 2010/05/25 09:00:56 Ditto.
+ addiu(sp, sp, StandardFrameConstants::kBArgsSlotsSize);
}
@@ -1074,7 +1234,7 @@
// Call and allocate arguments slots.
jr(t9);
// Use the branch delay slot to allocate argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+ addiu(sp, sp, -StandardFrameConstants::kBArgsSlotsSize);
}
@@ -1083,7 +1243,7 @@
// Call and allocate arguments slots.
jr(t9);
// Use the branch delay slot to allocate argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+ addiu(sp, sp, -StandardFrameConstants::kBArgsSlotsSize);
}
@@ -1214,13 +1374,13 @@
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
- li(t0, Operand(Smi::FromInt(type)));
- li(t1, Operand(CodeObject()));
+ li(t8, Operand(Smi::FromInt(type)));
+ li(t9, Operand(CodeObject()));
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
sw(cp, MemOperand(sp, 2 * kPointerSize));
- sw(t0, MemOperand(sp, 1 * kPointerSize));
- sw(t1, MemOperand(sp, 0 * kPointerSize));
+ sw(t8, MemOperand(sp, 1 * kPointerSize));
+ sw(t9, MemOperand(sp, 0 * kPointerSize));
addiu(fp, sp, 3 * kPointerSize);
}
@@ -1239,22 +1399,22 @@
Register hold_function) {
// Compute the argv pointer and keep it in a callee-saved register.
// a0 is argc.
- sll(t0, a0, kPointerSizeLog2);
- add(hold_argv, sp, t0);
- addi(hold_argv, hold_argv, -kPointerSize);
+ sll(t8, a0, kPointerSizeLog2);
+ addu(hold_argv, sp, t8);
+ addiu(hold_argv, hold_argv, -kPointerSize);
// Compute callee's stack pointer before making changes and save it as
- // t1 register so that it is restored as sp register on exit, thereby
+ // t9 register so that it is restored as sp register on exit, thereby
// popping the args.
- // t1 = sp + kPointerSize * #args
- add(t1, sp, t0);
+ // t9 = sp + kPointerSize * #args
+ addu(t9, sp, t8);
// Align the stack at this point.
AlignStack(0);
// Save registers.
addiu(sp, sp, -12);
- sw(t1, MemOperand(sp, 8));
+ sw(t9, MemOperand(sp, 8));
sw(ra, MemOperand(sp, 4));
sw(fp, MemOperand(sp, 0));
mov(fp, sp); // Setup new frame pointer.
@@ -1263,15 +1423,15 @@
if (mode == ExitFrame::MODE_DEBUG) {
Push(zero_reg);
} else {
- li(t0, Operand(CodeObject()));
- Push(t0);
+ li(t8, Operand(CodeObject()));
+ Push(t8);
}
// Save the frame pointer and the context in top.
- LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- sw(fp, MemOperand(t0));
- LoadExternalReference(t0, ExternalReference(Top::k_context_address));
- sw(cp, MemOperand(t0));
+ LoadExternalReference(t8, ExternalReference(Top::k_c_entry_fp_address));
+ sw(fp, MemOperand(t8));
+ LoadExternalReference(t8, ExternalReference(Top::k_context_address));
+ sw(cp, MemOperand(t8));
// Setup argc and the builtin function in callee-saved registers.
mov(hold_argc, a0);
@@ -1281,14 +1441,14 @@
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
// Clear top frame.
- LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
- sw(zero_reg, MemOperand(t0));
+ LoadExternalReference(t8, ExternalReference(Top::k_c_entry_fp_address));
+ sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- LoadExternalReference(t0, ExternalReference(Top::k_context_address));
- lw(cp, MemOperand(t0));
+ LoadExternalReference(t8, ExternalReference(Top::k_context_address));
+ lw(cp, MemOperand(t8));
#ifdef DEBUG
- sw(a3, MemOperand(t0));
+ sw(a3, MemOperand(t8));
#endif
// Pop the arguments, restore registers, and return.
@@ -1304,17 +1464,29 @@
void MacroAssembler::AlignStack(int offset) {
// On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
// and an offset of 1 aligns to 4 modulo 8 bytes.
+#if defined(V8_HOST_ARCH_MIPS)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one MIPS
+ // platform for another MIPS platform with a different alignment.
int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else // defined(V8_HOST_ARCH_MIPS)
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so we will always align at
+ // this point here.
+ int activation_frame_alignment = 2 * kPointerSize;
+#endif // defined(V8_HOST_ARCH_MIPS)
if (activation_frame_alignment != kPointerSize) {
// This code needs to be made more general if this assert doesn't hold.
ASSERT(activation_frame_alignment == 2 * kPointerSize);
if (offset == 0) {
- andi(t0, sp, activation_frame_alignment - 1);
- Push(zero_reg, eq, t0, zero_reg);
+ andi(t8, sp, activation_frame_alignment - 1);
+ Push(zero_reg, eq, t8, zero_reg);
} else {
- andi(t0, sp, activation_frame_alignment - 1);
- addiu(t0, t0, -4);
- Push(zero_reg, eq, t0, zero_reg);
+ andi(t8, sp, activation_frame_alignment - 1);
+ addiu(t8, t8, -4);
+ Push(zero_reg, eq, t8, zero_reg);
}
}
}

Powered by Google App Engine
This is Rietveld 408576698