Index: src/mips/macro-assembler-mips.cc |
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc |
index 02ece8c56ca8954ee8b9771975e21cf9b70b971e..bd4ab480a5938a1b9cf6fe80a18320c73716a868 100644 |
--- a/src/mips/macro-assembler-mips.cc |
+++ b/src/mips/macro-assembler-mips.cc |
@@ -1,4 +1,4 @@ |
-// Copyright 2010 the V8 project authors. All rights reserved. |
+// Copyright 2011 the V8 project authors. All rights reserved. |
// Redistribution and use in source and binary forms, with or without |
// modification, are permitted provided that the following conditions are |
// met: |
@@ -25,7 +25,7 @@ |
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
- |
+#include <limits.h> // For LONG_MIN, LONG_MAX |
#include "v8.h" |
@@ -41,68 +41,90 @@ namespace internal { |
MacroAssembler::MacroAssembler(void* buffer, int size) |
: Assembler(buffer, size), |
- unresolved_(0), |
generating_stub_(false), |
allow_stub_calls_(true), |
- code_object_(Heap::undefined_value()) { |
+ code_object_(HEAP->undefined_value()) { |
} |
+// Argument macros. |
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2 |
+#define COND_ARGS cond, r1, r2 |
-void MacroAssembler::Jump(Register target, Condition cond, |
- Register r1, const Operand& r2) { |
- Jump(Operand(target), cond, r1, r2); |
-} |
- |
- |
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, |
- Condition cond, Register r1, const Operand& r2) { |
- Jump(Operand(target, rmode), cond, r1, r2); |
+#define REGISTER_TARGET_BODY(Name) \ |
+void MacroAssembler::Name(Register target, \ |
+ BranchDelaySlot bd) { \ |
+ Name(Operand(target), bd); \ |
+} \ |
+void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \ |
+ BranchDelaySlot bd) { \ |
+ Name(Operand(target), COND_ARGS, bd); \ |
} |
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode, |
- Condition cond, Register r1, const Operand& r2) { |
- ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
- Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); |
+#define INT_PTR_TARGET_BODY(Name) \ |
+void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \ |
+ BranchDelaySlot bd) { \ |
+ Name(Operand(target, rmode), bd); \ |
+} \ |
+void MacroAssembler::Name(intptr_t target, \ |
+ RelocInfo::Mode rmode, \ |
+ COND_TYPED_ARGS, \ |
+ BranchDelaySlot bd) { \ |
+ Name(Operand(target, rmode), COND_ARGS, bd); \ |
} |
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, |
- Condition cond, Register r1, const Operand& r2) { |
- ASSERT(RelocInfo::IsCodeTarget(rmode)); |
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); |
+#define BYTE_PTR_TARGET_BODY(Name) \ |
+void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \ |
+ BranchDelaySlot bd) { \ |
+ Name(reinterpret_cast<intptr_t>(target), rmode, bd); \ |
+} \ |
+void MacroAssembler::Name(byte* target, \ |
+ RelocInfo::Mode rmode, \ |
+ COND_TYPED_ARGS, \ |
+ BranchDelaySlot bd) { \ |
+ Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \ |
} |
-void MacroAssembler::Call(Register target, |
- Condition cond, Register r1, const Operand& r2) { |
- Call(Operand(target), cond, r1, r2); |
+#define CODE_TARGET_BODY(Name) \ |
+void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \ |
+ BranchDelaySlot bd) { \ |
+ Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \ |
+} \ |
+void MacroAssembler::Name(Handle<Code> target, \ |
+ RelocInfo::Mode rmode, \ |
+ COND_TYPED_ARGS, \ |
+ BranchDelaySlot bd) { \ |
+ Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \ |
} |
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, |
- Condition cond, Register r1, const Operand& r2) { |
- Call(Operand(target, rmode), cond, r1, r2); |
-} |
- |
+REGISTER_TARGET_BODY(Jump) |
+REGISTER_TARGET_BODY(Call) |
+INT_PTR_TARGET_BODY(Jump) |
+INT_PTR_TARGET_BODY(Call) |
+BYTE_PTR_TARGET_BODY(Jump) |
+BYTE_PTR_TARGET_BODY(Call) |
+CODE_TARGET_BODY(Jump) |
+CODE_TARGET_BODY(Call) |
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode, |
- Condition cond, Register r1, const Operand& r2) { |
- ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
- Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); |
-} |
+#undef COND_TYPED_ARGS |
+#undef COND_ARGS |
+#undef REGISTER_TARGET_BODY |
+#undef INT_PTR_TARGET_BODY |
+#undef BYTE_PTR_TARGET_BODY |
+#undef CODE_TARGET_BODY |
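// For reference, REGISTER_TARGET_BODY(Jump) above expands to exactly this |
// pair of overloads (the other *_TARGET_BODY macros follow the same shape): |
// |
//   void MacroAssembler::Jump(Register target, BranchDelaySlot bd) { |
//     Jump(Operand(target), bd); |
//   } |
//   void MacroAssembler::Jump(Register target, Condition cond, Register r1, |
//                             const Operand& r2, BranchDelaySlot bd) { |
//     Jump(Operand(target), cond, r1, r2, bd); |
//   } |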
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, |
- Condition cond, Register r1, const Operand& r2) { |
- ASSERT(RelocInfo::IsCodeTarget(rmode)); |
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2); |
+void MacroAssembler::Ret(BranchDelaySlot bd) { |
+ Jump(Operand(ra), bd); |
} |
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) { |
- Jump(Operand(ra), cond, r1, r2); |
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2, |
+ BranchDelaySlot bd) { |
+ Jump(Operand(ra), cond, r1, r2, bd); |
} |
@@ -111,51 +133,248 @@ void MacroAssembler::LoadRoot(Register destination, |
lw(destination, MemOperand(s6, index << kPointerSizeLog2)); |
} |
+ |
void MacroAssembler::LoadRoot(Register destination, |
Heap::RootListIndex index, |
Condition cond, |
Register src1, const Operand& src2) { |
- Branch(NegateCondition(cond), 2, src1, src2); |
+ Branch(2, NegateCondition(cond), src1, src2); |
lw(destination, MemOperand(s6, index << kPointerSizeLog2)); |
} |
-void MacroAssembler::RecordWrite(Register object, Register offset, |
+void MacroAssembler::StoreRoot(Register source, |
+ Heap::RootListIndex index) { |
+ sw(source, MemOperand(s6, index << kPointerSizeLog2)); |
+} |
+ |
+ |
+void MacroAssembler::StoreRoot(Register source, |
+ Heap::RootListIndex index, |
+ Condition cond, |
+ Register src1, const Operand& src2) { |
+ Branch(2, NegateCondition(cond), src1, src2); |
+ sw(source, MemOperand(s6, index << kPointerSizeLog2)); |
+} |
+ |
+ |
+void MacroAssembler::RecordWriteHelper(Register object, |
+ Register address, |
+ Register scratch) { |
+ if (FLAG_debug_code) { |
+ // Check that the object is not in new space. |
+ Label not_in_new_space; |
+ InNewSpace(object, scratch, ne, ¬_in_new_space); |
+ Abort("new-space object passed to RecordWriteHelper"); |
+ bind(¬_in_new_space); |
+ } |
+ |
+ // Calculate page address: clear the low kPageSizeBits bits. |
+ if (mips32r2) { |
+ Ins(object, zero_reg, 0, kPageSizeBits); |
+ } else { |
+ // On r1 the Ins macro is emulated and slow, so use shifts instead. |
+ srl(object, object, kPageSizeBits); |
+ sll(object, object, kPageSizeBits); |
+ } |
+ |
+ // Calculate region number. |
+ Ext(address, address, Page::kRegionSizeLog2, |
+ kPageSizeBits - Page::kRegionSizeLog2); |
+ |
+ // Mark region dirty. |
+ lw(scratch, MemOperand(object, Page::kDirtyFlagOffset)); |
+ li(at, Operand(1)); |
+ sllv(at, at, address); |
+ or_(scratch, scratch, at); |
+ sw(scratch, MemOperand(object, Page::kDirtyFlagOffset)); |
+} |
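// In plain C++ terms, the region-marking sequence above computes roughly |
// the following (a sketch; the Page constants are the ones used above): |
// |
//   uintptr_t page = object & ~((uintptr_t(1) << kPageSizeBits) - 1); |
//   uint32_t region = (address >> Page::kRegionSizeLog2) & |
//                     ((1u << (kPageSizeBits - Page::kRegionSizeLog2)) - 1); |
//   uint32_t* dirty = reinterpret_cast<uint32_t*>(page + Page::kDirtyFlagOffset); |
//   *dirty |= 1u << region;  // Mark the region holding 'address' dirty. |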
+ |
+ |
+void MacroAssembler::InNewSpace(Register object, |
+ Register scratch, |
+ Condition cc, |
+ Label* branch) { |
+ ASSERT(cc == eq || cc == ne); |
+ And(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); |
+ Branch(branch, cc, scratch, |
+ Operand(ExternalReference::new_space_start(isolate()))); |
+} |
+ |
+ |
+// Will clobber 4 registers: object, scratch0, scratch1, at. The |
+// register 'object' contains a heap object pointer. The heap object |
+// tag is shifted away. |
+void MacroAssembler::RecordWrite(Register object, |
+ Operand offset, |
+ Register scratch0, |
+ Register scratch1) { |
+ // The compiled code assumes that record write doesn't change the |
+ // context register, so we check that none of the clobbered |
+ // registers are cp. |
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp)); |
+ |
+ Label done; |
+ |
+ // First, test that the object is not in the new space. We cannot set |
+ // region marks for new space pages. |
+ InNewSpace(object, scratch0, eq, &done); |
+ |
+ // Add offset into the object. |
+ Addu(scratch0, object, offset); |
+ |
+ // Record the actual write. |
+ RecordWriteHelper(object, scratch0, scratch1); |
+ |
+ bind(&done); |
+ |
+ // Clobber all input registers when running with the debug-code flag |
+ // turned on to provoke errors. |
+ if (FLAG_debug_code) { |
+ li(object, Operand(BitCast<int32_t>(kZapValue))); |
+ li(scratch0, Operand(BitCast<int32_t>(kZapValue))); |
+ li(scratch1, Operand(BitCast<int32_t>(kZapValue))); |
+ } |
+} |
+ |
+ |
+// Will clobber 4 registers: object, address, scratch, ip. The |
+// register 'object' contains a heap object pointer. The heap object |
+// tag is shifted away. |
+void MacroAssembler::RecordWrite(Register object, |
+ Register address, |
Register scratch) { |
- UNIMPLEMENTED_MIPS(); |
+ // The compiled code assumes that record write doesn't change the |
+ // context register, so we check that none of the clobbered |
+ // registers are cp. |
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp)); |
+ |
+ Label done; |
+ |
+ // First, test that the object is not in the new space. We cannot set |
+ // region marks for new space pages. |
+ InNewSpace(object, scratch, eq, &done); |
+ |
+ // Record the actual write. |
+ RecordWriteHelper(object, address, scratch); |
+ |
+ bind(&done); |
+ |
+ // Clobber all input registers when running with the debug-code flag |
+ // turned on to provoke errors. |
+ if (FLAG_debug_code) { |
+ li(object, Operand(BitCast<int32_t>(kZapValue))); |
+ li(address, Operand(BitCast<int32_t>(kZapValue))); |
+ li(scratch, Operand(BitCast<int32_t>(kZapValue))); |
+ } |
+} |
+ |
+ |
+// ----------------------------------------------------------------------------- |
+// Allocation support |
+ |
+ |
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
+ Register scratch, |
+ Label* miss) { |
+ Label same_contexts; |
+ |
+ ASSERT(!holder_reg.is(scratch)); |
+ ASSERT(!holder_reg.is(at)); |
+ ASSERT(!scratch.is(at)); |
+ |
+ // Load current lexical context from the stack frame. |
+ lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ // In debug mode, make sure the lexical context is set. |
+#ifdef DEBUG |
+ Check(ne, "we should not have an empty lexical context", |
+ scratch, Operand(zero_reg)); |
+#endif |
+ |
+ // Load the global context of the current context. |
+ int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; |
+ lw(scratch, FieldMemOperand(scratch, offset)); |
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); |
+ |
+ // Check the context is a global context. |
+ if (FLAG_debug_code) { |
+ // TODO(119): Avoid push(holder_reg)/pop(holder_reg). |
+ Push(holder_reg); // Temporarily save holder on the stack. |
+ // Read the first word and compare to the global_context_map. |
+ lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
+ LoadRoot(at, Heap::kGlobalContextMapRootIndex); |
+ Check(eq, "JSGlobalObject::global_context should be a global context.", |
+ holder_reg, Operand(at)); |
+ Pop(holder_reg); // Restore holder. |
+ } |
+ |
+ // Check if both contexts are the same. |
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); |
+ Branch(&same_contexts, eq, scratch, Operand(at)); |
+ |
+ // Check the context is a global context. |
+ if (FLAG_debug_code) { |
+ // TODO(119): Avoid push(holder_reg)/pop(holder_reg). |
+ Push(holder_reg); // Temporarily save holder on the stack. |
+ mov(holder_reg, at); // Move at to its holding place. |
+ LoadRoot(at, Heap::kNullValueRootIndex); |
+ Check(ne, "JSGlobalProxy::context() should not be null.", |
+ holder_reg, Operand(at)); |
+ |
+ lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); |
+ LoadRoot(at, Heap::kGlobalContextMapRootIndex); |
+ Check(eq, "JSGlobalObject::global_context should be a global context.", |
+ holder_reg, Operand(at)); |
+ // Restore at is not needed. at is reloaded below. |
+ Pop(holder_reg); // Restore holder. |
+ // Restore at to holder's context. |
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); |
+ } |
+ |
+ // Check that the security token in the calling global object is |
+ // compatible with the security token in the receiving global |
+ // object. |
+ int token_offset = Context::kHeaderSize + |
+ Context::SECURITY_TOKEN_INDEX * kPointerSize; |
+ |
+ lw(scratch, FieldMemOperand(scratch, token_offset)); |
+ lw(at, FieldMemOperand(at, token_offset)); |
+ Branch(miss, ne, scratch, Operand(at)); |
+ |
+ bind(&same_contexts); |
} |
// --------------------------------------------------------------------------- |
// Instruction macros |
-void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) { |
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { |
if (rt.is_reg()) { |
- add(rd, rs, rt.rm()); |
+ addu(rd, rs, rt.rm()); |
} else { |
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { |
- addi(rd, rs, rt.imm32_); |
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { |
+ addiu(rd, rs, rt.imm32_); |
} else { |
// li handles the relocation. |
ASSERT(!rs.is(at)); |
li(at, rt); |
- add(rd, rs, at); |
+ addu(rd, rs, at); |
} |
} |
} |
-void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { |
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { |
if (rt.is_reg()) { |
- addu(rd, rs, rt.rm()); |
+ subu(rd, rs, rt.rm()); |
} else { |
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { |
- addiu(rd, rs, rt.imm32_); |
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { |
+ addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm). |
} else { |
// li handles the relocation. |
ASSERT(!rs.is(at)); |
li(at, rt); |
- addu(rd, rs, at); |
+ subu(rd, rs, at); |
} |
} |
} |
@@ -225,7 +444,7 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { |
if (rt.is_reg()) { |
and_(rd, rs, rt.rm()); |
} else { |
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { |
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { |
andi(rd, rs, rt.imm32_); |
} else { |
// li handles the relocation. |
@@ -241,7 +460,7 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { |
if (rt.is_reg()) { |
or_(rd, rs, rt.rm()); |
} else { |
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { |
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { |
ori(rd, rs, rt.imm32_); |
} else { |
// li handles the relocation. |
@@ -257,7 +476,7 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { |
if (rt.is_reg()) { |
xor_(rd, rs, rt.rm()); |
} else { |
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { |
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { |
xori(rd, rs, rt.imm32_); |
} else { |
// li handles the relocation. |
@@ -285,7 +504,7 @@ void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { |
if (rt.is_reg()) { |
slt(rd, rs, rt.rm()); |
} else { |
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { |
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { |
slti(rd, rs, rt.imm32_); |
} else { |
// li handles the relocation. |
@@ -301,7 +520,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { |
if (rt.is_reg()) { |
sltu(rd, rs, rt.rm()); |
} else { |
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) { |
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { |
sltiu(rd, rs, rt.imm32_); |
} else { |
// li handles the relocation. |
@@ -313,31 +532,51 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { |
} |
-//------------Pseudo-instructions------------- |
- |
-void MacroAssembler::movn(Register rd, Register rt) { |
- addiu(at, zero_reg, -1); // Fill at with ones. |
- xor_(rd, rt, at); |
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { |
+ if (mips32r2) { |
+ if (rt.is_reg()) { |
+ rotrv(rd, rs, rt.rm()); |
+ } else { |
+ rotr(rd, rs, rt.imm32_); |
+ } |
+ } else { |
+ if (rt.is_reg()) { |
+ subu(at, zero_reg, rt.rm()); |
+ sllv(at, rs, at); |
+ srlv(rd, rs, rt.rm()); |
+ or_(rd, rd, at); |
+ } else { |
+ if (rt.imm32_ == 0) { |
+ srl(rd, rs, 0); |
+ } else { |
+ srl(at, rs, rt.imm32_); |
+ sll(rd, rs, (0x20 - rt.imm32_) & 0x1f); |
+ or_(rd, rd, at); |
+ } |
+ } |
+ } |
} |
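// The pre-r2 fallback above is the usual rotate-right-by-shifts idiom; in |
// C++ terms it computes (a sketch assuming 32-bit registers): |
// |
//   uint32_t Ror(uint32_t x, uint32_t n) { |
//     n &= 31; |
//     return (n == 0) ? x : ((x >> n) | (x << (32 - n))); |
//   } |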
+//------------Pseudo-instructions------------- |
+ |
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) { |
ASSERT(!j.is_reg()); |
- |
- if (!MustUseAt(j.rmode_) && !gen2instr) { |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
+ if (!MustUseReg(j.rmode_) && !gen2instr) { |
// Normal load of an immediate value which does not need Relocation Info. |
if (is_int16(j.imm32_)) { |
addiu(rd, zero_reg, j.imm32_); |
- } else if (!(j.imm32_ & HIMask)) { |
+ } else if (!(j.imm32_ & kHiMask)) { |
ori(rd, zero_reg, j.imm32_); |
- } else if (!(j.imm32_ & LOMask)) { |
- lui(rd, (HIMask & j.imm32_) >> 16); |
+ } else if (!(j.imm32_ & kImm16Mask)) { |
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); |
} else { |
- lui(rd, (HIMask & j.imm32_) >> 16); |
- ori(rd, rd, (LOMask & j.imm32_)); |
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); |
+ ori(rd, rd, (j.imm32_ & kImm16Mask)); |
} |
- } else if (MustUseAt(j.rmode_) || gen2instr) { |
- if (MustUseAt(j.rmode_)) { |
+ } else if (MustUseReg(j.rmode_) || gen2instr) { |
+ if (MustUseReg(j.rmode_)) { |
RecordRelocInfo(j.rmode_, j.imm32_); |
} |
// We need always the same number of instructions as we may need to patch |
@@ -345,15 +584,15 @@ void MacroAssembler::li(Register rd, Operand j, bool gen2instr) { |
if (is_int16(j.imm32_)) { |
nop(); |
addiu(rd, zero_reg, j.imm32_); |
- } else if (!(j.imm32_ & HIMask)) { |
+ } else if (!(j.imm32_ & kHiMask)) { |
nop(); |
ori(rd, zero_reg, j.imm32_); |
- } else if (!(j.imm32_ & LOMask)) { |
+ } else if (!(j.imm32_ & kImm16Mask)) { |
nop(); |
- lui(rd, (HIMask & j.imm32_) >> 16); |
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); |
} else { |
- lui(rd, (HIMask & j.imm32_) >> 16); |
- ori(rd, rd, (LOMask & j.imm32_)); |
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift); |
+ ori(rd, rd, (j.imm32_ & kImm16Mask)); |
} |
} |
} |
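// The patchable (gen2instr) form always emits exactly two instructions so |
// the constant can later be rewritten in place. The 32-bit immediate splits |
// as (a sketch): |
// |
//   uint32_t hi = imm >> 16;       // lui rd, hi |
//   uint32_t lo = imm & 0xFFFF;    // ori rd, rd, lo |
//   // Reassembled: (hi << 16) | lo == imm. |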
@@ -417,153 +656,772 @@ void MacroAssembler::MultiPopReversed(RegList regs) { |
} |
-// Emulated condtional branches do not emit a nop in the branch delay slot. |
+void MacroAssembler::Ext(Register rt, |
+ Register rs, |
+ uint16_t pos, |
+ uint16_t size) { |
+ ASSERT(pos < 32); |
+ ASSERT(pos + size <= 32); // The field may extend up to bit 31. |
-// Trashes the at register if no scratch register is provided. |
-void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs, |
- const Operand& rt, Register scratch) { |
- Register r2 = no_reg; |
- if (rt.is_reg()) { |
- // We don't want any other register but scratch clobbered. |
- ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_)); |
- r2 = rt.rm_; |
- } else if (cond != cc_always) { |
- // We don't want any other register but scratch clobbered. |
- ASSERT(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
+ if (mips32r2) { |
+ ext_(rt, rs, pos, size); |
+ } else { |
+ // Move rs to rt and shift it left then right to get the |
+ // desired bitfield on the right side and zeroes on the left. |
+ sll(rt, rs, 32 - (pos + size)); |
+ srl(rt, rt, 32 - size); |
} |
+} |
- switch (cond) { |
- case cc_always: |
- b(offset); |
- break; |
- case eq: |
- beq(rs, r2, offset); |
- break; |
- case ne: |
- bne(rs, r2, offset); |
- break; |
- // Signed comparison |
- case greater: |
- slt(scratch, r2, rs); |
- bne(scratch, zero_reg, offset); |
- break; |
- case greater_equal: |
- slt(scratch, rs, r2); |
- beq(scratch, zero_reg, offset); |
- break; |
- case less: |
- slt(scratch, rs, r2); |
- bne(scratch, zero_reg, offset); |
- break; |
- case less_equal: |
- slt(scratch, r2, rs); |
- beq(scratch, zero_reg, offset); |
- break; |
+void MacroAssembler::Ins(Register rt, |
+ Register rs, |
+ uint16_t pos, |
+ uint16_t size) { |
+ ASSERT(pos < 32); |
+ ASSERT(pos + size < 32); |
- // Unsigned comparison. |
- case Ugreater: |
- sltu(scratch, r2, rs); |
- bne(scratch, zero_reg, offset); |
- break; |
- case Ugreater_equal: |
- sltu(scratch, rs, r2); |
- beq(scratch, zero_reg, offset); |
- break; |
- case Uless: |
- sltu(scratch, rs, r2); |
- bne(scratch, zero_reg, offset); |
- break; |
- case Uless_equal: |
- sltu(scratch, r2, rs); |
- beq(scratch, zero_reg, offset); |
- break; |
+ if (mips32r2) { |
+ ins_(rt, rs, pos, size); |
+ } else { |
+ ASSERT(!rt.is(t8) && !rs.is(t8)); |
+ |
+ srl(t8, rt, pos + size); |
+ // The left chunk from rt that needs to |
+ // be saved is on the right side of t8. |
+ sll(at, t8, pos + size); |
+ // The 'at' register now contains the left chunk on |
+ // the left (proper position) and zeroes. |
+ sll(t8, rt, 32 - pos); |
+ // t8 now contains the right chunk on the left and zeroes. |
+ srl(t8, t8, 32 - pos); |
+ // t8 now contains the right chunk on |
+ // the right (proper position) and zeroes. |
+ or_(rt, at, t8); |
+ // rt now contains the left and right chunks from the original rt |
+ // in their proper position and zeroes in the middle. |
+ sll(t8, rs, 32 - size); |
+ // t8 now contains the chunk from rs on the left and zeroes. |
+ srl(t8, t8, 32 - size - pos); |
+ // t8 now contains the original chunk from rs in |
+ // the middle (proper position). |
+ or_(rt, rt, t8); |
+ // rt now contains the result of the ins instruction in R2 mode. |
+ } |
+} |
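// The semantics being emulated above are, in C++ (a sketch; size ranges |
// follow the asserts: up to 32 for Ext, below 32 for the emulated Ins): |
// |
//   uint32_t ext(uint32_t rs, unsigned pos, unsigned size) { |
//     return (rs << (32 - pos - size)) >> (32 - size); |
//   } |
//   uint32_t ins(uint32_t rt, uint32_t rs, unsigned pos, unsigned size) { |
//     uint32_t mask = ((1u << size) - 1) << pos; |
//     return (rt & ~mask) | ((rs << pos) & mask); |
//   } |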
- default: |
- UNREACHABLE(); |
+ |
+void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) { |
+ // Move the data from fs to t4. |
+ mfc1(t4, fs); |
+ Cvt_d_uw(fd, t4); |
+} |
+ |
+ |
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) { |
+ // Convert rs to a FP value in fd (and fd + 1). |
+ // We do this by converting rs with its MSB cleared to avoid a negative |
+ // sign, then adding 2^31 back as (2^31 - 1) + 1. |
+ |
+ ASSERT(!fd.is(f20)); |
+ ASSERT(!rs.is(t9)); |
+ ASSERT(!rs.is(t8)); |
+ |
+ // Save rs's MSB to t8 |
+ And(t8, rs, 0x80000000); |
+ // Remove rs's MSB. |
+ And(t9, rs, 0x7FFFFFFF); |
+ // Move t9 to fd |
+ mtc1(t9, fd); |
+ |
+ // Convert fd to a real FP value. |
+ cvt_d_w(fd, fd); |
+ |
+ Label conversion_done; |
+ |
+ // If rs's MSB was 0, it's done. |
+ // Otherwise we need to add that to the FP register. |
+ Branch(&conversion_done, eq, t8, Operand(zero_reg)); |
+ |
+ // First load 2^31 - 1 into f20. |
+ Or(t9, zero_reg, 0x7FFFFFFF); |
+ mtc1(t9, f20); |
+ |
+ // Convert it to FP and add it to fd. |
+ cvt_d_w(f20, f20); |
+ add_d(fd, fd, f20); |
+ // Now add 1. |
+ Or(t9, zero_reg, 1); |
+ mtc1(t9, f20); |
+ |
+ cvt_d_w(f20, f20); |
+ add_d(fd, fd, f20); |
+ bind(&conversion_done); |
+} |
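// The same trick in C++ terms (a sketch): convert the low 31 bits as a |
// signed value, then add 2^31 back as (2^31 - 1) + 1, because 2^31 itself |
// cannot be materialized as a positive int32: |
// |
//   double cvt_d_uw(uint32_t u) { |
//     double d = static_cast<double>(static_cast<int32_t>(u & 0x7FFFFFFF)); |
//     if (u & 0x80000000) d += 2147483647.0 + 1.0; |
//     return d; |
//   } |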
+ |
+ |
+void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) { |
+ Trunc_uw_d(fs, t4); |
+ mtc1(t4, fd); |
+} |
+ |
+ |
+void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) { |
+ ASSERT(!fd.is(f22)); |
+ ASSERT(!rs.is(t6)); |
+ |
+ // Load 2^31 into f22. |
+ Or(t6, zero_reg, 0x80000000); |
+ Cvt_d_uw(f22, t6); |
+ |
+ // Test if f22 > fd. |
+ c(OLT, D, fd, f22); |
+ |
+ Label simple_convert; |
+ // If fd < 2^31 we can convert it normally. |
+ bc1t(&simple_convert); |
+ |
+ // First we subtract 2^31 from fd, then trunc it to rs |
+ // and add 2^31 to rs. |
+ |
+ sub_d(f22, fd, f22); |
+ trunc_w_d(f22, f22); |
+ mfc1(rs, f22); |
+ or_(rs, rs, t6); |
+ |
+ Label done; |
+ Branch(&done); |
+ // Simple conversion. |
+ bind(&simple_convert); |
+ trunc_w_d(f22, fd); |
+ mfc1(rs, f22); |
+ |
+ bind(&done); |
+} |
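// The truncation above in C++ terms (a sketch): |
// |
//   uint32_t trunc_uw_d(double d) { |
//     if (d < 2147483648.0)  // Below 2^31: a plain signed truncation works. |
//       return static_cast<uint32_t>(static_cast<int32_t>(d)); |
//     return 0x80000000u | |
//            static_cast<uint32_t>(static_cast<int32_t>(d - 2147483648.0)); |
//   } |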
+ |
+ |
+// Tries to get a signed int32 out of a double precision floating point heap |
+// number. Rounds towards 0. Branches to 'not_int32' if the double is out of |
+// the 32-bit signed integer range. |
+// This method implementation differs from the ARM version for performance |
+// reasons. |
+void MacroAssembler::ConvertToInt32(Register source, |
+ Register dest, |
+ Register scratch, |
+ Register scratch2, |
+ FPURegister double_scratch, |
+ Label *not_int32) { |
+ Label right_exponent, done; |
+ // Get exponent word (ENDIAN issues). |
+ lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); |
+ // Get exponent alone in scratch2. |
+ And(scratch2, scratch, Operand(HeapNumber::kExponentMask)); |
+ // Load dest with zero. We use this either for the final shift or |
+ // for the answer. |
+ mov(dest, zero_reg); |
+ // Check whether the exponent matches a 32-bit signed int that is not a Smi. |
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is |
+ // the exponent that we are fastest at and also the highest exponent we can |
+ // handle here. |
+ const uint32_t non_smi_exponent = |
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
+ // If we have a match of the int32-but-not-Smi exponent then skip some logic. |
+ Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent)); |
+ // If the exponent is higher than that then go to not_int32 case. This |
+ // catches numbers that don't fit in a signed int32, infinities and NaNs. |
+ Branch(not_int32, gt, scratch2, Operand(non_smi_exponent)); |
+ |
+ // We know the exponent is smaller than 30 (biased). If it is less than |
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, |
+ // i.e. it rounds to zero. |
+ const uint32_t zero_exponent = |
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; |
+ Subu(scratch2, scratch2, Operand(zero_exponent)); |
+ // Dest already has a Smi zero. |
+ Branch(&done, lt, scratch2, Operand(zero_reg)); |
+ if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) { |
+ // We have a shifted exponent between 0 and 30 in scratch2. |
+ srl(dest, scratch2, HeapNumber::kExponentShift); |
+ // We now have the exponent in dest. Subtract from 30 to get |
+ // how much to shift down. |
+ li(at, Operand(30)); |
+ subu(dest, at, dest); |
} |
- // Emit a nop in the branch delay slot. |
- nop(); |
+ bind(&right_exponent); |
+ if (Isolate::Current()->cpu_features()->IsSupported(FPU)) { |
+ CpuFeatures::Scope scope(FPU); |
+ // MIPS FPU instructions implementing double precision to integer |
+ // conversion using round to zero. Since the FP value was qualified |
+ // above, the resulting integer should be a legal int32. |
+ // The original 'Exponent' word is still in scratch. |
+ lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
+ mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1)); |
+ trunc_w_d(double_scratch, double_scratch); |
+ mfc1(dest, double_scratch); |
+ } else { |
+ // On entry, dest has final downshift, scratch has original sign/exp/mant. |
+ // Save sign bit in top bit of dest. |
+ And(scratch2, scratch, Operand(0x80000000)); |
+ Or(dest, dest, Operand(scratch2)); |
+ // Put back the implicit 1, just above mantissa field. |
+ Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift)); |
+ |
+ // Shift up the mantissa bits to take up the space the exponent used to |
+ // take. The implicit 1 or'ed in above accounts for one bit and keeping |
+ // the sign bit clear accounts for another, so the shift distance is two |
+ // less than the non-mantissa bit count. To clear the sign bit we shift |
+ // one extra bit left, then shift right by one. |
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
+ sll(scratch, scratch, shift_distance + 1); |
+ srl(scratch, scratch, 1); |
+ |
+ // Get the second half of the double. For some exponents we don't |
+ // actually need this because the bits get shifted out again, but |
+ // it's probably slower to test than just to do it. |
+ lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
+ // Extract the top 10 bits and insert them as the bottom 10 bits of scratch. |
+ // The width of the field here is the same as the shift amount above. |
+ const int field_width = shift_distance; |
+ Ext(scratch2, scratch2, 32-shift_distance, field_width); |
+ Ins(scratch, scratch2, 0, field_width); |
+ // Move down according to the exponent. |
+ srlv(scratch, scratch, dest); |
+ // Prepare the negative version of our integer. |
+ subu(scratch2, zero_reg, scratch); |
+ // The sign bit (msb) was saved in dest; clz yields 0 there exactly when |
+ // the value is negative, and movz then selects the negated version. |
+ clz(dest, dest); |
+ movz(scratch, scratch2, dest); |
+ mov(dest, scratch); |
+ } |
+ bind(&done); |
+} |
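// For orientation: the exponent word read above has the IEEE-754 layout |
// sign(1) | biased exponent(11) | top 20 mantissa bits. A value in |
// [2^30, 2^31), for example 1.5 * 2^30, therefore satisfies |
// (word & kExponentMask) == ((kExponentBias + 30) << kExponentShift), |
// which is exactly the non_smi_exponent fast case tested first. |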
+ |
+ |
+// Emulated conditional branches do not emit a nop in the branch delay slot. |
+// |
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. |
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ |
+ (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ |
+ (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) |
+ |
+ |
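// For example, Branch(offset, greater, rs, Operand(rt)) with a register |
// operand lowers to the two-instruction pattern used throughout below: |
// |
//   slt  at, rt, rs            # at = (rt < rs) ? 1 : 0 |
//   bne  at, zero_reg, offset |
// |
// followed by a delay-slot nop when bdslot == PROTECT. |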
+void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { |
+ b(offset); |
+ |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
} |
-void MacroAssembler::Branch(Condition cond, Label* L, Register rs, |
- const Operand& rt, Register scratch) { |
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs, |
+ const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ BRANCH_ARGS_CHECK(cond, rs, rt); |
+ ASSERT(!rs.is(zero_reg)); |
Register r2 = no_reg; |
+ Register scratch = at; |
+ |
if (rt.is_reg()) { |
+ // We don't want any other register but scratch clobbered. |
+ ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_)); |
r2 = rt.rm_; |
- } else if (cond != cc_always) { |
- r2 = scratch; |
- li(r2, rt); |
+ switch (cond) { |
+ case cc_always: |
+ b(offset); |
+ break; |
+ case eq: |
+ beq(rs, r2, offset); |
+ break; |
+ case ne: |
+ bne(rs, r2, offset); |
+ break; |
+ // Signed comparison |
+ case greater: |
+ if (r2.is(zero_reg)) { |
+ bgtz(rs, offset); |
+ } else { |
+ slt(scratch, r2, rs); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case greater_equal: |
+ if (r2.is(zero_reg)) { |
+ bgez(rs, offset); |
+ } else { |
+ slt(scratch, rs, r2); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case less: |
+ if (r2.is(zero_reg)) { |
+ bltz(rs, offset); |
+ } else { |
+ slt(scratch, rs, r2); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case less_equal: |
+ if (r2.is(zero_reg)) { |
+ blez(rs, offset); |
+ } else { |
+ slt(scratch, r2, rs); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ // Unsigned comparison. |
+ case Ugreater: |
+ if (r2.is(zero_reg)) { |
+ bne(rs, zero_reg, offset); // Unsigned rs > 0 means rs != 0. |
+ } else { |
+ sltu(scratch, r2, rs); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Ugreater_equal: |
+ if (r2.is(zero_reg)) { |
+ b(offset); // Unsigned rs >= 0 is always true. |
+ } else { |
+ sltu(scratch, rs, r2); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Uless: |
+ if (r2.is(zero_reg)) { |
+ // No code needs to be emitted: unsigned rs < 0 is never true. |
+ } else { |
+ sltu(scratch, rs, r2); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Uless_equal: |
+ if (r2.is(zero_reg)) { |
+ beq(rs, zero_reg, offset); // Unsigned rs <= 0 means rs == 0. |
+ } else { |
+ sltu(scratch, r2, rs); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ default: |
+ UNREACHABLE(); |
+ } |
+ } else { |
+ // Be careful to always use shifted_branch_offset only just before the |
+ // branch instruction, as the location will be remembered for patching the |
+ // target. |
+ switch (cond) { |
+ case cc_always: |
+ b(offset); |
+ break; |
+ case eq: |
+ // We don't want any other register but scratch clobbered. |
+ ASSERT(!scratch.is(rs)); |
+ r2 = scratch; |
+ li(r2, rt); |
+ beq(rs, r2, offset); |
+ break; |
+ case ne: |
+ // We don't want any other register but scratch clobbered. |
+ ASSERT(!scratch.is(rs)); |
+ r2 = scratch; |
+ li(r2, rt); |
+ bne(rs, r2, offset); |
+ break; |
+ // Signed comparison |
+ case greater: |
+ if (rt.imm32_ == 0) { |
+ bgtz(rs, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ slt(scratch, r2, rs); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case greater_equal: |
+ if (rt.imm32_ == 0) { |
+ bgez(rs, offset); |
+ } else if (is_int16(rt.imm32_)) { |
+ slti(scratch, rs, rt.imm32_); |
+ beq(scratch, zero_reg, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ slt(scratch, rs, r2); // Signed comparison. |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case less: |
+ if (rt.imm32_ == 0) { |
+ bltz(rs, offset); |
+ } else if (is_int16(rt.imm32_)) { |
+ slti(scratch, rs, rt.imm32_); |
+ bne(scratch, zero_reg, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ slt(scratch, rs, r2); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case less_equal: |
+ if (rt.imm32_ == 0) { |
+ blez(rs, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ slt(scratch, r2, rs); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ // Unsigned comparison. |
+ case Ugreater: |
+ if (rt.imm32_ == 0) { |
+ bne(rs, zero_reg, offset); // Unsigned rs > 0 means rs != 0. |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ sltu(scratch, r2, rs); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Ugreater_equal: |
+ if (rt.imm32_ == 0) { |
+ b(offset); // Unsigned rs >= 0 is always true. |
+ } else if (is_int16(rt.imm32_)) { |
+ sltiu(scratch, rs, rt.imm32_); |
+ beq(scratch, zero_reg, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ sltu(scratch, rs, r2); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Uless: |
+ if (rt.imm32_ == 0) { |
+ // No code needs to be emitted: unsigned rs < 0 is never true. |
+ } else if (is_int16(rt.imm32_)) { |
+ sltiu(scratch, rs, rt.imm32_); |
+ bne(scratch, zero_reg, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ sltu(scratch, rs, r2); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Uless_equal: |
+ if (rt.imm32_ == 0) { |
+ beq(rs, zero_reg, offset); // Unsigned rs <= 0 means rs == 0. |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ sltu(scratch, r2, rs); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ default: |
+ UNREACHABLE(); |
+ } |
} |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
+} |
+ |
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) { |
// We use branch_offset as an argument for the branch instructions to be sure |
// it is called just before generating the branch instruction, as needed. |
- switch (cond) { |
- case cc_always: |
- b(shifted_branch_offset(L, false)); |
- break; |
- case eq: |
- beq(rs, r2, shifted_branch_offset(L, false)); |
- break; |
- case ne: |
- bne(rs, r2, shifted_branch_offset(L, false)); |
- break; |
+ b(shifted_branch_offset(L, false)); |
- // Signed comparison |
- case greater: |
- slt(scratch, r2, rs); |
- bne(scratch, zero_reg, shifted_branch_offset(L, false)); |
- break; |
- case greater_equal: |
- slt(scratch, rs, r2); |
- beq(scratch, zero_reg, shifted_branch_offset(L, false)); |
- break; |
- case less: |
- slt(scratch, rs, r2); |
- bne(scratch, zero_reg, shifted_branch_offset(L, false)); |
- break; |
- case less_equal: |
- slt(scratch, r2, rs); |
- beq(scratch, zero_reg, shifted_branch_offset(L, false)); |
- break; |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
+} |
- // Unsigned comparison. |
- case Ugreater: |
- sltu(scratch, r2, rs); |
- bne(scratch, zero_reg, shifted_branch_offset(L, false)); |
- break; |
- case Ugreater_equal: |
- sltu(scratch, rs, r2); |
- beq(scratch, zero_reg, shifted_branch_offset(L, false)); |
- break; |
- case Uless: |
- sltu(scratch, rs, r2); |
- bne(scratch, zero_reg, shifted_branch_offset(L, false)); |
- break; |
- case Uless_equal: |
- sltu(scratch, r2, rs); |
- beq(scratch, zero_reg, shifted_branch_offset(L, false)); |
- break; |
- default: |
- UNREACHABLE(); |
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs, |
+ const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ BRANCH_ARGS_CHECK(cond, rs, rt); |
+ |
+ int32_t offset; |
+ Register r2 = no_reg; |
+ Register scratch = at; |
+ if (rt.is_reg()) { |
+ r2 = rt.rm_; |
+ // Be careful to always use shifted_branch_offset only just before the |
+ // branch instruction, as the location will be remembered for patching the |
+ // target. |
+ switch (cond) { |
+ case cc_always: |
+ offset = shifted_branch_offset(L, false); |
+ b(offset); |
+ break; |
+ case eq: |
+ offset = shifted_branch_offset(L, false); |
+ beq(rs, r2, offset); |
+ break; |
+ case ne: |
+ offset = shifted_branch_offset(L, false); |
+ bne(rs, r2, offset); |
+ break; |
+ // Signed comparison |
+ case greater: |
+ if (r2.is(zero_reg)) { |
+ offset = shifted_branch_offset(L, false); |
+ bgtz(rs, offset); |
+ } else { |
+ slt(scratch, r2, rs); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case greater_equal: |
+ if (r2.is(zero_reg)) { |
+ offset = shifted_branch_offset(L, false); |
+ bgez(rs, offset); |
+ } else { |
+ slt(scratch, rs, r2); |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case less: |
+ if (r2.is(zero_reg)) { |
+ offset = shifted_branch_offset(L, false); |
+ bltz(rs, offset); |
+ } else { |
+ slt(scratch, rs, r2); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case less_equal: |
+ if (r2.is(zero_reg)) { |
+ offset = shifted_branch_offset(L, false); |
+ blez(rs, offset); |
+ } else { |
+ slt(scratch, r2, rs); |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ // Unsigned comparison. |
+ case Ugreater: |
+ if (r2.is(zero_reg)) { |
+ offset = shifted_branch_offset(L, false); |
+ bne(rs, zero_reg, offset); // Unsigned rs > 0 means rs != 0. |
+ } else { |
+ sltu(scratch, r2, rs); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Ugreater_equal: |
+ if (r2.is(zero_reg)) { |
+ offset = shifted_branch_offset(L, false); |
+ b(offset); // Unsigned rs >= 0 is always true. |
+ } else { |
+ sltu(scratch, rs, r2); |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Uless: |
+ if (r2.is(zero_reg)) { |
+ // No code needs to be emitted: unsigned rs < 0 is never true. |
+ offset = 0; // Satisfy the is_int16(offset) check below. |
+ } else { |
+ sltu(scratch, rs, r2); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Uless_equal: |
+ if (r2.is(zero_reg)) { |
+ offset = shifted_branch_offset(L, false); |
+ beq(rs, zero_reg, offset); // Unsigned rs <= 0 means rs == 0. |
+ } else { |
+ sltu(scratch, r2, rs); |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ default: |
+ UNREACHABLE(); |
+ } |
+ } else { |
+ // Be careful to always use shifted_branch_offset only just before the |
+ // branch instruction, as the location will be remembered for patching the |
+ // target. |
+ switch (cond) { |
+ case cc_always: |
+ offset = shifted_branch_offset(L, false); |
+ b(offset); |
+ break; |
+ case eq: |
+ r2 = scratch; |
+ li(r2, rt); |
+ offset = shifted_branch_offset(L, false); |
+ beq(rs, r2, offset); |
+ break; |
+ case ne: |
+ r2 = scratch; |
+ li(r2, rt); |
+ offset = shifted_branch_offset(L, false); |
+ bne(rs, r2, offset); |
+ break; |
+ // Signed comparison |
+ case greater: |
+ if (rt.imm32_ == 0) { |
+ offset = shifted_branch_offset(L, false); |
+ bgtz(rs, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ slt(scratch, r2, rs); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case greater_equal: |
+ if (rt.imm32_ == 0) { |
+ offset = shifted_branch_offset(L, false); |
+ bgez(rs, offset); |
+ } else if (is_int16(rt.imm32_)) { |
+ slti(scratch, rs, rt.imm32_); |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ slt(scratch, rs, r2); // Signed comparison. |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case less: |
+ if (rt.imm32_ == 0) { |
+ offset = shifted_branch_offset(L, false); |
+ bltz(rs, offset); |
+ } else if (is_int16(rt.imm32_)) { |
+ slti(scratch, rs, rt.imm32_); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ slt(scratch, rs, r2); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case less_equal: |
+ if (rt.imm32_ == 0) { |
+ offset = shifted_branch_offset(L, false); |
+ blez(rs, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ slt(scratch, r2, rs); |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ // Unsigned comparison. |
+ case Ugreater: |
+ if (rt.imm32_ == 0) { |
+ offset = shifted_branch_offset(L, false); |
+ bne(rs, zero_reg, offset); // Unsigned rs > 0 means rs != 0. |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ sltu(scratch, r2, rs); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Ugreater_equal: |
+ if (rt.imm32_ == 0) { |
+ offset = shifted_branch_offset(L, false); |
+ b(offset); // Unsigned rs >= 0 is always true. |
+ } else if (is_int16(rt.imm32_)) { |
+ sltiu(scratch, rs, rt.imm32_); |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ sltu(scratch, rs, r2); |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Uless: |
+ if (rt.imm32_ == 0) { |
+ // No code needs to be emitted: unsigned rs < 0 is never true. |
+ offset = 0; // Satisfy the is_int16(offset) check below. |
+ } else if (is_int16(rt.imm32_)) { |
+ sltiu(scratch, rs, rt.imm32_); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ sltu(scratch, rs, r2); |
+ offset = shifted_branch_offset(L, false); |
+ bne(scratch, zero_reg, offset); |
+ } |
+ break; |
+ case Uless_equal: |
+ if (rt.imm32_ == 0) { |
+ offset = shifted_branch_offset(L, false); |
+ beq(rs, zero_reg, offset); // Unsigned rs <= 0 means rs == 0. |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ sltu(scratch, r2, rs); |
+ offset = shifted_branch_offset(L, false); |
+ beq(scratch, zero_reg, offset); |
+ } |
+ break; |
+ default: |
+ UNREACHABLE(); |
+ } |
} |
- // Emit a nop in the branch delay slot. |
- nop(); |
+ // Check that the offset actually fits in an int16_t. |
+ ASSERT(is_int16(offset)); |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
} |
-// Trashes the at register if no scratch register is provided. |
// We need to use a bgezal or bltzal, but they can't be used directly with the |
// slt instructions. We could use sub or add instead but we would miss overflow |
// cases, so we keep slt and add an intermediate third instruction. |
-void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs, |
- const Operand& rt, Register scratch) { |
+void MacroAssembler::BranchAndLink(int16_t offset, |
+ BranchDelaySlot bdslot) { |
+ bal(offset); |
+ |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
+} |
+ |
+ |
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs, |
+ const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ BRANCH_ARGS_CHECK(cond, rs, rt); |
Register r2 = no_reg; |
+ Register scratch = at; |
+ |
if (rt.is_reg()) { |
r2 = rt.rm_; |
} else if (cond != cc_always) { |
@@ -633,14 +1491,29 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs, |
default: |
UNREACHABLE(); |
} |
- // Emit a nop in the branch delay slot. |
- nop(); |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
+} |
+ |
+ |
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { |
+ bal(shifted_branch_offset(L, false)); |
+ |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
} |
-void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs, |
- const Operand& rt, Register scratch) { |
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, |
+ const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ BRANCH_ARGS_CHECK(cond, rs, rt); |
+ |
+ int32_t offset; |
Register r2 = no_reg; |
+ Register scratch = at; |
if (rt.is_reg()) { |
r2 = rt.rm_; |
} else if (cond != cc_always) { |
@@ -650,157 +1523,280 @@ void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs, |
switch (cond) { |
case cc_always: |
- bal(shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bal(offset); |
break; |
case eq: |
bne(rs, r2, 2); |
nop(); |
- bal(shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bal(offset); |
break; |
case ne: |
beq(rs, r2, 2); |
nop(); |
- bal(shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bal(offset); |
break; |
// Signed comparison |
case greater: |
slt(scratch, r2, rs); |
addiu(scratch, scratch, -1); |
- bgezal(scratch, shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bgezal(scratch, offset); |
break; |
case greater_equal: |
slt(scratch, rs, r2); |
addiu(scratch, scratch, -1); |
- bltzal(scratch, shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bltzal(scratch, offset); |
break; |
case less: |
slt(scratch, rs, r2); |
addiu(scratch, scratch, -1); |
- bgezal(scratch, shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bgezal(scratch, offset); |
break; |
case less_equal: |
slt(scratch, r2, rs); |
addiu(scratch, scratch, -1); |
- bltzal(scratch, shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bltzal(scratch, offset); |
break; |
// Unsigned comparison. |
case Ugreater: |
sltu(scratch, r2, rs); |
addiu(scratch, scratch, -1); |
- bgezal(scratch, shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bgezal(scratch, offset); |
break; |
case Ugreater_equal: |
sltu(scratch, rs, r2); |
addiu(scratch, scratch, -1); |
- bltzal(scratch, shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bltzal(scratch, offset); |
break; |
case Uless: |
sltu(scratch, rs, r2); |
addiu(scratch, scratch, -1); |
- bgezal(scratch, shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bgezal(scratch, offset); |
break; |
case Uless_equal: |
sltu(scratch, r2, rs); |
addiu(scratch, scratch, -1); |
- bltzal(scratch, shifted_branch_offset(L, false)); |
+ offset = shifted_branch_offset(L, false); |
+ bltzal(scratch, offset); |
break; |
default: |
UNREACHABLE(); |
} |
- // Emit a nop in the branch delay slot. |
- nop(); |
+ |
+ // Check that the offset actually fits in an int16_t. |
+ ASSERT(is_int16(offset)); |
+ |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
+} |
+ |
+ |
+void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) { |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
+ if (target.is_reg()) { |
+ jr(target.rm()); |
+ } else { |
+ if (!MustUseReg(target.rmode_)) { |
+ j(target.imm32_); |
+ } else { |
+ li(t9, target); |
+ jr(t9); |
+ } |
+ } |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
} |
void MacroAssembler::Jump(const Operand& target, |
- Condition cond, Register rs, const Operand& rt) { |
+ Condition cond, Register rs, const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
+ BRANCH_ARGS_CHECK(cond, rs, rt); |
if (target.is_reg()) { |
if (cond == cc_always) { |
jr(target.rm()); |
} else { |
- Branch(NegateCondition(cond), 2, rs, rt); |
+ Branch(2, NegateCondition(cond), rs, rt); |
jr(target.rm()); |
} |
- } else { // !target.is_reg() |
- if (!MustUseAt(target.rmode_)) { |
+ } else { // Not register target. |
+ if (!MustUseReg(target.rmode_)) { |
if (cond == cc_always) { |
j(target.imm32_); |
} else { |
- Branch(NegateCondition(cond), 2, rs, rt); |
+ Branch(2, NegateCondition(cond), rs, rt); |
j(target.imm32_); // Will generate only one instruction. |
} |
- } else { // MustUseAt(target) |
- li(at, target); |
+ } else { // MustUseReg(target) |
+ li(t9, target); |
if (cond == cc_always) { |
- jr(at); |
+ jr(t9); |
} else { |
- Branch(NegateCondition(cond), 2, rs, rt); |
- jr(at); // Will generate only one instruction. |
+ Branch(2, NegateCondition(cond), rs, rt); |
+ jr(t9); // Will generate only one instruction. |
} |
} |
} |
- // Emit a nop in the branch delay slot. |
- nop(); |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
+} |
+ |
+ |
+// Note: To call gcc-compiled C code on MIPS, you must call through t9. |
+void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) { |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
+ if (target.is_reg()) { |
+ jalr(target.rm()); |
+ } else { // !target.is_reg() |
+ if (!MustUseReg(target.rmode_)) { |
+ jal(target.imm32_); |
+ } else { // MustUseReg(target) |
+ li(t9, target); |
+ jalr(t9); |
+ } |
+ } |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
} |
+// Note: To call gcc-compiled C code on MIPS, you must call through t9. |
void MacroAssembler::Call(const Operand& target, |
- Condition cond, Register rs, const Operand& rt) { |
+ Condition cond, Register rs, const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
+ BRANCH_ARGS_CHECK(cond, rs, rt); |
if (target.is_reg()) { |
if (cond == cc_always) { |
jalr(target.rm()); |
} else { |
- Branch(NegateCondition(cond), 2, rs, rt); |
+ Branch(2, NegateCondition(cond), rs, rt); |
jalr(target.rm()); |
} |
} else { // !target.is_reg() |
- if (!MustUseAt(target.rmode_)) { |
+ if (!MustUseReg(target.rmode_)) { |
if (cond == cc_always) { |
jal(target.imm32_); |
} else { |
- Branch(NegateCondition(cond), 2, rs, rt); |
+ Branch(2, NegateCondition(cond), rs, rt); |
jal(target.imm32_); // Will generate only one instruction. |
} |
- } else { // MustUseAt(target) |
- li(at, target); |
+ } else { // MustUseReg(target) |
+ li(t9, target); |
if (cond == cc_always) { |
- jalr(at); |
+ jalr(t9); |
} else { |
- Branch(NegateCondition(cond), 2, rs, rt); |
- jalr(at); // Will generate only one instruction. |
+ Branch(2, NegateCondition(cond), rs, rt); |
+ jalr(t9); // Will generate only one instruction. |
} |
} |
} |
- // Emit a nop in the branch delay slot. |
- nop(); |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) |
+ nop(); |
} |
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) { |
- UNIMPLEMENTED_MIPS(); |
+ |
+void MacroAssembler::Drop(int count, |
+ Condition cond, |
+ Register reg, |
+ const Operand& op) { |
+ if (count <= 0) { |
+ return; |
+ } |
+ |
+ Label skip; |
+ |
+ if (cond != al) { |
+ Branch(&skip, NegateCondition(cond), reg, op); |
+ } |
+ |
+ addiu(sp, sp, count * kPointerSize); |
+ |
+ if (cond != al) { |
+ bind(&skip); |
+ } |
} |
-void MacroAssembler::Drop(int count, Condition cond) { |
- UNIMPLEMENTED_MIPS(); |
+void MacroAssembler::DropAndRet(int drop, |
+ Condition cond, |
+ Register r1, |
+ const Operand& r2) { |
+ // This is a workaround to make sure only one branch instruction is |
+ // generated. It relies on Drop and Ret not creating branches if |
+ // cond == cc_always. |
+ Label skip; |
+ if (cond != cc_always) { |
+ Branch(&skip, NegateCondition(cond), r1, r2); |
+ } |
+ |
+ Drop(drop); |
+ Ret(); |
+ |
+ if (cond != cc_always) { |
+ bind(&skip); |
+ } |
+} |
+ |
+ |
+void MacroAssembler::Swap(Register reg1, |
+ Register reg2, |
+ Register scratch) { |
+ if (scratch.is(no_reg)) { |
+ Xor(reg1, reg1, Operand(reg2)); |
+ Xor(reg2, reg2, Operand(reg1)); |
+ Xor(reg1, reg1, Operand(reg2)); |
+ } else { |
+ mov(scratch, reg1); |
+ mov(reg1, reg2); |
+ mov(reg2, scratch); |
+ } |
} |
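// The scratch-free path above is the classic XOR swap; note it is only |
// valid for two distinct registers (a sketch): |
// |
//   void swap(uint32_t* a, uint32_t* b) { |
//     if (a == b) return;  // a ^= a would zero the value. |
//     *a ^= *b; |
//     *b ^= *a; |
//     *a ^= *b; |
//   } |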
void MacroAssembler::Call(Label* target) { |
- UNIMPLEMENTED_MIPS(); |
+ BranchAndLink(target); |
+} |
+ |
+ |
+void MacroAssembler::Move(Register dst, Register src) { |
+ if (!dst.is(src)) { |
+ mov(dst, src); |
+ } |
} |
#ifdef ENABLE_DEBUGGER_SUPPORT |
- // --------------------------------------------------------------------------- |
- // Debugger Support |
- void MacroAssembler::DebugBreak() { |
- UNIMPLEMENTED_MIPS(); |
- } |
-#endif |
+void MacroAssembler::DebugBreak() { |
+ ASSERT(allow_stub_calls()); |
+ mov(a0, zero_reg); |
+ li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); |
+ CEntryStub ces(1); |
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); |
+} |
+ |
+#endif // ENABLE_DEBUGGER_SUPPORT |
// --------------------------------------------------------------------------- |
@@ -822,7 +1818,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location, |
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize |
&& StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
// Save the current handler as the next handler. |
- LoadExternalReference(t2, ExternalReference(Isolate::k_handler_address)); |
+ li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate()))); |
lw(t1, MemOperand(t2)); |
addiu(sp, sp, -StackHandlerConstants::kSize); |
@@ -848,7 +1844,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location, |
li(t0, Operand(StackHandler::ENTRY)); |
// Save the current handler as the next handler. |
- LoadExternalReference(t2, ExternalReference(Isolate::k_handler_address)); |
+ li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate()))); |
lw(t1, MemOperand(t2)); |
addiu(sp, sp, -StackHandlerConstants::kSize); |
@@ -864,45 +1860,377 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location, |
void MacroAssembler::PopTryHandler() { |
- UNIMPLEMENTED_MIPS(); |
+ ASSERT_EQ(0, StackHandlerConstants::kNextOffset); |
+ pop(a1); |
+ Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); |
+ li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate()))); |
+ sw(a1, MemOperand(at)); |
+} |
+ |
+ |
+void MacroAssembler::AllocateInNewSpace(int object_size, |
+ Register result, |
+ Register scratch1, |
+ Register scratch2, |
+ Label* gc_required, |
+ AllocationFlags flags) { |
+ if (!FLAG_inline_new) { |
+ if (FLAG_debug_code) { |
+ // Trash the registers to simulate an allocation failure. |
+ li(result, 0x7091); |
+ li(scratch1, 0x7191); |
+ li(scratch2, 0x7291); |
+ } |
+ jmp(gc_required); |
+ return; |
+ } |
+ |
+ ASSERT(!result.is(scratch1)); |
+ ASSERT(!result.is(scratch2)); |
+ ASSERT(!scratch1.is(scratch2)); |
+ ASSERT(!scratch1.is(t9)); |
+ ASSERT(!scratch2.is(t9)); |
+ ASSERT(!result.is(t9)); |
+ |
+ // Make object size into bytes. |
+ if ((flags & SIZE_IN_WORDS) != 0) { |
+ object_size *= kPointerSize; |
+ } |
+ ASSERT_EQ(0, object_size & kObjectAlignmentMask); |
+ |
+ // Check relative positions of allocation top and limit addresses. |
+ // ARM adds additional checks to make sure the ldm instruction can be |
+ // used. On MIPS we don't have ldm, so no additional checks are needed. |
+ ExternalReference new_space_allocation_top = |
+ ExternalReference::new_space_allocation_top_address(isolate()); |
+ ExternalReference new_space_allocation_limit = |
+ ExternalReference::new_space_allocation_limit_address(isolate()); |
+ intptr_t top = |
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address()); |
+ intptr_t limit = |
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address()); |
+ ASSERT((limit - top) == kPointerSize); |
+ |
+ // Set up allocation top address and object size registers. |
+ Register topaddr = scratch1; |
+ Register obj_size_reg = scratch2; |
+ li(topaddr, Operand(new_space_allocation_top)); |
+ li(obj_size_reg, Operand(object_size)); |
+ |
+ // This code stores a temporary value in t9. |
+ if ((flags & RESULT_CONTAINS_TOP) == 0) { |
+ // Load allocation top into result and allocation limit into t9. |
+ lw(result, MemOperand(topaddr)); |
+ lw(t9, MemOperand(topaddr, kPointerSize)); |
+ } else { |
+ if (FLAG_debug_code) { |
+ // Assert that result actually contains top on entry. t9 is used |
+ // immediately below, so this use of t9 does not make register contents |
+ // differ between debug and release builds. |
+ lw(t9, MemOperand(topaddr)); |
+ Check(eq, "Unexpected allocation top", result, Operand(t9)); |
+ } |
+ // Load allocation limit into t9. Result already contains allocation top. |
+ lw(t9, MemOperand(topaddr, limit - top)); |
+ } |
+ |
+ // Calculate new top and bail out if new space is exhausted. Use result |
+ // to calculate the new top. |
+ Addu(scratch2, result, Operand(obj_size_reg)); |
+ Branch(gc_required, Ugreater, scratch2, Operand(t9)); |
+ sw(scratch2, MemOperand(topaddr)); |
+ |
+ // Tag object if requested. |
+ if ((flags & TAG_OBJECT) != 0) { |
+ Addu(result, result, Operand(kHeapObjectTag)); |
+ } |
} |
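// The fast path above is plain bump-pointer allocation; in C++ terms (a |
// sketch; the caller branches to gc_required when NULL is returned): |
// |
//   char* allocate(size_t size, char** top, char* limit) { |
//     char* result = *top; |
//     if (result + size > limit) return NULL; |
//     *top = result + size; |
//     return result;  // Plus kHeapObjectTag when TAG_OBJECT is set. |
//   } |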
+void MacroAssembler::AllocateInNewSpace(Register object_size, |
+ Register result, |
+ Register scratch1, |
+ Register scratch2, |
+ Label* gc_required, |
+ AllocationFlags flags) { |
+ if (!FLAG_inline_new) { |
+ if (FLAG_debug_code) { |
+ // Trash the registers to simulate an allocation failure. |
+ li(result, 0x7091); |
+ li(scratch1, 0x7191); |
+ li(scratch2, 0x7291); |
+ } |
+ jmp(gc_required); |
+ return; |
+ } |
-// ----------------------------------------------------------------------------- |
-// Activation frames |
+ ASSERT(!result.is(scratch1)); |
+ ASSERT(!result.is(scratch2)); |
+ ASSERT(!scratch1.is(scratch2)); |
+ ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9)); |
+ |
+ // Check relative positions of allocation top and limit addresses. |
+ // ARM adds additional checks to make sure the ldm instruction can be
+ // used. On MIPS there is no ldm, so those extra checks are unnecessary.
+ ExternalReference new_space_allocation_top = |
+ ExternalReference::new_space_allocation_top_address(isolate()); |
+ ExternalReference new_space_allocation_limit = |
+ ExternalReference::new_space_allocation_limit_address(isolate()); |
+ intptr_t top = |
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address()); |
+ intptr_t limit = |
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address()); |
+ ASSERT((limit - top) == kPointerSize); |
+ |
+ // Set up allocation top address and object size registers. |
+ Register topaddr = scratch1; |
+ li(topaddr, Operand(new_space_allocation_top)); |
+ |
+ // This code stores a temporary value in t9. |
+ if ((flags & RESULT_CONTAINS_TOP) == 0) { |
+ // Load allocation top into result and allocation limit into t9. |
+ lw(result, MemOperand(topaddr)); |
+ lw(t9, MemOperand(topaddr, kPointerSize)); |
+ } else { |
+ if (FLAG_debug_code) { |
+ // Assert that result actually contains top on entry. t9 is used
+ // immediately below, so this use of t9 does not cause a difference in
+ // register content between debug and release mode.
+ lw(t9, MemOperand(topaddr)); |
+ Check(eq, "Unexpected allocation top", result, Operand(t9)); |
+ } |
+ // Load allocation limit into t9. Result already contains allocation top. |
+ lw(t9, MemOperand(topaddr, limit - top)); |
+ } |
-void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) { |
- Label extra_push, end; |
+ // Calculate new top and bail out if new space is exhausted. Use result |
+ // to calculate the new top. Object size may be in words so a shift is |
+ // required to get the number of bytes. |
+ if ((flags & SIZE_IN_WORDS) != 0) { |
+ sll(scratch2, object_size, kPointerSizeLog2); |
+ Addu(scratch2, result, scratch2); |
+ } else { |
+ Addu(scratch2, result, Operand(object_size)); |
+ } |
+ Branch(gc_required, Ugreater, scratch2, Operand(t9)); |
+ |
+ // Update allocation top. result temporarily holds the new top. |
+ if (FLAG_debug_code) { |
+ And(t9, scratch2, Operand(kObjectAlignmentMask)); |
+ Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg)); |
+ } |
+ sw(scratch2, MemOperand(topaddr)); |
- andi(scratch, sp, 7); |
+ // Tag object if requested. |
+ if ((flags & TAG_OBJECT) != 0) { |
+ Addu(result, result, Operand(kHeapObjectTag)); |
+ } |
+} |
- // We check for args and receiver size on the stack, all of them word sized. |
- // We add one for sp, that we also want to store on the stack. |
- if (((arg_count + 1) % kPointerSizeLog2) == 0) { |
- Branch(ne, &extra_push, at, Operand(zero_reg)); |
- } else { // ((arg_count + 1) % 2) == 1 |
- Branch(eq, &extra_push, at, Operand(zero_reg)); |
+ |
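+// Reverts a preceding AllocateInNewSpace by writing the untagged object
+// address back as the current new-space allocation top.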
+void MacroAssembler::UndoAllocationInNewSpace(Register object, |
+ Register scratch) { |
+ ExternalReference new_space_allocation_top = |
+ ExternalReference::new_space_allocation_top_address(isolate()); |
+ |
+ // Make sure the object has no tag before resetting top. |
+ And(object, object, Operand(~kHeapObjectTagMask)); |
+#ifdef DEBUG |
+ // Check that the object un-allocated is below the current top. |
+ li(scratch, Operand(new_space_allocation_top)); |
+ lw(scratch, MemOperand(scratch)); |
+ Check(less, "Undo allocation of non allocated memory", |
+ object, Operand(scratch)); |
+#endif |
+ // Write the address of the object to un-allocate as the current top. |
+ li(scratch, Operand(new_space_allocation_top)); |
+ sw(object, MemOperand(scratch)); |
+} |
+ |
+ |
+void MacroAssembler::AllocateTwoByteString(Register result, |
+ Register length, |
+ Register scratch1, |
+ Register scratch2, |
+ Register scratch3, |
+ Label* gc_required) { |
+ // Calculate the number of bytes needed for the characters in the string
+ // while observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
+ sll(scratch1, length, 1); // Length in bytes, not chars. |
+ addiu(scratch1, scratch1, |
+ kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); |
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); |
+ |
+ // Allocate two-byte string in new space. |
+ AllocateInNewSpace(scratch1, |
+ result, |
+ scratch2, |
+ scratch3, |
+ gc_required, |
+ TAG_OBJECT); |
+ |
+ // Set the map, length and hash field. |
+ InitializeNewString(result, |
+ length, |
+ Heap::kStringMapRootIndex, |
+ scratch1, |
+ scratch2); |
+} |
+ |
+ |
+void MacroAssembler::AllocateAsciiString(Register result, |
+ Register length, |
+ Register scratch1, |
+ Register scratch2, |
+ Register scratch3, |
+ Label* gc_required) { |
+ // Calculate the number of bytes needed for the characters in the string |
+ // while observing object alignment. |
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); |
+ ASSERT(kCharSize == 1); |
+ addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize); |
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); |
+ |
+ // Allocate ASCII string in new space. |
+ AllocateInNewSpace(scratch1, |
+ result, |
+ scratch2, |
+ scratch3, |
+ gc_required, |
+ TAG_OBJECT); |
+ |
+ // Set the map, length and hash field. |
+ InitializeNewString(result, |
+ length, |
+ Heap::kAsciiStringMapRootIndex, |
+ scratch1, |
+ scratch2); |
+} |
+ |
+ |
+void MacroAssembler::AllocateTwoByteConsString(Register result, |
+ Register length, |
+ Register scratch1, |
+ Register scratch2, |
+ Label* gc_required) { |
+ AllocateInNewSpace(ConsString::kSize, |
+ result, |
+ scratch1, |
+ scratch2, |
+ gc_required, |
+ TAG_OBJECT); |
+ InitializeNewString(result, |
+ length, |
+ Heap::kConsStringMapRootIndex, |
+ scratch1, |
+ scratch2); |
+} |
+ |
+ |
+void MacroAssembler::AllocateAsciiConsString(Register result, |
+ Register length, |
+ Register scratch1, |
+ Register scratch2, |
+ Label* gc_required) { |
+ AllocateInNewSpace(ConsString::kSize, |
+ result, |
+ scratch1, |
+ scratch2, |
+ gc_required, |
+ TAG_OBJECT); |
+ InitializeNewString(result, |
+ length, |
+ Heap::kConsAsciiStringMapRootIndex, |
+ scratch1, |
+ scratch2); |
+} |
+ |
+ |
+// Allocates a heap number or jumps to the label if the young space is full and |
+// a scavenge is needed. |
+void MacroAssembler::AllocateHeapNumber(Register result, |
+ Register scratch1, |
+ Register scratch2, |
+ Register heap_number_map, |
+ Label* need_gc) { |
+ // Allocate an object in the heap for the heap number and tag it as a heap |
+ // object. |
+ AllocateInNewSpace(HeapNumber::kSize, |
+ result, |
+ scratch1, |
+ scratch2, |
+ need_gc, |
+ TAG_OBJECT); |
+ |
+ // Store heap number map in the allocated object. |
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); |
+} |
+ |
+ |
+void MacroAssembler::AllocateHeapNumberWithValue(Register result, |
+ FPURegister value, |
+ Register scratch1, |
+ Register scratch2, |
+ Label* gc_required) { |
+ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); |
+ AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required); |
+ sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); |
+} |
+ |
+ |
+// Copies a fixed number of fields of heap objects from src to dst. |
+void MacroAssembler::CopyFields(Register dst, |
+ Register src, |
+ RegList temps, |
+ int field_count) { |
+ ASSERT((temps & dst.bit()) == 0); |
+ ASSERT((temps & src.bit()) == 0); |
+ // Primitive implementation using only one temporary register. |
+ |
+ Register tmp = no_reg; |
+ // Find a temp register in temps list. |
+ for (int i = 0; i < kNumRegisters; i++) { |
+ if ((temps & (1 << i)) != 0) { |
+ tmp.code_ = i; |
+ break; |
+ } |
} |
+ ASSERT(!tmp.is(no_reg)); |
- // Save sp on the stack. |
- mov(scratch, sp); |
- Push(scratch); |
- b(&end); |
+ for (int i = 0; i < field_count; i++) { |
+ lw(tmp, FieldMemOperand(src, i * kPointerSize)); |
+ sw(tmp, FieldMemOperand(dst, i * kPointerSize)); |
+ } |
+} |
- // Align before saving sp on the stack. |
- bind(&extra_push); |
- mov(scratch, sp); |
- addiu(sp, sp, -8); |
- sw(scratch, MemOperand(sp)); |
- // The stack is aligned and sp is stored on the top. |
- bind(&end); |
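+// Jumps to fail if the map of obj is not the given map. Unless
+// is_heap_object is true, a smi obj also jumps to fail.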
+void MacroAssembler::CheckMap(Register obj, |
+ Register scratch, |
+ Handle<Map> map, |
+ Label* fail, |
+ bool is_heap_object) { |
+ if (!is_heap_object) { |
+ JumpIfSmi(obj, fail); |
+ } |
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
+ li(at, Operand(map)); |
+ Branch(fail, ne, scratch, Operand(at)); |
} |
-void MacroAssembler::ReturnFromAlignedCall() { |
- lw(sp, MemOperand(sp)); |
+void MacroAssembler::CheckMap(Register obj, |
+ Register scratch, |
+ Heap::RootListIndex index, |
+ Label* fail, |
+ bool is_heap_object) { |
+ if (!is_heap_object) { |
+ JumpIfSmi(obj, fail); |
+ } |
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
+ LoadRoot(at, index); |
+ Branch(fail, ne, scratch, Operand(at)); |
} |
@@ -914,7 +2242,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
Handle<Code> code_constant, |
Register code_reg, |
Label* done, |
- InvokeFlag flag) { |
+ InvokeFlag flag, |
+ PostCallGenerator* post_call_generator) { |
bool definitely_matches = false; |
Label regular_invoke; |
@@ -949,11 +2278,13 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
li(a2, Operand(expected.immediate())); |
} |
} |
- } else if (actual.is_immediate()) { |
- Branch(eq, ®ular_invoke, expected.reg(), Operand(actual.immediate())); |
- li(a0, Operand(actual.immediate())); |
} else { |
- Branch(eq, ®ular_invoke, expected.reg(), Operand(actual.reg())); |
+ if (actual.is_immediate()) { |
+ Branch(®ular_invoke, eq, expected.reg(), Operand(actual.immediate())); |
+ li(a0, Operand(actual.immediate())); |
+ } else { |
+ Branch(®ular_invoke, eq, expected.reg(), Operand(actual.reg())); |
+ } |
} |
if (!definitely_matches) { |
@@ -962,25 +2293,29 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); |
} |
- ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline); |
+ Handle<Code> adaptor = |
+ isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
if (flag == CALL_FUNCTION) { |
- CallBuiltin(adaptor); |
- b(done); |
- nop(); |
+ Call(adaptor, RelocInfo::CODE_TARGET); |
+ if (post_call_generator != NULL) post_call_generator->Generate(); |
+ jmp(done); |
} else { |
- JumpToBuiltin(adaptor); |
+ Jump(adaptor, RelocInfo::CODE_TARGET); |
} |
bind(®ular_invoke); |
} |
} |
+ |
void MacroAssembler::InvokeCode(Register code, |
const ParameterCount& expected, |
const ParameterCount& actual, |
- InvokeFlag flag) { |
+ InvokeFlag flag, |
+ PostCallGenerator* post_call_generator) { |
Label done; |
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag); |
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, |
+ post_call_generator); |
if (flag == CALL_FUNCTION) { |
Call(code); |
} else { |
@@ -1014,7 +2349,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code, |
void MacroAssembler::InvokeFunction(Register function, |
const ParameterCount& actual, |
- InvokeFlag flag) { |
+ InvokeFlag flag, |
+ PostCallGenerator* post_call_generator) { |
// Contract with called JS functions requires that function is passed in a1. |
ASSERT(function.is(a1)); |
Register expected_reg = a2; |
@@ -1025,68 +2361,120 @@ void MacroAssembler::InvokeFunction(Register function, |
lw(expected_reg, |
FieldMemOperand(code_reg, |
SharedFunctionInfo::kFormalParameterCountOffset)); |
- lw(code_reg, |
- MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag)); |
- addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag); |
+ sra(expected_reg, expected_reg, kSmiTagSize); |
+ lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); |
ParameterCount expected(expected_reg); |
- InvokeCode(code_reg, expected, actual, flag); |
+ InvokeCode(code_reg, expected, actual, flag, post_call_generator); |
+} |
+ |
+ |
+void MacroAssembler::InvokeFunction(JSFunction* function, |
+ const ParameterCount& actual, |
+ InvokeFlag flag) { |
+ ASSERT(function->is_compiled()); |
+ |
+ // Get the function and setup the context. |
+ li(a1, Operand(Handle<JSFunction>(function))); |
+ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
+ |
+ // Invoke the cached code. |
+ Handle<Code> code(function->code()); |
+ ParameterCount expected(function->shared()->formal_parameter_count()); |
+ if (V8::UseCrankshaft()) { |
+ UNIMPLEMENTED_MIPS(); |
+ } else { |
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag); |
+ } |
+} |
+ |
+ |
+void MacroAssembler::IsObjectJSObjectType(Register heap_object, |
+ Register map, |
+ Register scratch, |
+ Label* fail) { |
+ lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset)); |
+ IsInstanceJSObjectType(map, scratch, fail); |
+} |
+ |
+ |
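+// Jumps to fail unless the instance type held in map lies in the range
+// [FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE].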
+void MacroAssembler::IsInstanceJSObjectType(Register map, |
+ Register scratch, |
+ Label* fail) { |
+ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
+ Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE)); |
+ Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE)); |
+} |
+ |
+ |
+void MacroAssembler::IsObjectJSStringType(Register object, |
+ Register scratch, |
+ Label* fail) { |
+ ASSERT(kNotStringTag != 0); |
+ |
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
+ And(scratch, scratch, Operand(kIsNotStringMask)); |
+ Branch(fail, ne, scratch, Operand(zero_reg)); |
} |
// --------------------------------------------------------------------------- |
// Support functions. |
- void MacroAssembler::GetObjectType(Register function, |
- Register map, |
- Register type_reg) { |
- lw(map, FieldMemOperand(function, HeapObject::kMapOffset)); |
- lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
- } |
+void MacroAssembler::TryGetFunctionPrototype(Register function, |
+ Register result, |
+ Register scratch, |
+ Label* miss) { |
+ // Check that the receiver isn't a smi. |
+ JumpIfSmi(function, miss); |
- void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) { |
- // Load builtin address. |
- LoadExternalReference(t9, builtin_entry); |
- lw(t9, MemOperand(t9)); // Deref address. |
- addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); |
- // Call and allocate arguments slots. |
- jalr(t9); |
- // Use the branch delay slot to allocated argument slots. |
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); |
- } |
+ // Check that the function really is a function. Load map into result reg. |
+ GetObjectType(function, result, scratch); |
+ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE)); |
+ // Make sure that the function has an instance prototype. |
+ Label non_instance; |
+ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); |
+ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); |
+ Branch(&non_instance, ne, scratch, Operand(zero_reg)); |
- void MacroAssembler::CallBuiltin(Register target) { |
- // Target already holds target address. |
- // Call and allocate arguments slots. |
- jalr(target); |
- // Use the branch delay slot to allocated argument slots. |
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); |
- } |
+ // Get the prototype or initial map from the function. |
+ lw(result, |
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
+ // If the prototype or initial map is the hole, don't return it and |
+ // simply miss the cache instead. This will allow us to allocate a |
+ // prototype object on-demand in the runtime system. |
+ LoadRoot(t8, Heap::kTheHoleValueRootIndex); |
+ Branch(miss, eq, result, Operand(t8)); |
- void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) { |
- // Load builtin address. |
- LoadExternalReference(t9, builtin_entry); |
- lw(t9, MemOperand(t9)); // Deref address. |
- addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); |
- // Call and allocate arguments slots. |
- jr(t9); |
- // Use the branch delay slot to allocated argument slots. |
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
- } |
+ // If the function does not have an initial map, we're done. |
+ Label done; |
+ GetObjectType(result, scratch, scratch); |
+ Branch(&done, ne, scratch, Operand(MAP_TYPE)); |
+ // Get the prototype from the initial map. |
+ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
+ jmp(&done); |
- void MacroAssembler::JumpToBuiltin(Register target) { |
- // t9 already holds target address. |
- // Call and allocate arguments slots. |
- jr(t9); |
- // Use the branch delay slot to allocated argument slots. |
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
- } |
+ // Non-instance prototype: Fetch prototype from constructor field |
+ // in initial map. |
+ bind(&non_instance); |
+ lw(result, FieldMemOperand(result, Map::kConstructorOffset)); |
+ |
+ // All done. |
+ bind(&done); |
+} |
+ |
+ |
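+// Loads the map of object into the map register and its instance type into
+// type_reg.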
+void MacroAssembler::GetObjectType(Register object, |
+ Register map, |
+ Register type_reg) { |
+ lw(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
+ lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
+} |
// ----------------------------------------------------------------------------- |
@@ -1099,8 +2487,9 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond, |
} |
-void MacroAssembler::StubReturn(int argc) { |
- UNIMPLEMENTED_MIPS(); |
+void MacroAssembler::TailCallStub(CodeStub* stub) { |
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET); |
} |
@@ -1112,7 +2501,71 @@ void MacroAssembler::IllegalOperation(int num_arguments) { |
} |
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { |
+void MacroAssembler::IndexFromHash(Register hash, |
+ Register index) { |
+ // If the hash field contains an array index pick it out. The assert checks |
+ // that the constants for the maximum number of digits for an array index |
+ // cached in the hash field and the number of bits reserved for it does not |
+ // conflict. |
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < |
+ (1 << String::kArrayIndexValueBits)); |
+ // We want the smi-tagged index in the index register. kArrayIndexValueMask
+ // has zeros in the low kHashShift bits.
+ STATIC_ASSERT(kSmiTag == 0); |
+ Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits); |
+ sll(index, hash, kSmiTagSize); |
+} |
+ |
+ |
+void MacroAssembler::ObjectToDoubleFPURegister(Register object, |
+ FPURegister result, |
+ Register scratch1, |
+ Register scratch2, |
+ Register heap_number_map, |
+ Label* not_number, |
+ ObjectToDoubleFlags flags) { |
+ Label done; |
+ if ((flags & OBJECT_NOT_SMI) == 0) { |
+ Label not_smi; |
+ JumpIfNotSmi(object, ¬_smi); |
+ // Remove smi tag and convert to double. |
+ sra(scratch1, object, kSmiTagSize); |
+ mtc1(scratch1, result); |
+ cvt_d_w(result, result); |
+ Branch(&done); |
+ bind(¬_smi); |
+ } |
+ // Check for heap number and load double value from it. |
+ lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); |
+ Branch(not_number, ne, scratch1, Operand(heap_number_map)); |
+ |
+ if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { |
+ // If exponent is all ones the number is either a NaN or +/-Infinity. |
+ Register exponent = scratch1; |
+ Register mask_reg = scratch2; |
+ lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
+ li(mask_reg, HeapNumber::kExponentMask); |
+ |
+ And(exponent, exponent, mask_reg); |
+ Branch(not_number, eq, exponent, Operand(mask_reg)); |
+ } |
+ ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); |
+ bind(&done); |
+} |
+
+
+void MacroAssembler::SmiToDoubleFPURegister(Register smi, |
+ FPURegister value, |
+ Register scratch1) { |
+ sra(scratch1, smi, kSmiTagSize); |
+ mtc1(scratch1, value); |
+ cvt_d_w(value, value); |
+} |
+ |
+ |
+void MacroAssembler::CallRuntime(const Runtime::Function* f, |
+ int num_arguments) { |
// All parameters are on the stack. v0 has the return value after call. |
// If the expected number of arguments of the runtime function is |
@@ -1128,69 +2581,129 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { |
// should remove this need and make the runtime routine entry code |
// smarter. |
li(a0, num_arguments); |
- LoadExternalReference(a1, ExternalReference(f)); |
+ li(a1, Operand(ExternalReference(f, isolate()))); |
CEntryStub stub(1); |
CallStub(&stub); |
} |
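+// Like CallRuntime, but configures the CEntryStub to save and restore the
+// double registers around the runtime call.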
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { |
+ const Runtime::Function* function = Runtime::FunctionForId(id); |
+ li(a0, Operand(function->nargs)); |
+ li(a1, Operand(ExternalReference(function, isolate()))); |
+ CEntryStub stub(1); |
+ stub.SaveDoubles(); |
+ CallStub(&stub); |
+} |
+ |
+ |
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { |
CallRuntime(Runtime::FunctionForId(fid), num_arguments); |
} |
+void MacroAssembler::CallExternalReference(const ExternalReference& ext, |
+ int num_arguments) { |
+ li(a0, Operand(num_arguments)); |
+ li(a1, Operand(ext)); |
+ |
+ CEntryStub stub(1); |
+ CallStub(&stub); |
+} |
+ |
+ |
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, |
int num_arguments, |
int result_size) { |
- UNIMPLEMENTED_MIPS(); |
+ // TODO(1236192): Most runtime routines don't need the number of |
+ // arguments passed in because it is constant. At some point we |
+ // should remove this need and make the runtime routine entry code |
+ // smarter. |
+ li(a0, Operand(num_arguments)); |
+ JumpToExternalReference(ext); |
} |
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, |
int num_arguments, |
int result_size) { |
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size); |
+ TailCallExternalReference(ExternalReference(fid, isolate()), |
+ num_arguments, |
+ result_size); |
} |
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { |
- UNIMPLEMENTED_MIPS(); |
+ li(a1, Operand(builtin)); |
+ CEntryStub stub(1); |
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
} |
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id, |
- bool* resolved) { |
- UNIMPLEMENTED_MIPS(); |
- return Handle<Code>(reinterpret_cast<Code*>(NULL)); // UNIMPLEMENTED RETURN |
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
+ InvokeJSFlags flags, |
+ PostCallGenerator* post_call_generator) { |
+ GetBuiltinEntry(t9, id); |
+ if (flags == CALL_JS) { |
+ Call(t9); |
+ if (post_call_generator != NULL) post_call_generator->Generate(); |
+ } else { |
+ ASSERT(flags == JUMP_JS); |
+ Jump(t9); |
+ } |
} |
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
- InvokeJSFlags flags) { |
- UNIMPLEMENTED_MIPS(); |
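+// Loads the JSFunction for the given JavaScript builtin into the target
+// register via the builtins object of the current global context.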
+void MacroAssembler::GetBuiltinFunction(Register target, |
+ Builtins::JavaScript id) { |
+ // Load the builtins object into target register. |
+ lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
+ lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); |
+ // Load the JavaScript builtin function from the builtins object. |
+ lw(target, FieldMemOperand(target, |
+ JSBuiltinsObject::OffsetOfFunctionWithId(id))); |
} |
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
- UNIMPLEMENTED_MIPS(); |
+ ASSERT(!target.is(a1)); |
+ GetBuiltinFunction(a1, id); |
+ // Load the code entry point from the builtins object. |
+ lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); |
} |
void MacroAssembler::SetCounter(StatsCounter* counter, int value, |
Register scratch1, Register scratch2) { |
- UNIMPLEMENTED_MIPS(); |
+ if (FLAG_native_code_counters && counter->Enabled()) { |
+ li(scratch1, Operand(value)); |
+ li(scratch2, Operand(ExternalReference(counter))); |
+ sw(scratch1, MemOperand(scratch2)); |
+ } |
} |
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, |
Register scratch1, Register scratch2) { |
- UNIMPLEMENTED_MIPS(); |
+ ASSERT(value > 0); |
+ if (FLAG_native_code_counters && counter->Enabled()) { |
+ li(scratch2, Operand(ExternalReference(counter))); |
+ lw(scratch1, MemOperand(scratch2)); |
+ Addu(scratch1, scratch1, Operand(value)); |
+ sw(scratch1, MemOperand(scratch2)); |
+ } |
} |
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
Register scratch1, Register scratch2) { |
- UNIMPLEMENTED_MIPS(); |
+ ASSERT(value > 0); |
+ if (FLAG_native_code_counters && counter->Enabled()) { |
+ li(scratch2, Operand(ExternalReference(counter))); |
+ lw(scratch1, MemOperand(scratch2)); |
+ Subu(scratch1, scratch1, Operand(value)); |
+ sw(scratch1, MemOperand(scratch2)); |
+ } |
} |
@@ -1199,30 +2712,144 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
void MacroAssembler::Assert(Condition cc, const char* msg, |
Register rs, Operand rt) { |
- UNIMPLEMENTED_MIPS(); |
+ if (FLAG_debug_code) |
+ Check(cc, msg, rs, rt); |
+} |
+ |
+ |
+void MacroAssembler::AssertRegisterIsRoot(Register reg, |
+ Heap::RootListIndex index) { |
+ if (FLAG_debug_code) { |
+ LoadRoot(at, index); |
+ Check(eq, "Register did not match expected root", reg, Operand(at)); |
+ } |
+} |
+ |
+ |
+void MacroAssembler::AssertFastElements(Register elements) { |
+ if (FLAG_debug_code) { |
+ ASSERT(!elements.is(at)); |
+ Label ok; |
+ Push(elements); |
+ lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); |
+ LoadRoot(at, Heap::kFixedArrayMapRootIndex); |
+ Branch(&ok, eq, elements, Operand(at)); |
+ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); |
+ Branch(&ok, eq, elements, Operand(at)); |
+ Abort("JSObject with fast elements map has slow elements"); |
+ bind(&ok); |
+ Pop(elements); |
+ } |
} |
void MacroAssembler::Check(Condition cc, const char* msg, |
Register rs, Operand rt) { |
- UNIMPLEMENTED_MIPS(); |
+ Label L; |
+ Branch(&L, cc, rs, rt); |
+ Abort(msg); |
+ // Will not return here.
+ bind(&L); |
} |
void MacroAssembler::Abort(const char* msg) { |
- UNIMPLEMENTED_MIPS(); |
+ Label abort_start; |
+ bind(&abort_start); |
+ // We want to pass the msg string as a smi to avoid GC
+ // problems. However, msg is not guaranteed to be properly
+ // aligned. Instead, we pass an aligned pointer that is a
+ // proper v8 smi, together with the alignment difference
+ // from the real pointer as a smi.
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg); |
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; |
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); |
+#ifdef DEBUG |
+ if (msg != NULL) { |
+ RecordComment("Abort message: "); |
+ RecordComment(msg); |
+ } |
+#endif |
+ // Disable stub call restrictions to always allow calls to abort. |
+ AllowStubCallsScope allow_scope(this, true); |
+ |
+ li(a0, Operand(p0)); |
+ Push(a0); |
+ li(a0, Operand(Smi::FromInt(p1 - p0))); |
+ Push(a0); |
+ CallRuntime(Runtime::kAbort, 2); |
+ // Will not return here.
+ if (is_trampoline_pool_blocked()) { |
+ // If the calling code cares about the exact number of |
+ // instructions generated, we insert padding here to keep the size |
+ // of the Abort macro constant. |
+ // Currently in debug mode with debug_code enabled the number of |
+ // generated instructions is 14, so we use this as a maximum value. |
+ static const int kExpectedAbortInstructions = 14; |
+ int abort_instructions = InstructionsGeneratedSince(&abort_start); |
+ ASSERT(abort_instructions <= kExpectedAbortInstructions); |
+ while (abort_instructions++ < kExpectedAbortInstructions) { |
+ nop(); |
+ } |
+ } |
+} |
+ |
+ |
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
+ if (context_chain_length > 0) { |
+ // Move up the chain of contexts to the context containing the slot. |
+ lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX))); |
+ // Load the function context (which is the incoming, outer context). |
+ lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); |
+ for (int i = 1; i < context_chain_length; i++) { |
+ lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX))); |
+ lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset)); |
+ } |
+ // The context may be an intermediate context, not a function context. |
+ lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX))); |
+ } else { // Slot is in the current function context. |
+ // The context may be an intermediate context, not a function context. |
+ lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX))); |
+ } |
+} |
+ |
+ |
+void MacroAssembler::LoadGlobalFunction(int index, Register function) { |
+ // Load the global or builtins object from the current context. |
+ lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
+ // Load the global context from the global or builtins object. |
+ lw(function, FieldMemOperand(function, |
+ GlobalObject::kGlobalContextOffset)); |
+ // Load the function from the global context. |
+ lw(function, MemOperand(function, Context::SlotOffset(index))); |
+} |
+ |
+ |
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, |
+ Register map, |
+ Register scratch) { |
+ // Load the initial map. The global functions all have initial maps. |
+ lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
+ if (FLAG_debug_code) { |
+ Label ok, fail; |
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false); |
+ Branch(&ok); |
+ bind(&fail); |
+ Abort("Global functions must have initial map"); |
+ bind(&ok); |
+ } |
} |
void MacroAssembler::EnterFrame(StackFrame::Type type) { |
addiu(sp, sp, -5 * kPointerSize); |
- li(t0, Operand(Smi::FromInt(type))); |
- li(t1, Operand(CodeObject())); |
+ li(t8, Operand(Smi::FromInt(type))); |
+ li(t9, Operand(CodeObject())); |
sw(ra, MemOperand(sp, 4 * kPointerSize)); |
sw(fp, MemOperand(sp, 3 * kPointerSize)); |
sw(cp, MemOperand(sp, 2 * kPointerSize)); |
- sw(t0, MemOperand(sp, 1 * kPointerSize)); |
- sw(t1, MemOperand(sp, 0 * kPointerSize)); |
+ sw(t8, MemOperand(sp, 1 * kPointerSize)); |
+ sw(t9, MemOperand(sp, 0 * kPointerSize)); |
addiu(fp, sp, 3 * kPointerSize); |
} |
@@ -1235,62 +2862,98 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
} |
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, |
- Register hold_argc, |
+void MacroAssembler::EnterExitFrame(Register hold_argc, |
Register hold_argv, |
- Register hold_function) { |
- // Compute the argv pointer and keep it in a callee-saved register. |
+ Register hold_function, |
+ bool save_doubles) { |
// a0 is argc. |
- sll(t0, a0, kPointerSizeLog2); |
- add(hold_argv, sp, t0); |
- addi(hold_argv, hold_argv, -kPointerSize); |
+ sll(t8, a0, kPointerSizeLog2); |
+ addu(hold_argv, sp, t8); |
+ addiu(hold_argv, hold_argv, -kPointerSize); |
// Compute callee's stack pointer before making changes and save it as |
- // t1 register so that it is restored as sp register on exit, thereby |
+ // t9 register so that it is restored as sp register on exit, thereby |
// popping the args. |
- // t1 = sp + kPointerSize * #args |
- add(t1, sp, t0); |
+ // t9 = sp + kPointerSize * #args |
+ addu(t9, sp, t8); |
+ |
+ // Compute the argv pointer and keep it in a callee-saved register. |
+ // This only seems to be needed for crankshaft and may cause problems,
+ // so it is disabled for now.
+ // Subu(s6, t9, Operand(kPointerSize)); |
// Align the stack at this point. |
AlignStack(0); |
// Save registers. |
addiu(sp, sp, -12); |
- sw(t1, MemOperand(sp, 8)); |
+ sw(t9, MemOperand(sp, 8)); |
sw(ra, MemOperand(sp, 4)); |
sw(fp, MemOperand(sp, 0)); |
mov(fp, sp); // Setup new frame pointer. |
- // Push debug marker. |
- if (mode == ExitFrame::MODE_DEBUG) { |
- Push(zero_reg); |
- } else { |
- li(t0, Operand(CodeObject())); |
- Push(t0); |
- } |
+ li(t8, Operand(CodeObject())); |
+ Push(t8); // Accessed from ExitFrame::code_slot. |
// Save the frame pointer and the context in top. |
- LoadExternalReference(t0, ExternalReference(Isolate::k_c_entry_fp_address)); |
- sw(fp, MemOperand(t0)); |
- LoadExternalReference(t0, ExternalReference(Isolate::k_context_address)); |
- sw(cp, MemOperand(t0)); |
+ li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate()))); |
+ sw(fp, MemOperand(t8)); |
+ li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate()))); |
+ sw(cp, MemOperand(t8)); |
// Setup argc and the builtin function in callee-saved registers. |
mov(hold_argc, a0); |
mov(hold_function, a1); |
+ |
+ // Optionally save all double registers. |
+ if (save_doubles) { |
+#ifdef DEBUG |
+ int frame_alignment = ActivationFrameAlignment(); |
+#endif |
+ // The stack alignment code above made sp unaligned, so add space for one |
+ // more double register and use aligned addresses. |
+ ASSERT(kDoubleSize == frame_alignment); |
+ // Mark the frame as containing doubles by pushing a non-valid return |
+ // address, i.e. 0. |
+ ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize); |
+ push(zero_reg); // Marker and alignment word. |
+ int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize; |
+ Subu(sp, sp, Operand(space)); |
+ // Remember: we only need to save every 2nd double FPU value. |
+ for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
+ FPURegister reg = FPURegister::from_code(i); |
+ sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize)); |
+ } |
+ // Note that f0 will be accessible at fp - 2*kPointerSize - |
+ // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the |
+ // alignment word were pushed after the fp. |
+ } |
} |
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { |
+void MacroAssembler::LeaveExitFrame(bool save_doubles) { |
+ // Optionally restore all double registers. |
+ if (save_doubles) { |
+ // TODO(regis): Use vldrm instruction. |
+ // Remember: we only need to restore every 2nd double FPU value. |
+ for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
+ FPURegister reg = FPURegister::from_code(i); |
+ // Register f30-f31 is just below the marker. |
+ const int offset = ExitFrameConstants::kMarkerOffset; |
+ ldc1(reg, MemOperand(fp, |
+ (i - FPURegister::kNumRegisters) * kDoubleSize + offset)); |
+ } |
+ } |
+ |
// Clear top frame. |
- LoadExternalReference(t0, ExternalReference(Isolate::k_c_entry_fp_address)); |
- sw(zero_reg, MemOperand(t0)); |
+ li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate()))); |
+ sw(zero_reg, MemOperand(t8)); |
// Restore current context from top and clear it in debug mode. |
- LoadExternalReference(t0, ExternalReference(Isolate::k_context_address)); |
- lw(cp, MemOperand(t0)); |
+ li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate()))); |
+ lw(cp, MemOperand(t8)); |
#ifdef DEBUG |
- sw(a3, MemOperand(t0)); |
+ sw(a3, MemOperand(t8)); |
#endif |
// Pop the arguments, restore registers, and return. |
@@ -1303,24 +2966,362 @@ void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { |
} |
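+// Writes the map, the smi-tagged length and an empty hash field into a
+// freshly allocated string object.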
+void MacroAssembler::InitializeNewString(Register string, |
+ Register length, |
+ Heap::RootListIndex map_index, |
+ Register scratch1, |
+ Register scratch2) { |
+ sll(scratch1, length, kSmiTagSize); |
+ LoadRoot(scratch2, map_index); |
+ sw(scratch1, FieldMemOperand(string, String::kLengthOffset)); |
+ li(scratch1, Operand(String::kEmptyHashField)); |
+ sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); |
+ sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); |
+} |
+ |
+ |
+int MacroAssembler::ActivationFrameAlignment() { |
+#if defined(V8_HOST_ARCH_MIPS) |
+ // Running on the real platform. Use the alignment as mandated by the local |
+ // environment. |
+ // Note: This will break if we ever start generating snapshots on one Mips |
+ // platform for another Mips platform with a different alignment. |
+ return OS::ActivationFrameAlignment(); |
+#else // defined(V8_HOST_ARCH_MIPS) |
+ // If we are using the simulator then we should always align to the expected |
+ // alignment. As the simulator is used to generate snapshots we do not know |
+ // if the target platform will need alignment, so this is controlled from a |
+ // flag. |
+ return FLAG_sim_stack_alignment; |
+#endif // defined(V8_HOST_ARCH_MIPS) |
+} |
+ |
+ |
void MacroAssembler::AlignStack(int offset) { |
// On MIPS an offset of 0 aligns to 0 modulo 8 bytes, |
// and an offset of 1 aligns to 4 modulo 8 bytes. |
+#if defined(V8_HOST_ARCH_MIPS) |
+ // Running on the real platform. Use the alignment as mandated by the local |
+ // environment. |
+ // Note: This will break if we ever start generating snapshots on one MIPS |
+ // platform for another MIPS platform with a different alignment. |
int activation_frame_alignment = OS::ActivationFrameAlignment(); |
+#else // defined(V8_HOST_ARCH_MIPS) |
+ // If we are using the simulator then we should always align to the expected |
+ // alignment. As the simulator is used to generate snapshots we do not know |
+ // if the target platform will need alignment, so we will always align at |
+ // this point here. |
+ int activation_frame_alignment = 2 * kPointerSize; |
+#endif // defined(V8_HOST_ARCH_MIPS) |
if (activation_frame_alignment != kPointerSize) { |
// This code needs to be made more general if this assert doesn't hold. |
ASSERT(activation_frame_alignment == 2 * kPointerSize); |
if (offset == 0) { |
- andi(t0, sp, activation_frame_alignment - 1); |
- Push(zero_reg, eq, t0, zero_reg); |
+ andi(t8, sp, activation_frame_alignment - 1); |
+ Push(zero_reg, eq, t8, zero_reg); |
} else { |
- andi(t0, sp, activation_frame_alignment - 1); |
- addiu(t0, t0, -4); |
- Push(zero_reg, eq, t0, zero_reg); |
+ andi(t8, sp, activation_frame_alignment - 1); |
+ addiu(t8, t8, -4); |
+ Push(zero_reg, eq, t8, zero_reg); |
+ } |
+ } |
+} |
+ |
+ |
+ |
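+// Relies on the identity that reg & (reg - 1) == 0 only for powers of two;
+// reg == 0 is rejected by the signed check on reg - 1.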
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero( |
+ Register reg, |
+ Register scratch, |
+ Label* not_power_of_two_or_zero) { |
+ Subu(scratch, reg, Operand(1)); |
+ Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt, |
+ scratch, Operand(zero_reg)); |
+ and_(at, scratch, reg); // In the delay slot. |
+ Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg)); |
+} |
+ |
+ |
+void MacroAssembler::JumpIfNotBothSmi(Register reg1, |
+ Register reg2, |
+ Label* on_not_both_smi) { |
+ STATIC_ASSERT(kSmiTag == 0); |
+ ASSERT_EQ(1, kSmiTagMask); |
+ or_(at, reg1, reg2); |
+ andi(at, at, kSmiTagMask); |
+ Branch(on_not_both_smi, ne, at, Operand(zero_reg)); |
+} |
+ |
+ |
+void MacroAssembler::JumpIfEitherSmi(Register reg1, |
+ Register reg2, |
+ Label* on_either_smi) { |
+ STATIC_ASSERT(kSmiTag == 0); |
+ ASSERT_EQ(1, kSmiTagMask); |
+ // The AND of the tag bits is 1 only if both operands are non-smis, so a
+ // zero result means at least one operand is a smi.
+ and_(at, reg1, reg2); |
+ andi(at, at, kSmiTagMask); |
+ Branch(on_either_smi, eq, at, Operand(zero_reg)); |
+} |
+ |
+ |
+void MacroAssembler::AbortIfSmi(Register object) { |
+ STATIC_ASSERT(kSmiTag == 0); |
+ andi(at, object, kSmiTagMask); |
+ Assert(ne, "Operand is a smi", at, Operand(zero_reg)); |
+} |
+ |
+ |
+void MacroAssembler::AbortIfNotSmi(Register object) { |
+ STATIC_ASSERT(kSmiTag == 0); |
+ andi(at, object, kSmiTagMask); |
+ Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
+} |
+ |
+ |
+void MacroAssembler::AbortIfNotRootValue(Register src, |
+ Heap::RootListIndex root_value_index, |
+ const char* message) { |
+ ASSERT(!src.is(at)); |
+ LoadRoot(at, root_value_index); |
+ Assert(eq, message, src, Operand(at)); |
+} |
+ |
+ |
+void MacroAssembler::JumpIfNotHeapNumber(Register object, |
+ Register heap_number_map, |
+ Register scratch, |
+ Label* on_not_heap_number) { |
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
+ Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map)); |
+} |
+ |
+ |
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( |
+ Register first, |
+ Register second, |
+ Register scratch1, |
+ Register scratch2, |
+ Label* failure) { |
+ // Test that both first and second are sequential ASCII strings. |
+ // Assume that they are non-smis. |
+ lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); |
+ lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); |
+ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
+ lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); |
+ |
+ JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, |
+ scratch2, |
+ scratch1, |
+ scratch2, |
+ failure); |
+} |
+ |
+ |
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, |
+ Register second, |
+ Register scratch1, |
+ Register scratch2, |
+ Label* failure) { |
+ // Check that neither is a smi. |
+ STATIC_ASSERT(kSmiTag == 0); |
+ And(scratch1, first, Operand(second)); |
+ And(scratch1, scratch1, Operand(kSmiTagMask)); |
+ Branch(failure, eq, scratch1, Operand(zero_reg)); |
+ JumpIfNonSmisNotBothSequentialAsciiStrings(first, |
+ second, |
+ scratch1, |
+ scratch2, |
+ failure); |
+} |
+ |
+ |
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( |
+ Register first, |
+ Register second, |
+ Register scratch1, |
+ Register scratch2, |
+ Label* failure) { |
+ int kFlatAsciiStringMask = |
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE; |
+ ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed. |
+ andi(scratch1, first, kFlatAsciiStringMask); |
+ Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag)); |
+ andi(scratch2, second, kFlatAsciiStringMask); |
+ Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag)); |
+} |
+ |
+ |
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, |
+ Register scratch, |
+ Label* failure) { |
+ int kFlatAsciiStringMask = |
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE; |
+ And(scratch, type, Operand(kFlatAsciiStringMask)); |
+ Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag)); |
+} |
+ |
+ |
+static const int kRegisterPassedArguments = 4; |
+ |
+void MacroAssembler::PrepareCallCFunction(int num_arguments,
+ Register scratch) {
+ int frame_alignment = ActivationFrameAlignment(); |
+ |
+ // Reserve space for the Isolate address, which is always passed as the
+ // last parameter.
+ num_arguments += 1; |
+ |
+ // Up to four simple arguments are passed in registers a0..a3. |
+ // Those four arguments must have reserved argument slots on the stack for |
+ // mips, even though those argument slots are not normally used. |
+ // Remaining arguments are pushed on the stack, above (higher address than) |
+ // the argument slots. |
+ ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); |
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? |
+ 0 : num_arguments - kRegisterPassedArguments) + |
+ (StandardFrameConstants::kCArgsSlotsSize / |
+ kPointerSize); |
+ if (frame_alignment > kPointerSize) { |
+ // Make stack end at alignment and make room for num_arguments - 4 words |
+ // and the original value of sp. |
+ mov(scratch, sp); |
+ Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); |
+ ASSERT(IsPowerOf2(frame_alignment)); |
+ And(sp, sp, Operand(-frame_alignment)); |
+ sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
+ } else { |
+ Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); |
+ } |
+} |
+ |
+ |
+void MacroAssembler::CallCFunction(ExternalReference function, |
+ int num_arguments) { |
+ CallCFunctionHelper(no_reg, function, at, num_arguments); |
+} |
+ |
+ |
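+// The hole value location below is only a placeholder: CallCFunctionHelper
+// ignores the external reference when a function register is supplied.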
+void MacroAssembler::CallCFunction(Register function, |
+ Register scratch, |
+ int num_arguments) { |
+ CallCFunctionHelper(function, |
+ ExternalReference::the_hole_value_location(isolate()), |
+ scratch, |
+ num_arguments); |
+} |
+ |
+ |
+void MacroAssembler::CallCFunctionHelper(Register function, |
+ ExternalReference function_reference, |
+ Register scratch, |
+ int num_arguments) { |
+ // Push Isolate address as the last argument. |
+ if (num_arguments < kRegisterPassedArguments) { |
+ Register arg_to_reg[] = {a0, a1, a2, a3}; |
+ Register r = arg_to_reg[num_arguments]; |
+ li(r, Operand(ExternalReference::isolate_address())); |
+ } else { |
+ int stack_passed_arguments = num_arguments - kRegisterPassedArguments + |
+ (StandardFrameConstants::kCArgsSlotsSize / |
+ kPointerSize); |
+ // Push Isolate address on the stack after the arguments. |
+ li(scratch, Operand(ExternalReference::isolate_address())); |
+ sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
+ } |
+ num_arguments += 1; |
+ |
+ // Make sure that the stack is aligned before calling a C function unless |
+ // running in the simulator. The simulator has its own alignment check which |
+ // provides more information. |
+ // The argument slots are presumed to have been set up by
+ // PrepareCallCFunction. Per the MIPS ABI, the C function must be called
+ // via t9.
+ |
+#if defined(V8_HOST_ARCH_MIPS) |
+ if (emit_debug_code()) { |
+ int frame_alignment = OS::ActivationFrameAlignment(); |
+ int frame_alignment_mask = frame_alignment - 1; |
+ if (frame_alignment > kPointerSize) { |
+ ASSERT(IsPowerOf2(frame_alignment)); |
+ Label alignment_as_expected; |
+ And(at, sp, Operand(frame_alignment_mask)); |
+ Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); |
+ // Don't use Check here, as it will call Runtime_Abort possibly |
+ // re-entering here. |
+ stop("Unexpected alignment in CallCFunction"); |
+ bind(&alignment_as_expected); |
} |
} |
+#endif // V8_HOST_ARCH_MIPS |
+ |
+ // Just call directly. The function called cannot cause a GC, or |
+ // allow preemption, so the return address in the link register |
+ // stays correct. |
+ if (function.is(no_reg)) {
+ // No target register was given: call through the external reference.
+ // This case must be checked first, before any move into t9.
+ function = t9;
+ li(function, Operand(function_reference));
+ } else if (!function.is(t9)) {
+ // Per the MIPS ABI, indirect calls must go through t9.
+ mov(t9, function);
+ function = t9;
+ }
+ |
+ Call(function); |
+ |
+ ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0); |
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? |
+ 0 : num_arguments - kRegisterPassedArguments) + |
+ (StandardFrameConstants::kCArgsSlotsSize / |
+ kPointerSize); |
+ |
+ if (OS::ActivationFrameAlignment() > kPointerSize) { |
+ lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
+ } else { |
+ Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ } |
} |
+ |
+#undef BRANCH_ARGS_CHECK |
+ |
+ |
+#ifdef ENABLE_DEBUGGER_SUPPORT |
+CodePatcher::CodePatcher(byte* address, int instructions) |
+ : address_(address), |
+ instructions_(instructions), |
+ size_(instructions * Assembler::kInstrSize), |
+ masm_(address, size_ + Assembler::kGap) { |
+ // Create a new macro assembler pointing to the address of the code to patch. |
+ // The size is adjusted by kGap in order for the assembler to generate
+ // size bytes of instructions without failing with buffer size constraints.
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
+} |
+ |
+ |
+CodePatcher::~CodePatcher() { |
+ // Indicate that code has changed. |
+ CPU::FlushICache(address_, size_); |
+ |
+ // Check that the code was patched as expected. |
+ ASSERT(masm_.pc_ == address_ + size_); |
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
+} |
+ |
+ |
+void CodePatcher::Emit(Instr x) { |
+ masm()->emit(x); |
+} |
+ |
+ |
+void CodePatcher::Emit(Address addr) { |
+ masm()->emit(reinterpret_cast<Instr>(addr)); |
+} |
+ |
+ |
+#endif // ENABLE_DEBUGGER_SUPPORT |
+ |
+ |
} } // namespace v8::internal |
#endif // V8_TARGET_ARCH_MIPS |