Index: src/mips/macro-assembler-mips.cc
===================================================================
--- src/mips/macro-assembler-mips.cc (revision 4138)
+++ src/mips/macro-assembler-mips.cc (working copy)
@@ -55,7 +55,7 @@
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(target), cond, r1, r2);
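+ // Pass rmode through so that relocation info is recorded for the target.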
+ Jump(Operand(target, rmode), cond, r1, r2);
}
@@ -81,7 +81,7 @@
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target), cond, r1, r2);
+ Call(Operand(target, rmode), cond, r1, r2);
}
@@ -106,7 +106,7 @@
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
- lw(destination, MemOperand(s4, index << kPointerSizeLog2));
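+ // The roots array pointer is kept in s6.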
+ lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::LoadRoot(Register destination,
@@ -114,8 +114,7 @@
Condition cond,
Register src1, const Operand& src2) {
Branch(NegateCondition(cond), 2, src1, src2);
- nop();
- lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+ lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -320,7 +319,6 @@
}
-// load wartd in a register
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
ASSERT(!j.is_reg());
@@ -372,7 +370,7 @@
int16_t NumToPush = NumberOfBitsSet(regs);
addiu(sp, sp, -4 * NumToPush);
- for (int16_t i = 0; i < kNumRegisters; i++) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
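+ // Iterate from the highest register down so the highest-numbered
+ // register is stored at the highest stack offset.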
if ((regs & (1 << i)) != 0) {
sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
}
@@ -385,7 +383,7 @@
int16_t NumToPush = NumberOfBitsSet(regs);
addiu(sp, sp, -4 * NumToPush);
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
}
@@ -396,7 +394,7 @@
void MacroAssembler::MultiPop(RegList regs) {
int16_t NumSaved = 0;
- for (int16_t i = kNumRegisters; i > 0; i--) {
+ for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
}
@@ -408,7 +406,7 @@
void MacroAssembler::MultiPopReversed(RegList regs) {
int16_t NumSaved = 0;
- for (int16_t i = 0; i < kNumRegisters; i++) {
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
}
@@ -484,6 +482,8 @@
default:
UNREACHABLE();
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
@@ -550,6 +550,8 @@
default:
UNREACHABLE();
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
@@ -629,6 +631,8 @@
default:
UNREACHABLE();
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
@@ -704,6 +708,8 @@
default:
UNREACHABLE();
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
@@ -714,7 +720,6 @@
jr(target.rm());
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
jr(target.rm());
}
} else { // !target.is_reg()
@@ -723,20 +728,20 @@
j(target.imm32_);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
- j(target.imm32_); // will generate only one instruction.
+ j(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseAt(target)
- li(at, rt);
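+ // This reloc mode cannot be encoded in a plain jump, so materialize the
+ // target in at and jump through the register.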
+ li(at, target);
if (cond == cc_always) {
jr(at);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
- jr(at); // will generate only one instruction.
+ jr(at); // Will generate only one instruction.
}
}
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
@@ -747,7 +752,6 @@
jalr(target.rm());
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
jalr(target.rm());
}
} else { // !target.is_reg()
@@ -756,20 +760,20 @@
jal(target.imm32_);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
- jal(target.imm32_); // will generate only one instruction.
+ jal(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseAt(target)
- li(at, rt);
+ li(at, target);
if (cond == cc_always) {
jalr(at);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
- nop();
- jalr(at); // will generate only one instruction.
+ jalr(at); // Will generate only one instruction.
}
}
}
+ // Emit a nop in the branch delay slot.
+ nop();
}
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
@@ -802,7 +806,58 @@
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
- UNIMPLEMENTED_MIPS();
+ // Adjust this code if not the case.
+ ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+ // The return address is passed in register ra.
+ if (try_location == IN_JAVASCRIPT) {
+ if (type == TRY_CATCH_HANDLER) {
+ li(t0, Operand(StackHandler::TRY_CATCH));
+ } else {
+ li(t0, Operand(StackHandler::TRY_FINALLY));
+ }
+ ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+ && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+ && StackHandlerConstants::kPCOffset == 3 * kPointerSize
+ && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+ // Save the current handler as the next handler.
+ LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ lw(t1, MemOperand(t2));
+
+ addiu(sp, sp, -StackHandlerConstants::kSize);
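+ // Handler frame layout: next handler at offset 0, state at 4, fp at 8,
+ // pc (ra) at 12, matching the ASSERTs above.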
+ sw(ra, MemOperand(sp, 12));
+ sw(fp, MemOperand(sp, 8));
+ sw(t0, MemOperand(sp, 4));
+ sw(t1, MemOperand(sp, 0));
+
+ // Link this handler as the new current one.
+ sw(sp, MemOperand(t2));
+
+ } else {
+ // Must preserve a0-a3, and s0 (argv).
+ ASSERT(try_location == IN_JS_ENTRY);
+ ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+ && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+ && StackHandlerConstants::kPCOffset == 3 * kPointerSize
+ && StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+ // The frame pointer does not point to a JS frame so we save NULL
+ // for fp. We expect the code throwing an exception to check fp
+ // before dereferencing it to restore the context.
+ li(t0, Operand(StackHandler::ENTRY));
+
+ // Save the current handler as the next handler.
+ LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
+ lw(t1, MemOperand(t2));
+
+ addiu(sp, sp, -StackHandlerConstants::kSize);
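+ // Same layout as above, with zero_reg stored as a NULL frame pointer.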
+ sw(ra, MemOperand(sp, 12));
+ sw(zero_reg, MemOperand(sp, 8));
+ sw(t0, MemOperand(sp, 4));
+ sw(t1, MemOperand(sp, 0));
+
+ // Link this handler as the new current one.
+ sw(sp, MemOperand(t2));
+ }
}
@@ -812,12 +867,233 @@
-// ---------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
// Activation frames
+void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
+ Label extra_push, end;
+
+ andi(scratch, sp, 7);
+
+ // The args and receiver on the stack are all word sized, and we add one
+ // more word for sp, which we also want to store on the stack.
+ if (((arg_count + 1) % 2) == 0) {
+ Branch(ne, &extra_push, scratch, Operand(zero_reg));
+ } else { // ((arg_count + 1) % 2) == 1
+ Branch(eq, &extra_push, scratch, Operand(zero_reg));
+ }
+
+ // Save sp on the stack.
+ mov(scratch, sp);
+ Push(scratch);
+ b(&end);
+ nop(); // Branch delay slot nop.
+
+ // Align before saving sp on the stack.
+ bind(&extra_push);
+ mov(scratch, sp);
+ addiu(sp, sp, -8);
+ sw(scratch, MemOperand(sp));
+
+ // The stack is aligned and sp is stored on the top.
+ bind(&end);
+}
+
+
+void MacroAssembler::ReturnFromAlignedCall() {
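+ // Restore the sp saved by SetupAlignedCall, popping the alignment padding
+ // and any arguments pushed since.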
+ lw(sp, MemOperand(sp));
+}
+
+
+// -----------------------------------------------------------------------------
+// JavaScript invokes
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag) {
+ bool definitely_matches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual arguments count match. If not,
+ // set up registers according to the contract with ArgumentsAdaptorTrampoline:
+ // a0: actual arguments count
+ // a1: function (passed through to callee)
+ // a2: expected arguments count
+ // a3: callee code entry
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+ ASSERT(actual.is_immediate() || actual.reg().is(a0));
+ ASSERT(expected.is_immediate() || expected.reg().is(a2));
+ ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
+
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ li(a0, Operand(actual.immediate()));
+ const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ if (expected.immediate() == sentinel) {
+ // Don't worry about adapting arguments for builtins that
+ // don't want that done. Skip adaption code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ li(a2, Operand(expected.immediate()));
+ }
+ }
+ } else if (actual.is_immediate()) {
+ Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
+ li(a0, Operand(actual.immediate()));
+ } else {
+ Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
+ }
+
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ li(a3, Operand(code_constant));
+ addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
+ }
+
+ ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
+ if (flag == CALL_FUNCTION) {
+ CallBuiltin(adaptor);
+ b(done);
+ nop();
+ } else {
+ JumpToBuiltin(adaptor);
+ }
+ bind(&regular_invoke);
+ }
+}
+
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ Label done;
+
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ Call(code);
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code);
+ }
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ RelocInfo::Mode rmode,
+ InvokeFlag flag) {
+ Label done;
+
+ InvokePrologue(expected, actual, code, no_reg, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ Call(code, rmode);
+ } else {
+ Jump(code, rmode);
+ }
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ // Contract with called JS functions requires that function is passed in a1.
+ ASSERT(function.is(a1));
+ Register expected_reg = a2;
+ Register code_reg = a3;
+
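+ // Load the expected argument count and the code entry from the shared
+ // function info, and the function's context into cp.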
+ lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ lw(expected_reg,
+ FieldMemOperand(code_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ lw(code_reg,
+ MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+ addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag);
+}
+
+
+// -----------------------------------------------------------------------------
+// Support functions.
+
+void MacroAssembler::GetObjectType(Register function,
+ Register map,
+ Register type_reg) {
+ lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
+ lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
+
+
+void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
+ // Load builtin address.
+ LoadExternalReference(t9, builtin_entry);
+ lw(t9, MemOperand(t9)); // Deref address.
+ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
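+ // t9 is used because the MIPS PIC calling convention expects the callee
+ // address in t9 at the call site.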
+ // Call and allocate argument slots.
+ jalr(t9);
+ // Use the branch delay slot to allocate the argument slots.
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+ addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+}
+
+
+void MacroAssembler::CallBuiltin(Register target) {
+ // Target already holds the target address.
+ // Call and allocate argument slots.
+ jalr(target);
+ // Use the branch delay slot to allocate the argument slots.
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+ addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+}
+
+
+void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
+ // Load builtin address.
+ LoadExternalReference(t9, builtin_entry);
+ lw(t9, MemOperand(t9)); // Deref address.
+ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+ // Jump and allocate argument slots.
+ jr(t9);
+ // Use the branch delay slot to allocate the argument slots.
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+}
+
+
+void MacroAssembler::JumpToBuiltin(Register target) {
+ // Target already holds the target address.
+ // Jump and allocate argument slots.
+ jr(target);
+ // Use the branch delay slot to allocate the argument slots.
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+}
+
+
+// -----------------------------------------------------------------------------
+// Runtime calls
+
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
}
@@ -826,13 +1102,38 @@
}
+void MacroAssembler::IllegalOperation(int num_arguments) {
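+ // Drop any pushed arguments and put the undefined value in the return
+ // register v0.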
+ if (num_arguments > 0) {
+ addiu(sp, sp, num_arguments * kPointerSize);
+ }
+ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
- UNIMPLEMENTED_MIPS();
+ // All parameters are on the stack. v0 has the return value after call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments matches the
+ // expectation.
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
+ IllegalOperation(num_arguments);
+ return;
+ }
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ li(a0, num_arguments);
+ LoadExternalReference(a1, ExternalReference(f));
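+ // a0 holds the argument count and a1 the runtime entry point; CEntryStub
+ // performs the actual call into the runtime.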
+ CEntryStub stub(1);
+ CallStub(&stub);
}
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
- UNIMPLEMENTED_MIPS();
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
@@ -891,6 +1192,8 @@
}
+// -----------------------------------------------------------------------------
+// Debugging
void MacroAssembler::Assert(Condition cc, const char* msg,
Register rs, Operand rt) {
@@ -908,5 +1211,113 @@
UNIMPLEMENTED_MIPS();
}
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ addiu(sp, sp, -5 * kPointerSize);
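+ // From highest to lowest address the frame holds: ra, caller fp, context,
+ // frame type, and the code object; fp will point at the saved fp slot.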
+ li(t0, Operand(Smi::FromInt(type)));
+ li(t1, Operand(CodeObject()));
+ sw(ra, MemOperand(sp, 4 * kPointerSize));
+ sw(fp, MemOperand(sp, 3 * kPointerSize));
+ sw(cp, MemOperand(sp, 2 * kPointerSize));
+ sw(t0, MemOperand(sp, 1 * kPointerSize));
+ sw(t1, MemOperand(sp, 0 * kPointerSize));
+ addiu(fp, sp, 3 * kPointerSize);
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ mov(sp, fp);
+ lw(fp, MemOperand(sp, 0 * kPointerSize));
+ lw(ra, MemOperand(sp, 1 * kPointerSize));
+ addiu(sp, sp, 2 * kPointerSize);
+}
+
+
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
+ Register hold_argc,
+ Register hold_argv,
+ Register hold_function) {
+ // Compute the argv pointer and keep it in a callee-saved register.
+ // a0 is argc.
+ sll(t0, a0, kPointerSizeLog2);
+ add(hold_argv, sp, t0);
+ addi(hold_argv, hold_argv, -kPointerSize);
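+ // hold_argv now points at the last argument (sp + 4 * argc - 4).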
+
+ // Compute the callee's stack pointer before making changes, and save it in
+ // t1 so that it can be restored as sp on exit, thereby popping the args.
+ // t1 = sp + kPointerSize * #args
+ add(t1, sp, t0);
+
+ // Align the stack at this point.
+ AlignStack(0);
+
+ // Save registers.
+ addiu(sp, sp, -12);
+ sw(t1, MemOperand(sp, 8));
+ sw(ra, MemOperand(sp, 4));
+ sw(fp, MemOperand(sp, 0));
+ mov(fp, sp); // Set up the new frame pointer.
+
+ // Push debug marker.
+ if (mode == ExitFrame::MODE_DEBUG) {
+ Push(zero_reg);
+ } else {
+ li(t0, Operand(CodeObject()));
+ Push(t0);
+ }
+
+ // Save the frame pointer and the context in top.
+ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ sw(fp, MemOperand(t0));
+ LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+ sw(cp, MemOperand(t0));
+
+ // Set up argc and the builtin function in callee-saved registers.
+ mov(hold_argc, a0);
+ mov(hold_function, a1);
+}
+
+
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
+ // Clear top frame.
+ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
+ sw(zero_reg, MemOperand(t0));
+
+ // Restore current context from top and clear it in debug mode.
+ LoadExternalReference(t0, ExternalReference(Top::k_context_address));
+ lw(cp, MemOperand(t0));
+#ifdef DEBUG
+ sw(a3, MemOperand(t0));
+#endif
+
+ // Pop the arguments, restore registers, and return.
+ mov(sp, fp); // Respect ABI stack constraint.
+ lw(fp, MemOperand(sp, 0));
+ lw(ra, MemOperand(sp, 4));
+ lw(sp, MemOperand(sp, 8));
+ jr(ra);
+ nop(); // Branch delay slot nop.
+}
+
+
+void MacroAssembler::AlignStack(int offset) {
+ // On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
+ // and an offset of 1 aligns to 4 modulo 8 bytes.
+ int activation_frame_alignment = OS::ActivationFrameAlignment();
+ if (activation_frame_alignment != kPointerSize) {
+ // This code needs to be made more general if this assert doesn't hold.
+ ASSERT(activation_frame_alignment == 2 * kPointerSize);
+ if (offset == 0) {
+ andi(t0, sp, activation_frame_alignment - 1);
+ Push(zero_reg, eq, t0, zero_reg);
+ } else {
+ andi(t0, sp, activation_frame_alignment - 1);
+ addiu(t0, t0, -4);
+ Push(zero_reg, eq, t0, zero_reg);
+ }
+ }
+}
+
} } // namespace v8::internal