Index: src/compiler/ia32/code-generator-ia32.cc |
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..38f7d4b0c100f51c1e464786cb57f35df22d18a6 |
--- /dev/null |
+++ b/src/compiler/ia32/code-generator-ia32.cc |
@@ -0,0 +1,929 @@ |
+// Copyright 2013 the V8 project authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "src/compiler/code-generator.h" |
+ |
+#include "src/compiler/code-generator-impl.h" |
+#include "src/compiler/gap-resolver.h" |
+#include "src/compiler/node-matchers.h" |
+#include "src/compiler/node-properties-inl.h" |
+#include "src/ia32/assembler-ia32.h" |
+#include "src/ia32/macro-assembler-ia32.h" |
+#include "src/scopes.h" |
+ |
+namespace v8 { |
+namespace internal { |
+namespace compiler { |
+ |
+#define __ masm()-> |
+ |
+ |
+// Adds IA-32 specific methods for decoding operands. |
+class IA32OperandConverter : public InstructionOperandConverter { |
+ public: |
+ IA32OperandConverter(CodeGenerator* gen, Instruction* instr) |
+ : InstructionOperandConverter(gen, instr) {} |
+ |
+ Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); } |
+ |
+ Immediate InputImmediate(int index) { |
+ return ToImmediate(instr_->InputAt(index)); |
+ } |
+ |
+ Operand OutputOperand() { return ToOperand(instr_->Output()); } |
+ |
+ Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); } |
+ |
+ Operand ToOperand(InstructionOperand* op, int extra = 0) { |
+ if (op->IsRegister()) { |
+ ASSERT(extra == 0); |
+ return Operand(ToRegister(op)); |
+ } else if (op->IsDoubleRegister()) { |
+ ASSERT(extra == 0); |
+ return Operand(ToDoubleRegister(op)); |
+ } |
+ ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); |
+ // The linkage computes where all spill slots are located. |
+ FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra); |
+ return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset()); |
+ } |
+ |
+ Operand HighOperand(InstructionOperand* op) { |
+ ASSERT(op->IsDoubleStackSlot()); |
+ return ToOperand(op, kPointerSize); |
+ } |
+ |
+ Immediate ToImmediate(InstructionOperand* operand) { |
+ Constant constant = ToConstant(operand); |
+ switch (constant.type()) { |
+ case Constant::kInt32: |
+ return Immediate(constant.ToInt32()); |
+ case Constant::kFloat64: |
+ return Immediate( |
+ isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED)); |
+ case Constant::kExternalReference: |
+ return Immediate(constant.ToExternalReference()); |
+ case Constant::kHeapObject: |
+ return Immediate(constant.ToHeapObject()); |
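+      // ia32 has no 64-bit immediates, so kInt64 constants should never |
+      // appear here; the case falls through to UNREACHABLE() below. |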
+ case Constant::kInt64: |
+ break; |
+ } |
+ UNREACHABLE(); |
+ return Immediate(-1); |
+ } |
+ |
+ Operand MemoryOperand(int* first_input) { |
+ const int offset = *first_input; |
+ switch (AddressingModeField::decode(instr_->opcode())) { |
+ case kMode_MR1I: |
+ *first_input += 2; |
+ return Operand(InputRegister(offset + 0), InputRegister(offset + 1), |
+ times_1, |
+ 0); // TODO(dcarney): K != 0 |
+ case kMode_MRI: |
+ *first_input += 2; |
+ return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0), |
+ InputImmediate(offset + 1)); |
+ case kMode_MI: |
+ *first_input += 1; |
+ return Operand(InputImmediate(offset + 0)); |
+ default: |
+ UNREACHABLE(); |
+ return Operand(no_reg); |
+ } |
+ } |
+ |
+ Operand MemoryOperand() { |
+ int first_input = 0; |
+ return MemoryOperand(&first_input); |
+ } |
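+ |
+  // For reference (inferred from the cases above): kMode_MR1I decodes to |
+  // [register + register * 1 + 0] (non-zero displacements are still TODO), |
+  // kMode_MRI to [register + immediate], and kMode_MI to [immediate]. |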
+}; |
+ |
+ |
+static bool HasImmediateInput(Instruction* instr, int index) { |
+ return instr->InputAt(index)->IsImmediate(); |
+} |
+ |
+ |
+// Assembles an instruction after register allocation, producing machine code. |
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) { |
+ IA32OperandConverter i(this, instr); |
+ |
+ switch (ArchOpcodeField::decode(instr->opcode())) { |
+ case kArchJmp: |
+ __ jmp(code()->GetLabel(i.InputBlock(0))); |
+ break; |
+ case kArchNop: |
+      // Don't emit code for nops. |
+ break; |
+ case kArchRet: |
+ AssembleReturn(); |
+ break; |
+ case kArchDeoptimize: { |
+ int deoptimization_id = MiscField::decode(instr->opcode()); |
+ BuildTranslation(instr, deoptimization_id); |
+ |
+ Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( |
+ isolate(), deoptimization_id, Deoptimizer::LAZY); |
+ __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY); |
+ break; |
+ } |
+ case kIA32Add: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ add(i.InputOperand(0), i.InputImmediate(1)); |
+ } else { |
+ __ add(i.InputRegister(0), i.InputOperand(1)); |
+ } |
+ break; |
+ case kIA32And: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ and_(i.InputOperand(0), i.InputImmediate(1)); |
+ } else { |
+ __ and_(i.InputRegister(0), i.InputOperand(1)); |
+ } |
+ break; |
+ case kIA32Cmp: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ cmp(i.InputOperand(0), i.InputImmediate(1)); |
+ } else { |
+ __ cmp(i.InputRegister(0), i.InputOperand(1)); |
+ } |
+ break; |
+ case kIA32Test: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ test(i.InputOperand(0), i.InputImmediate(1)); |
+ } else { |
+ __ test(i.InputRegister(0), i.InputOperand(1)); |
+ } |
+ break; |
+ case kIA32Imul: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1)); |
+ } else { |
+ __ imul(i.OutputRegister(), i.InputOperand(1)); |
+ } |
+ break; |
+ case kIA32Idiv: |
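+      // Sign-extend eax into edx:eax, then divide edx:eax by the input. |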
+ __ cdq(); |
+ __ idiv(i.InputOperand(1)); |
+ break; |
+ case kIA32Udiv: |
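+      // Zero edx so edx:eax is the zero-extended dividend for the divide. |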
+ __ xor_(edx, edx); |
+ __ div(i.InputOperand(1)); |
+ break; |
+ case kIA32Not: |
+ __ not_(i.OutputOperand()); |
+ break; |
+ case kIA32Neg: |
+ __ neg(i.OutputOperand()); |
+ break; |
+ case kIA32Or: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ or_(i.InputOperand(0), i.InputImmediate(1)); |
+ } else { |
+ __ or_(i.InputRegister(0), i.InputOperand(1)); |
+ } |
+ break; |
+ case kIA32Xor: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ xor_(i.InputOperand(0), i.InputImmediate(1)); |
+ } else { |
+ __ xor_(i.InputRegister(0), i.InputOperand(1)); |
+ } |
+ break; |
+ case kIA32Sub: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ sub(i.InputOperand(0), i.InputImmediate(1)); |
+ } else { |
+ __ sub(i.InputRegister(0), i.InputOperand(1)); |
+ } |
+ break; |
+ case kIA32Shl: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ shl(i.OutputRegister(), i.InputInt5(1)); |
+ } else { |
+ __ shl_cl(i.OutputRegister()); |
+ } |
+ break; |
+ case kIA32Shr: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ shr(i.OutputRegister(), i.InputInt5(1)); |
+ } else { |
+ __ shr_cl(i.OutputRegister()); |
+ } |
+ break; |
+ case kIA32Sar: |
+ if (HasImmediateInput(instr, 1)) { |
+ __ sar(i.OutputRegister(), i.InputInt5(1)); |
+ } else { |
+ __ sar_cl(i.OutputRegister()); |
+ } |
+ break; |
+ case kIA32Push: |
+ if (HasImmediateInput(instr, 0)) { |
+ __ push(i.InputImmediate(0)); |
+ } else { |
+ __ push(i.InputOperand(0)); |
+ } |
+ break; |
+ case kIA32CallCodeObject: { |
+ if (HasImmediateInput(instr, 0)) { |
+ Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); |
+ __ call(code, RelocInfo::CODE_TARGET); |
+ } else { |
+ Register reg = i.InputRegister(0); |
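+        // Call the first instruction of the Code object: kHeaderSize skips |
+        // the header and -kHeapObjectTag untags the Code pointer in |reg|. |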
+ int entry = Code::kHeaderSize - kHeapObjectTag; |
+ __ call(Operand(reg, entry)); |
+ } |
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, |
+ Safepoint::kNoLazyDeopt); |
+ |
+ bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1); |
+ if (lazy_deopt) { |
+ RecordLazyDeoptimizationEntry(instr); |
+ } |
+ AddNopForSmiCodeInlining(); |
+ break; |
+ } |
+ case kIA32CallAddress: |
+ if (HasImmediateInput(instr, 0)) { |
+ // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY. |
+ __ call(reinterpret_cast<byte*>(i.InputInt32(0)), |
+ RelocInfo::RUNTIME_ENTRY); |
+ } else { |
+ __ call(i.InputRegister(0)); |
+ } |
+ break; |
+ case kPopStack: { |
+ int words = MiscField::decode(instr->opcode()); |
+ __ add(esp, Immediate(kPointerSize * words)); |
+ break; |
+ } |
+ case kIA32CallJSFunction: { |
+ Register func = i.InputRegister(0); |
+ |
+ // TODO(jarin) The load of the context should be separated from the call. |
+ __ mov(esi, FieldOperand(func, JSFunction::kContextOffset)); |
+ __ call(FieldOperand(func, JSFunction::kCodeEntryOffset)); |
+ |
+ RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, |
+ Safepoint::kNoLazyDeopt); |
+ RecordLazyDeoptimizationEntry(instr); |
+ break; |
+ } |
+ case kSSEFloat64Cmp: |
+ __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1)); |
+ break; |
+ case kSSEFloat64Add: |
+ __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
+ break; |
+ case kSSEFloat64Sub: |
+ __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
+ break; |
+ case kSSEFloat64Mul: |
+ __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
+ break; |
+ case kSSEFloat64Div: |
+ __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
+ break; |
+ case kSSEFloat64Mod: { |
+ // TODO(dcarney): alignment is wrong. |
+ __ sub(esp, Immediate(kDoubleSize)); |
+ // Move values to st(0) and st(1). |
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(1)); |
+ __ fld_d(Operand(esp, 0)); |
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(0)); |
+ __ fld_d(Operand(esp, 0)); |
+ // Loop while fprem isn't done. |
+ Label mod_loop; |
+ __ bind(&mod_loop); |
+      // This instruction traps on all kinds of inputs, but we are assuming |
+      // the floating-point control word is set to ignore them all. |
+ __ fprem(); |
+      // The following two instructions implicitly use eax: fnstsw_ax stores |
+      // the FPU status word in ax, and sahf copies ah into the CPU flags, so |
+      // the FPU's C2 bit (reduction incomplete) ends up in the parity flag. |
+ __ fnstsw_ax(); |
+ __ sahf(); |
+ __ j(parity_even, &mod_loop); |
+ // Move output to stack and clean up. |
+ __ fstp(1); |
+ __ fstp_d(Operand(esp, 0)); |
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); |
+ __ add(esp, Immediate(kDoubleSize)); |
+ break; |
+ } |
+ case kSSEFloat64ToInt32: |
+ __ cvttsd2si(i.OutputRegister(), i.InputOperand(0)); |
+ break; |
+ case kSSEInt32ToFloat64: |
+ __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0)); |
+ break; |
+ case kSSELoad: |
+ __ movsd(i.OutputDoubleRegister(), i.MemoryOperand()); |
+ break; |
+ case kSSEStore: { |
+ int index = 0; |
+ Operand operand = i.MemoryOperand(&index); |
+ __ movsd(operand, i.InputDoubleRegister(index)); |
+ break; |
+ } |
+ case kIA32LoadWord8: |
+ __ movzx_b(i.OutputRegister(), i.MemoryOperand()); |
+ break; |
+ case kIA32StoreWord8: { |
+ int index = 0; |
+ Operand operand = i.MemoryOperand(&index); |
+ __ mov_b(operand, i.InputRegister(index)); |
+ break; |
+ } |
+ case kIA32StoreWord8I: { |
+ int index = 0; |
+ Operand operand = i.MemoryOperand(&index); |
+ __ mov_b(operand, i.InputInt8(index)); |
+ break; |
+ } |
+ case kIA32LoadWord16: |
+ __ movzx_w(i.OutputRegister(), i.MemoryOperand()); |
+ break; |
+ case kIA32StoreWord16: { |
+ int index = 0; |
+ Operand operand = i.MemoryOperand(&index); |
+ __ mov_w(operand, i.InputRegister(index)); |
+ break; |
+ } |
+ case kIA32StoreWord16I: { |
+ int index = 0; |
+ Operand operand = i.MemoryOperand(&index); |
+ __ mov_w(operand, i.InputInt16(index)); |
+ break; |
+ } |
+ case kIA32LoadWord32: |
+ __ mov(i.OutputRegister(), i.MemoryOperand()); |
+ break; |
+ case kIA32StoreWord32: { |
+ int index = 0; |
+ Operand operand = i.MemoryOperand(&index); |
+ __ mov(operand, i.InputRegister(index)); |
+ break; |
+ } |
+ case kIA32StoreWord32I: { |
+ int index = 0; |
+ Operand operand = i.MemoryOperand(&index); |
+ __ mov(operand, i.InputImmediate(index)); |
+ break; |
+ } |
+ case kIA32StoreWriteBarrier: { |
+ Register object = i.InputRegister(0); |
+ Register index = i.InputRegister(1); |
+ Register value = i.InputRegister(2); |
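+      // Store the value, then recompute the slot address in |index| for the |
+      // write barrier below. |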
+ __ mov(Operand(object, index, times_1, 0), value); |
+ __ lea(index, Operand(object, index, times_1, 0)); |
+ SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters() |
+ ? kSaveFPRegs |
+ : kDontSaveFPRegs; |
+ __ RecordWrite(object, index, value, mode); |
+ break; |
+ } |
+ } |
+} |
+ |
+ |
+// Assembles branches after an instruction. |
+void CodeGenerator::AssembleArchBranch(Instruction* instr, |
+ FlagsCondition condition) { |
+ IA32OperandConverter i(this, instr); |
+ Label done; |
+ |
+ // Emit a branch. The true and false targets are always the last two inputs |
+ // to the instruction. |
+ BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2); |
+ BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1); |
+ bool fallthru = IsNextInAssemblyOrder(fblock); |
+ Label* tlabel = code()->GetLabel(tblock); |
+ Label* flabel = fallthru ? &done : code()->GetLabel(fblock); |
+ Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar; |
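+  // For floating-point comparisons, ucomisd sets the parity flag when either |
+  // operand is NaN, so the kUnordered* cases dispatch on parity_even first. |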
+ switch (condition) { |
+ case kUnorderedEqual: |
+ __ j(parity_even, flabel, flabel_distance); |
+ // Fall through. |
+ case kEqual: |
+ __ j(equal, tlabel); |
+ break; |
+ case kUnorderedNotEqual: |
+ __ j(parity_even, tlabel); |
+ // Fall through. |
+ case kNotEqual: |
+ __ j(not_equal, tlabel); |
+ break; |
+ case kSignedLessThan: |
+ __ j(less, tlabel); |
+ break; |
+ case kSignedGreaterThanOrEqual: |
+ __ j(greater_equal, tlabel); |
+ break; |
+ case kSignedLessThanOrEqual: |
+ __ j(less_equal, tlabel); |
+ break; |
+ case kSignedGreaterThan: |
+ __ j(greater, tlabel); |
+ break; |
+ case kUnorderedLessThan: |
+ __ j(parity_even, flabel, flabel_distance); |
+ // Fall through. |
+ case kUnsignedLessThan: |
+ __ j(below, tlabel); |
+ break; |
+ case kUnorderedGreaterThanOrEqual: |
+ __ j(parity_even, tlabel); |
+ // Fall through. |
+ case kUnsignedGreaterThanOrEqual: |
+ __ j(above_equal, tlabel); |
+ break; |
+ case kUnorderedLessThanOrEqual: |
+ __ j(parity_even, flabel, flabel_distance); |
+ // Fall through. |
+ case kUnsignedLessThanOrEqual: |
+ __ j(below_equal, tlabel); |
+ break; |
+ case kUnorderedGreaterThan: |
+ __ j(parity_even, tlabel); |
+ // Fall through. |
+ case kUnsignedGreaterThan: |
+ __ j(above, tlabel); |
+ break; |
+ } |
+  if (!fallthru) __ jmp(flabel, flabel_distance);  // No fallthru to flabel. |
+ __ bind(&done); |
+} |
+ |
+ |
+// Assembles boolean materializations after an instruction. |
+void CodeGenerator::AssembleArchBoolean(Instruction* instr, |
+ FlagsCondition condition) { |
+ IA32OperandConverter i(this, instr); |
+ Label done; |
+ |
+ // Materialize a full 32-bit 1 or 0 value. |
+ Label check; |
+ Register reg = i.OutputRegister(); |
+ Condition cc = no_condition; |
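+  // For the kUnordered* cases, the parity flag identifies NaN operands: the |
+  // ordered path jumps to |check| while the unordered path materializes its |
+  // fixed result and jumps straight to |done|. |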
+ switch (condition) { |
+ case kUnorderedEqual: |
+ __ j(parity_odd, &check, Label::kNear); |
+ __ mov(reg, Immediate(0)); |
+ __ jmp(&done, Label::kNear); |
+ // Fall through. |
+ case kEqual: |
+ cc = equal; |
+ break; |
+ case kUnorderedNotEqual: |
+ __ j(parity_odd, &check, Label::kNear); |
+ __ mov(reg, Immediate(1)); |
+ __ jmp(&done, Label::kNear); |
+ // Fall through. |
+ case kNotEqual: |
+ cc = not_equal; |
+ break; |
+ case kSignedLessThan: |
+ cc = less; |
+ break; |
+ case kSignedGreaterThanOrEqual: |
+ cc = greater_equal; |
+ break; |
+ case kSignedLessThanOrEqual: |
+ cc = less_equal; |
+ break; |
+ case kSignedGreaterThan: |
+ cc = greater; |
+ break; |
+ case kUnorderedLessThan: |
+ __ j(parity_odd, &check, Label::kNear); |
+ __ mov(reg, Immediate(0)); |
+ __ jmp(&done, Label::kNear); |
+ // Fall through. |
+ case kUnsignedLessThan: |
+ cc = below; |
+ break; |
+ case kUnorderedGreaterThanOrEqual: |
+ __ j(parity_odd, &check, Label::kNear); |
+ __ mov(reg, Immediate(1)); |
+ __ jmp(&done, Label::kNear); |
+ // Fall through. |
+ case kUnsignedGreaterThanOrEqual: |
+ cc = above_equal; |
+ break; |
+ case kUnorderedLessThanOrEqual: |
+ __ j(parity_odd, &check, Label::kNear); |
+ __ mov(reg, Immediate(0)); |
+ __ jmp(&done, Label::kNear); |
+ // Fall through. |
+ case kUnsignedLessThanOrEqual: |
+ cc = below_equal; |
+ break; |
+ case kUnorderedGreaterThan: |
+ __ j(parity_odd, &check, Label::kNear); |
+ __ mov(reg, Immediate(1)); |
+ __ jmp(&done, Label::kNear); |
+ // Fall through. |
+ case kUnsignedGreaterThan: |
+ cc = above; |
+ break; |
+ } |
+ __ bind(&check); |
+ if (reg.is_byte_register()) { |
+ // setcc for byte registers (al, bl, cl, dl). |
+ __ setcc(cc, reg); |
+ __ movzx_b(reg, reg); |
+ } else { |
+ // Emit a branch to set a register to either 1 or 0. |
+ Label set; |
+ __ j(cc, &set, Label::kNear); |
+ __ mov(reg, Immediate(0)); |
+ __ jmp(&done, Label::kNear); |
+ __ bind(&set); |
+ __ mov(reg, Immediate(1)); |
+ } |
+ __ bind(&done); |
+} |
+ |
+ |
+// The calling convention for JSFunctions on IA32 passes arguments on the |
+// stack and the JSFunction and context in EDI and ESI, respectively. The |
+// steps of the call look as follows: |
+ |
+// --{ before the call instruction }-------------------------------------------- |
+// | caller frame | |
+// ^ esp ^ ebp |
+ |
+// --{ push arguments and setup ESI, EDI }-------------------------------------- |
+// | args + receiver | caller frame | |
+// ^ esp ^ ebp |
+// [edi = JSFunction, esi = context] |
+ |
+// --{ call [edi + kCodeEntryOffset] }------------------------------------------ |
+// | RET | args + receiver | caller frame | |
+// ^ esp ^ ebp |
+ |
+// =={ prologue of called function }============================================ |
+// --{ push ebp }--------------------------------------------------------------- |
+// | FP | RET | args + receiver | caller frame | |
+// ^ esp ^ ebp |
+ |
+// --{ mov ebp, esp }----------------------------------------------------------- |
+// | FP | RET | args + receiver | caller frame | |
+// ^ ebp,esp |
+ |
+// --{ push esi }--------------------------------------------------------------- |
+// | CTX | FP | RET | args + receiver | caller frame | |
+// ^esp ^ ebp |
+ |
+// --{ push edi }--------------------------------------------------------------- |
+// | FNC | CTX | FP | RET | args + receiver | caller frame | |
+// ^esp ^ ebp |
+ |
+// --{ subi esp, #N }----------------------------------------------------------- |
+// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame | |
+// ^esp ^ ebp |
+ |
+// =={ body of called function }================================================ |
+ |
+// =={ epilogue of called function }============================================ |
+// --{ mov esp, ebp }----------------------------------------------------------- |
+// | FP | RET | args + receiver | caller frame | |
+// ^ esp,ebp |
+ |
+// --{ pop ebp }---------------------------------------------------------------- |
+// | | RET | args + receiver | caller frame | |
+// ^ esp ^ ebp |
+ |
+// --{ ret #A+1 }--------------------------------------------------------------- |
+// | | caller frame | |
+// ^ esp ^ ebp |
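+ |
+// Consequently, inside a JSFunction frame the incoming state is addressed |
+// relative to ebp: the return address is at [ebp + 4], the most recently |
+// pushed argument at [ebp + 8], and the receiver at |
+// [ebp + (#parameters + 2) * kPointerSize] (see AssemblePrologue below). |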
+ |
+ |
+// Runtime function calls are accomplished by making a stub call to the |
+// CEntryStub (a real code object). On IA32 it passes arguments on the |
+// stack, the number of arguments in EAX, the address of the runtime function |
+// in EBX, and the context in ESI. |
+ |
+// --{ before the call instruction }-------------------------------------------- |
+// | caller frame | |
+// ^ esp ^ ebp |
+ |
+// --{ push arguments and setup EAX, EBX, and ESI }----------------------------- |
+// | args + receiver | caller frame | |
+// ^ esp ^ ebp |
+// [eax = #args, ebx = runtime function, esi = context] |
+ |
+// --{ call #CEntryStub }------------------------------------------------------- |
+// | RET | args + receiver | caller frame | |
+// ^ esp ^ ebp |
+ |
+// =={ body of runtime function }=============================================== |
+ |
+// --{ runtime returns }-------------------------------------------------------- |
+// | caller frame | |
+// ^ esp ^ ebp |
+ |
+// Other custom linkages (e.g. for calling directly into and out of C++) may |
+// need to save callee-saved registers on the stack, which is done in the |
+// function prologue of generated code. |
+ |
+// --{ before the call instruction }-------------------------------------------- |
+// | caller frame | |
+// ^ esp ^ ebp |
+ |
+// --{ set up arguments in registers and on the stack }------------------------- |
+// | args | caller frame | |
+// ^ esp ^ ebp |
+// [r0 = arg0, r1 = arg1, ...] |
+ |
+// --{ call code }-------------------------------------------------------------- |
+// | RET | args | caller frame | |
+// ^ esp ^ ebp |
+ |
+// =={ prologue of called function }============================================ |
+// --{ push ebp }--------------------------------------------------------------- |
+// | FP | RET | args | caller frame | |
+// ^ esp ^ ebp |
+ |
+// --{ mov ebp, esp }----------------------------------------------------------- |
+// | FP | RET | args | caller frame | |
+// ^ ebp,esp |
+ |
+// --{ save registers }--------------------------------------------------------- |
+// | regs | FP | RET | args | caller frame | |
+// ^ esp ^ ebp |
+ |
+// --{ subi esp, #N }----------------------------------------------------------- |
+// | callee frame | regs | FP | RET | args | caller frame | |
+// ^esp ^ ebp |
+ |
+// =={ body of called function }================================================ |
+ |
+// =={ epilogue of called function }============================================ |
+// --{ restore registers }------------------------------------------------------ |
+// | regs | FP | RET | args | caller frame | |
+// ^ esp ^ ebp |
+ |
+// --{ mov esp, ebp }----------------------------------------------------------- |
+// | FP | RET | args | caller frame | |
+// ^ esp,ebp |
+ |
+// --{ pop ebp }---------------------------------------------------------------- |
+// | RET | args | caller frame | |
+// ^ esp ^ ebp |
+ |
+ |
+void CodeGenerator::AssemblePrologue() { |
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); |
+ Frame* frame = code_->frame(); |
+ int stack_slots = frame->GetSpillSlotCount(); |
+ if (descriptor->kind() == CallDescriptor::kCallAddress) { |
+    // Assemble a prologue similar to the cdecl calling convention. |
+ __ push(ebp); |
+ __ mov(ebp, esp); |
+ const RegList saves = descriptor->CalleeSavedRegisters(); |
+ if (saves != 0) { // Save callee-saved registers. |
+ int register_save_area_size = 0; |
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) { |
+ if (!((1 << i) & saves)) continue; |
+ __ push(Register::from_code(i)); |
+ register_save_area_size += kPointerSize; |
+ } |
+ frame->SetRegisterSaveAreaSize(register_save_area_size); |
+ } |
+ } else if (descriptor->IsJSFunctionCall()) { |
+ CompilationInfo* info = linkage()->info(); |
+ __ Prologue(info->IsCodePreAgingActive()); |
+ frame->SetRegisterSaveAreaSize( |
+ StandardFrameConstants::kFixedFrameSizeFromFp); |
+ |
+ // Sloppy mode functions and builtins need to replace the receiver with the |
+ // global proxy when called as functions (without an explicit receiver |
+ // object). |
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC? |
+ if (info->strict_mode() == SLOPPY && !info->is_native()) { |
+ Label ok; |
+ // +2 for return address and saved frame pointer. |
+ int receiver_slot = info->scope()->num_parameters() + 2; |
+ __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize)); |
+ __ cmp(ecx, isolate()->factory()->undefined_value()); |
+ __ j(not_equal, &ok, Label::kNear); |
+ __ mov(ecx, GlobalObjectOperand()); |
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset)); |
+ __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx); |
+ __ bind(&ok); |
+ } |
+ |
+ } else { |
+ __ StubPrologue(); |
+ frame->SetRegisterSaveAreaSize( |
+ StandardFrameConstants::kFixedFrameSizeFromFp); |
+ } |
+ if (stack_slots > 0) { |
+ __ sub(esp, Immediate(stack_slots * kPointerSize)); |
+ } |
+} |
+ |
+ |
+void CodeGenerator::AssembleReturn() { |
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); |
+ if (descriptor->kind() == CallDescriptor::kCallAddress) { |
+ const RegList saves = descriptor->CalleeSavedRegisters(); |
+ if (frame()->GetRegisterSaveAreaSize() > 0) { |
+ // Remove this frame's spill slots first. |
+ int stack_slots = frame()->GetSpillSlotCount(); |
+ if (stack_slots > 0) { |
+ __ add(esp, Immediate(stack_slots * kPointerSize)); |
+ } |
+ // Restore registers. |
+ if (saves != 0) { |
+ for (int i = 0; i < Register::kNumRegisters; i++) { |
+ if (!((1 << i) & saves)) continue; |
+ __ pop(Register::from_code(i)); |
+ } |
+ } |
+ __ pop(ebp); // Pop caller's frame pointer. |
+ __ ret(0); |
+ } else { |
+ // No saved registers. |
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer. |
+ __ pop(ebp); // Pop caller's frame pointer. |
+ __ ret(0); |
+ } |
+ } else { |
+ __ mov(esp, ebp); // Move stack pointer back to frame pointer. |
+ __ pop(ebp); // Pop caller's frame pointer. |
+ int pop_count = |
+ descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0; |
+ __ ret(pop_count * kPointerSize); |
+ } |
+} |
+ |
+ |
+void CodeGenerator::AssembleMove(InstructionOperand* source, |
+ InstructionOperand* destination) { |
+ IA32OperandConverter g(this, NULL); |
+ // Dispatch on the source and destination operand kinds. Not all |
+ // combinations are possible. |
+ if (source->IsRegister()) { |
+ ASSERT(destination->IsRegister() || destination->IsStackSlot()); |
+ Register src = g.ToRegister(source); |
+ Operand dst = g.ToOperand(destination); |
+ __ mov(dst, src); |
+ } else if (source->IsStackSlot()) { |
+ ASSERT(destination->IsRegister() || destination->IsStackSlot()); |
+ Operand src = g.ToOperand(source); |
+ if (destination->IsRegister()) { |
+ Register dst = g.ToRegister(destination); |
+ __ mov(dst, src); |
+ } else { |
+ Operand dst = g.ToOperand(destination); |
+ __ push(src); |
+ __ pop(dst); |
+ } |
+ } else if (source->IsConstant()) { |
+ Constant src_constant = g.ToConstant(source); |
+ if (src_constant.type() == Constant::kHeapObject) { |
+ Handle<HeapObject> src = src_constant.ToHeapObject(); |
+ if (destination->IsRegister()) { |
+ Register dst = g.ToRegister(destination); |
+ __ LoadHeapObject(dst, src); |
+ } else { |
+ ASSERT(destination->IsStackSlot()); |
+ Operand dst = g.ToOperand(destination); |
+ AllowDeferredHandleDereference embedding_raw_address; |
+ if (isolate()->heap()->InNewSpace(*src)) { |
+ __ PushHeapObject(src); |
+ __ pop(dst); |
+ } else { |
+ __ mov(dst, src); |
+ } |
+ } |
+ } else if (destination->IsRegister()) { |
+ Register dst = g.ToRegister(destination); |
+ __ mov(dst, g.ToImmediate(source)); |
+ } else if (destination->IsStackSlot()) { |
+ Operand dst = g.ToOperand(destination); |
+ __ mov(dst, g.ToImmediate(source)); |
+ } else { |
+ double v = g.ToDouble(source); |
+ uint64_t int_val = BitCast<uint64_t, double>(v); |
+ int32_t lower = static_cast<int32_t>(int_val); |
+ int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt); |
+ if (destination->IsDoubleRegister()) { |
+ XMMRegister dst = g.ToDoubleRegister(destination); |
+ __ Move(dst, v); |
+ } else { |
+ ASSERT(destination->IsDoubleStackSlot()); |
+ Operand dst0 = g.ToOperand(destination); |
+ Operand dst1 = g.HighOperand(destination); |
+ __ mov(dst0, Immediate(lower)); |
+ __ mov(dst1, Immediate(upper)); |
+ } |
+ } |
+ } else if (source->IsDoubleRegister()) { |
+ XMMRegister src = g.ToDoubleRegister(source); |
+ if (destination->IsDoubleRegister()) { |
+ XMMRegister dst = g.ToDoubleRegister(destination); |
+ __ movaps(dst, src); |
+ } else { |
+ ASSERT(destination->IsDoubleStackSlot()); |
+ Operand dst = g.ToOperand(destination); |
+ __ movsd(dst, src); |
+ } |
+ } else if (source->IsDoubleStackSlot()) { |
+ ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); |
+ Operand src = g.ToOperand(source); |
+ if (destination->IsDoubleRegister()) { |
+ XMMRegister dst = g.ToDoubleRegister(destination); |
+ __ movsd(dst, src); |
+ } else { |
+ // We rely on having xmm0 available as a fixed scratch register. |
+ Operand dst = g.ToOperand(destination); |
+ __ movsd(xmm0, src); |
+ __ movsd(dst, xmm0); |
+ } |
+ } else { |
+ UNREACHABLE(); |
+ } |
+} |
+ |
+ |
+void CodeGenerator::AssembleSwap(InstructionOperand* source, |
+ InstructionOperand* destination) { |
+ IA32OperandConverter g(this, NULL); |
+ // Dispatch on the source and destination operand kinds. Not all |
+ // combinations are possible. |
+ if (source->IsRegister() && destination->IsRegister()) { |
+ // Register-register. |
+ Register src = g.ToRegister(source); |
+ Register dst = g.ToRegister(destination); |
+ __ xchg(dst, src); |
+ } else if (source->IsRegister() && destination->IsStackSlot()) { |
+ // Register-memory. |
+ __ xchg(g.ToRegister(source), g.ToOperand(destination)); |
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) { |
+ // Memory-memory. |
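+    // Swap through the stack: after the two pushes the values sit on top of |
+    // the stack in reverse order, so the two pops cross them over. |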
+ Operand src = g.ToOperand(source); |
+ Operand dst = g.ToOperand(destination); |
+ __ push(dst); |
+ __ push(src); |
+ __ pop(dst); |
+ __ pop(src); |
+ } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { |
+ // XMM register-register swap. We rely on having xmm0 |
+ // available as a fixed scratch register. |
+ XMMRegister src = g.ToDoubleRegister(source); |
+ XMMRegister dst = g.ToDoubleRegister(destination); |
+ __ movaps(xmm0, src); |
+ __ movaps(src, dst); |
+ __ movaps(dst, xmm0); |
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) { |
+ // XMM register-memory swap. We rely on having xmm0 |
+ // available as a fixed scratch register. |
+ XMMRegister reg = g.ToDoubleRegister(source); |
+ Operand other = g.ToOperand(destination); |
+ __ movsd(xmm0, other); |
+ __ movsd(other, reg); |
+ __ movaps(reg, xmm0); |
+ } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { |
+ // Double-width memory-to-memory. |
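+    // xmm0 preserves the 64-bit destination while its two 32-bit halves are |
+    // overwritten with the source via push/pop; the saved value then lands |
+    // in the source slot. |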
+ Operand src0 = g.ToOperand(source); |
+ Operand src1 = g.HighOperand(source); |
+ Operand dst0 = g.ToOperand(destination); |
+ Operand dst1 = g.HighOperand(destination); |
+ __ movsd(xmm0, dst0); // Save destination in xmm0. |
+ __ push(src0); // Then use stack to copy source to destination. |
+ __ pop(dst0); |
+ __ push(src1); |
+ __ pop(dst1); |
+ __ movsd(src0, xmm0); |
+ } else { |
+ // No other combinations are possible. |
+ UNREACHABLE(); |
+ } |
+} |
+ |
+ |
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); } |
+ |
+#undef __ |
+ |
+#ifdef DEBUG |
+ |
+// Checks whether the code between start_pc and end_pc is a single nop. |
+bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc, |
+ int end_pc) { |
+ if (start_pc + 1 != end_pc) { |
+ return false; |
+ } |
+ return *(code->instruction_start() + start_pc) == |
+ v8::internal::Assembler::kNopByte; |
+} |
+ |
+#endif // DEBUG |
+}  // namespace compiler |
+}  // namespace internal |
+}  // namespace v8 |