| Index: src/compiler/x64/code-generator-x64.cc
|
| diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
|
| index fdf63855d59cac64d5f9083657a8688d769c4347..e6dfaf1e7785051423cf910210c20c894e299400 100644
|
| --- a/src/compiler/x64/code-generator-x64.cc
|
| +++ b/src/compiler/x64/code-generator-x64.cc
|
| @@ -19,130 +19,34 @@ namespace compiler {
|
| #define __ masm()->
|
|
|
|
|
| -// TODO(turbofan): Cleanup these hacks.
|
| -enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };
|
| -
|
| -
|
| -struct Immediate64 {
|
| - uint64_t value;
|
| - Handle<Object> handle;
|
| - ExternalReference reference;
|
| - Immediate64Type type;
|
| -};
|
| -
|
| -
|
| -enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };
|
| -
|
| -
|
| -struct RegisterOrOperand {
|
| - RegisterOrOperand() : operand(no_reg, 0) {}
|
| - Register reg;
|
| - DoubleRegister double_reg;
|
| - Operand operand;
|
| - RegisterOrOperandType type;
|
| -};
|
| -
|
| -
|
| // Adds X64 specific methods for decoding operands.
|
| class X64OperandConverter : public InstructionOperandConverter {
|
| public:
|
| X64OperandConverter(CodeGenerator* gen, Instruction* instr)
|
| : InstructionOperandConverter(gen, instr) {}
|
|
|
| - RegisterOrOperand InputRegisterOrOperand(int index) {
|
| - return ToRegisterOrOperand(instr_->InputAt(index));
|
| - }
|
| -
|
| Immediate InputImmediate(int index) {
|
| return ToImmediate(instr_->InputAt(index));
|
| }
|
|
|
| - RegisterOrOperand OutputRegisterOrOperand() {
|
| - return ToRegisterOrOperand(instr_->Output());
|
| - }
|
| -
|
| - Immediate64 InputImmediate64(int index) {
|
| - return ToImmediate64(instr_->InputAt(index));
|
| - }
|
| + Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
|
|
|
| - Immediate64 ToImmediate64(InstructionOperand* operand) {
|
| - Constant constant = ToConstant(operand);
|
| - Immediate64 immediate;
|
| - immediate.value = 0xbeefdeaddeefbeed;
|
| - immediate.type = kImm64Value;
|
| - switch (constant.type()) {
|
| - case Constant::kInt32:
|
| - case Constant::kInt64:
|
| - immediate.value = constant.ToInt64();
|
| - return immediate;
|
| - case Constant::kFloat32:
|
| - immediate.type = kImm64Handle;
|
| - immediate.handle =
|
| - isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED);
|
| - return immediate;
|
| - case Constant::kFloat64:
|
| - immediate.type = kImm64Handle;
|
| - immediate.handle =
|
| - isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
|
| - return immediate;
|
| - case Constant::kExternalReference:
|
| - immediate.type = kImm64Reference;
|
| - immediate.reference = constant.ToExternalReference();
|
| - return immediate;
|
| - case Constant::kHeapObject:
|
| - immediate.type = kImm64Handle;
|
| - immediate.handle = constant.ToHeapObject();
|
| - return immediate;
|
| - }
|
| - UNREACHABLE();
|
| - return immediate;
|
| - }
|
| + Operand OutputOperand() { return ToOperand(instr_->Output()); }
|
|
|
| Immediate ToImmediate(InstructionOperand* operand) {
|
| Constant constant = ToConstant(operand);
|
| - switch (constant.type()) {
|
| - case Constant::kInt32:
|
| - return Immediate(constant.ToInt32());
|
| - case Constant::kInt64:
|
| - case Constant::kFloat32:
|
| - case Constant::kFloat64:
|
| - case Constant::kExternalReference:
|
| - case Constant::kHeapObject:
|
| - break;
|
| + if (constant.type() == Constant::kInt32) {
|
| + return Immediate(constant.ToInt32());
|
| }
|
| UNREACHABLE();
|
| return Immediate(-1);
|
| }
|
|
|
| Operand ToOperand(InstructionOperand* op, int extra = 0) {
|
| - RegisterOrOperand result = ToRegisterOrOperand(op, extra);
|
| - DCHECK_EQ(kOperand, result.type);
|
| - return result.operand;
|
| - }
|
| -
|
| - RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
|
| - RegisterOrOperand result;
|
| - if (op->IsRegister()) {
|
| - DCHECK(extra == 0);
|
| - result.type = kRegister;
|
| - result.reg = ToRegister(op);
|
| - return result;
|
| - } else if (op->IsDoubleRegister()) {
|
| - DCHECK(extra == 0);
|
| - result.type = kDoubleRegister;
|
| - result.double_reg = ToDoubleRegister(op);
|
| - return result;
|
| - }
|
| -
|
| DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
|
| -
|
| - result.type = kOperand;
|
| // The linkage computes where all spill slots are located.
|
| FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
|
| - result.operand =
|
| - Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
|
| - return result;
|
| + return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
|
| }
|
|
|
| static int NextOffset(int* offset) {
|
| @@ -232,23 +136,51 @@ static bool HasImmediateInput(Instruction* instr, int index) {
|
| }
|
|
|
|
|
| -#define ASSEMBLE_BINOP(asm_instr) \
|
| - do { \
|
| - if (HasImmediateInput(instr, 1)) { \
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0); \
|
| - if (input.type == kRegister) { \
|
| - __ asm_instr(input.reg, i.InputImmediate(1)); \
|
| - } else { \
|
| - __ asm_instr(input.operand, i.InputImmediate(1)); \
|
| - } \
|
| - } else { \
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(1); \
|
| - if (input.type == kRegister) { \
|
| - __ asm_instr(i.InputRegister(0), input.reg); \
|
| - } else { \
|
| - __ asm_instr(i.InputRegister(0), input.operand); \
|
| - } \
|
| - } \
|
| +#define ASSEMBLE_UNOP(asm_instr) \
|
| + do { \
|
| + if (instr->Output()->IsRegister()) { \
|
| + __ asm_instr(i.OutputRegister()); \
|
| + } else { \
|
| + __ asm_instr(i.OutputOperand()); \
|
| + } \
|
| + } while (0)
|
| +
|
| +
|
| +#define ASSEMBLE_BINOP(asm_instr) \
|
| + do { \
|
| + if (HasImmediateInput(instr, 1)) { \
|
| + if (instr->InputAt(0)->IsRegister()) { \
|
| + __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
|
| + } else { \
|
| + __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
|
| + } \
|
| + } else { \
|
| + if (instr->InputAt(1)->IsRegister()) { \
|
| + __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
|
| + } else { \
|
| + __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
|
| + } \
|
| + } \
|
| + } while (0)
|
| +
|
| +
|
| +#define ASSEMBLE_MULT(asm_instr) \
|
| + do { \
|
| + if (HasImmediateInput(instr, 1)) { \
|
| + if (instr->InputAt(0)->IsRegister()) { \
|
| + __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
|
| + i.InputImmediate(1)); \
|
| + } else { \
|
| + __ asm_instr(i.OutputRegister(), i.InputOperand(0), \
|
| + i.InputImmediate(1)); \
|
| + } \
|
| + } else { \
|
| + if (instr->InputAt(1)->IsRegister()) { \
|
| + __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \
|
| + } else { \
|
| + __ asm_instr(i.OutputRegister(), i.InputOperand(1)); \
|
| + } \
|
| + } \
|
| } while (0)
|
|
|
|
|
| @@ -335,38 +267,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
| ASSEMBLE_BINOP(testq);
|
| break;
|
| case kX64Imul32:
|
| - if (HasImmediateInput(instr, 1)) {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0);
|
| - if (input.type == kRegister) {
|
| - __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
|
| - } else {
|
| - __ imull(i.OutputRegister(), input.operand, i.InputImmediate(1));
|
| - }
|
| - } else {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(1);
|
| - if (input.type == kRegister) {
|
| - __ imull(i.OutputRegister(), input.reg);
|
| - } else {
|
| - __ imull(i.OutputRegister(), input.operand);
|
| - }
|
| - }
|
| + ASSEMBLE_MULT(imull);
|
| break;
|
| case kX64Imul:
|
| - if (HasImmediateInput(instr, 1)) {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0);
|
| - if (input.type == kRegister) {
|
| - __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
|
| - } else {
|
| - __ imulq(i.OutputRegister(), input.operand, i.InputImmediate(1));
|
| - }
|
| - } else {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(1);
|
| - if (input.type == kRegister) {
|
| - __ imulq(i.OutputRegister(), input.reg);
|
| - } else {
|
| - __ imulq(i.OutputRegister(), input.operand);
|
| - }
|
| - }
|
| + ASSEMBLE_MULT(imulq);
|
| break;
|
| case kX64Idiv32:
|
| __ cdq();
|
| @@ -384,42 +288,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
| __ xorq(rdx, rdx);
|
| __ divq(i.InputRegister(1));
|
| break;
|
| - case kX64Not: {
|
| - RegisterOrOperand output = i.OutputRegisterOrOperand();
|
| - if (output.type == kRegister) {
|
| - __ notq(output.reg);
|
| - } else {
|
| - __ notq(output.operand);
|
| - }
|
| + case kX64Not:
|
| + ASSEMBLE_UNOP(notq);
|
| break;
|
| - }
|
| - case kX64Not32: {
|
| - RegisterOrOperand output = i.OutputRegisterOrOperand();
|
| - if (output.type == kRegister) {
|
| - __ notl(output.reg);
|
| - } else {
|
| - __ notl(output.operand);
|
| - }
|
| + case kX64Not32:
|
| + ASSEMBLE_UNOP(notl);
|
| break;
|
| - }
|
| - case kX64Neg: {
|
| - RegisterOrOperand output = i.OutputRegisterOrOperand();
|
| - if (output.type == kRegister) {
|
| - __ negq(output.reg);
|
| - } else {
|
| - __ negq(output.operand);
|
| - }
|
| + case kX64Neg:
|
| + ASSEMBLE_UNOP(negq);
|
| break;
|
| - }
|
| - case kX64Neg32: {
|
| - RegisterOrOperand output = i.OutputRegisterOrOperand();
|
| - if (output.type == kRegister) {
|
| - __ negl(output.reg);
|
| - } else {
|
| - __ negl(output.operand);
|
| - }
|
| + case kX64Neg32:
|
| + ASSEMBLE_UNOP(negl);
|
| break;
|
| - }
|
| case kX64Or32:
|
| ASSEMBLE_BINOP(orl);
|
| break;
|
| @@ -456,15 +336,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
| case kX64Ror:
|
| ASSEMBLE_SHIFT(rorq, 6);
|
| break;
|
| - case kSSEFloat64Cmp: {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(1);
|
| - if (input.type == kDoubleRegister) {
|
| - __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
|
| + case kSSEFloat64Cmp:
|
| + if (instr->InputAt(1)->IsDoubleRegister()) {
|
| + __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
|
| } else {
|
| - __ ucomisd(i.InputDoubleRegister(0), input.operand);
|
| + __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
|
| }
|
| break;
|
| - }
|
| case kSSEFloat64Add:
|
| __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
|
| break;
|
| @@ -508,56 +386,47 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
| __ addq(rsp, Immediate(kDoubleSize));
|
| break;
|
| }
|
| - case kSSEFloat64Sqrt: {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0);
|
| - if (input.type == kDoubleRegister) {
|
| - __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
|
| + case kSSEFloat64Sqrt:
|
| + if (instr->InputAt(0)->IsDoubleRegister()) {
|
| + __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
|
| } else {
|
| - __ sqrtsd(i.OutputDoubleRegister(), input.operand);
|
| + __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
|
| }
|
| break;
|
| - }
|
| case kSSECvtss2sd:
|
| __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
|
| break;
|
| case kSSECvtsd2ss:
|
| __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
|
| break;
|
| - case kSSEFloat64ToInt32: {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0);
|
| - if (input.type == kDoubleRegister) {
|
| - __ cvttsd2si(i.OutputRegister(), input.double_reg);
|
| + case kSSEFloat64ToInt32:
|
| + if (instr->InputAt(0)->IsDoubleRegister()) {
|
| + __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
|
| } else {
|
| - __ cvttsd2si(i.OutputRegister(), input.operand);
|
| + __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
|
| }
|
| break;
|
| - }
|
| - case kSSEFloat64ToUint32: {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0);
|
| - if (input.type == kDoubleRegister) {
|
| - __ cvttsd2siq(i.OutputRegister(), input.double_reg);
|
| + case kSSEFloat64ToUint32:
|
| + if (instr->InputAt(0)->IsDoubleRegister()) {
|
| + __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
|
| } else {
|
| - __ cvttsd2siq(i.OutputRegister(), input.operand);
|
| + __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
|
| }
|
| __ andl(i.OutputRegister(), i.OutputRegister()); // clear upper bits.
|
| // TODO(turbofan): generated code should not look at the upper 32 bits
|
| // of the result, but those bits could escape to the outside world.
|
| break;
|
| - }
|
| - case kSSEInt32ToFloat64: {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0);
|
| - if (input.type == kRegister) {
|
| - __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
|
| + case kSSEInt32ToFloat64:
|
| + if (instr->InputAt(0)->IsRegister()) {
|
| + __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
|
| } else {
|
| - __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
|
| + __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
|
| }
|
| break;
|
| - }
|
| - case kSSEUint32ToFloat64: {
|
| + case kSSEUint32ToFloat64:
|
| // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
|
| __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
|
| break;
|
| - }
|
| case kX64Movsxbl:
|
| __ movsxbl(i.OutputRegister(), i.MemoryOperand());
|
| break;
|
| @@ -593,11 +462,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
| case kX64Movl:
|
| if (instr->HasOutput()) {
|
| if (instr->addressing_mode() == kMode_None) {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0);
|
| - if (input.type == kRegister) {
|
| - __ movl(i.OutputRegister(), input.reg);
|
| + if (instr->InputAt(0)->IsRegister()) {
|
| + __ movl(i.OutputRegister(), i.InputRegister(0));
|
| } else {
|
| - __ movl(i.OutputRegister(), input.operand);
|
| + __ movl(i.OutputRegister(), i.InputOperand(0));
|
| }
|
| } else {
|
| __ movl(i.OutputRegister(), i.MemoryOperand());
|
| @@ -613,11 +481,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
| }
|
| break;
|
| case kX64Movsxlq: {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0);
|
| - if (input.type == kRegister) {
|
| - __ movsxlq(i.OutputRegister(), input.reg);
|
| + if (instr->InputAt(0)->IsRegister()) {
|
| + __ movsxlq(i.OutputRegister(), i.InputRegister(0));
|
| } else {
|
| - __ movsxlq(i.OutputRegister(), input.operand);
|
| + __ movsxlq(i.OutputRegister(), i.InputOperand(0));
|
| }
|
| break;
|
| }
|
| @@ -656,11 +523,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
|
| if (HasImmediateInput(instr, 0)) {
|
| __ pushq(i.InputImmediate(0));
|
| } else {
|
| - RegisterOrOperand input = i.InputRegisterOrOperand(0);
|
| - if (input.type == kRegister) {
|
| - __ pushq(input.reg);
|
| + if (instr->InputAt(0)->IsRegister()) {
|
| + __ pushq(i.InputRegister(0));
|
| } else {
|
| - __ pushq(input.operand);
|
| + __ pushq(i.InputOperand(0));
|
| }
|
| }
|
| break;
|
| @@ -968,16 +834,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
| if (destination->IsRegister() || destination->IsStackSlot()) {
|
| Register dst = destination->IsRegister() ? g.ToRegister(destination)
|
| : kScratchRegister;
|
| - Immediate64 imm = g.ToImmediate64(constant_source);
|
| - switch (imm.type) {
|
| - case kImm64Value:
|
| - __ Set(dst, imm.value);
|
| + switch (src.type()) {
|
| + case Constant::kInt32:
|
| + // TODO(dcarney): don't need scratch in this case.
|
| + __ movq(dst, Immediate(src.ToInt32()));
|
| + break;
|
| + case Constant::kInt64:
|
| + __ Set(dst, src.ToInt64());
|
| + break;
|
| + case Constant::kFloat32:
|
| + __ Move(dst,
|
| + isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
|
| + break;
|
| + case Constant::kFloat64:
|
| + __ Move(dst,
|
| + isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
|
| break;
|
| - case kImm64Reference:
|
| - __ Move(dst, imm.reference);
|
| + case Constant::kExternalReference:
|
| + __ Move(dst, src.ToExternalReference());
|
| break;
|
| - case kImm64Handle:
|
| - __ Move(dst, imm.handle);
|
| + case Constant::kHeapObject:
|
| + __ Move(dst, src.ToHeapObject());
|
| break;
|
| }
|
| if (destination->IsStackSlot()) {
|
|
|