Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(120)

Unified Diff: src/compiler/x87/code-generator-x87.cc

Issue 1179763004: X87: enable the X87 turbofan support. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « src/compiler/x87/OWNERS ('k') | src/compiler/x87/instruction-codes-x87.h » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: src/compiler/x87/code-generator-x87.cc
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/x87/code-generator-x87.cc
similarity index 65%
copy from src/compiler/ia32/code-generator-ia32.cc
copy to src/compiler/x87/code-generator-x87.cc
index 53b4ebe7cecff7819022b4335a017525559b6121..a1ce22fc012422283d88db751c4cd5040ccc7b99 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -7,9 +7,9 @@
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
-#include "src/ia32/assembler-ia32.h"
-#include "src/ia32/macro-assembler-ia32.h"
#include "src/scopes.h"
+#include "src/x87/assembler-x87.h"
+#include "src/x87/macro-assembler-x87.h"
namespace v8 {
namespace internal {
@@ -18,13 +18,10 @@ namespace compiler {
#define __ masm()->
-#define kScratchDoubleReg xmm0
-
-
-// Adds IA-32 specific methods for decoding operands.
-class IA32OperandConverter : public InstructionOperandConverter {
+// Adds X87 specific methods for decoding operands.
+class X87OperandConverter : public InstructionOperandConverter {
public:
- IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
+ X87OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
Operand InputOperand(size_t index, int extra = 0) {
@@ -43,7 +40,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
DCHECK(extra == 0);
- return Operand(ToDoubleRegister(op));
+ UNIMPLEMENTED();
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
// The linkage computes where all spill slots are located.
@@ -187,32 +184,39 @@ class OutOfLineLoadInteger final : public OutOfLineCode {
class OutOfLineLoadFloat final : public OutOfLineCode {
public:
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
+ OutOfLineLoadFloat(CodeGenerator* gen, X87Register result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() final { __ pcmpeqd(result_, result_); }
+ void Generate() final {
+ DCHECK(result_.code() == 0);
+ USE(result_);
+ __ fstp(0);
+ __ push(Immediate(0xffffffff));
+ __ push(Immediate(0x7fffffff));
+ __ fld_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ }
private:
- XMMRegister const result_;
+ X87Register const result_;
};
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
- XMMRegister input)
+ X87Register input)
: OutOfLineCode(gen), result_(result), input_(input) {}
void Generate() final {
- __ sub(esp, Immediate(kDoubleSize));
- __ movsd(MemOperand(esp, 0), input_);
- __ SlowTruncateToI(result_, esp, 0);
- __ add(esp, Immediate(kDoubleSize));
+ UNIMPLEMENTED();
+ USE(result_);
+ USE(input_);
}
private:
Register const result_;
- XMMRegister const input_;
+ X87Register const input_;
};
} // namespace
@@ -222,6 +226,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
do { \
auto result = i.OutputDoubleRegister(); \
auto offset = i.InputRegister(0); \
+ DCHECK(result.code() == 0); \
if (instr->InputAt(1)->IsRegister()) { \
__ cmp(offset, i.InputRegister(1)); \
} else { \
@@ -229,7 +234,8 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
} \
OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
__ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
+ __ fstp(0); \
+ __ asm_instr(i.MemoryOperand(2)); \
__ bind(ool->exit()); \
} while (false)
@@ -250,18 +256,19 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
} while (false)
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
- __ bind(&done); \
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ Label done; \
+ DCHECK(i.InputDoubleRegister(2).code() == 0); \
+ __ j(above_equal, &done, Label::kNear); \
+ __ asm_instr(i.MemoryOperand(3)); \
+ __ bind(&done); \
} while (false)
@@ -303,7 +310,7 @@ void CodeGenerator::AssembleDeconstructActivationRecord() {
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
- IA32OperandConverter i(this, instr);
+ X87OperandConverter i(this, instr);
switch (ArchOpcodeField::decode(instr->opcode())) {
case kArchCallCodeObject: {
@@ -316,6 +323,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
}
RecordCallPosition(instr);
+ bool double_result =
+ instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ if (double_result) {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ }
+ __ fninit();
+ if (double_result) {
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ } else {
+ __ fld1();
+ }
break;
}
case kArchTailCallCodeObject: {
@@ -339,6 +359,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
RecordCallPosition(instr);
+ bool double_result =
+ instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ if (double_result) {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ }
+ __ fninit();
+ if (double_result) {
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ } else {
+ __ fld1();
+ }
break;
}
case kArchTailCallJSFunction: {
@@ -373,405 +406,621 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchRet:
AssembleReturn();
break;
- case kArchStackPointer:
- __ mov(i.OutputRegister(), esp);
- break;
case kArchFramePointer:
__ mov(i.OutputRegister(), ebp);
break;
+ case kArchStackPointer:
+ __ mov(i.OutputRegister(), esp);
+ break;
case kArchTruncateDoubleToI: {
- auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
- auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
- __ cvttsd2si(result, Operand(input));
- __ cmp(result, 1);
- __ j(overflow, ool->entry());
- __ bind(ool->exit());
+ USE(input);
+ DCHECK(input.code() == 0);
+ auto result_reg = i.OutputRegister();
+ __ TruncateX87TOSToI(result_reg);
break;
}
- case kIA32Add:
+ case kX87Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
} else {
__ add(i.InputRegister(0), i.InputOperand(1));
}
break;
- case kIA32And:
+ case kX87And:
if (HasImmediateInput(instr, 1)) {
__ and_(i.InputOperand(0), i.InputImmediate(1));
} else {
__ and_(i.InputRegister(0), i.InputOperand(1));
}
break;
- case kIA32Cmp:
+ case kX87Cmp:
if (HasImmediateInput(instr, 1)) {
__ cmp(i.InputOperand(0), i.InputImmediate(1));
} else {
__ cmp(i.InputRegister(0), i.InputOperand(1));
}
break;
- case kIA32Test:
+ case kX87Test:
if (HasImmediateInput(instr, 1)) {
__ test(i.InputOperand(0), i.InputImmediate(1));
} else {
__ test(i.InputRegister(0), i.InputOperand(1));
}
break;
- case kIA32Imul:
+ case kX87Imul:
if (HasImmediateInput(instr, 1)) {
__ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
} else {
__ imul(i.OutputRegister(), i.InputOperand(1));
}
break;
- case kIA32ImulHigh:
+ case kX87ImulHigh:
__ imul(i.InputRegister(1));
break;
- case kIA32UmulHigh:
+ case kX87UmulHigh:
__ mul(i.InputRegister(1));
break;
- case kIA32Idiv:
+ case kX87Idiv:
__ cdq();
__ idiv(i.InputOperand(1));
break;
- case kIA32Udiv:
+ case kX87Udiv:
__ Move(edx, Immediate(0));
__ div(i.InputOperand(1));
break;
- case kIA32Not:
+ case kX87Not:
__ not_(i.OutputOperand());
break;
- case kIA32Neg:
+ case kX87Neg:
__ neg(i.OutputOperand());
break;
- case kIA32Or:
+ case kX87Or:
if (HasImmediateInput(instr, 1)) {
__ or_(i.InputOperand(0), i.InputImmediate(1));
} else {
__ or_(i.InputRegister(0), i.InputOperand(1));
}
break;
- case kIA32Xor:
+ case kX87Xor:
if (HasImmediateInput(instr, 1)) {
__ xor_(i.InputOperand(0), i.InputImmediate(1));
} else {
__ xor_(i.InputRegister(0), i.InputOperand(1));
}
break;
- case kIA32Sub:
+ case kX87Sub:
if (HasImmediateInput(instr, 1)) {
__ sub(i.InputOperand(0), i.InputImmediate(1));
} else {
__ sub(i.InputRegister(0), i.InputOperand(1));
}
break;
- case kIA32Shl:
+ case kX87Shl:
if (HasImmediateInput(instr, 1)) {
__ shl(i.OutputOperand(), i.InputInt5(1));
} else {
__ shl_cl(i.OutputOperand());
}
break;
- case kIA32Shr:
+ case kX87Shr:
if (HasImmediateInput(instr, 1)) {
__ shr(i.OutputOperand(), i.InputInt5(1));
} else {
__ shr_cl(i.OutputOperand());
}
break;
- case kIA32Sar:
+ case kX87Sar:
if (HasImmediateInput(instr, 1)) {
__ sar(i.OutputOperand(), i.InputInt5(1));
} else {
__ sar_cl(i.OutputOperand());
}
break;
- case kIA32Ror:
+ case kX87Ror:
if (HasImmediateInput(instr, 1)) {
__ ror(i.OutputOperand(), i.InputInt5(1));
} else {
__ ror_cl(i.OutputOperand());
}
break;
- case kIA32Lzcnt:
+ case kX87Lzcnt:
__ Lzcnt(i.OutputRegister(), i.InputOperand(0));
break;
- case kSSEFloat32Cmp:
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Add:
- __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Sub:
- __ subss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Mul:
- __ mulss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Div:
- __ divss(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulss depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
- break;
- case kSSEFloat32Max:
- __ maxss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Min:
- __ minss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Sqrt:
- __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
- break;
- case kSSEFloat32Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ case kX87LoadFloat64Constant: {
+ InstructionOperand* source = instr->InputAt(0);
+ InstructionOperand* destination = instr->Output();
+ DCHECK(source->IsConstant());
+ X87OperandConverter g(this, NULL);
+ Constant src_constant = g.ToConstant(source);
+
+ DCHECK_EQ(Constant::kFloat64, src_constant.type());
+ uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+ uint32_t lower = static_cast<uint32_t>(src);
+ uint32_t upper = static_cast<uint32_t>(src >> 32);
+ if (destination->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ mov(MemOperand(esp, 0), Immediate(lower));
+ __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ UNREACHABLE();
+ }
break;
}
- case kSSEFloat32Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ case kX87Float32Cmp: {
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ FCmp();
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
break;
}
- case kSSEFloat64Cmp:
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Add:
- __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Sub:
- __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Mul:
- __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Div:
- __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
- break;
- case kSSEFloat64Max:
- __ maxsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Min:
- __ minsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Mod: {
- // TODO(dcarney): alignment is wrong.
- __ sub(esp, Immediate(kDoubleSize));
- // Move values to st(0) and st(1).
- __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
- __ fld_d(Operand(esp, 0));
- __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
- __ fld_d(Operand(esp, 0));
- // Loop while fprem isn't done.
- Label mod_loop;
- __ bind(&mod_loop);
- // This instructions traps on all kinds inputs, but we are assuming the
- // floating point control word is set to ignore them all.
- __ fprem();
- // The following 2 instruction implicitly use eax.
- __ fnstsw_ax();
- __ sahf();
- __ j(parity_even, &mod_loop);
- // Move output to stack and clean up.
- __ fstp(1);
- __ fstp_d(Operand(esp, 0));
- __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
+ case kX87Float32Add: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ faddp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
break;
}
- case kSSEFloat64Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
- __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ case kX87Float32Sub: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fsubp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
break;
}
- case kSSEFloat64Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
- __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ case kX87Float32Mul: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fmulp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
break;
}
- case kSSEFloat64Sqrt:
- __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
- break;
- case kSSEFloat64Round: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- RoundingMode const mode =
- static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ case kX87Float32Div: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fdivp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
break;
}
- case kSSEFloat32ToFloat64:
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
- break;
- case kSSEFloat64ToFloat32:
- __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kX87Float32Max: {
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = below;
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+
+ __ fadd(1);
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp(); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear); // left == NaN.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
break;
- case kSSEFloat64ToInt32:
- __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
- break;
- case kSSEFloat64ToUint32: {
- __ Move(kScratchDoubleReg, -2147483648.0);
- __ addsd(kScratchDoubleReg, i.InputOperand(0));
- __ cvttsd2si(i.OutputRegister(), kScratchDoubleReg);
- __ add(i.OutputRegister(), Immediate(0x80000000));
+ }
+ case kX87Float32Min: {
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = above;
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ // At this point, both left and right are either 0 or -0.
+      // Push st0 and st1 onto the stack, pop their bit patterns into a temp
+      // register, combine the sign bits, and load the result back as left.
+ __ push(eax);
+ __ fld(1);
+ __ fld(1);
+ __ sub(esp, Immediate(2 * kPointerSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, kPointerSize));
+ __ pop(eax);
+ __ xor_(MemOperand(esp, 0), eax);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ pop(eax); // restore esp
+ __ pop(eax); // restore esp
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp(); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear); // left == NaN.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
break;
}
- case kSSEInt32ToFloat64:
- __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kX87Float32Sqrt: {
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ fsqrt();
+ __ lea(esp, Operand(esp, kFloatSize));
break;
- case kSSEUint32ToFloat64:
- __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ case kX87Float32Abs: {
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ fabs();
+ __ lea(esp, Operand(esp, kFloatSize));
break;
- case kSSEFloat64ExtractLowWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
- __ mov(i.OutputRegister(), i.InputOperand(0));
- } else {
- __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
- }
+ }
+ case kX87Float64Add: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ faddp();
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
break;
- case kSSEFloat64ExtractHighWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
- __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
- } else {
- __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
- }
+ }
+ case kX87Float64Sub: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fsub_d(MemOperand(esp, 0));
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
break;
- case kSSEFloat64InsertLowWord32:
- __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
+ }
+ case kX87Float64Mul: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fmul_d(MemOperand(esp, 0));
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
break;
- case kSSEFloat64InsertHighWord32:
- __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
+ }
+ case kX87Float64Div: {
+ __ X87SetFPUCW(0x027F);
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fdiv_d(MemOperand(esp, 0));
+ // Clear stack.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ // Restore the default value of control word.
+ __ X87SetFPUCW(0x037F);
break;
- case kSSEFloat64LoadLowWord32:
- __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ case kX87Float64Mod: {
+ FrameScope frame_scope(&masm_, StackFrame::MANUAL);
+ __ mov(eax, esp);
+ __ PrepareCallCFunction(4, eax);
+ __ fstp(0);
+ __ fld_d(MemOperand(eax, 0));
+ __ fstp_d(Operand(esp, 1 * kDoubleSize));
+ __ fld_d(MemOperand(eax, kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+ 4);
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
- case kAVXFloat32Add: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ }
+ case kX87Float64Max: {
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = below;
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fld_d(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+
+ __ fadd(1);
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp(); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear); // left == NaN.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
- case kAVXFloat32Sub: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kX87Float64Min: {
+ Label check_nan_left, check_zero, return_left, return_right;
+ Condition condition = above;
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fld_d(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+ __ j(equal, &check_zero, Label::kNear); // left == right.
+ __ j(condition, &return_left, Label::kNear);
+ __ jmp(&return_right, Label::kNear);
+
+ __ bind(&check_zero);
+ __ fld(0);
+ __ fldz();
+ __ FCmp();
+ __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ // At this point, both left and right are either 0 or -0.
+      // Push st0 and st1 onto the stack, pop their bit patterns into a temp
+      // register, combine the sign bits, and load the result back as left.
+ __ push(eax);
+ __ fld(1);
+ __ fld(1);
+ __ sub(esp, Immediate(2 * kPointerSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, kPointerSize));
+ __ pop(eax);
+ __ xor_(MemOperand(esp, 0), eax);
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ pop(eax); // restore esp
+ __ pop(eax); // restore esp
+ __ jmp(&return_left, Label::kNear);
+
+ __ bind(&check_nan_left);
+ __ fld(0);
+ __ fld(0);
+ __ FCmp(); // NaN check.
+ __ j(parity_even, &return_left, Label::kNear); // left == NaN.
+
+ __ bind(&return_right);
+ __ fxch();
+
+ __ bind(&return_left);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
- case kAVXFloat32Mul: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kX87Float64Abs: {
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fabs();
+ __ lea(esp, Operand(esp, kDoubleSize));
break;
}
- case kAVXFloat32Div: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulss depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ case kX87Int32ToFloat64: {
+ InstructionOperand* input = instr->InputAt(0);
+ DCHECK(input->IsRegister() || input->IsStackSlot());
+ __ fstp(0);
+ if (input->IsRegister()) {
+ Register input_reg = i.InputRegister(0);
+ __ push(input_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(input_reg);
+ } else {
+ __ fild_s(i.InputOperand(0));
+ }
break;
}
- case kAVXFloat32Max: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vmaxss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kX87Float32ToFloat64: {
+ InstructionOperand* input = instr->InputAt(0);
+ if (input->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ DCHECK(input->IsDoubleStackSlot());
+ __ fstp(0);
+ __ fld_s(i.InputOperand(0));
+ }
break;
}
- case kAVXFloat32Min: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vminss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kX87Uint32ToFloat64: {
+ __ fstp(0);
+ __ LoadUint32NoSSE2(i.InputRegister(0));
break;
}
- case kAVXFloat64Add: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kX87Float64ToInt32: {
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_d(i.InputOperand(0));
+ }
+ __ TruncateX87TOSToI(i.OutputRegister(0));
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
break;
}
- case kAVXFloat64Sub: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kX87Float64ToFloat32: {
+ InstructionOperand* input = instr->InputAt(0);
+ if (input->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ DCHECK(input->IsDoubleStackSlot());
+ __ fstp(0);
+ __ fld_d(i.InputOperand(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ fld_s(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ }
break;
}
- case kAVXFloat64Mul: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kX87Float64ToUint32: {
+ __ push_imm32(-2147483648);
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_d(i.InputOperand(0));
+ }
+ __ fild_s(Operand(esp, 0));
+ __ fadd(1);
+ __ fstp(0);
+ __ TruncateX87TOSToI(i.OutputRegister(0));
+ __ add(esp, Immediate(kInt32Size));
+ __ add(i.OutputRegister(), Immediate(0x80000000));
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
break;
}
- case kAVXFloat64Div: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ case kX87Float64ExtractHighWord32: {
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
+ }
break;
}
- case kAVXFloat64Max: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vmaxsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kX87Float64ExtractLowWord32: {
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ mov(i.OutputRegister(), MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ } else {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ __ mov(i.OutputRegister(), i.InputOperand(0));
+ }
break;
}
- case kAVXFloat64Min: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vminsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kX87Float64InsertHighWord32: {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+ __ mov(MemOperand(esp, kDoubleSize / 2), i.InputRegister(1));
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
break;
}
- case kAVXFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ case kX87Float64InsertLowWord32: {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(MemOperand(esp, 0));
+ __ mov(MemOperand(esp, 0), i.InputRegister(1));
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
break;
}
- case kAVXFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ case kX87Float64Sqrt: {
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fsqrt();
+ __ lea(esp, Operand(esp, kDoubleSize));
break;
}
- case kAVXFloat64Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ case kX87Float64Round: {
+ RoundingMode mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ if (mode == MiscField::encode(kRoundDown)) {
+ __ X87SetRC(0x0400);
+ } else {
+ __ X87SetRC(0x0c00);
+ }
+
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ __ fstp(0);
+ __ fld_d(i.InputOperand(0));
+ }
+ __ frndint();
+ __ X87SetRC(0x0000);
break;
}
- case kAVXFloat64Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ case kX87Float64Cmp: {
+ __ fld_d(MemOperand(esp, kDoubleSize));
+ __ fld_d(MemOperand(esp, 0));
+ __ FCmp();
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
- case kIA32Movsxbl:
+ case kX87Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
- case kIA32Movzxbl:
+ case kX87Movzxbl:
__ movzx_b(i.OutputRegister(), i.MemoryOperand());
break;
- case kIA32Movb: {
+ case kX87Movb: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -781,13 +1030,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kIA32Movsxwl:
+ case kX87Movsxwl:
__ movsx_w(i.OutputRegister(), i.MemoryOperand());
break;
- case kIA32Movzxwl:
+ case kX87Movzxwl:
__ movzx_w(i.OutputRegister(), i.MemoryOperand());
break;
- case kIA32Movw: {
+ case kX87Movw: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -797,7 +1046,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kIA32Movl:
+ case kX87Movl:
if (instr->HasOutput()) {
__ mov(i.OutputRegister(), i.MemoryOperand());
} else {
@@ -810,25 +1059,35 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
}
break;
- case kIA32Movsd:
+ case kX87Movsd: {
if (instr->HasOutput()) {
- __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ X87Register output = i.OutputDoubleRegister();
+ USE(output);
+ DCHECK(output.code() == 0);
+ __ fstp(0);
+ __ fld_d(i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movsd(operand, i.InputDoubleRegister(index));
+ __ fst_d(operand);
}
break;
- case kIA32Movss:
+ }
+ case kX87Movss: {
if (instr->HasOutput()) {
- __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+ X87Register output = i.OutputDoubleRegister();
+ USE(output);
+ DCHECK(output.code() == 0);
+ __ fstp(0);
+ __ fld_s(i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movss(operand, i.InputDoubleRegister(index));
+ __ fst_s(operand);
}
break;
- case kIA32Lea: {
+ }
+ case kX87Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
// and addressing mode just happens to work out. The "addl"/"subl" forms
@@ -863,14 +1122,36 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
- case kIA32Push:
+ case kX87Push:
if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
} else {
__ push(i.InputOperand(0));
}
break;
- case kIA32StoreWriteBarrier: {
+ case kX87PushFloat32:
+ __ lea(esp, Operand(esp, -kFloatSize));
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ fld_s(i.InputOperand(0));
+ __ fstp_s(MemOperand(esp, 0));
+ } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ fst_s(MemOperand(esp, 0));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case kX87PushFloat64:
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ fld_d(i.InputOperand(0));
+ __ fstp_d(MemOperand(esp, 0));
+ } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ fst_d(MemOperand(esp, 0));
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case kX87StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
SaveFPRegsMode mode =
@@ -904,10 +1185,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
@@ -919,12 +1200,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_STORE_INTEGER(mov);
break;
case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ ASSEMBLE_CHECKED_STORE_FLOAT(fst_s);
break;
case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ ASSEMBLE_CHECKED_STORE_FLOAT(fst_d);
break;
- case kIA32StackCheck: {
+ case kX87StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
@@ -936,7 +1217,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Assembles a branch after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- IA32OperandConverter i(this, instr);
+ X87OperandConverter i(this, instr);
Label::Distance flabel_distance =
branch->fallthru ? Label::kNear : Label::kFar;
Label* tlabel = branch->true_label;
@@ -998,7 +1279,7 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
- IA32OperandConverter i(this, instr);
+ X87OperandConverter i(this, instr);
Label done;
// Materialize a full 32-bit 1 or 0 value. The result register is always the
@@ -1074,7 +1355,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- IA32OperandConverter i(this, instr);
+ X87OperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
__ cmp(input, Immediate(i.InputInt32(index + 0)));
@@ -1085,7 +1366,7 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
- IA32OperandConverter i(this, instr);
+ X87OperandConverter i(this, instr);
Register input = i.InputRegister(0);
size_t const case_count = instr->InputCount() - 2;
Label** cases = zone()->NewArray<Label*>(case_count);
@@ -1107,7 +1388,7 @@ void CodeGenerator::AssembleDeoptimizerCall(
}
-// The calling convention for JSFunctions on IA32 passes arguments on the
+// The calling convention for JSFunctions on X87 passes arguments on the
// stack and the JSFunction and context in EDI and ESI, respectively, thus
// the steps of the call look as follows:
@@ -1162,7 +1443,7 @@ void CodeGenerator::AssembleDeoptimizerCall(
// Runtime function calls are accomplished by doing a stub call to the
-// CEntryStub (a real code object). On IA32 passes arguments on the
+// CEntryStub (a real code object). On X87, it passes arguments on the
// stack, the number of arguments in EAX, the address of the runtime function
// in EBX, and the context in ESI.
@@ -1285,6 +1566,10 @@ void CodeGenerator::AssemblePrologue() {
// Allocate the stack slots used by this frame.
__ sub(esp, Immediate(stack_slots * kPointerSize));
}
+
+ // Initialize FPU state.
+ __ fninit();
+ __ fld1();
}
@@ -1328,7 +1613,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- IA32OperandConverter g(this, NULL);
+ X87OperandConverter g(this, NULL);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1386,8 +1671,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
// TODO(turbofan): Can we do better here?
uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
if (destination->IsDoubleRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ __ sub(esp, Immediate(kInt32Size));
+ __ mov(MemOperand(esp, 0), Immediate(src));
+ // Always push only one value onto the x87 stack.
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ add(esp, Immediate(kInt32Size));
} else {
DCHECK(destination->IsDoubleStackSlot());
Operand dst = g.ToOperand(destination);
@@ -1399,8 +1688,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
if (destination->IsDoubleRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ mov(MemOperand(esp, 0), Immediate(lower));
+ __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
+ // Always push only one value onto the x87 stack.
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
} else {
DCHECK(destination->IsDoubleStackSlot());
Operand dst0 = g.ToOperand(destination);
@@ -1410,25 +1704,50 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
} else if (source->IsDoubleRegister()) {
- XMMRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(dst, src);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- Operand dst = g.ToOperand(destination);
- __ movsd(dst, src);
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ auto allocated = AllocatedOperand::cast(*source);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fst_s(dst);
+ break;
+ case kRepFloat64:
+ __ fst_d(dst);
+ break;
+ default:
+ UNREACHABLE();
}
} else if (source->IsDoubleStackSlot()) {
DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
Operand src = g.ToOperand(source);
+ auto allocated = AllocatedOperand::cast(*source);
if (destination->IsDoubleRegister()) {
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ movsd(dst, src);
+ // Always push only one value onto the x87 stack.
+ __ fstp(0);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fld_s(src);
+ break;
+ case kRepFloat64:
+ __ fld_d(src);
+ break;
+ default:
+ UNREACHABLE();
+ }
} else {
Operand dst = g.ToOperand(destination);
- __ movsd(kScratchDoubleReg, src);
- __ movsd(dst, kScratchDoubleReg);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fld_s(src);
+ __ fstp_s(dst);
+ break;
+ case kRepFloat64:
+ __ fld_d(src);
+ __ fstp_d(dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
} else {
UNREACHABLE();
@@ -1438,7 +1757,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- IA32OperandConverter g(this, NULL);
+ X87OperandConverter g(this, NULL);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
@@ -1458,31 +1777,41 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ pop(dst);
__ pop(src);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- // XMM register-register swap.
- XMMRegister src = g.ToDoubleRegister(source);
- XMMRegister dst = g.ToDoubleRegister(destination);
- __ movaps(kScratchDoubleReg, src);
- __ movaps(src, dst);
- __ movaps(dst, kScratchDoubleReg);
+ UNREACHABLE();
} else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
- // XMM register-memory swap.
- XMMRegister reg = g.ToDoubleRegister(source);
- Operand other = g.ToOperand(destination);
- __ movsd(kScratchDoubleReg, other);
- __ movsd(other, reg);
- __ movaps(reg, kScratchDoubleReg);
+ auto allocated = AllocatedOperand::cast(*source);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fld_s(g.ToOperand(destination));
+ __ fxch();
+ __ fstp_s(g.ToOperand(destination));
+ break;
+ case kRepFloat64:
+ __ fld_d(g.ToOperand(destination));
+ __ fxch();
+ __ fstp_d(g.ToOperand(destination));
+ break;
+ default:
+ UNREACHABLE();
+ }
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
- // Double-width memory-to-memory.
- Operand src0 = g.ToOperand(source);
- Operand src1 = g.HighOperand(source);
- Operand dst0 = g.ToOperand(destination);
- Operand dst1 = g.HighOperand(destination);
- __ movsd(kScratchDoubleReg, dst0); // Save destination in scratch register.
- __ push(src0); // Then use stack to copy source to destination.
- __ pop(dst0);
- __ push(src1);
- __ pop(dst1);
- __ movsd(src0, kScratchDoubleReg);
+ auto allocated = AllocatedOperand::cast(*source);
+ switch (allocated.machine_type()) {
+ case kRepFloat32:
+ __ fld_s(g.ToOperand(source));
+ __ fld_s(g.ToOperand(destination));
+ __ fstp_s(g.ToOperand(source));
+ __ fstp_s(g.ToOperand(destination));
+ break;
+ case kRepFloat64:
+ __ fld_d(g.ToOperand(source));
+ __ fld_d(g.ToOperand(destination));
+ __ fstp_d(g.ToOperand(source));
+ __ fstp_d(g.ToOperand(destination));
+ break;
+ default:
+ UNREACHABLE();
+ }
} else {
// No other combinations are possible.
UNREACHABLE();
« no previous file with comments | « src/compiler/x87/OWNERS ('k') | src/compiler/x87/instruction-codes-x87.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698