Index: src/compiler/x64/instruction-selector-x64.cc
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index a948257acf8da64566cb9020f74c60a8cb1636d9..5f3fc06865d35d4d0d7b816039de5b36d91c2f33 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -735,7 +735,7 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
 
 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Emit(kSSEFloat32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
 
@@ -808,7 +808,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Emit(kSSEFloat64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
 
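Note: both conversion hunks above are pure renames. The opcodes were named
after the x64 mnemonics (cvtss2sd, cvtsd2ss); they now describe the machine
operation instead, matching the naming scheme of the new float32 opcodes
introduced below. The selected instruction is unchanged. For orientation, the
matching case in the companion code-generator-x64.cc change looks roughly
like this (a sketch, not part of this file's diff; cvtss2sd is the real
assembler helper, the surrounding structure is abbreviated):

// Sketch of the code-generator side of the rename (illustrative).
case kSSEFloat32ToFloat64:
  __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
  break;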
@@ -835,17 +835,63 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
 }
 
 
-void InstructionSelector::VisitFloat64Add(Node* node) {
-  X64OperandGenerator g(this);
-  if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+namespace {
+
+void VisitFloatBinop(InstructionSelector* selector, Node* node,
+                     ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+  X64OperandGenerator g(selector);
+  InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+  InstructionOperand operand1 = g.Use(node->InputAt(1));
+  if (selector->IsSupported(AVX)) {
+    selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
   } else {
-    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+    selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
   }
 }
 
+}  // namespace
+
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+  VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+  VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
+}
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+  VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+  VisitFloatBinop(this, node, kAVXFloat32Div, kSSEFloat32Div);
+}
+
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+  VisitFloatBinop(this, node, kAVXFloat32Max, kSSEFloat32Max);
+}
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+  VisitFloatBinop(this, node, kAVXFloat32Min, kSSEFloat32Min);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat32Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
+}
+
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   X64OperandGenerator g(this);
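Note: the new VisitFloatBinop helper captures the one real difference between
the two instruction sets. AVX arithmetic uses the three-operand VEX encoding,
so the result may land in any register (DefineAsRegister). SSE arithmetic is
two-operand and destructive (the destination doubles as the first source), so
the output is constrained with DefineSameAsFirst and the register allocator
inserts a copy when the first input is still live. The matching cases in
code-generator-x64.cc come out roughly as follows (a sketch of the companion
file, not part of this hunk):

// Sketch of the code-generator cases the two opcodes select (illustrative).
case kSSEFloat64Add:
  // DefineSameAsFirst guarantees output == input 0 here.
  __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
  break;
case kAVXFloat64Add: {
  CpuFeatureScope avx_scope(masm(), AVX);
  // Three-operand form: the destination is independent of both sources.
  __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
            i.InputDoubleRegister(1));
  break;
}

Because the second operand is wrapped with g.Use() rather than
g.UseRegister(), it may also arrive as a memory operand. With the helper in
place, the Float64 arithmetic visitors below collapse to one-line
delegations without any change in the code they select.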
@@ -862,37 +908,17 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
       }
     }
   }
-  if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  } else {
-    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  }
+  VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
 }
 
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
-  X64OperandGenerator g(this);
-  if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  } else {
-    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  }
+  VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
 }
 
 
 void InstructionSelector::VisitFloat64Div(Node* node) {
-  X64OperandGenerator g(this);
-  if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  } else {
-    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  }
+  VisitFloatBinop(this, node, kAVXFloat64Div, kSSEFloat64Div);
 }
 
 
@@ -906,26 +932,12 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
 
 
 void InstructionSelector::VisitFloat64Max(Node* node) {
-  X64OperandGenerator g(this);
-  if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Max, g.DefineAsRegister(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  } else {
-    Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  }
+  VisitFloatBinop(this, node, kAVXFloat64Max, kSSEFloat64Max);
 }
 
 
 void InstructionSelector::VisitFloat64Min(Node* node) {
-  X64OperandGenerator g(this);
-  if (IsSupported(AVX)) {
-    Emit(kAVXFloat64Min, g.DefineAsRegister(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  } else {
-    Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
-  }
+  VisitFloatBinop(this, node, kAVXFloat64Min, kSSEFloat64Min);
 }
 
 
@@ -1107,12 +1119,25 @@ void VisitCompareZero(InstructionSelector* selector, Node* node,
 }
 
 
+// Shared routine for multiple float32 compare operations (inputs commuted).
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  Node* const left = node->InputAt(0);
+  Node* const right = node->InputAt(1);
+  InstructionCode const opcode =
+      selector->IsSupported(AVX) ? kAVXFloat32Cmp : kSSEFloat32Cmp;
+  VisitCompare(selector, opcode, right, left, cont, false);
+}
+
+
 // Shared routine for multiple float64 compare operations (inputs commuted).
 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                          FlagsContinuation* cont) {
   Node* const left = node->InputAt(0);
   Node* const right = node->InputAt(1);
-  VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
+  InstructionCode const opcode =
+      selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
+  VisitCompare(selector, opcode, right, left, cont, false);
 }
 
 }  // namespace
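Note: both compare helpers pass the operands commuted (right before left).
ucomiss/ucomisd set the flags like an unsigned integer compare, and when
either operand is NaN they set ZF = PF = CF = 1 ("unordered"). Testing a < b
directly would need the "below" condition (CF = 1), which is also true for
unordered inputs, so NaN would compare as less-than. Testing the swapped
operands with "above" (kUnsignedGreaterThan, CF = 0 and ZF = 0) is false for
unordered inputs, which matches JavaScript semantics. A minimal standalone
illustration of the equivalence (plain C++, not V8 code; UComissAbove merely
models the flag behavior described above):

#include <cassert>
#include <cmath>

// Models the x64 "above" condition after `ucomiss b, a`: unordered inputs
// set CF = ZF = 1, so "above" (CF == 0 && ZF == 0) is false for NaN.
static bool UComissAbove(float b, float a) {
  if (std::isnan(a) || std::isnan(b)) return false;  // unordered
  return b > a;  // ordered: flags say "above" iff b is strictly greater
}

int main() {
  assert(UComissAbove(2.0f, 1.0f) == (1.0f < 2.0f));  // ordinary case
  float nan = std::nanf("");
  assert(!UComissAbove(nan, 1.0f) && !(1.0f < nan));  // NaN is never less
  assert(!UComissAbove(1.0f, nan));
  return 0;
}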
@@ -1168,6 +1193,15 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
     case IrOpcode::kUint64LessThan:
       cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
       return VisitWord64Compare(this, value, &cont);
+    case IrOpcode::kFloat32Equal:
+      cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+      return VisitFloat32Compare(this, value, &cont);
+    case IrOpcode::kFloat32LessThan:
+      cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
+      return VisitFloat32Compare(this, value, &cont);
+    case IrOpcode::kFloat32LessThanOrEqual:
+      cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
+      return VisitFloat32Compare(this, value, &cont);
     case IrOpcode::kFloat64Equal:
       cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
       return VisitFloat64Compare(this, value, &cont);
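Note: these cases let VisitBranch fuse a float32 comparison directly into the
branch instead of materializing a boolean first; the conditions mirror the
commuted mapping above, and the same mapping reappears in the value-producing
visitors further down. Equality uses kUnorderedEqual rather than plain kEqual
because ZF = 1 both for equal and for unordered inputs; the "unordered"
prefix tells the shared branch emitter to route NaN to the false target
first. The emitted control flow is roughly (a sketch of the kUnorderedEqual
handling in the shared branch assembly, not code from this hunk):

// Sketch: branch emission for kUnorderedEqual after ucomiss/ucomisd.
__ j(parity_even, flabel);  // PF == 1: unordered (NaN input), branch false
__ j(equal, tlabel);        // otherwise ZF alone decides equality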
@@ -1385,6 +1419,24 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
 }
 
 
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedGreaterThan, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedGreaterThanOrEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+
 void InstructionSelector::VisitFloat64Equal(Node* node) {
   FlagsContinuation cont(kUnorderedEqual, node);
   VisitFloat64Compare(this, node, &cont);
@@ -1444,6 +1496,8 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
   MachineOperatorBuilder::Flags flags =
+      MachineOperatorBuilder::kFloat32Max |
+      MachineOperatorBuilder::kFloat32Min |
       MachineOperatorBuilder::kFloat64Max |
       MachineOperatorBuilder::kFloat64Min |
       MachineOperatorBuilder::kWord32ShiftIsSafe;
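Note: finally, the backend advertises the two new optional operators. These
flags are how the architecture-independent parts of the compiler discover
that float32 min/max machine operators can be used directly on this target;
without the flag, a generic expansion is used instead. A usage sketch
(OptionalOperator's IsSupported() query is the MachineOperatorBuilder
interface of this era; the surrounding lowering code is purely illustrative):

// Illustrative only: guarding a lowering on the newly advertised flag.
if (machine()->Float32Max().IsSupported()) {
  // Use the Float32Max machine operator; it will be selected into
  // kAVXFloat32Max / kSSEFloat32Max by the visitors above.
} else {
  // Fall back to an explicit compare-and-select expansion.
}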