OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
6 #include "src/base/bits.h" | 6 #include "src/base/bits.h" |
7 #include "src/compiler/instruction-selector-impl.h" | 7 #include "src/compiler/instruction-selector-impl.h" |
8 #include "src/compiler/node-matchers.h" | 8 #include "src/compiler/node-matchers.h" |
9 #include "src/compiler/node-properties.h" | 9 #include "src/compiler/node-properties.h" |
10 | 10 |
(...skipping 73 matching lines...)
84 g.UseRegister(node->InputAt(0))); | 84 g.UseRegister(node->InputAt(0))); |
85 } | 85 } |
86 | 86 |
87 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { | 87 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
88 ArmOperandGenerator g(selector); | 88 ArmOperandGenerator g(selector); |
89 selector->Emit(opcode, g.DefineAsRegister(node), | 89 selector->Emit(opcode, g.DefineAsRegister(node), |
90 g.UseRegister(node->InputAt(0)), | 90 g.UseRegister(node->InputAt(0)), |
91 g.UseRegister(node->InputAt(1))); | 91 g.UseRegister(node->InputAt(1))); |
92 } | 92 } |
93 | 93 |
| 94 void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode, |
| 95 Node* node) { |
| 96 ArmOperandGenerator g(selector); |
| 97 // Swap inputs to save an instruction in the CodeGenerator for Right ops. |
| 98 if (opcode == kArmS32x4ZipRight || opcode == kArmS32x4UnzipRight || |
| 99 opcode == kArmS32x4TransposeRight || opcode == kArmS16x8ZipRight || |
| 100 opcode == kArmS16x8UnzipRight || opcode == kArmS16x8TransposeRight || |
| 101 opcode == kArmS8x16ZipRight || opcode == kArmS8x16UnzipRight || |
| 102 opcode == kArmS8x16TransposeRight) { |
| 103 Node* in0 = node->InputAt(0); |
| 104 Node* in1 = node->InputAt(1); |
| 105 node->ReplaceInput(0, in1); |
| 106 node->ReplaceInput(1, in0); |
| 107 } |
| 108 // Use DefineSameAsFirst for binary ops that clobber their inputs, e.g. the |
| 109 // NEON vzip, vuzp, and vtrn instructions. |
| 110 selector->Emit(opcode, g.DefineSameAsFirst(node), |
| 111 g.UseRegister(node->InputAt(0)), |
| 112 g.UseRegister(node->InputAt(1))); |
| 113 } |
| 114 |
94 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { | 115 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
95 ArmOperandGenerator g(selector); | 116 ArmOperandGenerator g(selector); |
96 // Use DefineSameAsFirst for ternary ops that clobber their first input, | 117 // Use DefineSameAsFirst for ternary ops that clobber their first input, |
97 // e.g. the NEON vbsl instruction. | 118 // e.g. the NEON vbsl instruction. |
98 selector->Emit( | 119 selector->Emit( |
99 opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), | 120 opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), |
100 g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); | 121 g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); |
101 } | 122 } |
102 | 123 |
103 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) { | 124 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
(...skipping 2282 matching lines...)
2386 V(I32x4UConvertF32x4, kArmI32x4UConvertF32x4) \ | 2407 V(I32x4UConvertF32x4, kArmI32x4UConvertF32x4) \ |
2387 V(I32x4UConvertI16x8Low, kArmI32x4UConvertI16x8Low) \ | 2408 V(I32x4UConvertI16x8Low, kArmI32x4UConvertI16x8Low) \ |
2388 V(I32x4UConvertI16x8High, kArmI32x4UConvertI16x8High) \ | 2409 V(I32x4UConvertI16x8High, kArmI32x4UConvertI16x8High) \ |
2389 V(I16x8SConvertI8x16Low, kArmI16x8SConvertI8x16Low) \ | 2410 V(I16x8SConvertI8x16Low, kArmI16x8SConvertI8x16Low) \ |
2390 V(I16x8SConvertI8x16High, kArmI16x8SConvertI8x16High) \ | 2411 V(I16x8SConvertI8x16High, kArmI16x8SConvertI8x16High) \ |
2391 V(I16x8Neg, kArmI16x8Neg) \ | 2412 V(I16x8Neg, kArmI16x8Neg) \ |
2392 V(I16x8UConvertI8x16Low, kArmI16x8UConvertI8x16Low) \ | 2413 V(I16x8UConvertI8x16Low, kArmI16x8UConvertI8x16Low) \ |
2393 V(I16x8UConvertI8x16High, kArmI16x8UConvertI8x16High) \ | 2414 V(I16x8UConvertI8x16High, kArmI16x8UConvertI8x16High) \ |
2394 V(I8x16Neg, kArmI8x16Neg) \ | 2415 V(I8x16Neg, kArmI8x16Neg) \ |
2395 V(S128Not, kArmS128Not) \ | 2416 V(S128Not, kArmS128Not) \ |
| 2417 V(S32x2Reverse, kArmS32x2Reverse) \ |
| 2418 V(S16x4Reverse, kArmS16x4Reverse) \ |
| 2419 V(S16x2Reverse, kArmS16x2Reverse) \ |
| 2420 V(S8x8Reverse, kArmS8x8Reverse) \ |
| 2421 V(S8x4Reverse, kArmS8x4Reverse) \ |
| 2422 V(S8x2Reverse, kArmS8x2Reverse) \ |
2396 V(S1x4Not, kArmS128Not) \ | 2423 V(S1x4Not, kArmS128Not) \ |
2397 V(S1x4AnyTrue, kArmS1x4AnyTrue) \ | 2424 V(S1x4AnyTrue, kArmS1x4AnyTrue) \ |
2398 V(S1x4AllTrue, kArmS1x4AllTrue) \ | 2425 V(S1x4AllTrue, kArmS1x4AllTrue) \ |
2399 V(S1x8Not, kArmS128Not) \ | 2426 V(S1x8Not, kArmS128Not) \ |
2400 V(S1x8AnyTrue, kArmS1x8AnyTrue) \ | 2427 V(S1x8AnyTrue, kArmS1x8AnyTrue) \ |
2401 V(S1x8AllTrue, kArmS1x8AllTrue) \ | 2428 V(S1x8AllTrue, kArmS1x8AllTrue) \ |
2402 V(S1x16Not, kArmS128Not) \ | 2429 V(S1x16Not, kArmS128Not) \ |
2403 V(S1x16AnyTrue, kArmS1x16AnyTrue) \ | 2430 V(S1x16AnyTrue, kArmS1x16AnyTrue) \ |
2404 V(S1x16AllTrue, kArmS1x16AllTrue) | 2431 V(S1x16AllTrue, kArmS1x16AllTrue) |
2405 | 2432 |
(...skipping 77 matching lines...)
2483 V(S1x4And, kArmS128And) \ | 2510 V(S1x4And, kArmS128And) \ |
2484 V(S1x4Or, kArmS128Or) \ | 2511 V(S1x4Or, kArmS128Or) \ |
2485 V(S1x4Xor, kArmS128Xor) \ | 2512 V(S1x4Xor, kArmS128Xor) \ |
2486 V(S1x8And, kArmS128And) \ | 2513 V(S1x8And, kArmS128And) \ |
2487 V(S1x8Or, kArmS128Or) \ | 2514 V(S1x8Or, kArmS128Or) \ |
2488 V(S1x8Xor, kArmS128Xor) \ | 2515 V(S1x8Xor, kArmS128Xor) \ |
2489 V(S1x16And, kArmS128And) \ | 2516 V(S1x16And, kArmS128And) \ |
2490 V(S1x16Or, kArmS128Or) \ | 2517 V(S1x16Or, kArmS128Or) \ |
2491 V(S1x16Xor, kArmS128Xor) | 2518 V(S1x16Xor, kArmS128Xor) |
2492 | 2519 |
| 2520 #define SIMD_SHUFFLE_OP_LIST(V) \ |
| 2521 V(S32x4ZipLeft) \ |
| 2522 V(S32x4ZipRight) \ |
| 2523 V(S32x4UnzipLeft) \ |
| 2524 V(S32x4UnzipRight) \ |
| 2525 V(S32x4TransposeLeft) \ |
| 2526 V(S32x4TransposeRight) \ |
| 2527 V(S16x8ZipLeft) \ |
| 2528 V(S16x8ZipRight) \ |
| 2529 V(S16x8UnzipLeft) \ |
| 2530 V(S16x8UnzipRight) \ |
| 2531 V(S16x8TransposeLeft) \ |
| 2532 V(S16x8TransposeRight) \ |
| 2533 V(S8x16ZipLeft) \ |
| 2534 V(S8x16ZipRight) \ |
| 2535 V(S8x16UnzipLeft) \ |
| 2536 V(S8x16UnzipRight) \ |
| 2537 V(S8x16TransposeLeft) \ |
| 2538 V(S8x16TransposeRight) |
| 2539 |
2493 #define SIMD_VISIT_SPLAT(Type) \ | 2540 #define SIMD_VISIT_SPLAT(Type) \ |
2494 void InstructionSelector::Visit##Type##Splat(Node* node) { \ | 2541 void InstructionSelector::Visit##Type##Splat(Node* node) { \ |
2495 VisitRR(this, kArm##Type##Splat, node); \ | 2542 VisitRR(this, kArm##Type##Splat, node); \ |
2496 } | 2543 } |
2497 SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) | 2544 SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) |
2498 #undef SIMD_VISIT_SPLAT | 2545 #undef SIMD_VISIT_SPLAT |
2499 | 2546 |
2500 #define SIMD_VISIT_EXTRACT_LANE(Type) \ | 2547 #define SIMD_VISIT_EXTRACT_LANE(Type) \ |
2501 void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \ | 2548 void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \ |
2502 VisitRRI(this, kArm##Type##ExtractLane, node); \ | 2549 VisitRRI(this, kArm##Type##ExtractLane, node); \ |
(...skipping 37 matching lines...)
2540 SIMD_BINOP_LIST(SIMD_VISIT_BINOP) | 2587 SIMD_BINOP_LIST(SIMD_VISIT_BINOP) |
2541 #undef SIMD_VISIT_BINOP | 2588 #undef SIMD_VISIT_BINOP |
2542 | 2589 |
2543 #define SIMD_VISIT_SELECT_OP(format) \ | 2590 #define SIMD_VISIT_SELECT_OP(format) \ |
2544 void InstructionSelector::VisitS##format##Select(Node* node) { \ | 2591 void InstructionSelector::VisitS##format##Select(Node* node) { \ |
2545 VisitRRRR(this, kArmS128Select, node); \ | 2592 VisitRRRR(this, kArmS128Select, node); \ |
2546 } | 2593 } |
2547 SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP) | 2594 SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP) |
2548 #undef SIMD_VISIT_SELECT_OP | 2595 #undef SIMD_VISIT_SELECT_OP |
2549 | 2596 |
| 2597 #define SIMD_VISIT_SHUFFLE_OP(Name) \ |
| 2598 void InstructionSelector::Visit##Name(Node* node) { \ |
| 2599 VisitRRRShuffle(this, kArm##Name, node); \ |
| 2600 } |
| 2601 SIMD_SHUFFLE_OP_LIST(SIMD_VISIT_SHUFFLE_OP) |
| 2602 #undef SIMD_VISIT_SHUFFLE_OP |
| 2603 |
| 2604 void InstructionSelector::VisitS8x16Concat(Node* node) { |
| 2605 ArmOperandGenerator g(this); |
| 2606 int32_t imm = OpParameter<int32_t>(node); |
| 2607 Emit(kArmS8x16Concat, g.DefineAsRegister(node), |
| 2608 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), |
| 2609 g.UseImmediate(imm)); |
| 2610 } |
| 2611 |
2550 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { | 2612 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { |
2551 UNREACHABLE(); | 2613 UNREACHABLE(); |
2552 } | 2614 } |
2553 | 2615 |
2554 void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { | 2616 void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { |
2555 UNREACHABLE(); | 2617 UNREACHABLE(); |
2556 } | 2618 } |
2557 | 2619 |
2558 // static | 2620 // static |
2559 MachineOperatorBuilder::Flags | 2621 MachineOperatorBuilder::Flags |
(...skipping 28 matching lines...)
2588 Vector<MachineType> req_aligned = Vector<MachineType>::New(2); | 2650 Vector<MachineType> req_aligned = Vector<MachineType>::New(2); |
2589 req_aligned[0] = MachineType::Float32(); | 2651 req_aligned[0] = MachineType::Float32(); |
2590 req_aligned[1] = MachineType::Float64(); | 2652 req_aligned[1] = MachineType::Float64(); |
2591 return MachineOperatorBuilder::AlignmentRequirements:: | 2653 return MachineOperatorBuilder::AlignmentRequirements:: |
2592 SomeUnalignedAccessUnsupported(req_aligned, req_aligned); | 2654 SomeUnalignedAccessUnsupported(req_aligned, req_aligned); |
2593 } | 2655 } |
2594 | 2656 |
2595 } // namespace compiler | 2657 } // namespace compiler |
2596 } // namespace internal | 2658 } // namespace internal |
2597 } // namespace v8 | 2659 } // namespace v8 |
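
A note on the lowering added above: the "Right" zip/unzip/transpose ops correspond to the result that the two-register NEON vzip/vuzp/vtrn forms leave in their second operand, which is why VisitRRRShuffle swaps its inputs before constraining the output with DefineSameAsFirst, and the S8x16Concat immediate selects a byte window out of the two concatenated inputs. The sketch below restates those semantics with portable NEON intrinsics. It is an illustration only, not code from this CL or from V8's CodeGenerator; the intrinsic choices (vzipq_s32, vextq_u8), the helper names, and the example offset 3 are assumptions made for the sketch.

#include <arm_neon.h>

// vzipq_s32 returns both interleaved halves of its inputs: val[0] is the
// interleave of the low halves ("Left"), val[1] is the interleave of the
// high halves ("Right"), i.e. the value the in-place vzip instruction
// leaves in its second register.
int32x4_t ExampleS32x4ZipLeft(int32x4_t a, int32x4_t b) {
  return vzipq_s32(a, b).val[0];
}
int32x4_t ExampleS32x4ZipRight(int32x4_t a, int32x4_t b) {
  return vzipq_s32(a, b).val[1];
}

// Assuming the S8x16Concat immediate is a byte offset into the concatenation
// of the two inputs, it maps onto NEON vext: take 16 consecutive bytes
// starting at that offset (offset 3 is an arbitrary example; vext requires a
// compile-time constant).
uint8x16_t ExampleS8x16ConcatOffset3(uint8x16_t lo, uint8x16_t hi) {
  return vextq_u8(lo, hi, 3);
}

For instance, ExampleS32x4ZipRight({0,1,2,3}, {4,5,6,7}) yields {2,6,3,7}, the high-half interleave that the swapped-input selection lets land directly in the DefineSameAsFirst output register.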