| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
| 6 #include "src/base/bits.h" | 6 #include "src/base/bits.h" |
| 7 #include "src/compiler/instruction-selector-impl.h" | 7 #include "src/compiler/instruction-selector-impl.h" |
| 8 #include "src/compiler/node-matchers.h" | 8 #include "src/compiler/node-matchers.h" |
| 9 #include "src/compiler/node-properties.h" | 9 #include "src/compiler/node-properties.h" |
| 10 | 10 |
| (...skipping 73 matching lines...) |
| 84 g.UseRegister(node->InputAt(0))); | 84 g.UseRegister(node->InputAt(0))); |
| 85 } | 85 } |
| 86 | 86 |
| 87 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { | 87 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
| 88 ArmOperandGenerator g(selector); | 88 ArmOperandGenerator g(selector); |
| 89 selector->Emit(opcode, g.DefineAsRegister(node), | 89 selector->Emit(opcode, g.DefineAsRegister(node), |
| 90 g.UseRegister(node->InputAt(0)), | 90 g.UseRegister(node->InputAt(0)), |
| 91 g.UseRegister(node->InputAt(1))); | 91 g.UseRegister(node->InputAt(1))); |
| 92 } | 92 } |
| 93 | 93 |
| 94 void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode, |
| 95 Node* node) { |
| 96 ArmOperandGenerator g(selector); |
| 97 // Swap inputs to save an instruction in the CodeGenerator for High ops. |
| 98 if (opcode == kArmS32x4ZipRight || opcode == kArmS32x4UnzipRight || |
| 99 opcode == kArmS32x4TransposeRight || opcode == kArmS16x8ZipRight || |
| 100 opcode == kArmS16x8UnzipRight || opcode == kArmS16x8TransposeRight || |
| 101 opcode == kArmS8x16ZipRight || opcode == kArmS8x16UnzipRight || |
| 102 opcode == kArmS8x16TransposeRight) { |
| 103 Node* in0 = node->InputAt(0); |
| 104 Node* in1 = node->InputAt(1); |
| 105 node->ReplaceInput(0, in1); |
| 106 node->ReplaceInput(1, in0); |
| 107 } |
| 108 // Use DefineSameAsFirst for binary ops that clobber their inputs, e.g. the |
| 109 // NEON vzip, vuzp, and vtrn instructions. |
| 110 selector->Emit(opcode, g.DefineSameAsFirst(node), |
| 111 g.UseRegister(node->InputAt(0)), |
| 112 g.UseRegister(node->InputAt(1))); |
| 113 } |
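The Left/Right pairs handled above correspond to the two halves that a single NEON vzip/vuzp/vtrn produces: the instruction writes the low half of its result into its first operand register and the high half into its second. Swapping the inputs for the Right variants is presumably what lets the code generator land the wanted half in the destination without an extra move, which is the instruction saving the comment refers to. A minimal standalone sketch of the assumed 32x4 zip lane semantics (illustration only, not V8 code):

    #include <array>
    #include <cstdint>

    using Lanes = std::array<uint32_t, 4>;

    // Low half of the interleave of a and b (what S32x4ZipLeft computes).
    Lanes ZipLeft(const Lanes& a, const Lanes& b) {
      return {a[0], b[0], a[1], b[1]};
    }

    // High half of the interleave (what S32x4ZipRight computes).
    Lanes ZipRight(const Lanes& a, const Lanes& b) {
      return {a[2], b[2], a[3], b[3]};
    }
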
| 114 |
| 94 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { | 115 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
| 95 ArmOperandGenerator g(selector); | 116 ArmOperandGenerator g(selector); |
| 96 // Use DefineSameAsFirst for ternary ops that clobber their first input, | 117 // Use DefineSameAsFirst for ternary ops that clobber their first input, |
| 97 // e.g. the NEON vbsl instruction. | 118 // e.g. the NEON vbsl instruction. |
| 98 selector->Emit( | 119 selector->Emit( |
| 99 opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), | 120 opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)), |
| 100 g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); | 121 g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); |
| 101 } | 122 } |
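The DefineSameAsFirst constraint used here reflects that NEON vbsl overwrites its first operand: the destination register holds the selection mask on entry and the blended result on exit. A small standalone model of that bitwise select (assumed semantics, not V8 code):

    #include <cstdint>

    // For each bit, take it from a where the corresponding mask bit is set,
    // otherwise from b. vbsl does this in place, with the mask living in the
    // destination register, hence the output is tied to the first input.
    uint64_t BitwiseSelect(uint64_t mask, uint64_t a, uint64_t b) {
      return (mask & a) | (~mask & b);
    }
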
| 102 | 123 |
| 103 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) { | 124 void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) { |
| (...skipping 2222 matching lines...) |
| 2326 V(F32x4Abs, kArmF32x4Abs) \ | 2347 V(F32x4Abs, kArmF32x4Abs) \ |
| 2327 V(F32x4Neg, kArmF32x4Neg) \ | 2348 V(F32x4Neg, kArmF32x4Neg) \ |
| 2328 V(F32x4RecipApprox, kArmF32x4RecipApprox) \ | 2349 V(F32x4RecipApprox, kArmF32x4RecipApprox) \ |
| 2329 V(F32x4RecipSqrtApprox, kArmF32x4RecipSqrtApprox) \ | 2350 V(F32x4RecipSqrtApprox, kArmF32x4RecipSqrtApprox) \ |
| 2330 V(I32x4SConvertF32x4, kArmI32x4SConvertF32x4) \ | 2351 V(I32x4SConvertF32x4, kArmI32x4SConvertF32x4) \ |
| 2331 V(I32x4Neg, kArmI32x4Neg) \ | 2352 V(I32x4Neg, kArmI32x4Neg) \ |
| 2332 V(I32x4UConvertF32x4, kArmI32x4UConvertF32x4) \ | 2353 V(I32x4UConvertF32x4, kArmI32x4UConvertF32x4) \ |
| 2333 V(I16x8Neg, kArmI16x8Neg) \ | 2354 V(I16x8Neg, kArmI16x8Neg) \ |
| 2334 V(I8x16Neg, kArmI8x16Neg) \ | 2355 V(I8x16Neg, kArmI8x16Neg) \ |
| 2335 V(S128Not, kArmS128Not) \ | 2356 V(S128Not, kArmS128Not) \ |
| 2357 V(S64x2Reverse, kArmS64x2Reverse) \ |
| 2358 V(S32x2Reverse, kArmS32x2Reverse) \ |
| 2359 V(S16x4Reverse, kArmS16x4Reverse) \ |
| 2360 V(S16x2Reverse, kArmS16x2Reverse) \ |
| 2361 V(S8x8Reverse, kArmS8x8Reverse) \ |
| 2362 V(S8x4Reverse, kArmS8x4Reverse) \ |
| 2363 V(S8x2Reverse, kArmS8x2Reverse) \ |
| 2336 V(S1x4Not, kArmS128Not) \ | 2364 V(S1x4Not, kArmS128Not) \ |
| 2337 V(S1x4AnyTrue, kArmS1x4AnyTrue) \ | 2365 V(S1x4AnyTrue, kArmS1x4AnyTrue) \ |
| 2338 V(S1x4AllTrue, kArmS1x4AllTrue) \ | 2366 V(S1x4AllTrue, kArmS1x4AllTrue) \ |
| 2339 V(S1x8Not, kArmS128Not) \ | 2367 V(S1x8Not, kArmS128Not) \ |
| 2340 V(S1x8AnyTrue, kArmS1x8AnyTrue) \ | 2368 V(S1x8AnyTrue, kArmS1x8AnyTrue) \ |
| 2341 V(S1x8AllTrue, kArmS1x8AllTrue) \ | 2369 V(S1x8AllTrue, kArmS1x8AllTrue) \ |
| 2342 V(S1x16Not, kArmS128Not) \ | 2370 V(S1x16Not, kArmS128Not) \ |
| 2343 V(S1x16AnyTrue, kArmS1x16AnyTrue) \ | 2371 V(S1x16AnyTrue, kArmS1x16AnyTrue) \ |
| 2344 V(S1x16AllTrue, kArmS1x16AllTrue) | 2372 V(S1x16AllTrue, kArmS1x16AllTrue) |
| 2345 | 2373 |
| (...skipping 73 matching lines...) |
| 2419 V(S1x4And, kArmS128And) \ | 2447 V(S1x4And, kArmS128And) \ |
| 2420 V(S1x4Or, kArmS128Or) \ | 2448 V(S1x4Or, kArmS128Or) \ |
| 2421 V(S1x4Xor, kArmS128Xor) \ | 2449 V(S1x4Xor, kArmS128Xor) \ |
| 2422 V(S1x8And, kArmS128And) \ | 2450 V(S1x8And, kArmS128And) \ |
| 2423 V(S1x8Or, kArmS128Or) \ | 2451 V(S1x8Or, kArmS128Or) \ |
| 2424 V(S1x8Xor, kArmS128Xor) \ | 2452 V(S1x8Xor, kArmS128Xor) \ |
| 2425 V(S1x16And, kArmS128And) \ | 2453 V(S1x16And, kArmS128And) \ |
| 2426 V(S1x16Or, kArmS128Or) \ | 2454 V(S1x16Or, kArmS128Or) \ |
| 2427 V(S1x16Xor, kArmS128Xor) | 2455 V(S1x16Xor, kArmS128Xor) |
| 2428 | 2456 |
| 2457 #define SIMD_SHUFFLE_OP_LIST(V) \ |
| 2458 V(S32x4ZipLeft) \ |
| 2459 V(S32x4ZipRight) \ |
| 2460 V(S32x4UnzipLeft) \ |
| 2461 V(S32x4UnzipRight) \ |
| 2462 V(S32x4TransposeLeft) \ |
| 2463 V(S32x4TransposeRight) \ |
| 2464 V(S16x8ZipLeft) \ |
| 2465 V(S16x8ZipRight) \ |
| 2466 V(S16x8UnzipLeft) \ |
| 2467 V(S16x8UnzipRight) \ |
| 2468 V(S16x8TransposeLeft) \ |
| 2469 V(S16x8TransposeRight) \ |
| 2470 V(S8x16ZipLeft) \ |
| 2471 V(S8x16ZipRight) \ |
| 2472 V(S8x16UnzipLeft) \ |
| 2473 V(S8x16UnzipRight) \ |
| 2474 V(S8x16TransposeLeft) \ |
| 2475 V(S8x16TransposeRight) |
| 2476 |
| 2429 #define SIMD_VISIT_SPLAT(Type) \ | 2477 #define SIMD_VISIT_SPLAT(Type) \ |
| 2430 void InstructionSelector::Visit##Type##Splat(Node* node) { \ | 2478 void InstructionSelector::Visit##Type##Splat(Node* node) { \ |
| 2431 VisitRR(this, kArm##Type##Splat, node); \ | 2479 VisitRR(this, kArm##Type##Splat, node); \ |
| 2432 } | 2480 } |
| 2433 SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) | 2481 SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) |
| 2434 #undef SIMD_VISIT_SPLAT | 2482 #undef SIMD_VISIT_SPLAT |
| 2435 | 2483 |
| 2436 #define SIMD_VISIT_EXTRACT_LANE(Type) \ | 2484 #define SIMD_VISIT_EXTRACT_LANE(Type) \ |
| 2437 void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \ | 2485 void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \ |
| 2438 VisitRRI(this, kArm##Type##ExtractLane, node); \ | 2486 VisitRRI(this, kArm##Type##ExtractLane, node); \ |
| (...skipping 37 matching lines...) |
| 2476 SIMD_BINOP_LIST(SIMD_VISIT_BINOP) | 2524 SIMD_BINOP_LIST(SIMD_VISIT_BINOP) |
| 2477 #undef SIMD_VISIT_BINOP | 2525 #undef SIMD_VISIT_BINOP |
| 2478 | 2526 |
| 2479 #define SIMD_VISIT_SELECT_OP(format) \ | 2527 #define SIMD_VISIT_SELECT_OP(format) \ |
| 2480 void InstructionSelector::VisitS##format##Select(Node* node) { \ | 2528 void InstructionSelector::VisitS##format##Select(Node* node) { \ |
| 2481 VisitRRRR(this, kArmS128Select, node); \ | 2529 VisitRRRR(this, kArmS128Select, node); \ |
| 2482 } | 2530 } |
| 2483 SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP) | 2531 SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP) |
| 2484 #undef SIMD_VISIT_SELECT_OP | 2532 #undef SIMD_VISIT_SELECT_OP |
| 2485 | 2533 |
| 2534 #define SIMD_VISIT_SHUFFLE_OP(Name) \ |
| 2535 void InstructionSelector::Visit##Name(Node* node) { \ |
| 2536 VisitRRRShuffle(this, kArm##Name, node); \ |
| 2537 } |
| 2538 SIMD_SHUFFLE_OP_LIST(SIMD_VISIT_SHUFFLE_OP) |
| 2539 #undef SIMD_VISIT_SHUFFLE_OP |
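For a single entry of SIMD_SHUFFLE_OP_LIST, the macro above expands to a visitor of this shape (shown for S32x4ZipLeft):

    void InstructionSelector::VisitS32x4ZipLeft(Node* node) {
      VisitRRRShuffle(this, kArmS32x4ZipLeft, node);
    }
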
| 2540 |
| 2541 void InstructionSelector::VisitS8x16Concat(Node* node) { |
| 2542 ArmOperandGenerator g(this); |
| 2543 int32_t imm = OpParameter<int32_t>(node); |
| 2544 Emit(kArmS8x16Concat, g.DefineAsRegister(node), |
| 2545 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), |
| 2546 g.UseImmediate(imm)); |
| 2547 } |
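The immediate passed to kArmS8x16Concat is read here as the byte offset at which the 16-byte result window starts within the 32-byte concatenation of the two inputs, i.e. the behaviour of NEON vext.8 (an assumption about the backend mapping, not confirmed by this file). A standalone model of those semantics:

    #include <array>
    #include <cstdint>

    // Bytes [imm, imm + 16) of the concatenation src0:src1, i.e. src0 shifted
    // down by imm bytes with the low bytes of src1 shifted in at the top.
    std::array<uint8_t, 16> ConcatShift(const std::array<uint8_t, 16>& src0,
                                        const std::array<uint8_t, 16>& src1,
                                        int imm) {
      std::array<uint8_t, 16> dst;
      for (int i = 0; i < 16; ++i) {
        const int j = imm + i;
        dst[i] = (j < 16) ? src0[j] : src1[j - 16];
      }
      return dst;
    }
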
| 2548 |
| 2486 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { | 2549 void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { |
| 2487 UNREACHABLE(); | 2550 UNREACHABLE(); |
| 2488 } | 2551 } |
| 2489 | 2552 |
| 2490 void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { | 2553 void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { |
| 2491 UNREACHABLE(); | 2554 UNREACHABLE(); |
| 2492 } | 2555 } |
| 2493 | 2556 |
| 2494 // static | 2557 // static |
| 2495 MachineOperatorBuilder::Flags | 2558 MachineOperatorBuilder::Flags |
| (...skipping 28 matching lines...) |
| 2524 Vector<MachineType> req_aligned = Vector<MachineType>::New(2); | 2587 Vector<MachineType> req_aligned = Vector<MachineType>::New(2); |
| 2525 req_aligned[0] = MachineType::Float32(); | 2588 req_aligned[0] = MachineType::Float32(); |
| 2526 req_aligned[1] = MachineType::Float64(); | 2589 req_aligned[1] = MachineType::Float64(); |
| 2527 return MachineOperatorBuilder::AlignmentRequirements:: | 2590 return MachineOperatorBuilder::AlignmentRequirements:: |
| 2528 SomeUnalignedAccessUnsupported(req_aligned, req_aligned); | 2591 SomeUnalignedAccessUnsupported(req_aligned, req_aligned); |
| 2529 } | 2592 } |
| 2530 | 2593 |
| 2531 } // namespace compiler | 2594 } // namespace compiler |
| 2532 } // namespace internal | 2595 } // namespace internal |
| 2533 } // namespace v8 | 2596 } // namespace v8 |