Chromium Code Reviews

Unified Diff: src/compiler/arm64/instruction-selector-arm64.cc

Issue 2183923003: [stubs,interpreter] Optimise SMI loading for 64-bit targets. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Optimizing SMI loads at code stub assembler level. Created 4 years, 4 months ago
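For context, the optimisation relies on V8's 64-bit Smi layout: the 32-bit integer payload sits in the upper half of the tagged word and the lower half is all zero tag bits, so untagging is an arithmetic shift right by 32. The sketch below is illustrative only (not code from this CL) and simply pins down that layout; presumably this is what lets a Smi's payload be fetched with a narrower, sign-extending 32-bit load instead of a full 64-bit load followed by a shift, and the arm64 instruction-selector changes below provide exactly that kind of merged load.

// Illustrative only: the 64-bit Smi tagging scheme assumed above, not V8 code.
#include <cassert>
#include <cstdint>

int64_t SmiTag(int32_t value) {
  // Payload in bits 32..63, tag (all zeros) in bits 0..31.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
}

int32_t SmiUntag(int64_t smi) {
  // An arithmetic shift right by 32 restores the signed 32-bit payload.
  return static_cast<int32_t>(smi >> 32);
}

int main() {
  assert(SmiUntag(SmiTag(-7)) == -7);
  assert(SmiTag(1) == (INT64_C(1) << 32));
  return 0;
}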
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 
 namespace v8 {
 namespace internal {
(...skipping 143 matching lines...)
 
 
 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
               ImmediateMode operand_mode) {
   Arm64OperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
                  g.UseOperand(node->InputAt(1), operand_mode));
 }
 
-
 bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
                       Node* input_node, InstructionCode* opcode, bool try_ror) {
   Arm64OperandGenerator g(selector);
 
   if (!selector->CanCover(node, input_node)) return false;
   if (input_node->InputCount() != 2) return false;
   if (!g.IsIntegerConstant(input_node->InputAt(1))) return false;
 
   switch (input_node->opcode()) {
     case IrOpcode::kWord32Shl:
(...skipping 276 matching lines...)
     uint64_t value_minus_one = m->right().Value() - 1;
     if (base::bits::IsPowerOfTwo64(value_minus_one)) {
       return WhichPowerOf2_64(value_minus_one);
     }
   }
   return 0;
 }
 
 }  // namespace
 
-
-void InstructionSelector::VisitLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  MachineRepresentation rep = load_rep.representation();
-  Arm64OperandGenerator g(this);
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+              ImmediateMode immediate_mode, MachineRepresentation rep,
+              Node* output = nullptr) {
+  Arm64OperandGenerator g(selector);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
-  InstructionCode opcode = kArchNop;
-  ImmediateMode immediate_mode = kNoImmediate;
   InstructionOperand inputs[3];
   size_t input_count = 0;
   InstructionOperand outputs[1];
+
+  // If output is not nullptr, use that as the output register. This
+  // is used when we merge a conversion into the load.
+  outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
+  inputs[0] = g.UseRegister(base);
+
+  if (g.CanBeImmediate(index, immediate_mode)) {
+    input_count = 2;
+    inputs[1] = g.UseImmediate(index);
+    opcode |= AddressingModeField::encode(kMode_MRI);
+  } else if (TryMatchLoadStoreShift(&g, selector, rep, node, index, &inputs[1],
+                                    &inputs[2])) {
+    input_count = 3;
+    opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+  } else {
+    input_count = 2;
+    inputs[1] = g.UseRegister(index);
+    opcode |= AddressingModeField::encode(kMode_MRR);
+  }
+
+  selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
+}
+
+void InstructionSelector::VisitLoad(Node* node) {
+  InstructionCode opcode = kArchNop;
+  ImmediateMode immediate_mode = kNoImmediate;
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  MachineRepresentation rep = load_rep.representation();
   switch (rep) {
     case MachineRepresentation::kFloat32:
       opcode = kArm64LdrS;
       immediate_mode = kLoadStoreImm32;
       break;
     case MachineRepresentation::kFloat64:
       opcode = kArm64LdrD;
       immediate_mode = kLoadStoreImm64;
       break;
     case MachineRepresentation::kBit:  // Fall through.
(...skipping 12 matching lines...)
     case MachineRepresentation::kTagged:  // Fall through.
     case MachineRepresentation::kWord64:
       opcode = kArm64Ldr;
       immediate_mode = kLoadStoreImm64;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
   }
-
-  outputs[0] = g.DefineAsRegister(node);
-  inputs[0] = g.UseRegister(base);
-
-  if (g.CanBeImmediate(index, immediate_mode)) {
-    input_count = 2;
-    inputs[1] = g.UseImmediate(index);
-    opcode |= AddressingModeField::encode(kMode_MRI);
-  } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[1],
-                                    &inputs[2])) {
-    input_count = 3;
-    opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
-  } else {
-    input_count = 2;
-    inputs[1] = g.UseRegister(index);
-    opcode |= AddressingModeField::encode(kMode_MRR);
-  }
-
-  Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
+  EmitLoad(this, node, opcode, immediate_mode, rep);
 }
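The new EmitLoad helper factors the addressing-mode selection out of VisitLoad so other visitors can reuse it, and its optional output parameter lets a caller direct the loaded value into a different node's register when a conversion is folded into the load (used further down for ChangeInt32ToInt64). The fragment below is a minimal, self-contained sketch of that selection order only; it is not V8 code and every name in it is hypothetical. The three outcomes correspond to kMode_MRI (ldr x0, [base, #imm]), kMode_Operand2_R_LSL_I (ldr x0, [base, index, lsl #n]) and kMode_MRR (ldr x0, [base, index]).

// Hypothetical sketch of EmitLoad's addressing-mode priority; not V8 code.
#include <cstdint>
#include <optional>

enum class AddressingMode { kImmediateOffset, kScaledRegister, kRegisterOffset };

// Hypothetical summary of what is known about the index operand.
struct IndexInfo {
  std::optional<int64_t> constant;  // set when the index is a compile-time constant
  std::optional<int> shift_amount;  // set when the index is (register << shift_amount)
};

AddressingMode ChooseAddressingMode(const IndexInfo& index,
                                    int64_t max_immediate,  // range of the load's #imm field
                                    int element_size_log2) {
  // 1. Prefer a constant offset that fits the instruction's immediate field.
  if (index.constant && *index.constant >= 0 && *index.constant <= max_immediate) {
    return AddressingMode::kImmediateOffset;
  }
  // 2. Otherwise try a register index pre-scaled by the element size.
  if (index.shift_amount && *index.shift_amount == element_size_log2) {
    return AddressingMode::kScaledRegister;
  }
  // 3. Fall back to a plain register offset.
  return AddressingMode::kRegisterOffset;
}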
 
 
 void InstructionSelector::VisitStore(Node* node) {
   Arm64OperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
 
   StoreRepresentation store_rep = StoreRepresentationOf(node->op());
(...skipping 418 matching lines...)
     }
   }
   VisitRRO(this, kArm64Lsl32, node, kShift32Imm);
 }
 
 
 void InstructionSelector::VisitWord64Shl(Node* node) {
   Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
-      m.right().IsInRange(32, 63)) {
+      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
     // There's no need to sign/zero-extend to 64-bit if we shift out the upper
     // 32 bits anyway.
     Emit(kArm64Lsl, g.DefineAsRegister(node),
          g.UseRegister(m.left().node()->InputAt(0)),
          g.UseImmediate(m.right().node()));
     return;
   }
   VisitRRO(this, kArm64Lsl, node, kShift64Imm);
 }
 
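The in-code comment above states the key fact: once the shift amount is in [32, 63], the low 32 bits of the result are zero and the high 32 bits come only from the low 32 bits of the input, so whether that input was sign- or zero-extended beforehand cannot matter. The added CanCover(node, m.left().node()) condition is the usual instruction-selector guard that the conversion feeds only this shift before its work is folded away. A small stand-alone check of the identity (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t x = -12345;
  for (int shift = 32; shift <= 63; ++shift) {
    // Sign-extend first, then shift ...
    const uint64_t extended =
        static_cast<uint64_t>(static_cast<int64_t>(x)) << shift;
    // ... versus shifting the raw 32-bit pattern with the upper half zero:
    const uint64_t unextended =
        static_cast<uint64_t>(static_cast<uint32_t>(x)) << shift;
    // The results agree because the shift pushes bits 32..63 out entirely.
    assert(extended == unextended);
  }
  return 0;
}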
(...skipping 583 matching lines...)
   Node* success_output = NodeProperties::FindProjection(node, 1);
   if (success_output) {
     outputs[output_count++] = g.DefineAsRegister(success_output);
   }
 
   Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
 }
 
 
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
-  VisitRR(this, kArm64Sxtw, node);
+  Node* value = node->InputAt(0);
+  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+    // Generate sign-extending load.
+    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+    MachineRepresentation rep = load_rep.representation();
+    InstructionCode opcode = kArchNop;
+    ImmediateMode immediate_mode = kNoImmediate;
+    switch (rep) {
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
+        immediate_mode = kLoadStoreImm8;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
+        immediate_mode = kLoadStoreImm16;
+        break;
+      case MachineRepresentation::kWord32:
+        opcode = kArm64Ldrsw;
+        immediate_mode = kLoadStoreImm32;
+        break;
+      default:
+        UNREACHABLE();
+        return;
+    }
+    EmitLoad(this, value, opcode, immediate_mode, rep, node);
+  } else {
+    VisitRR(this, kArm64Sxtw, node);
+  }
 }
 
 
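For this file the central change is above: when a load feeds only this ChangeInt32ToInt64 (the CanCover check), the conversion is folded into the load itself by picking a sign-extending load opcode (kArm64Ldrsb/Ldrsh/Ldrsw) and passing the conversion node to EmitLoad as the output, so no separate sxtw is emitted. The snippet below is a minimal sketch, not V8 code, of why neither the signed nor the unsigned sub-word case needs an extra extension instruction.

#include <cassert>
#include <cstdint>

int main() {
  // Signed sub-word loads (ldrsb/ldrsh/ldrsw) sign-extend as they load, so
  // the register already holds "narrow value, sign-extended to 64 bits".
  int32_t loaded = -42;                                  // e.g. what ldrsw reads
  assert(static_cast<int64_t>(loaded) == INT64_C(-42));  // no separate sxtw needed

  // Unsigned sub-word loads (ldrb/ldrh) zero-extend, and because the upper
  // bits are then zero, a further sign extension from 32 bits is a no-op.
  uint8_t byte = 0xFF;
  int32_t widened = byte;                                // 255, sign bit clear
  assert(static_cast<int64_t>(widened) == INT64_C(255));
  return 0;
}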
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   Arm64OperandGenerator g(this);
   Node* value = node->InputAt(0);
   switch (value->opcode()) {
     case IrOpcode::kWord32And:
     case IrOpcode::kWord32Or:
     case IrOpcode::kWord32Xor:
(...skipping 1140 matching lines...)
 // static
 MachineOperatorBuilder::AlignmentRequirements
 InstructionSelector::AlignmentRequirements() {
   return MachineOperatorBuilder::AlignmentRequirements::
       FullUnalignedAccessSupport();
 }
 
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8