Chromium Code Reviews

Unified Diff: src/compiler/mips/instruction-selector-mips.cc

Issue 1779713009: Implement optional turbofan UnalignedLoad and UnalignedStore operators (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Unaligned access simulated using load/shift/or and store/shift/and. Created 4 years, 8 months ago
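For context, the simulation the patch set title refers to is the portable byte-wise idiom sketched below. This is an illustrative sketch only, not part of the patch: it assumes a little-endian target, and the helper names are invented for the example. The matcher added to VisitWord32Or tries to recognize the compiled form of the load idiom and collapse it into a single unaligned load instruction.

// Illustrative C++ sketch of the load/shift/or and store/shift/and
// emulation of unaligned 32-bit accesses (little-endian byte order).
#include <stdint.h>

// Reads a 32-bit value from a possibly unaligned address.
static inline uint32_t LoadU32Unaligned(const uint8_t* p) {
  uint32_t result = 0;
  result = (result << 8) | p[3];  // most significant byte first
  result = (result << 8) | p[2];
  result = (result << 8) | p[1];
  result = (result << 8) | p[0];  // least significant byte last
  return result;
}

// Writes a 32-bit value to a possibly unaligned address.
static inline void StoreU32Unaligned(uint8_t* p, uint32_t value) {
  p[0] = static_cast<uint8_t>(value & 0xFF);
  p[1] = static_cast<uint8_t>((value >> 8) & 0xFF);
  p[2] = static_cast<uint8_t>((value >> 16) & 0xFF);
  p[3] = static_cast<uint8_t>((value >> 24) & 0xFF);
}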
Index: src/compiler/mips/instruction-selector-mips.cc
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 2519b9331db35ee20cd7ad2e584d1b560e47685d..f81b7e9e928a542b1c0dec6bbad431a421aeaf40 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -307,6 +307,104 @@ void InstructionSelector::VisitWord32And(Node* node) {
void InstructionSelector::VisitWord32Or(Node* node) {
+ Int32BinopMatcher orm(node);
+
+ bool unalignedMatched = false;
+ bool loadSigned = false;
+
+ int i = 0;
+ uintptr_t prevAddress = 0;
+ Node* smallestLoad = nullptr;
+
+ // Unaligned access is simulated using combinations of
+ // load byte, shift and or operations, like this:
+ // Result = 0
+ // Result = LoadByte (high) | Result << 8
+ // Result = LoadByte (high - 1) | Result << 8
+ // ...
+ // Result = LoadByte (low) | Result << 8
+ // We try to match the graph that corresponds to the
+ // operations above and reduce it to a single
+ // unaligned load.
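+ // For example, on a little-endian target a fully matched chain of the
+ // form (Load8 denotes a byte-wide Load; addresses are illustrative)
+ // Or(Shl(Or(Shl(Or(Shl(Or(Shl(0, 8), Load8[a + 3]), 8),
+ // Load8[a + 2]), 8), Load8[a + 1]), 8), Load8[a])
+ // is replaced below by a single kMipsUlw of the word at address a.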
+ while (orm.left().IsWord32Shl() && orm.right().IsLoad()) {
+ Int32BinopMatcher shl(orm.left().node());
+ LoadMatcher<PtrMatcher> load(orm.right().node());
+
+ // The load address must be a known constant.
+ if (load.object().HasValue()) {
+ // The first load matched is assumed to be at the
+ // lowest address; its operands become the base of
+ // the unaligned load.
+ if (prevAddress == 0) {
+ prevAddress = load.object().Value();
+ smallestLoad = load.node();
+ } else {
+ // Verify that the load addresses are consecutive.
+ if (prevAddress + 1 != load.object().Value()) {
+ unalignedMatched = false;
+ break;
+ }
+ prevAddress = load.object().Value();
+ loadSigned = LoadRepresentationOf(load.node()->op()).IsSigned();
+ }
+ } else {
+ unalignedMatched = false;
+ break;
+ }
+
+ if (shl.left().IsWord32Or() && shl.right().Is(8)) {
+ orm = Int32BinopMatcher(shl.left().node());
+ } else if (shl.left().Is(0)) {
+ unalignedMatched = true;
+ break;
+ } else {
+ unalignedMatched = false;
+ break;
+ }
+
+ i++;
+ // The chain is too deep for any pattern we handle; give up.
+ if (i > 8) {
+ unalignedMatched = false;
+ break;
+ }
+ }
+
+ if (unalignedMatched) {
+ MipsOperandGenerator g(this);
+ ArchOpcode opcode = kArchNop;
+
+ if (i == 3) {
+ opcode = kMipsUlw;
+ } else if (i == 1) {
+ opcode = loadSigned ? kMipsUlh : kMipsUlhu;
+ } else {
+ unalignedMatched = false;
+ }
+
+ if (unalignedMatched) {
+ DCHECK(smallestLoad != nullptr);
+
+ Node* base = smallestLoad->InputAt(0);
+ Node* index = smallestLoad->InputAt(1);
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+ return;
+ }
+ }
+
VisitBinop(this, node, kMipsOr);
}
@@ -972,6 +1070,100 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+ UnalignedLoadRepresentation load_rep =
+ UnalignedLoadRepresentationOf(node->op());
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kMipsUlw;
+ break;
+ case MachineRepresentation::kFloat32:
+ opcode = kMipsUlwc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMipsUldc1;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+
+ // TODO(mips): I guess this could be done in a better way.
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMipsUswc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMipsUsdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kMipsSb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMipsUsh;
+ break;
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kMipsUsw;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
+}
+
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
@@ -1464,6 +1656,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven;
}
+
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson)) {
+ flags |= MachineOperatorBuilder::kUnalignedLoad |
+ MachineOperatorBuilder::kUnalignedStore;
+ }
+
return flags | MachineOperatorBuilder::kWord32Ctz |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kInt32DivIsSafe |
