Index: src/compiler/simd-scalar-lowering.cc
diff --git a/src/compiler/simd-scalar-lowering.cc b/src/compiler/simd-scalar-lowering.cc
index 10c43f1aa38f1b077f7fe042478b51532b5577c0..8653820b3876f9becc53e7cc0c6160200dbfcde2 100644
--- a/src/compiler/simd-scalar-lowering.cc
+++ b/src/compiler/simd-scalar-lowering.cc
@@ -200,12 +200,22 @@ static int GetReturnCountAfterLowering(
 }
 
 void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices) {
+#if defined(V8_TARGET_BIG_ENDIAN)
+  new_indices[3] = index;
+  for (size_t i = 0; i < kMaxLanes - 1; ++i) {
+    new_indices[i] = graph()->NewNode(
+        machine()->Int32Add(), index,
+        graph()->NewNode(common()->Int32Constant(
+            static_cast<int>(kMaxLanes - i - 1) * kLaneWidth)));
+  }
+#else
   new_indices[0] = index;
   for (size_t i = 1; i < kMaxLanes; ++i) {
     new_indices[i] = graph()->NewNode(machine()->Int32Add(), index,
                                       graph()->NewNode(common()->Int32Constant(
                                           static_cast<int>(i) * kLaneWidth)));
   }
+#endif
 }
 
 void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
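
Note on the new GetIndexNodes branch above: on big-endian targets the lane order in memory is reversed, so the lowered per-lane indices count down from the top of the 16-byte value instead of up from the base index. A minimal standalone sketch of the offsets this produces, assuming kMaxLanes == 4 and kLaneWidth == 4 (a 4x32-bit SIMD value) as in the surrounding code; plain C++ for illustration, not part of the CL:

// Illustration only: mirrors the index arithmetic in GetIndexNodes with
// plain integers. kMaxLanes == 4 and kLaneWidth == 4 are assumed,
// matching a 4x32-bit SIMD value.
#include <cstdio>

int main() {
  const int kMaxLanes = 4;
  const int kLaneWidth = 4;
  int le_offset[4];  // little-endian branch (#else)
  int be_offset[4];  // big-endian branch (#if V8_TARGET_BIG_ENDIAN)

  le_offset[0] = 0;  // new_indices[0] = index
  for (int i = 1; i < kMaxLanes; ++i) {
    le_offset[i] = i * kLaneWidth;
  }

  be_offset[kMaxLanes - 1] = 0;  // new_indices[3] = index
  for (int i = 0; i < kMaxLanes - 1; ++i) {
    be_offset[i] = (kMaxLanes - i - 1) * kLaneWidth;
  }

  // Prints: lane 0 -> LE +0 / BE +12, lane 1 -> +4 / +8,
  //         lane 2 -> +8 / +4, lane 3 -> +12 / +0.
  for (int i = 0; i < kMaxLanes; ++i) {
    std::printf("lane %d: LE index + %2d, BE index + %2d\n", i, le_offset[i],
                be_offset[i]);
  }
  return 0;
}

Lane 0 therefore reads from the highest offset on big-endian targets, which is what the ReplaceInput fix-ups in the next two hunks account for.
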
@@ -229,6 +239,7 @@ void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
       rep_nodes[1] = graph()->NewNode(load_op, base, indices[1], rep_nodes[2],
                                       control_input);
       rep_nodes[0]->ReplaceInput(2, rep_nodes[1]);
+      rep_nodes[0]->ReplaceInput(1, indices[0]);
     } else {
       for (size_t i = 1; i < kMaxLanes; ++i) {
         rep_nodes[i] = graph()->NewNode(load_op, base, indices[i]);
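
The added ReplaceInput(1, indices[0]) calls in LowerLoadOp (above) and LowerStoreOp (below) are needed because rep_nodes[0] is rewired from an existing node that still carries the raw base index in input slot 1. On little-endian builds indices[0] is just that same index, so the extra call changes nothing, but with the big-endian branch of GetIndexNodes indices[0] is index + 12 and the slot has to be rewritten. A toy sketch of that fix-up, using a made-up ToyNode type rather than the TurboFan Node API:

// Toy model only: ToyNode is a stand-in for the real Node class, just to
// show what the added ReplaceInput(1, indices[0]) call does.
#include <cstdio>
#include <vector>

struct ToyNode {
  std::vector<int> inputs;  // slot -> value; indices modeled as plain ints
  void ReplaceInput(int slot, int value) { inputs[slot] = value; }
};

int main() {
  const int base_index = 0;
  // Offsets produced by the big-endian branch of GetIndexNodes.
  const int indices[4] = {base_index + 12, base_index + 8, base_index + 4,
                          base_index};

  // rep_nodes[0] starts out with the unmodified base index in slot 1.
  ToyNode rep_node0;
  rep_node0.inputs = {/*base*/ 0, /*index*/ base_index, /*chained input*/ -1};
  std::printf("slot 1 before fix-up: %d\n", rep_node0.inputs[1]);  // 0

  // The line added by this CL: point slot 1 at indices[0] instead.
  rep_node0.ReplaceInput(1, indices[0]);
  std::printf("slot 1 after fix-up:  %d\n", rep_node0.inputs[1]);  // 12
  return 0;
}
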
@@ -267,6 +278,7 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
       rep_nodes[1] = graph()->NewNode(store_op, base, indices[1], rep_inputs[1],
                                       rep_nodes[2], control_input);
       rep_nodes[0]->ReplaceInput(3, rep_nodes[1]);
+      rep_nodes[0]->ReplaceInput(1, indices[0]);
     } else {
       for (size_t i = 1; i < kMaxLanes; ++i) {