| Index: src/compiler/x64/instruction-selector-x64.cc |
| diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc |
| index c70944bc86d085509da1b3cdb44305f4043212df..1269e2e346e83859c407e2f036b0929666bb8088 100644 |
| --- a/src/compiler/x64/instruction-selector-x64.cc |
| +++ b/src/compiler/x64/instruction-selector-x64.cc |
| @@ -364,8 +364,253 @@ void InstructionSelector::VisitWord64Ror(Node* node) { |
| VisitWord64Shift(this, node, kX64Ror); |
| } |
| +namespace { |
| + |
| +struct MemoryOperandMatcher FINAL |
| + : public ValueMatcher<int32_t, IrOpcode::kInt32Add> { |
|
titzer
2014/11/05 13:16:09
What do you inherit from ValueMatcher here?
|
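For context on the question above, this is approximately the interface ValueMatcher provides in src/compiler/node-matchers.h of this era (a sketch assumed from that header, not quoted from the exact revision under review); kInt32Add carries no constant operator parameter, so the template argument mainly affects what HasValue() would report:

    template <typename T, IrOpcode::Value kOpcode>
    struct ValueMatcher : public NodeMatcher {
      // Captures OpParameter<T>(node) iff node->opcode() == kOpcode.
      explicit ValueMatcher(Node* node);
      bool HasValue() const;   // true iff the opcode matched kOpcode
      const T& Value() const;  // the captured constant parameter
    };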
| + explicit MemoryOperandMatcher(InstructionSelector* selector, Node* node) |
| + : ValueMatcher<int32_t, IrOpcode::kInt32Add>(node), |
| + is_memory_operand_(false), |
| + scaled_(NULL), |
| + unscaled_(NULL), |
| + constant_(NULL), |
| + scale_factor_(1) { |
| + MatchMemoryOperand(selector); |
|
titzer
2014/11/05 13:16:09
This method is only used in the constructor and I…
|
| + } |
| + |
| + bool IsMemoryOperand() const { return is_memory_operand_; } |
| + |
| + AddressingMode GenerateMemoryOperand(X64OperandGenerator* g, |
| + InstructionOperand* inputs[], |
| + size_t* input_count) { |
| + AddressingMode mode = kMode_MRI; |
| + if (unscaled_ != NULL) { |
| + inputs[(*input_count)++] = g->UseRegister(unscaled_); |
| + if (scaled_ != NULL) { |
| + inputs[(*input_count)++] = g->UseRegister(scaled_); |
| + if (constant_ != NULL) { |
| + inputs[(*input_count)++] = g->UseImmediate(constant_); |
| + if (scale_factor_ == 1) { |
|
titzer
2014/11/05 13:16:09
This pattern with switching on the scale factor co…
|
| + mode = kMode_MR1I; |
| + } else if (scale_factor_ == 2) { |
| + mode = kMode_MR2I; |
| + } else if (scale_factor_ == 4) { |
| + mode = kMode_MR4I; |
| + } else if (scale_factor_ == 8) { |
| + mode = kMode_MR8I; |
| + } else { |
| + UNREACHABLE(); |
| + } |
| + } else { |
| + if (scale_factor_ == 1) { |
| + mode = kMode_MR1; |
| + } else if (scale_factor_ == 2) { |
| + mode = kMode_MR2; |
| + } else if (scale_factor_ == 4) { |
| + mode = kMode_MR4; |
| + } else if (scale_factor_ == 8) { |
| + mode = kMode_MR8; |
| + } else { |
| + UNREACHABLE(); |
| + } |
| + } |
| + } else { |
| + DCHECK(constant_ != NULL); |
| + inputs[(*input_count)++] = g->UseImmediate(constant_); |
| + mode = kMode_MRI; |
| + } |
| + } else { |
| + DCHECK(scaled_ != NULL); |
| + inputs[(*input_count)++] = g->UseRegister(scaled_); |
| + if (constant_ != NULL) { |
| + inputs[(*input_count)++] = g->UseImmediate(constant_); |
| + if (scale_factor_ == 1) { |
| + mode = kMode_M1I; |
| + } else if (scale_factor_ == 2) { |
| + mode = kMode_M2I; |
| + } else if (scale_factor_ == 4) { |
| + mode = kMode_M4I; |
| + } else if (scale_factor_ == 8) { |
| + mode = kMode_M8I; |
| + } else { |
| + UNREACHABLE(); |
| + } |
| + } else { |
| + if (scale_factor_ == 1) { |
| + mode = kMode_M1; |
| + } else if (scale_factor_ == 2) { |
| + mode = kMode_M2; |
| + } else if (scale_factor_ == 4) { |
| + mode = kMode_M4; |
| + } else if (scale_factor_ == 8) { |
| + mode = kMode_M8; |
| + } else { |
| + UNREACHABLE(); |
| + } |
| + } |
| + } |
| + return mode; |
| + } |
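The review comment above says the scale-factor ladders could be compacted. A minimal sketch of one way to do that, driving the mode selection from a small table indexed by the scale exponent (the ScaleExponent() helper and the local tables are hypothetical additions for illustration, not part of the CL; the kMode_* values are the ones already used above):

    // Hypothetical compaction of the mode selection in GenerateMemoryOperand().
    static int ScaleExponent(int scale_factor) {
      switch (scale_factor) {
        case 1: return 0;
        case 2: return 1;
        case 4: return 2;
        case 8: return 3;
        default: UNREACHABLE(); return 0;
      }
    }

    // ...inside GenerateMemoryOperand(), after the register/immediate inputs
    // have been pushed:
    static const AddressingMode kMRn[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
    static const AddressingMode kMRnI[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
    static const AddressingMode kMn[] = {kMode_M1, kMode_M2, kMode_M4, kMode_M8};
    static const AddressingMode kMnI[] = {kMode_M1I, kMode_M2I, kMode_M4I, kMode_M8I};
    const int exponent = ScaleExponent(scale_factor_);
    if (unscaled_ != NULL) {
      if (scaled_ != NULL) {
        mode = (constant_ != NULL) ? kMRnI[exponent] : kMRn[exponent];
      } else {
        DCHECK(constant_ != NULL);
        mode = kMode_MRI;  // base register + immediate, no index.
      }
    } else {
      DCHECK(scaled_ != NULL);
      mode = (constant_ != NULL) ? kMnI[exponent] : kMn[exponent];
    }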
| + |
| + private: |
| + static bool TryCombine(InstructionSelector* selector, Node* use, Node* node, |
|
titzer
2014/11/05 13:16:09
This is tough to follow, partly because of the poi…
|
| + Node** constant, Node** unscaled, Node** scaled, |
| + int* scale_factor) { |
| + X64OperandGenerator g(selector); |
| + if (g.CanBeImmediate(node) && *constant == NULL) { |
| + *constant = node; |
|
titzer
2014/11/05 13:16:08
Why would the constant already be non-null and why…
|
| + } else { |
| + if (selector->CanCover(use, node) && *scaled == NULL) { |
| + if (node->opcode() == IrOpcode::kInt32Mul) { |
| + Int32BinopMatcher m(node); |
| + Int32Matcher leftm(m.left().node()); |
| + Int32Matcher rightm(m.right().node()); |
| + if (leftm.IsPowerOf2() && leftm.Value() <= 8) { |
|
titzer
2014/11/05 13:16:09
This can't happen because Int32BinopMatcher puts c…
|
| + *scale_factor = leftm.Value(); |
| + *scaled = m.right().node(); |
| + return true; |
| + } |
| + if (rightm.IsPowerOf2() && rightm.Value() <= 8) { |
| + *scale_factor = rightm.Value(); |
| + *scaled = m.left().node(); |
| + return true; |
| + } |
| + } |
| + if (node->opcode() == IrOpcode::kWord32Shl) { |
| + Int32BinopMatcher m(node); |
| + Int32Matcher rightm(m.right().node()); |
| + if (rightm.HasValue() && rightm.Value() <= 3 && rightm.Value() >= 0) { |
| + *scale_factor = 1 << rightm.Value(); |
| + *scaled = m.left().node(); |
| + return true; |
| + } |
| + } |
| + } |
| + if (*unscaled == NULL) { |
| + *unscaled = node; |
| + } else if (*scaled == NULL) { |
| + *scaled = node; |
| + } else { |
| + return false; |
|
titzer
2014/11/05 13:16:09
So we can return false if scaled and unscaled are…
|
| + } |
| + } |
| + return true; |
| + } |
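The review comment above finds the four out-pointer parameters of TryCombine hard to follow. A minimal sketch of an alternative, assuming the partial match is bundled into one small value type that callers copy and commit only on success (all names below are hypothetical and not from the CL):

    // Hypothetical state bundle replacing the constant/unscaled/scaled/scale
    // out-pointers.
    struct PartialMatch {
      Node* constant;    // displacement node, if any
      Node* unscaled;    // base register operand
      Node* scaled;      // index register operand
      int scale_factor;  // 1, 2, 4 or 8
    };

    // static bool TryCombine(InstructionSelector* selector, Node* use,
    //                        Node* node, PartialMatch* match);
    //
    // Caller side, mirroring the copy-and-commit pattern already used for the
    // right operand in MatchMemoryOperand() below:
    //   PartialMatch scratch = match_;
    //   if (TryCombine(selector, right, rightm.left().node(), &scratch) &&
    //       TryCombine(selector, right, rightm.right().node(), &scratch)) {
    //     match_ = scratch;  // both operands found a home; keep the fold.
    //   }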
| + |
| + void MatchMemoryOperand(InstructionSelector* selector) { |
| + if (node()->opcode() != IrOpcode::kInt32Add) return; |
| + |
| + Node* node = this->node(); |
| + Int32BinopMatcher m(node); |
| + Node* left = m.left().node(); |
| + Node* right = m.right().node(); |
| + |
| + is_memory_operand_ = true; |
| + |
| + bool handled_left = false; |
| + if (left->opcode() == IrOpcode::kInt32Add && |
| + selector->CanCover(node, left)) { |
| + Int32BinopMatcher leftm(left); |
| + Node* deeper_left = leftm.left().node(); |
| + Node* deeper_right = leftm.right().node(); |
| + CHECK(TryCombine(selector, left, deeper_left, &constant_, &unscaled_, |
| + &scaled_, &scale_factor_)); |
| + CHECK(TryCombine(selector, left, deeper_right, &constant_, &unscaled_, |
| + &scaled_, &scale_factor_)); |
| + // Combining the operands of the left side's add must still leave at |
| + // least one operand slot free for the right side, or the right operand |
| + // must be a constant. Otherwise, back out the match of the left operand |
| + // altogether. |
| + if (unscaled_ == NULL || scaled_ == NULL) { |
| + handled_left = true; |
| + } else { |
| + X64OperandGenerator g(selector); |
| + if (constant_ == NULL && g.CanBeImmediate(right)) { |
| + constant_ = right; |
| + return; |
| + } |
| + constant_ = NULL; |
| + unscaled_ = NULL; |
| + scaled_ = NULL; |
| + scale_factor_ = 1; |
| + } |
| + } |
| + // If the left operand wasn't a foldable add, just combine it into the |
| + // memory operand as-is. |
| + if (!handled_left) { |
| + CHECK(TryCombine(selector, node, left, &constant_, &unscaled_, &scaled_, |
| + &scale_factor_)); |
| + } |
| + |
| + bool handled_right = false; |
| + if (right->opcode() == IrOpcode::kInt32Add && |
| + selector->CanCover(node, right)) { |
|
titzer
2014/11/05 13:16:09
What happens if we handled left already here?
|
| + Int32BinopMatcher rightm(right); |
| + Node* deeper_left = rightm.left().node(); |
| + Node* deeper_right = rightm.right().node(); |
| + Node* constant_copy = constant_; |
| + Node* unscaled_copy = unscaled_; |
| + Node* scaled_copy = scaled_; |
| + int scale_factor_copy = scale_factor_; |
| + // If it's not possible to find a home for both the right add's operands, |
| + // then abort the attempt to combine the right operand into the memory |
| + // operand. |
| + if (TryCombine(selector, right, deeper_left, &constant_copy, |
| + &unscaled_copy, &scaled_copy, &scale_factor_copy) && |
| + TryCombine(selector, right, deeper_right, &constant_copy, |
| + &unscaled_copy, &scaled_copy, &scale_factor_copy)) { |
| + handled_right = true; |
| + constant_ = constant_copy; |
| + unscaled_ = unscaled_copy; |
| + scaled_ = scaled_copy; |
| + scale_factor_ = scale_factor_copy; |
| + } |
| + } |
| + |
| + // If the right operand was either not an add or the add couldn't be |
| + // folded into the base node's memory operand, then just use the right |
| + // operand as-is. |
| + if (!handled_right) { |
| + TryCombine(selector, node, right, &constant_, &unscaled_, &scaled_, |
| + &scale_factor_); |
| + } |
| + } |
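To make the matching concrete, a few hand-worked examples of IR shapes and the operands and addressing modes MatchMemoryOperand() derives for them (hypothetical inputs derived from the code above, not taken from the CL, and assuming CanCover() succeeds where needed):

    // Int32Add(base, index)
    //     -> unscaled=base, scaled=index, scale=1        => kMode_MR1
    // Int32Add(base, Word32Shl(index, 2))
    //     -> unscaled=base, scaled=index, scale=4        => kMode_MR4
    // Int32Add(Int32Add(base, index), Int32Constant(8))
    //     -> unscaled=base, scaled=index, constant=8     => kMode_MR1I
    // Int32Add(Word32Shl(index, 3), Int32Constant(4))
    //     -> scaled=index, scale=8, constant=4           => kMode_M8I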
| + |
| + bool is_memory_operand_; |
| + Node* scaled_; |
| + Node* unscaled_; |
| + Node* constant_; |
| + int scale_factor_; |
| +}; |
| + |
| +} // namespace |
| + |
| + |
| +static bool TryLeaInt32Add(InstructionSelector* selector, Node* node) { |
| + MemoryOperandMatcher m(selector, node); |
| + if (!m.IsMemoryOperand()) return false; |
| + |
| + X64OperandGenerator g(selector); |
| + InstructionOperand* inputs[4]; |
| + size_t input_count = 0; |
| + |
| + AddressingMode mode = m.GenerateMemoryOperand(&g, inputs, &input_count); |
| + |
| + DCHECK_NE(0, static_cast<int>(input_count)); |
| + DCHECK_GE(arraysize(inputs), input_count); |
| + |
| + InstructionOperand* outputs[1]; |
| + outputs[0] = g.DefineAsRegister(node); |
| + |
| + InstructionCode opcode = AddressingModeField::encode(mode) | kX64Lea32; |
| + |
| + selector->Emit(opcode, 1, outputs, input_count, inputs); |
| + |
| + return true; |
| +} |
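As a concrete illustration of what the helper emits (again a hypothetical input, hand-derived from the code above): matching Int32Add(Int32Add(base, Word32Shl(index, 3)), Int32Constant(16)) yields kMode_MR8I with inputs {base, index, #16}, so the kX64Lea32 instruction selected here corresponds to roughly:

    // leal dst, [base + index*8 + 16]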
| + |
| void InstructionSelector::VisitInt32Add(Node* node) { |
| + if (TryLeaInt32Add(this, node)) return; |
|
titzer
2014/11/05 13:16:09
I think this would be more readable if it was inli…
|
| VisitBinop(this, node, kX64Add32); |
| } |
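The final comment suggests inlining the helper into the visitor. One possible inlined shape, shown only as a sketch that merges TryLeaInt32Add's body into VisitInt32Add (not code from the CL):

    void InstructionSelector::VisitInt32Add(Node* node) {
      // Try to express the add as a single 32-bit lea first.
      MemoryOperandMatcher m(this, node);
      if (m.IsMemoryOperand()) {
        X64OperandGenerator g(this);
        InstructionOperand* inputs[4];
        size_t input_count = 0;
        AddressingMode mode = m.GenerateMemoryOperand(&g, inputs, &input_count);
        DCHECK_NE(0, static_cast<int>(input_count));
        DCHECK_GE(arraysize(inputs), input_count);
        InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
        Emit(AddressingModeField::encode(mode) | kX64Lea32, 1, outputs,
             input_count, inputs);
        return;
      }
      VisitBinop(this, node, kX64Add32);
    }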