OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/base/adapters.h" | 5 #include "src/base/adapters.h" |
6 #include "src/compiler/instruction-selector-impl.h" | 6 #include "src/compiler/instruction-selector-impl.h" |
7 #include "src/compiler/node-matchers.h" | 7 #include "src/compiler/node-matchers.h" |
8 #include "src/compiler/node-properties.h" | 8 #include "src/compiler/node-properties.h" |
9 | 9 |
10 namespace v8 { | 10 namespace v8 { |
(...skipping 32 matching lines...)
43 default: | 43 default: |
44 return false; | 44 return false; |
45 } | 45 } |
46 } | 46 } |
47 | 47 |
48 AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base, | 48 AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base, |
49 Node* displacement_node, | 49 Node* displacement_node, |
50 InstructionOperand inputs[], | 50 InstructionOperand inputs[], |
51 size_t* input_count) { | 51 size_t* input_count) { |
52 AddressingMode mode = kMode_MRI; | 52 AddressingMode mode = kMode_MRI; |
53 int32_t displacement = (displacement_node == NULL) | 53 int32_t displacement = (displacement_node == nullptr) |
54 ? 0 | 54 ? 0 |
55 : OpParameter<int32_t>(displacement_node); | 55 : OpParameter<int32_t>(displacement_node); |
56 if (base != NULL) { | 56 if (base != nullptr) { |
57 if (base->opcode() == IrOpcode::kInt32Constant) { | 57 if (base->opcode() == IrOpcode::kInt32Constant) { |
58 displacement += OpParameter<int32_t>(base); | 58 displacement += OpParameter<int32_t>(base); |
59 base = NULL; | 59 base = nullptr; |
60 } | 60 } |
61 } | 61 } |
62 if (base != NULL) { | 62 if (base != nullptr) { |
63 inputs[(*input_count)++] = UseRegister(base); | 63 inputs[(*input_count)++] = UseRegister(base); |
64 if (index != NULL) { | 64 if (index != nullptr) { |
65 DCHECK(scale >= 0 && scale <= 3); | 65 DCHECK(scale >= 0 && scale <= 3); |
66 inputs[(*input_count)++] = UseRegister(index); | 66 inputs[(*input_count)++] = UseRegister(index); |
67 if (displacement != 0) { | 67 if (displacement != 0) { |
68 inputs[(*input_count)++] = TempImmediate(displacement); | 68 inputs[(*input_count)++] = TempImmediate(displacement); |
69 static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I, | 69 static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I, |
70 kMode_MR4I, kMode_MR8I}; | 70 kMode_MR4I, kMode_MR8I}; |
71 mode = kMRnI_modes[scale]; | 71 mode = kMRnI_modes[scale]; |
72 } else { | 72 } else { |
73 static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2, | 73 static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2, |
74 kMode_MR4, kMode_MR8}; | 74 kMode_MR4, kMode_MR8}; |
75 mode = kMRn_modes[scale]; | 75 mode = kMRn_modes[scale]; |
76 } | 76 } |
77 } else { | 77 } else { |
78 if (displacement == 0) { | 78 if (displacement == 0) { |
79 mode = kMode_MR; | 79 mode = kMode_MR; |
80 } else { | 80 } else { |
81 inputs[(*input_count)++] = TempImmediate(displacement); | 81 inputs[(*input_count)++] = TempImmediate(displacement); |
82 mode = kMode_MRI; | 82 mode = kMode_MRI; |
83 } | 83 } |
84 } | 84 } |
85 } else { | 85 } else { |
86 DCHECK(scale >= 0 && scale <= 3); | 86 DCHECK(scale >= 0 && scale <= 3); |
87 if (index != NULL) { | 87 if (index != nullptr) { |
88 inputs[(*input_count)++] = UseRegister(index); | 88 inputs[(*input_count)++] = UseRegister(index); |
89 if (displacement != 0) { | 89 if (displacement != 0) { |
90 inputs[(*input_count)++] = TempImmediate(displacement); | 90 inputs[(*input_count)++] = TempImmediate(displacement); |
91 static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I, | 91 static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I, |
92 kMode_M4I, kMode_M8I}; | 92 kMode_M4I, kMode_M8I}; |
93 mode = kMnI_modes[scale]; | 93 mode = kMnI_modes[scale]; |
94 } else { | 94 } else { |
95 static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2, | 95 static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2, |
96 kMode_M4, kMode_M8}; | 96 kMode_M4, kMode_M8}; |
97 mode = kMn_modes[scale]; | 97 mode = kMn_modes[scale]; |
98 } | 98 } |
99 } else { | 99 } else { |
100 inputs[(*input_count)++] = TempImmediate(displacement); | 100 inputs[(*input_count)++] = TempImmediate(displacement); |
101 return kMode_MI; | 101 return kMode_MI; |
102 } | 102 } |
103 } | 103 } |
104 return mode; | 104 return mode; |
105 } | 105 } |
106 | 106 |
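Note: the mode selection above reduces to a small decision table. The following standalone sketch (plain C++, not V8 code; the returned strings only loosely mirror the kMode_* constants) shows which addressing mode each combination of base, index, scale and displacement maps to, after any Int32Constant base has already been folded into the displacement:

    // Standalone sketch of the addressing-mode decision table (illustrative only).
    #include <cstdint>
    #include <string>

    std::string SelectMode(bool has_base, bool has_index, int scale,
                           int32_t displacement) {
      // scale is log2 of the index multiplier, i.e. 0..3 for *1, *2, *4, *8.
      static const char* const kMRn[]  = {"MR1",  "MR2",  "MR4",  "MR8"};
      static const char* const kMRnI[] = {"MR1I", "MR2I", "MR4I", "MR8I"};
      static const char* const kMn[]   = {"MR",   "M2",   "M4",   "M8"};
      static const char* const kMnI[]  = {"MRI",  "M2I",  "M4I",  "M8I"};
      if (has_base) {
        if (has_index) return displacement ? kMRnI[scale] : kMRn[scale];
        return displacement ? "MRI" : "MR";
      }
      if (has_index) return displacement ? kMnI[scale] : kMn[scale];
      return "MI";  // displacement only
    }

For example, SelectMode(/*has_base=*/true, /*has_index=*/true, /*scale=*/2, /*displacement=*/8) yields "MR4I", i.e. the operand [base + index*4 + 8], matching the kMRnI_modes[scale] branch above.
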
107 AddressingMode GetEffectiveAddressMemoryOperand(Node* node, | 107 AddressingMode GetEffectiveAddressMemoryOperand(Node* node, |
108 InstructionOperand inputs[], | 108 InstructionOperand inputs[], |
109 size_t* input_count) { | 109 size_t* input_count) { |
110 BaseWithIndexAndDisplacement32Matcher m(node, true); | 110 BaseWithIndexAndDisplacement32Matcher m(node, true); |
111 DCHECK(m.matches()); | 111 DCHECK(m.matches()); |
112 if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) { | 112 if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) { |
113 return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(), | 113 return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(), |
114 m.displacement(), inputs, input_count); | 114 m.displacement(), inputs, input_count); |
115 } else { | 115 } else { |
116 inputs[(*input_count)++] = UseRegister(node->InputAt(0)); | 116 inputs[(*input_count)++] = UseRegister(node->InputAt(0)); |
117 inputs[(*input_count)++] = UseRegister(node->InputAt(1)); | 117 inputs[(*input_count)++] = UseRegister(node->InputAt(1)); |
118 return kMode_MR1; | 118 return kMode_MR1; |
119 } | 119 } |
120 } | 120 } |
121 | 121 |
122 bool CanBeBetterLeftOperand(Node* node) const { | 122 bool CanBeBetterLeftOperand(Node* node) const { |
(...skipping 168 matching lines...)
291 val = g.UseRegister(value); | 291 val = g.UseRegister(value); |
292 } | 292 } |
293 | 293 |
294 InstructionOperand inputs[4]; | 294 InstructionOperand inputs[4]; |
295 size_t input_count = 0; | 295 size_t input_count = 0; |
296 AddressingMode addressing_mode = | 296 AddressingMode addressing_mode = |
297 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); | 297 g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); |
298 InstructionCode code = | 298 InstructionCode code = |
299 opcode | AddressingModeField::encode(addressing_mode); | 299 opcode | AddressingModeField::encode(addressing_mode); |
300 inputs[input_count++] = val; | 300 inputs[input_count++] = val; |
301 Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs); | 301 Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, |
| 302 inputs); |
302 } | 303 } |
303 } | 304 } |
304 | 305 |
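In the store path above, the address operands produced by GetEffectiveAddressMemoryOperand come first, the stored value is appended last, and no outputs are defined, hence the Emit(code, 0, nullptr, ...) call. A data-only picture of the result for a hypothetical store to [base + index*4 + 8] (sketch only; not V8's Instruction class, opcode name assumed):

    #include <string>
    #include <vector>

    struct EmittedStoreSketch {
      std::string opcode;               // e.g. "IA32Movl" (assumed name)
      std::string addressing_mode;      // e.g. "MR4I" -> [base + index*4 + disp]
      std::vector<std::string> inputs;  // address operands first, value last
    };

    EmittedStoreSketch example = {
        "IA32Movl", "MR4I", {"base", "index", "#8", "value"}};
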
305 | 306 |
306 void InstructionSelector::VisitCheckedLoad(Node* node) { | 307 void InstructionSelector::VisitCheckedLoad(Node* node) { |
307 CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op()); | 308 CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op()); |
308 IA32OperandGenerator g(this); | 309 IA32OperandGenerator g(this); |
309 Node* const buffer = node->InputAt(0); | 310 Node* const buffer = node->InputAt(0); |
310 Node* const offset = node->InputAt(1); | 311 Node* const offset = node->InputAt(1); |
311 Node* const length = node->InputAt(2); | 312 Node* const length = node->InputAt(2); |
(...skipping 236 matching lines...)
548 selector->Emit(opcode, 1, outputs, input_count, inputs); | 549 selector->Emit(opcode, 1, outputs, input_count, inputs); |
549 } | 550 } |
550 | 551 |
551 } // namespace | 552 } // namespace |
552 | 553 |
553 | 554 |
554 void InstructionSelector::VisitWord32Shl(Node* node) { | 555 void InstructionSelector::VisitWord32Shl(Node* node) { |
555 Int32ScaleMatcher m(node, true); | 556 Int32ScaleMatcher m(node, true); |
556 if (m.matches()) { | 557 if (m.matches()) { |
557 Node* index = node->InputAt(0); | 558 Node* index = node->InputAt(0); |
558 Node* base = m.power_of_two_plus_one() ? index : NULL; | 559 Node* base = m.power_of_two_plus_one() ? index : nullptr; |
559 EmitLea(this, node, index, m.scale(), base, NULL); | 560 EmitLea(this, node, index, m.scale(), base, nullptr); |
560 return; | 561 return; |
561 } | 562 } |
562 VisitShift(this, node, kIA32Shl); | 563 VisitShift(this, node, kIA32Shl); |
563 } | 564 } |
564 | 565 |
565 | 566 |
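The Int32ScaleMatcher + EmitLea trick used in VisitWord32Shl above reappears in VisitInt32Mul below; the arithmetic identities it relies on are easy to check in isolation (standalone sketch, not V8 code):

    // Strength-reduction identities behind EmitLea (illustrative only;
    // small operands to keep the example free of signed overflow).
    #include <cassert>
    #include <cstdint>

    int32_t ShiftAsScaledIndex(int32_t x, int scale) {
      // x << scale == x * 2^scale, i.e. lea dst, [x * 2^scale] for scale 1..3.
      return x * (1 << scale);
    }

    int32_t MulAsBasePlusScaledIndex(int32_t x, int scale) {
      // x * (2^scale + 1) == x + x * 2^scale, i.e. lea dst, [x + x * 2^scale].
      // This is the power_of_two_plus_one() case: multipliers 3, 5, 9.
      return x + x * (1 << scale);
    }

    int main() {
      assert(ShiftAsScaledIndex(7, 3) == (7 << 3));         // 7 * 8
      assert(MulAsBasePlusScaledIndex(7, 3) == 7 * 9);      // 2^3 + 1
      return 0;
    }
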
566 void InstructionSelector::VisitWord32Shr(Node* node) { | 567 void InstructionSelector::VisitWord32Shr(Node* node) { |
567 VisitShift(this, node, kIA32Shr); | 568 VisitShift(this, node, kIA32Shr); |
568 } | 569 } |
569 | 570 |
(...skipping 25 matching lines...)
595 Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0))); | 596 Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0))); |
596 } | 597 } |
597 | 598 |
598 | 599 |
599 void InstructionSelector::VisitInt32Add(Node* node) { | 600 void InstructionSelector::VisitInt32Add(Node* node) { |
600 IA32OperandGenerator g(this); | 601 IA32OperandGenerator g(this); |
601 | 602 |
602 // Try to match the Add to a lea pattern | 603 // Try to match the Add to a lea pattern |
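  // Example (illustrative): an add such as (base + index*4) + 8 can be
  // selected as
  //   lea dst, [base + index*4 + 8]
  // which folds the whole computation into one instruction, leaves the
  // flags untouched, and allows dst to differ from both inputs.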
603 BaseWithIndexAndDisplacement32Matcher m(node); | 604 BaseWithIndexAndDisplacement32Matcher m(node); |
604 if (m.matches() && | 605 if (m.matches() && |
605 (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) { | 606 (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) { |
606 InstructionOperand inputs[4]; | 607 InstructionOperand inputs[4]; |
607 size_t input_count = 0; | 608 size_t input_count = 0; |
608 AddressingMode mode = g.GenerateMemoryOperandInputs( | 609 AddressingMode mode = g.GenerateMemoryOperandInputs( |
609 m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count); | 610 m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count); |
610 | 611 |
611 DCHECK_NE(0u, input_count); | 612 DCHECK_NE(0u, input_count); |
612 DCHECK_GE(arraysize(inputs), input_count); | 613 DCHECK_GE(arraysize(inputs), input_count); |
613 | 614 |
614 InstructionOperand outputs[1]; | 615 InstructionOperand outputs[1]; |
615 outputs[0] = g.DefineAsRegister(node); | 616 outputs[0] = g.DefineAsRegister(node); |
(...skipping 16 matching lines...)
632 } else { | 633 } else { |
633 VisitBinop(this, node, kIA32Sub); | 634 VisitBinop(this, node, kIA32Sub); |
634 } | 635 } |
635 } | 636 } |
636 | 637 |
637 | 638 |
638 void InstructionSelector::VisitInt32Mul(Node* node) { | 639 void InstructionSelector::VisitInt32Mul(Node* node) { |
639 Int32ScaleMatcher m(node, true); | 640 Int32ScaleMatcher m(node, true); |
640 if (m.matches()) { | 641 if (m.matches()) { |
641 Node* index = node->InputAt(0); | 642 Node* index = node->InputAt(0); |
642 Node* base = m.power_of_two_plus_one() ? index : NULL; | 643 Node* base = m.power_of_two_plus_one() ? index : nullptr; |
643 EmitLea(this, node, index, m.scale(), base, NULL); | 644 EmitLea(this, node, index, m.scale(), base, nullptr); |
644 return; | 645 return; |
645 } | 646 } |
646 IA32OperandGenerator g(this); | 647 IA32OperandGenerator g(this); |
647 Node* left = node->InputAt(0); | 648 Node* left = node->InputAt(0); |
648 Node* right = node->InputAt(1); | 649 Node* right = node->InputAt(1); |
649 if (g.CanBeImmediate(right)) { | 650 if (g.CanBeImmediate(right)) { |
650 Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left), | 651 Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left), |
651 g.UseImmediate(right)); | 652 g.UseImmediate(right)); |
652 } else { | 653 } else { |
653 if (g.CanBeBetterLeftOperand(right)) { | 654 if (g.CanBeBetterLeftOperand(right)) { |
(...skipping 443 matching lines...)
1097 return VisitFloat64Compare(selector, value, cont); | 1098 return VisitFloat64Compare(selector, value, cont); |
1098 case IrOpcode::kFloat64LessThanOrEqual: | 1099 case IrOpcode::kFloat64LessThanOrEqual: |
1099 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual); | 1100 cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual); |
1100 return VisitFloat64Compare(selector, value, cont); | 1101 return VisitFloat64Compare(selector, value, cont); |
1101 case IrOpcode::kProjection: | 1102 case IrOpcode::kProjection: |
1102 // Check if this is the overflow output projection of an | 1103 // Check if this is the overflow output projection of an |
1103 // <Operation>WithOverflow node. | 1104 // <Operation>WithOverflow node. |
1104 if (ProjectionIndexOf(value->op()) == 1u) { | 1105 if (ProjectionIndexOf(value->op()) == 1u) { |
1105 // We cannot combine the <Operation>WithOverflow with this branch | 1106 // We cannot combine the <Operation>WithOverflow with this branch |
1106 // unless the 0th projection (the use of the actual value of the | 1107 // unless the 0th projection (the use of the actual value of the |
1107 // <Operation> is either NULL, which means there's no use of the | 1108 // <Operation> is either nullptr, which means there's no use of the |
1108 // actual value, or was already defined, which means it is scheduled | 1109 // actual value, or was already defined, which means it is scheduled |
1109 // *AFTER* this branch). | 1110 // *AFTER* this branch). |
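        // Illustrative shape of the pattern handled here:
        //
        //   ovf    = Projection(1, op)   <- `value`, the overflow bit
        //   result = Projection(0, op)   <- may be absent
        //   op     = Int32AddWithOverflow(a, b)
        //
        // The add/sub already sets the overflow flag, so the branch can test
        // it directly as long as nothing between `op` and the branch still
        // needs `result` to be materialized.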
1110 Node* const node = value->InputAt(0); | 1111 Node* const node = value->InputAt(0); |
1111 Node* const result = NodeProperties::FindProjection(node, 0); | 1112 Node* const result = NodeProperties::FindProjection(node, 0); |
1112 if (result == NULL || selector->IsDefined(result)) { | 1113 if (result == nullptr || selector->IsDefined(result)) { |
1113 switch (node->opcode()) { | 1114 switch (node->opcode()) { |
1114 case IrOpcode::kInt32AddWithOverflow: | 1115 case IrOpcode::kInt32AddWithOverflow: |
1115 cont->OverwriteAndNegateIfEqual(kOverflow); | 1116 cont->OverwriteAndNegateIfEqual(kOverflow); |
1116 return VisitBinop(selector, node, kIA32Add, cont); | 1117 return VisitBinop(selector, node, kIA32Add, cont); |
1117 case IrOpcode::kInt32SubWithOverflow: | 1118 case IrOpcode::kInt32SubWithOverflow: |
1118 cont->OverwriteAndNegateIfEqual(kOverflow); | 1119 cont->OverwriteAndNegateIfEqual(kOverflow); |
1119 return VisitBinop(selector, node, kIA32Sub, cont); | 1120 return VisitBinop(selector, node, kIA32Sub, cont); |
1120 default: | 1121 default: |
1121 break; | 1122 break; |
1122 } | 1123 } |
(...skipping 202 matching lines...)
1325 MachineOperatorBuilder::kFloat64RoundTruncate | | 1326 MachineOperatorBuilder::kFloat64RoundTruncate | |
1326 MachineOperatorBuilder::kFloat32RoundTiesEven | | 1327 MachineOperatorBuilder::kFloat32RoundTiesEven | |
1327 MachineOperatorBuilder::kFloat64RoundTiesEven; | 1328 MachineOperatorBuilder::kFloat64RoundTiesEven; |
1328 } | 1329 } |
1329 return flags; | 1330 return flags; |
1330 } | 1331 } |
1331 | 1332 |
1332 } // namespace compiler | 1333 } // namespace compiler |
1333 } // namespace internal | 1334 } // namespace internal |
1334 } // namespace v8 | 1335 } // namespace v8 |