| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/instruction-selector-impl.h" | 5 #include "src/compiler/instruction-selector-impl.h" |
| 6 #include "src/compiler/node-matchers.h" | 6 #include "src/compiler/node-matchers.h" |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| 11 | 11 |
| 12 // Adds X64-specific methods for generating operands. | 12 // Adds X64-specific methods for generating operands. |
| 13 class X64OperandGenerator FINAL : public OperandGenerator { | 13 class X64OperandGenerator FINAL : public OperandGenerator { |
| 14 public: | 14 public: |
| 15 explicit X64OperandGenerator(InstructionSelector* selector) | 15 explicit X64OperandGenerator(InstructionSelector* selector) |
| 16 : OperandGenerator(selector) {} | 16 : OperandGenerator(selector) {} |
| 17 | 17 |
| 18 InstructionOperand* TempRegister(Register reg) { | 18 InstructionOperand* TempRegister(Register reg) { |
| 19 return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, | 19 return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER, |
| 20 Register::ToAllocationIndex(reg)); | 20 Register::ToAllocationIndex(reg)); |
| 21 } | 21 } |
| 22 | 22 |
| 23 InstructionOperand* UseByteRegister(Node* node) { | |
| 24 // TODO(dcarney): relax constraint. | |
| 25 return UseFixed(node, rdx); | |
| 26 } | |
| 27 | |
| 28 InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); } | 23 InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); } |
| 29 | 24 |
| 30 bool CanBeImmediate(Node* node) { | 25 bool CanBeImmediate(Node* node) { |
| 31 switch (node->opcode()) { | 26 switch (node->opcode()) { |
| 32 case IrOpcode::kInt32Constant: | 27 case IrOpcode::kInt32Constant: |
| 33 return true; | 28 return true; |
| 34 default: | 29 default: |
| 35 return false; | 30 return false; |
| 36 } | 31 } |
| 37 } | 32 } |
| (...skipping 14 matching lines...) |
| 52 return false; | 47 return false; |
| 53 } | 48 } |
| 54 } | 49 } |
| 55 | 50 |
| 56 bool CanBeBetterLeftOperand(Node* node) const { | 51 bool CanBeBetterLeftOperand(Node* node) const { |
| 57 return !selector()->IsLive(node); | 52 return !selector()->IsLive(node); |
| 58 } | 53 } |
| 59 }; | 54 }; |
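A note on `CanBeImmediate` above: it accepts only `kInt32Constant` because most x64 instructions encode at most a 32-bit immediate, which the CPU sign-extends to 64 bits; wider constants generally have to be materialized into a register first (only `movq r64, imm64` takes a full 64-bit immediate). A minimal sketch of that range check, as plain standalone C++ with an illustrative name, not a V8 helper:

```cpp
#include <cstdint>
#include <limits>

// Illustrative only: true if a 64-bit constant could be encoded as the
// sign-extended 32-bit immediate that most x64 ALU instructions accept.
bool FitsInSignExtendedImm32(int64_t value) {
  return value >= std::numeric_limits<int32_t>::min() &&
         value <= std::numeric_limits<int32_t>::max();
}
```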
| 60 | 55 |
| 61 | 56 |
| 57 // Matches nodes of form [x * N] for N in {1,2,4,8} |
| 58 class ScaleFactorMatcher : public NodeMatcher { |
| 59 public: |
| 60 explicit ScaleFactorMatcher(Node* node) |
| 61 : NodeMatcher(node), left_(NULL), power_(0) { |
| 62 Match(); |
| 63 } |
| 64 |
| 65 bool Matches() { return left_ != NULL; } |
| 66 int Power() { |
| 67 DCHECK(Matches()); |
| 68 return power_; |
| 69 } |
| 70 Node* Left() { |
| 71 DCHECK(Matches()); |
| 72 return left_; |
| 73 } |
| 74 |
| 75 private: |
| 76 void Match() { |
| 77 if (opcode() != IrOpcode::kInt32Mul) return; |
| 78 Int32BinopMatcher m(node()); |
| 79 if (!m.right().HasValue()) return; |
| 80 int32_t value = m.right().Value(); |
| 81 switch (value) { |
| 82 case 8: |
| 83 power_++; // Fall through. |
| 84 case 4: |
| 85 power_++; // Fall through. |
| 86 case 2: |
| 87 power_++; // Fall through. |
| 88 case 1: |
| 89 break; |
| 90 default: |
| 91 return; |
| 92 } |
| 93 left_ = m.left().node(); |
| 94 } |
| 95 |
| 96 Node* left_; |
| 97 int power_; |
| 98 }; |
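The fall-through switch in `Match()` computes log2 of the scale factor without a loop or bit tricks: each case increments once before falling into the next, so matching `8` passes through three increments. A standalone sketch of the same idea, independent of V8's `NodeMatcher` types (function name is illustrative):

```cpp
#include <cstdint>

// Returns log2 of a valid x64 SIB scale factor {1, 2, 4, 8}, or -1 otherwise.
// Mirrors the fall-through structure of ScaleFactorMatcher::Match() above.
int ScalePower(int32_t scale) {
  int power = 0;
  switch (scale) {
    case 8:
      power++;  // Fall through.
    case 4:
      power++;  // Fall through.
    case 2:
      power++;  // Fall through.
    case 1:
      return power;
    default:
      return -1;  // Not encodable in a SIB byte.
  }
}
```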
| 99 |
| 100 |
| 101 // Matches nodes of form: |
| 102 // [x * N] |
| 103 // [x * N + K] |
| 104 // [x + K] |
| 105 // [x] -- fallback case |
| 106 // for N in {1,2,4,8} and K int32_t |
| 107 class IndexAndDisplacementMatcher : public NodeMatcher { |
| 108 public: |
| 109 explicit IndexAndDisplacementMatcher(Node* node) |
| 110 : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) { |
| 111 Match(); |
| 112 } |
| 113 |
| 114 Node* index_node() { return index_node_; } |
| 115 int displacement() { return displacement_; } |
| 116 int power() { return power_; } |
| 117 |
| 118 private: |
| 119 void Match() { |
| 120 if (opcode() == IrOpcode::kInt32Add) { |
| 121 // Assume reduction has put constant on the right. |
| 122 Int32BinopMatcher m(node()); |
| 123 if (m.right().HasValue()) { |
| 124 displacement_ = m.right().Value(); |
| 125 index_node_ = m.left().node(); |
| 126 } |
| 127 } |
| 128 // Test scale factor. |
| 129 ScaleFactorMatcher scale_matcher(index_node_); |
| 130 if (scale_matcher.Matches()) { |
| 131 index_node_ = scale_matcher.Left(); |
| 132 power_ = scale_matcher.Power(); |
| 133 } |
| 134 } |
| 135 |
| 136 Node* index_node_; |
| 137 int displacement_; |
| 138 int power_; |
| 139 }; |
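For a concrete picture of the decomposition, here is how the matcher would take apart an address expression of shape `x * 4 + 20` (a sketch in comment form; the constant-on-the-right shape is assumed to have been produced by earlier reduction, as the comment in `Match()` notes):

```cpp
//   Int32Add(Int32Mul(x, 4), 20)
//     Int32Add with constant right -> displacement_ = 20,
//                                     index candidate = Int32Mul(x, 4)
//     ScaleFactorMatcher matches   -> index_node_ = x, power_ = 2
//
// Net effect: the whole expression is folded into one operand [x*4 + 20]
// instead of computing the multiply and add as separate instructions.
```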
| 140 |
| 141 |
| 142 class AddressingModeMatcher { |
| 143 public: |
| 144 AddressingModeMatcher(X64OperandGenerator* g, Node* base, Node* index) |
| 145 : base_operand_(NULL), |
| 146 index_operand_(NULL), |
| 147 displacement_operand_(NULL), |
| 148 mode_(kMode_None) { |
| 149 Int32Matcher index_imm(index); |
| 150 if (index_imm.HasValue()) { |
| 151 int32_t value = index_imm.Value(); |
| 152 if (value == 0) { |
| 153 mode_ = kMode_MR; |
| 154 } else { |
| 155 mode_ = kMode_MRI; |
| 156 index_operand_ = g->UseImmediate(index); |
| 157 } |
| 158 base_operand_ = g->UseRegister(base); |
| 159 } else { |
| 160 // Compute base operand. |
| 161 Int64Matcher base_imm(base); |
| 162 if (!base_imm.HasValue() || base_imm.Value() != 0) { |
| 163 base_operand_ = g->UseRegister(base); |
| 164 } |
| 165 // Compute index and displacement. |
| 166 IndexAndDisplacementMatcher matcher(index); |
| 167 index_operand_ = g->UseRegister(matcher.index_node()); |
| 168 if (matcher.displacement() != 0) { |
| 169 displacement_operand_ = g->TempImmediate(matcher.displacement()); |
| 170 } |
| 171 // Compute mode with scale factor one. |
| 172 if (base_operand_ == NULL) { |
| 173 if (displacement_operand_ == NULL) { |
| 174 mode_ = kMode_M1; |
| 175 } else { |
| 176 mode_ = kMode_M1I; |
| 177 } |
| 178 } else { |
| 179 if (displacement_operand_ == NULL) { |
| 180 mode_ = kMode_MR1; |
| 181 } else { |
| 182 mode_ = kMode_MR1I; |
| 183 } |
| 184 } |
| 185 // Adjust mode to actual scale factor. |
| 186 mode_ = GetMode(mode_, matcher.power()); |
| 187 } |
| 188 DCHECK_NE(kMode_None, mode_); |
| 189 } |
| 190 |
| 191 AddressingMode GetMode(AddressingMode one, int power) { |
| 192 return static_cast<AddressingMode>(static_cast<int>(one) + power); |
| 193 } |
| 194 |
| 195 size_t SetInputs(InstructionOperand** inputs) { |
| 196 size_t input_count = 0; |
| 197 // Compute inputs_ and input_count. |
| 198 if (base_operand_ != NULL) { |
| 199 inputs[input_count++] = base_operand_; |
| 200 } |
| 201 if (index_operand_ != NULL) { |
| 202 inputs[input_count++] = index_operand_; |
| 203 } |
| 204 if (displacement_operand_ != NULL) { |
| 205 // Pure displacement mode not supported by x64. |
| 206 DCHECK_NE(input_count, 0); |
| 207 inputs[input_count++] = displacement_operand_; |
| 208 } |
| 209 DCHECK_NE(input_count, 0); |
| 210 return input_count; |
| 211 } |
| 212 |
| 213 static const int kMaxInputCount = 3; |
| 214 InstructionOperand* base_operand_; |
| 215 InstructionOperand* index_operand_; |
| 216 InstructionOperand* displacement_operand_; |
| 217 AddressingMode mode_; |
| 218 }; |
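`GetMode` leans on an ordering assumption: the scaled variants of each addressing mode are declared consecutively in the `AddressingMode` enum, so adding `power` (log2 of the scale) to the scale-1 mode lands on the right sibling. Illustrative identities under that assumption, using the `kMode_*` spellings seen above:

```cpp
//   GetMode(kMode_MR1, 0) == kMode_MR1   // [base + index*1]
//   GetMode(kMode_MR1, 2) == kMode_MR4   // [base + index*4]
//   GetMode(kMode_M1I, 3) == kMode_M8I   // [index*8 + disp]
//
// If the enum ordering ever changes, the static_cast arithmetic in
// GetMode() silently yields the wrong mode, so the two must stay in sync.
```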
| 219 |
| 220 |
| 62 void InstructionSelector::VisitLoad(Node* node) { | 221 void InstructionSelector::VisitLoad(Node* node) { |
| 63 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node)); | 222 MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node)); |
| 64 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node)); | 223 MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node)); |
| 65 X64OperandGenerator g(this); | |
| 66 Node* base = node->InputAt(0); | 224 Node* base = node->InputAt(0); |
| 67 Node* index = node->InputAt(1); | 225 Node* index = node->InputAt(1); |
| 68 | 226 |
| 69 ArchOpcode opcode; | 227 ArchOpcode opcode; |
| 70 // TODO(titzer): signed/unsigned small loads | 228 // TODO(titzer): signed/unsigned small loads |
| 71 switch (rep) { | 229 switch (rep) { |
| 72 case kRepFloat32: | 230 case kRepFloat32: |
| 73 opcode = kX64Movss; | 231 opcode = kX64Movss; |
| 74 break; | 232 break; |
| 75 case kRepFloat64: | 233 case kRepFloat64: |
| (...skipping 10 matching lines...) |
| 86 opcode = kX64Movl; | 244 opcode = kX64Movl; |
| 87 break; | 245 break; |
| 88 case kRepTagged: // Fall through. | 246 case kRepTagged: // Fall through. |
| 89 case kRepWord64: | 247 case kRepWord64: |
| 90 opcode = kX64Movq; | 248 opcode = kX64Movq; |
| 91 break; | 249 break; |
| 92 default: | 250 default: |
| 93 UNREACHABLE(); | 251 UNREACHABLE(); |
| 94 return; | 252 return; |
| 95 } | 253 } |
| 96 if (g.CanBeImmediate(base)) { | 254 |
| 97 // load [#base + %index] | 255 X64OperandGenerator g(this); |
| 98 Emit(opcode | AddressingModeField::encode(kMode_MRI), | 256 AddressingModeMatcher matcher(&g, base, index); |
| 99 g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base)); | 257 InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_); |
| 100 } else if (g.CanBeImmediate(index)) { // load [%base + #index] | 258 InstructionOperand* outputs[] = {g.DefineAsRegister(node)}; |
| 101 Emit(opcode | AddressingModeField::encode(kMode_MRI), | 259 InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount]; |
| 102 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); | 260 int input_count = matcher.SetInputs(inputs); |
| 103 } else { // load [%base + %index + K] | 261 Emit(code, 1, outputs, input_count, inputs); |
| 104 Emit(opcode | AddressingModeField::encode(kMode_MR1I), | |
| 105 g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index)); | |
| 106 } | |
| 107 // TODO(turbofan): addressing modes [r+r*{2,4,8}+K] | |
| 108 } | 262 } |
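The rewritten `VisitLoad` resolves the old `TODO(turbofan)`: address forms like `[r + r*{2,4,8} + K]` now flow through `AddressingModeMatcher` instead of falling back to `kMode_MR1I` with the scaled add computed separately. For example, a tagged load at `base + index*8 + 24` would now select roughly (operand names illustrative, mode name per the consecutive-enum assumption above):

```cpp
//   movq dst, [base + index*8 + 24]   // kMode_MR8I, three address inputs
```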
| 109 | 263 |
| 110 | 264 |
| 111 void InstructionSelector::VisitStore(Node* node) { | 265 void InstructionSelector::VisitStore(Node* node) { |
| 112 X64OperandGenerator g(this); | 266 X64OperandGenerator g(this); |
| 113 Node* base = node->InputAt(0); | 267 Node* base = node->InputAt(0); |
| 114 Node* index = node->InputAt(1); | 268 Node* index = node->InputAt(1); |
| 115 Node* value = node->InputAt(2); | 269 Node* value = node->InputAt(2); |
| 116 | 270 |
| 117 StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node); | 271 StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node); |
| 118 MachineType rep = RepresentationOf(store_rep.machine_type()); | 272 MachineType rep = RepresentationOf(store_rep.machine_type()); |
| 119 if (store_rep.write_barrier_kind() == kFullWriteBarrier) { | 273 if (store_rep.write_barrier_kind() == kFullWriteBarrier) { |
| 120 DCHECK(rep == kRepTagged); | 274 DCHECK(rep == kRepTagged); |
| 121 // TODO(dcarney): refactor RecordWrite function to take temp registers | 275 // TODO(dcarney): refactor RecordWrite function to take temp registers |
| 122 // and pass them here instead of using fixed regs | 276 // and pass them here instead of using fixed regs |
| 123 // TODO(dcarney): handle immediate indices. | 277 // TODO(dcarney): handle immediate indices. |
| 124 InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)}; | 278 InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)}; |
| 125 Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx), | 279 Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx), |
| 126 g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps), | 280 g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps), |
| 127 temps); | 281 temps); |
| 128 return; | 282 return; |
| 129 } | 283 } |
| 130 DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind()); | 284 DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind()); |
| 131 InstructionOperand* val; | |
| 132 if (g.CanBeImmediate(value)) { | |
| 133 val = g.UseImmediate(value); | |
| 134 } else if (rep == kRepWord8 || rep == kRepBit) { | |
| 135 val = g.UseByteRegister(value); | |
| 136 } else { | |
| 137 val = g.UseRegister(value); | |
| 138 } | |
| 139 ArchOpcode opcode; | 285 ArchOpcode opcode; |
| 140 switch (rep) { | 286 switch (rep) { |
| 141 case kRepFloat32: | 287 case kRepFloat32: |
| 142 opcode = kX64Movss; | 288 opcode = kX64Movss; |
| 143 break; | 289 break; |
| 144 case kRepFloat64: | 290 case kRepFloat64: |
| 145 opcode = kX64Movsd; | 291 opcode = kX64Movsd; |
| 146 break; | 292 break; |
| 147 case kRepBit: // Fall through. | 293 case kRepBit: // Fall through. |
| 148 case kRepWord8: | 294 case kRepWord8: |
| 149 opcode = kX64Movb; | 295 opcode = kX64Movb; |
| 150 break; | 296 break; |
| 151 case kRepWord16: | 297 case kRepWord16: |
| 152 opcode = kX64Movw; | 298 opcode = kX64Movw; |
| 153 break; | 299 break; |
| 154 case kRepWord32: | 300 case kRepWord32: |
| 155 opcode = kX64Movl; | 301 opcode = kX64Movl; |
| 156 break; | 302 break; |
| 157 case kRepTagged: // Fall through. | 303 case kRepTagged: // Fall through. |
| 158 case kRepWord64: | 304 case kRepWord64: |
| 159 opcode = kX64Movq; | 305 opcode = kX64Movq; |
| 160 break; | 306 break; |
| 161 default: | 307 default: |
| 162 UNREACHABLE(); | 308 UNREACHABLE(); |
| 163 return; | 309 return; |
| 164 } | 310 } |
| 165 if (g.CanBeImmediate(base)) { | 311 |
| 166 // store [#base + %index], %|#value | 312 InstructionOperand* val; |
| 167 Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, | 313 if (g.CanBeImmediate(value)) { |
| 168 g.UseRegister(index), g.UseImmediate(base), val); | 314 val = g.UseImmediate(value); |
| 169 } else if (g.CanBeImmediate(index)) { // store [%base + #index], %|#value | 315 } else { |
| 170 Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, | 316 val = g.UseRegister(value); |
| 171 g.UseRegister(base), g.UseImmediate(index), val); | |
| 172 } else { // store [%base + %index], %|#value | |
| 173 Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL, | |
| 174 g.UseRegister(base), g.UseRegister(index), val); | |
| 175 } | 317 } |
| 176 // TODO(turbofan): addressing modes [r+r*{2,4,8}+K] | 318 |
| 319 AddressingModeMatcher matcher(&g, base, index); |
| 320 InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_); |
| 321 InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1]; |
| 322 int input_count = matcher.SetInputs(inputs); |
| 323 inputs[input_count++] = val; |
| 324 Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs); |
| 177 } | 325 } |
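Two details worth noting in the new store path. First, the inputs array is sized `kMaxInputCount + 1` because the value operand is appended after the matcher's address operands, and the zero-output `Emit` overload is used since a store produces no result. Second, the old `UseByteRegister` constraint (fixed `rdx`) is gone entirely: unlike ia32, x64 can address the low byte of any general-purpose register via a REX prefix, so `movb` can take an ordinary register operand. Schematically (illustrative, not V8 API):

```cpp
//   inputs = [base?][index?][disp?][val]  // up to 3 address operands + value
//   Emit(code, 0, NULL, input_count, inputs);  // no outputs for a store
```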
| 178 | 326 |
| 179 | 327 |
| 180 // Shared routine for multiple binary operations. | 328 // Shared routine for multiple binary operations. |
| 181 static void VisitBinop(InstructionSelector* selector, Node* node, | 329 static void VisitBinop(InstructionSelector* selector, Node* node, |
| 182 InstructionCode opcode, FlagsContinuation* cont) { | 330 InstructionCode opcode, FlagsContinuation* cont) { |
| 183 X64OperandGenerator g(selector); | 331 X64OperandGenerator g(selector); |
| 184 Int32BinopMatcher m(node); | 332 Int32BinopMatcher m(node); |
| 185 Node* left = m.left().node(); | 333 Node* left = m.left().node(); |
| 186 Node* right = m.right().node(); | 334 Node* right = m.right().node(); |
| (...skipping 508 matching lines...) |
| 695 if (descriptor->NeedsFrameState()) { | 843 if (descriptor->NeedsFrameState()) { |
| 696 frame_state_descriptor = GetFrameStateDescriptor( | 844 frame_state_descriptor = GetFrameStateDescriptor( |
| 697 call->InputAt(static_cast<int>(descriptor->InputCount()))); | 845 call->InputAt(static_cast<int>(descriptor->InputCount()))); |
| 698 } | 846 } |
| 699 | 847 |
| 700 CallBuffer buffer(zone(), descriptor, frame_state_descriptor); | 848 CallBuffer buffer(zone(), descriptor, frame_state_descriptor); |
| 701 | 849 |
| 702 // Compute InstructionOperands for inputs and outputs. | 850 // Compute InstructionOperands for inputs and outputs. |
| 703 InitializeCallBuffer(call, &buffer, true, true); | 851 InitializeCallBuffer(call, &buffer, true, true); |
| 704 | 852 |
| 705 // TODO(dcarney): stack alignment for c calls. | |
| 706 // TODO(dcarney): shadow space on windows for c calls. | |
| 707 // Push any stack arguments. | 853 // Push any stack arguments. |
| 708 for (NodeVectorRIter input = buffer.pushed_nodes.rbegin(); | 854 for (NodeVectorRIter input = buffer.pushed_nodes.rbegin(); |
| 709 input != buffer.pushed_nodes.rend(); input++) { | 855 input != buffer.pushed_nodes.rend(); input++) { |
| 710 // TODO(titzer): handle pushing double parameters. | 856 // TODO(titzer): handle pushing double parameters. |
| 711 Emit(kX64Push, NULL, | 857 Emit(kX64Push, NULL, |
| 712 g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input)); | 858 g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input)); |
| 713 } | 859 } |
| 714 | 860 |
| 715 // Select the appropriate opcode based on the call type. | 861 // Select the appropriate opcode based on the call type. |
| 716 InstructionCode opcode; | 862 InstructionCode opcode; |
| (...skipping 19 matching lines...) |
| 736 call_instr->MarkAsCall(); | 882 call_instr->MarkAsCall(); |
| 737 if (deoptimization != NULL) { | 883 if (deoptimization != NULL) { |
| 738 DCHECK(continuation != NULL); | 884 DCHECK(continuation != NULL); |
| 739 call_instr->MarkAsControl(); | 885 call_instr->MarkAsControl(); |
| 740 } | 886 } |
| 741 } | 887 } |
| 742 | 888 |
| 743 } // namespace compiler | 889 } // namespace compiler |
| 744 } // namespace internal | 890 } // namespace internal |
| 745 } // namespace v8 | 891 } // namespace v8 |