OLD | NEW |
---|---|
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/generic-node-inl.h" | 5 #include "src/compiler/generic-node-inl.h" |
6 #include "src/compiler/instruction-selector-impl.h" | 6 #include "src/compiler/instruction-selector-impl.h" |
7 #include "src/compiler/node-matchers.h" | 7 #include "src/compiler/node-matchers.h" |
8 | 8 |
9 namespace v8 { | 9 namespace v8 { |
10 namespace internal { | 10 namespace internal { |
(...skipping 346 matching lines...) | |
357 | 357 |
358 void InstructionSelector::VisitWord32Ror(Node* node) { | 358 void InstructionSelector::VisitWord32Ror(Node* node) { |
359 VisitWord32Shift(this, node, kX64Ror32); | 359 VisitWord32Shift(this, node, kX64Ror32); |
360 } | 360 } |
361 | 361 |
362 | 362 |
363 void InstructionSelector::VisitWord64Ror(Node* node) { | 363 void InstructionSelector::VisitWord64Ror(Node* node) { |
364 VisitWord64Shift(this, node, kX64Ror); | 364 VisitWord64Shift(this, node, kX64Ror); |
365 } | 365 } |
366 | 366 |
367 namespace { | |
368 | |
369 AddressingMode GenerateMemoryOperandInputs(X64OperandGenerator* g, Node* scaled, | |
370 int scale_factor, Node* offset, | |
titzer 2014/11/07 16:17:38: Let's call it scale_exponent to avoid confusion.
danno 2014/11/07 16:43:31: Done.
| |
371 Node* constant, | |
372 InstructionOperand* inputs[], | |
373 size_t* input_count) { | |
374 AddressingMode mode = kMode_MRI; | |
375 if (offset != NULL) { | |
376 inputs[(*input_count)++] = g->UseRegister(offset); | |
377 if (scaled != NULL) { | |
378 DCHECK(scale_factor >= 0 && scale_factor <= 3); | |
379 inputs[(*input_count)++] = g->UseRegister(scaled); | |
380 if (constant != NULL) { | |
381 inputs[(*input_count)++] = g->UseImmediate(constant); | |
382 static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I, | |
dcarney 2014/11/07 09:11:43: for all these you can just use AdjustAddressingMo…
danno 2014/11/07 16:43:31: That would mean copying the code here, and the imp…
| |
383 kMode_MR4I, kMode_MR8I}; | |
384 mode = kMRnI_modes[scale_factor]; | |
385 } else { | |
386 static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2, | |
387 kMode_MR4, kMode_MR8}; | |
388 mode = kMRn_modes[scale_factor]; | |
389 } | |
390 } else { | |
391 DCHECK(constant != NULL); | |
392 inputs[(*input_count)++] = g->UseImmediate(constant); | |
393 mode = kMode_MRI; | |
394 } | |
395 } else { | |
396 DCHECK(scaled != NULL); | |
397 DCHECK(scale_factor >= 0 && scale_factor <= 3); | |
398 inputs[(*input_count)++] = g->UseRegister(scaled); | |
399 if (constant != NULL) { | |
400 inputs[(*input_count)++] = g->UseImmediate(constant); | |
401 static const AddressingMode kMnI_modes[] = {kMode_M1I, kMode_M2I, | |
402 kMode_M4I, kMode_M8I}; | |
403 mode = kMnI_modes[scale_factor]; | |
404 } else { | |
405 static const AddressingMode kMn_modes[] = {kMode_M1, kMode_M2, kMode_M4, | |
406 kMode_M8}; | |
407 mode = kMn_modes[scale_factor]; | |
408 } | |
409 } | |
410 return mode; | |
411 } | |
412 | |
413 } // namespace | |
414 | |
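A minimal, self-contained sketch (not V8 code) of the mode selection performed by GenerateMemoryOperandInputs above. The names SelectMode and kScale are made up for illustration; the sketch only mirrors how the presence of a base register ("offset"), an index ("scaled"), and a displacement ("constant"), together with the scale exponent (0..3, i.e. *1/*2/*4/*8), picks one of the kMode_MR{1,2,4,8}[I], kMode_MRI, or kMode_M{1,2,4,8}[I] addressing modes:

#include <cassert>
#include <string>

// Returns the addressing-mode name, mirroring the table lookups above.
std::string SelectMode(bool has_base, bool has_index, bool has_disp,
                       int scale_exponent) {
  static const char* kScale[] = {"1", "2", "4", "8"};
  assert(scale_exponent >= 0 && scale_exponent <= 3);
  if (has_base) {
    if (has_index) {
      // base + index*scale (+ disp): MR1..MR8 or MR1I..MR8I.
      return std::string("MR") + kScale[scale_exponent] + (has_disp ? "I" : "");
    }
    assert(has_disp);  // mirrors DCHECK(constant != NULL)
    return "MRI";      // base + disp
  }
  assert(has_index);   // mirrors DCHECK(scaled != NULL)
  // index*scale (+ disp): M1..M8 or M1I..M8I.
  return std::string("M") + kScale[scale_exponent] + (has_disp ? "I" : "");
}

int main() {
  assert(SelectMode(true, true, true, 2) == "MR4I");   // base + index*4 + disp
  assert(SelectMode(true, true, false, 0) == "MR1");   // base + index
  assert(SelectMode(false, true, true, 3) == "M8I");   // index*8 + disp
  return 0;
}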
367 | 415 |
368 void InstructionSelector::VisitInt32Add(Node* node) { | 416 void InstructionSelector::VisitInt32Add(Node* node) { |
417 // Try to match the Add to a leal pattern | |
418 ScaledWithOffsetMatcher m(node); | |
419 X64OperandGenerator g(this); | |
420 if (m.matches() && (m.constant() == NULL || g.CanBeImmediate(m.constant()))) { | |
421 InstructionOperand* inputs[4]; | |
422 size_t input_count = 0; | |
423 | |
424 AddressingMode mode = GenerateMemoryOperandInputs( | |
425 &g, m.scaled(), m.scale_factor(), m.offset(), m.constant(), inputs, | |
426 &input_count); | |
427 | |
428 DCHECK_NE(0, static_cast<int>(input_count)); | |
429 DCHECK_GE(arraysize(inputs), input_count); | |
430 | |
431 InstructionOperand* outputs[1]; | |
432 outputs[0] = g.DefineAsRegister(node); | |
433 | |
434 InstructionCode opcode = AddressingModeField::encode(mode) | kX64Lea32; | |
435 | |
436 Emit(opcode, 1, outputs, input_count, inputs); | |
437 return; | |
438 } | |
439 | |
369 VisitBinop(this, node, kX64Add32); | 440 VisitBinop(this, node, kX64Add32); |
370 } | 441 } |
371 | 442 |
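For illustration only (hypothetical source, not from the patch): the shape ScaledWithOffsetMatcher targets is an add of a scaled index, a base, and a small constant. The snippet below expresses it in C for readability; in practice the matcher runs on the equivalent Int32Add/Int32Mul nodes in the TurboFan graph, and with this change the x64 selector can cover the whole expression with a single leal instead of a shift followed by adds.

#include <cstdint>

// Hypothetical example of an add the new matcher can select as one
//   leal dst, [base + index*4 + 8]    ; AddressingMode kMode_MR4I
int32_t Example(int32_t base, int32_t index) {
  return base + index * 4 + 8;  // offset=base, scaled=index, scale exponent 2, constant 8
}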
372 | 443 |
373 void InstructionSelector::VisitInt64Add(Node* node) { | 444 void InstructionSelector::VisitInt64Add(Node* node) { |
374 VisitBinop(this, node, kX64Add); | 445 VisitBinop(this, node, kX64Add); |
375 } | 446 } |
376 | 447 |
377 | 448 |
378 void InstructionSelector::VisitInt32Sub(Node* node) { | 449 void InstructionSelector::VisitInt32Sub(Node* node) { |
(...skipping 704 matching lines...) | |
1083 if (CpuFeatures::IsSupported(SSE4_1)) { | 1154 if (CpuFeatures::IsSupported(SSE4_1)) { |
1084 return MachineOperatorBuilder::kFloat64Floor | | 1155 return MachineOperatorBuilder::kFloat64Floor | |
1085 MachineOperatorBuilder::kFloat64Ceil | | 1156 MachineOperatorBuilder::kFloat64Ceil | |
1086 MachineOperatorBuilder::kFloat64RoundTruncate; | 1157 MachineOperatorBuilder::kFloat64RoundTruncate; |
1087 } | 1158 } |
1088 return MachineOperatorBuilder::kNoFlags; | 1159 return MachineOperatorBuilder::kNoFlags; |
1089 } | 1160 } |
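Context, not part of this change: the flags above advertise that Float64Floor, Float64Ceil, and Float64RoundTruncate can be lowered directly when SSE4.1 is available (via roundsd). A rough standalone illustration using the SSE4.1 intrinsic, assumed equivalent and not V8 code; it requires compiling with SSE4.1 enabled:

#include <smmintrin.h>  // SSE4.1 intrinsics

// roundsd with "toward negative infinity" corresponds to Float64Floor.
double FloorViaRoundsd(double x) {
  __m128d v = _mm_set_sd(x);
  return _mm_cvtsd_f64(
      _mm_round_sd(v, v, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
}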
1090 } // namespace compiler | 1161 } // namespace compiler |
1091 } // namespace internal | 1162 } // namespace internal |
1092 } // namespace v8 | 1163 } // namespace v8 |