| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/instruction-selector-impl.h" | 5 #include "src/compiler/instruction-selector-impl.h" |
| 6 #include "src/compiler/node-matchers.h" | 6 #include "src/compiler/node-matchers.h" |
| 7 | 7 |
| 8 namespace v8 { | 8 namespace v8 { |
| 9 namespace internal { | 9 namespace internal { |
| 10 namespace compiler { | 10 namespace compiler { |
| (...skipping 412 matching lines...) | |
| 423 | 423 |
| 424 | 424 |
| 425 void InstructionSelector::VisitInt32Add(Node* node) { | 425 void InstructionSelector::VisitInt32Add(Node* node) { |
| 426 // Try to match the Add to a "leal" pattern. | 426 // Try to match the Add to a "leal" pattern. |
| 427 ScaledWithOffset32Matcher m(node); | 427 ScaledWithOffset32Matcher m(node); |
| 428 X64OperandGenerator g(this); | 428 X64OperandGenerator g(this); |
| 429 // It's possible to use a "leal", but it may not be smaller/cheaper. In the | 429 // It's possible to use a "leal", but it may not be smaller/cheaper. In the |
| 430 // case that there are only two operands to the add and one of them isn't | 430 // case that there are only two operands to the add and one of them isn't |
| 431 // live, use a plain "addl". | 431 // live, use a plain "addl". |
| 432 if (m.matches() && (m.constant() == NULL || g.CanBeImmediate(m.constant()))) { | 432 if (m.matches() && (m.constant() == NULL || g.CanBeImmediate(m.constant()))) { |
| 433 if (m.offset() != NULL) { | |
| 434 if (m.constant() == NULL) { | |
| 435 if (m.scaled() != NULL && m.scale_exponent() == 0) { | |
| 436 if (!IsLive(m.offset())) { | |
| 437 Emit(kX64Add32, g.DefineSameAsFirst(node), | |
| 438 g.UseRegister(m.offset()), g.Use(m.scaled())); | |
| 439 return; | |
| 440 } else if (!IsLive(m.scaled())) { | |
| 441 Emit(kX64Add32, g.DefineSameAsFirst(node), | |
| 442 g.UseRegister(m.scaled()), g.Use(m.offset())); | |
| 443 return; | |
| 444 } | |
| 445 } | |
| 446 } else { | |
| 447 if (m.scale_exponent() == 0) { | |
| 448 if (m.scaled() == NULL || m.offset() == NULL) { | |
| 449 Node* non_constant = m.scaled() == NULL ? m.offset() : m.scaled(); | |
| 450 if (!IsLive(non_constant)) { | |
| 451 Emit(kX64Add32, g.DefineSameAsFirst(node), | |
| 452 g.UseRegister(non_constant), g.UseImmediate(m.constant())); | |
| 453 return; | |
| 454 } | |
| 455 } | |
| 456 } | |
| 457 } | |
| 458 } | |
| 459 | |
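Note on the block deleted above: the old heuristic preferred the two-operand "addl" when one input of the add was no longer live, since the register allocator could then clobber that input in place and skip the gap move a "leal" destination would otherwise require. A hypothetical distillation of that decision rule follows; `Node`, `IsLive`, and `ChooseAddInstruction` are illustrative stand-ins for V8's internals, not the real API.

```cpp
// Minimal sketch of the liveness-based addl-vs-leal choice removed above.
// Node and IsLive are simplified stand-ins, not V8's actual types.
#include <cstdio>

struct Node { bool live_after; };

bool IsLive(const Node* n) { return n->live_after; }

const char* ChooseAddInstruction(const Node* lhs, const Node* rhs) {
  // If either operand dies at this node, it can serve as the destination of
  // a destructive two-operand "addl" with no extra register copy.
  if (!IsLive(lhs) || !IsLive(rhs)) return "addl";
  // Both operands are needed later: "leal" writes a fresh register instead.
  return "leal";
}

int main() {
  Node a{true}, b{false};
  printf("%s\n", ChooseAddInstruction(&a, &b));  // addl
  printf("%s\n", ChooseAddInstruction(&a, &a));  // leal
  return 0;
}
```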
| 460 InstructionOperand* inputs[4]; | 433 InstructionOperand* inputs[4]; |
| 461 size_t input_count = 0; | 434 size_t input_count = 0; |
| 462 AddressingMode mode = GenerateMemoryOperandInputs( | 435 AddressingMode mode = GenerateMemoryOperandInputs( |
| 463 &g, m.scaled(), m.scale_exponent(), m.offset(), m.constant(), inputs, | 436 &g, m.scaled(), m.scale_exponent(), m.offset(), m.constant(), inputs, |
| 464 &input_count); | 437 &input_count); |
| 465 | 438 |
| 466 DCHECK_NE(0, static_cast<int>(input_count)); | 439 DCHECK_NE(0, static_cast<int>(input_count)); |
| 467 DCHECK_GE(arraysize(inputs), input_count); | 440 DCHECK_GE(arraysize(inputs), input_count); |
| 468 | 441 |
| 469 InstructionOperand* outputs[1]; | 442 InstructionOperand* outputs[1]; |
| (...skipping 14 matching lines...) | |
| 484 } | 457 } |
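For readers unfamiliar with the trick: on x64, "leal" evaluates a full addressing expression (base + index*scale + displacement) without touching the flags and can write any destination register, so a 32-bit add whose operands match the scaled-with-offset pattern can be selected as a single "leal". Below is a minimal sketch of how the matched pieces could map to an addressing mode, assuming a ScaledWithOffset32Matcher-style decomposition into an optional offset register (base), an optional scaled register (index), and an optional constant (displacement); `SelectLeaMode` and the `LeaMode` enum are simplified stand-ins, not V8's GenerateMemoryOperandInputs.

```cpp
// Minimal sketch, not V8 code: pick an x64 addressing mode from which parts
// of the scaled-with-offset pattern are present (scale exponent 0 assumed).
#include <cassert>
#include <cstdio>

enum class LeaMode { MR, MRI, MR1, MR1I, M1, M1I, MI };

LeaMode SelectLeaMode(bool has_base, bool has_index, bool has_disp) {
  if (has_base) {
    if (has_index) return has_disp ? LeaMode::MR1I : LeaMode::MR1;
    return has_disp ? LeaMode::MRI : LeaMode::MR;
  }
  if (has_index) return has_disp ? LeaMode::M1I : LeaMode::M1;
  assert(has_disp);  // with no registers, only the displacement remains
  return LeaMode::MI;
}

int main() {
  // x + y   -> leal dst, [x + y*1]   (base + index, no displacement)
  printf("%d\n", static_cast<int>(SelectLeaMode(true, true, false)));
  // x + 42  -> leal dst, [x + 42]    (base + displacement)
  printf("%d\n", static_cast<int>(SelectLeaMode(true, false, true)));
  return 0;
}
```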
| 485 | 458 |
| 486 | 459 |
| 487 void InstructionSelector::VisitInt32Sub(Node* node) { | 460 void InstructionSelector::VisitInt32Sub(Node* node) { |
| 488 X64OperandGenerator g(this); | 461 X64OperandGenerator g(this); |
| 489 Int32BinopMatcher m(node); | 462 Int32BinopMatcher m(node); |
| 490 if (m.left().Is(0)) { | 463 if (m.left().Is(0)) { |
| 491 Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node())); | 464 Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node())); |
| 492 } else { | 465 } else { |
| 493 if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) { | 466 if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) { |
| 494 if (IsLive(m.left().node())) { | 467 // Turn subtractions of constant values into immediate "leal" instructions |
| 495 // Special handling for subtraction of constants where the non-constant | 468 // by negating the value. |
| 496 // input is used elsewhere. To eliminate the gap move before the sub to | 469 Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), |
| 497 // copy the destination register, use a "leal" instead. | 470 g.DefineAsRegister(node), g.UseRegister(m.left().node()), |
| 498 Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), | 471 g.TempImmediate(-m.right().Value())); |
| 499 g.DefineAsRegister(node), g.UseRegister(m.left().node()), | 472 return; |
| 500 g.TempImmediate(-m.right().Value())); | |
| 501 return; | |
| 502 } | |
| 503 } | 473 } |
| 504 VisitBinop(this, node, kX64Sub32); | 474 VisitBinop(this, node, kX64Sub32); |
| 505 } | 475 } |
| 506 } | 476 } |
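Worth noting why negating the immediate is safe in the new VisitInt32Sub code: x - c and x + (-c) agree modulo 2^32, including the edge case c == INT32_MIN, because the "leal" displacement is itself interpreted with 32-bit wraparound. A self-contained check of that identity (plain C++, not V8 code):

```cpp
// Sketch: the subtract-becomes-lea rewrite is exact in 32-bit arithmetic.
#include <cstdint>
#include <cstdio>

// Mirrors "leal dst, [x + (-c)]": adds the two's-complement negation of c.
// Widening to int64_t avoids signed-overflow UB when c == INT32_MIN.
uint32_t sub_via_lea(uint32_t x, int32_t c) {
  return x + static_cast<uint32_t>(-static_cast<int64_t>(c));
}

int main() {
  const uint32_t x = 123456789u;
  for (int32_t c : {0, 1, -7, INT32_MAX, INT32_MIN}) {
    // Both columns print the same value for every c, including INT32_MIN.
    printf("%u %u\n", x - static_cast<uint32_t>(c), sub_via_lea(x, c));
  }
  return 0;
}
```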
| 507 | 477 |
| 508 | 478 |
| 509 void InstructionSelector::VisitInt64Sub(Node* node) { | 479 void InstructionSelector::VisitInt64Sub(Node* node) { |
| 510 X64OperandGenerator g(this); | 480 X64OperandGenerator g(this); |
| 511 Int64BinopMatcher m(node); | 481 Int64BinopMatcher m(node); |
| 512 if (m.left().Is(0)) { | 482 if (m.left().Is(0)) { |
| (...skipping 686 matching lines...) | |
| 1199 MachineOperatorBuilder::kFloat64Ceil | | 1169 MachineOperatorBuilder::kFloat64Ceil | |
| 1200 MachineOperatorBuilder::kFloat64RoundTruncate | | 1170 MachineOperatorBuilder::kFloat64RoundTruncate | |
| 1201 MachineOperatorBuilder::kWord32ShiftIsSafe; | 1171 MachineOperatorBuilder::kWord32ShiftIsSafe; |
| 1202 } | 1172 } |
| 1203 return MachineOperatorBuilder::kNoFlags; | 1173 return MachineOperatorBuilder::kNoFlags; |
| 1204 } | 1174 } |
| 1205 | 1175 |
| 1206 } // namespace compiler | 1176 } // namespace compiler |
| 1207 } // namespace internal | 1177 } // namespace internal |
| 1208 } // namespace v8 | 1178 } // namespace v8 |