Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/generic-node-inl.h" | 5 #include "src/compiler/generic-node-inl.h" |
| 6 #include "src/compiler/instruction-selector-impl.h" | 6 #include "src/compiler/instruction-selector-impl.h" |
| 7 #include "src/compiler/node-matchers.h" | 7 #include "src/compiler/node-matchers.h" |
| 8 | 8 |
| 9 namespace v8 { | 9 namespace v8 { |
| 10 namespace internal { | 10 namespace internal { |
| (...skipping 346 matching lines...) | |
| 357 | 357 |
| 358 void InstructionSelector::VisitWord32Ror(Node* node) { | 358 void InstructionSelector::VisitWord32Ror(Node* node) { |
| 359 VisitWord32Shift(this, node, kX64Ror32); | 359 VisitWord32Shift(this, node, kX64Ror32); |
| 360 } | 360 } |
| 361 | 361 |
| 362 | 362 |
| 363 void InstructionSelector::VisitWord64Ror(Node* node) { | 363 void InstructionSelector::VisitWord64Ror(Node* node) { |
| 364 VisitWord64Shift(this, node, kX64Ror); | 364 VisitWord64Shift(this, node, kX64Ror); |
| 365 } | 365 } |
| 366 | 366 |
| 367 namespace { | |
| 368 | |
| 369 struct MemoryOperandMatcher FINAL | |
| 370 : public ValueMatcher<int32_t, IrOpcode::kInt32Add> { | |

titzer (2014/11/05 13:16:09): What do you inherit from ValueMatcher here?

| 371 explicit MemoryOperandMatcher(InstructionSelector* selector, Node* node) | |
| 372 : ValueMatcher<int32_t, IrOpcode::kInt32Add>(node), | |
| 373 is_memory_operand_(false), | |
| 374 scaled_(NULL), | |
| 375 unscaled_(NULL), | |
| 376 constant_(NULL), | |
| 377 scale_factor_(1) { | |
| 378 MatchMemoryOperand(selector); | |

titzer (2014/11/05 13:16:09): This method is only used in the constructor and I…

| 379 } | |
| 380 | |
| 381 bool IsMemoryOperand() const { return is_memory_operand_; } | |
| 382 | |
| 383 AddressingMode GenerateMemoryOperand(X64OperandGenerator* g, | |
| 384 InstructionOperand* inputs[], | |
| 385 size_t* input_count) { | |
| 386 AddressingMode mode = kMode_MRI; | |
| 387 if (unscaled_ != NULL) { | |
| 388 inputs[(*input_count)++] = g->UseRegister(unscaled_); | |
| 389 if (scaled_ != NULL) { | |
| 390 inputs[(*input_count)++] = g->UseRegister(scaled_); | |
| 391 if (constant_ != NULL) { | |
| 392 inputs[(*input_count)++] = g->UseImmediate(constant_); | |
| 393 if (scale_factor_ == 1) { | |

titzer (2014/11/05 13:16:09): This pattern with switching on the scale factor co…

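A sketch of the kind of simplification the comment seems to point toward (not part of this CL; `ScaledMode` and the `kScaled*` array names are hypothetical): the repeated if/else chains over `scale_factor_` differ only in which `kMode_*` family they select, so the selection could be factored into one helper plus per-family tables.

```cpp
// Sketch only, not part of the CL under review. ScaledMode and the
// kScaled* array names are made up for illustration; the kMode_* values
// come from the existing AddressingMode enum used in this file.
AddressingMode ScaledMode(const AddressingMode (&family)[4], int scale) {
  switch (scale) {
    case 1: return family[0];
    case 2: return family[1];
    case 4: return family[2];
    case 8: return family[3];
  }
  UNREACHABLE();
  return family[0];
}

const AddressingMode kScaledMRI[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I,
                                     kMode_MR8I};
const AddressingMode kScaledMR[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
const AddressingMode kScaledMI[] = {kMode_M1I, kMode_M2I, kMode_M4I, kMode_M8I};
const AddressingMode kScaledM[] = {kMode_M1, kMode_M2, kMode_M4, kMode_M8};
```

With helpers like these, each branch in GenerateMemoryOperand below would reduce to a single assignment such as `mode = ScaledMode(kScaledMRI, scale_factor_);`.
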
| 394 mode = kMode_MR1I; | |
| 395 } else if (scale_factor_ == 2) { | |
| 396 mode = kMode_MR2I; | |
| 397 } else if (scale_factor_ == 4) { | |
| 398 mode = kMode_MR4I; | |
| 399 } else if (scale_factor_ == 8) { | |
| 400 mode = kMode_MR8I; | |
| 401 } else { | |
| 402 UNREACHABLE(); | |
| 403 } | |
| 404 } else { | |
| 405 if (scale_factor_ == 1) { | |
| 406 mode = kMode_MR1; | |
| 407 } else if (scale_factor_ == 2) { | |
| 408 mode = kMode_MR2; | |
| 409 } else if (scale_factor_ == 4) { | |
| 410 mode = kMode_MR4; | |
| 411 } else if (scale_factor_ == 8) { | |
| 412 mode = kMode_MR8; | |
| 413 } else { | |
| 414 UNREACHABLE(); | |
| 415 } | |
| 416 } | |
| 417 } else { | |
| 418 DCHECK(constant_ != NULL); | |
| 419 inputs[(*input_count)++] = g->UseImmediate(constant_); | |
| 420 mode = kMode_MRI; | |
| 421 } | |
| 422 } else { | |
| 423 DCHECK(scaled_ != NULL); | |
| 424 inputs[(*input_count)++] = g->UseRegister(scaled_); | |
| 425 if (constant_ != NULL) { | |
| 426 inputs[(*input_count)++] = g->UseImmediate(constant_); | |
| 427 if (scale_factor_ == 1) { | |
| 428 mode = kMode_M1I; | |
| 429 } else if (scale_factor_ == 2) { | |
| 430 mode = kMode_M2I; | |
| 431 } else if (scale_factor_ == 4) { | |
| 432 mode = kMode_M4I; | |
| 433 } else if (scale_factor_ == 8) { | |
| 434 mode = kMode_M8I; | |
| 435 } else { | |
| 436 UNREACHABLE(); | |
| 437 } | |
| 438 } else { | |
| 439 if (scale_factor_ == 1) { | |
| 440 mode = kMode_M1; | |
| 441 } else if (scale_factor_ == 2) { | |
| 442 mode = kMode_M2; | |
| 443 } else if (scale_factor_ == 4) { | |
| 444 mode = kMode_M4; | |
| 445 } else if (scale_factor_ == 8) { | |
| 446 mode = kMode_M8; | |
| 447 } else { | |
| 448 UNREACHABLE(); | |
| 449 } | |
| 450 } | |
| 451 } | |
| 452 return mode; | |
| 453 } | |
| 454 | |
| 455 private: | |
| 456 static bool TryCombine(InstructionSelector* selector, Node* use, Node* node, | |

titzer (2014/11/05 13:16:09): This is tough to follow, partly because of the poi…

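One reading of the (truncated) comment is that the four `Node**` out-parameters make `TryCombine` hard to follow. A minimal sketch of an alternative, using a hypothetical `OperandState` struct that is not part of the CL:

```cpp
// Sketch only: bundle the state that TryCombine currently threads through
// four pointer arguments into one object that can be copied and restored
// wholesale. Field names mirror the matcher's members but are hypothetical.
struct OperandState {
  OperandState()
      : constant(NULL), unscaled(NULL), scaled(NULL), scale_factor(1) {}
  Node* constant;
  Node* unscaled;
  Node* scaled;
  int scale_factor;
};
```

A `TryCombine(selector, use, node, &state)` variant would then assign into the struct under the same rules as the pointer-based version below, and `MatchMemoryOperand` could snapshot a whole `OperandState` when it needs to match speculatively and back out.
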
| 457 Node** constant, Node** unscaled, Node** scaled, | |
| 458 int* scale_factor) { | |
| 459 X64OperandGenerator g(selector); | |
| 460 if (g.CanBeImmediate(node) && *constant == NULL) { | |
| 461 *constant = node; | |

titzer (2014/11/05 13:16:08): Why would the constant already be non-null and why…

| 462 } else { | |
| 463 if (selector->CanCover(use, node) && *scaled == NULL) { | |
| 464 if (node->opcode() == IrOpcode::kInt32Mul) { | |
| 465 Int32BinopMatcher m(node); | |
| 466 Int32Matcher leftm(m.left().node()); | |
| 467 Int32Matcher rightm(m.right().node()); | |
| 468 if (leftm.IsPowerOf2() && leftm.Value() <= 8) { | |

titzer (2014/11/05 13:16:09): This can't happen because Int32BinopMatcher puts c…

| 469 *scale_factor = leftm.Value(); | |
| 470 *scaled = m.right().node(); | |
| 471 return true; | |
| 472 } | |
| 473 if (rightm.IsPowerOf2() && rightm.Value() <= 8) { | |
| 474 *scale_factor = rightm.Value(); | |
| 475 *scaled = m.left().node(); | |
| 476 return true; | |
| 477 } | |
| 478 } | |
| 479 if (node->opcode() == IrOpcode::kWord32Shl) { | |
| 480 Int32BinopMatcher m(node); | |
| 481 Int32Matcher rightm(m.right().node()); | |
| 482 if (rightm.HasValue() && rightm.Value() <= 3 && rightm.Value() >= 0) { | |
| 483 *scale_factor = 1 << rightm.Value(); | |
| 484 *scaled = m.left().node(); | |
| 485 return true; | |
| 486 } | |
| 487 } | |
| 488 } | |
| 489 if (*unscaled == NULL) { | |
| 490 *unscaled = node; | |
| 491 } else if (*scaled == NULL) { | |
| 492 *scaled = node; | |
| 493 } else { | |
| 494 return false; | |

titzer (2014/11/05 13:16:09): So we can return false if scaled and unscaled are…

| 495 } | |
| 496 } | |
| 497 return true; | |
| 498 } | |
| 499 | |
| 500 void MatchMemoryOperand(InstructionSelector* selector) { | |
| 501 if (node()->opcode() != IrOpcode::kInt32Add) return; | |
| 502 | |
| 503 Node* node = this->node(); | |
| 504 Int32BinopMatcher m(node); | |
| 505 Node* left = m.left().node(); | |
| 506 Node* right = m.right().node(); | |
| 507 | |
| 508 is_memory_operand_ = true; | |
| 509 | |
| 510 bool handled_left = false; | |
| 511 if (left->opcode() == IrOpcode::kInt32Add && | |
| 512 selector->CanCover(node, left)) { | |
| 513 Int32BinopMatcher leftm(left); | |
| 514 Node* deeper_left = leftm.left().node(); | |
| 515 Node* deeper_right = leftm.right().node(); | |
| 516 CHECK(TryCombine(selector, left, deeper_left, &constant_, &unscaled_, | |
| 517 &scaled_, &scale_factor_)); | |
| 518 CHECK(TryCombine(selector, left, deeper_right, &constant_, &unscaled_, | |
| 519 &scaled_, &scale_factor_)); | |
| 520 // Combining the operands of the left side's add still needs to keep at | |
| 521 // least one operand free for the right side, or the right operand needs | |
| 522 // to be constant. Otherwise, back out the match of the left operand | |
| 523 // altogether. | |
| 524 if (unscaled_ == NULL || scaled_ == NULL) { | |
| 525 handled_left = true; | |
| 526 } else { | |
| 527 X64OperandGenerator g(selector); | |
| 528 if (constant_ == NULL && g.CanBeImmediate(right)) { | |
| 529 constant_ = right; | |
| 530 return; | |
| 531 } | |
| 532 constant_ = NULL; | |
| 533 unscaled_ = NULL; | |
| 534 scaled_ = NULL; | |
| 535 scale_factor_ = 1; | |
| 536 } | |
| 537 } | |
| 538 // If the left operand wasn't an add, just fold it into the memory operand as-is. | |
| 539 if (!handled_left) { | |
| 540 CHECK(TryCombine(selector, node, left, &constant_, &unscaled_, &scaled_, | |
| 541 &scale_factor_)); | |
| 542 } | |
| 543 | |
| 544 bool handled_right = false; | |
| 545 if (right->opcode() == IrOpcode::kInt32Add && | |
| 546 selector->CanCover(node, right)) { | |

titzer (2014/11/05 13:16:09): What happens if we handled left already here?

| 547 Int32BinopMatcher rightm(right); | |
| 548 Node* deeper_left = rightm.left().node(); | |
| 549 Node* deeper_right = rightm.right().node(); | |
| 550 Node* constant_copy = constant_; | |
| 551 Node* unscaled_copy = unscaled_; | |
| 552 Node* scaled_copy = scaled_; | |
| 553 int scale_factor_copy = scale_factor_; | |
| 554 // If it's not possible to find a home for both the right add's operands, | |
| 555 // then abort the attempt to combine the right operand into the memory | |
| 556 // operand. | |
| 557 if (TryCombine(selector, right, deeper_left, &constant_copy, | |
| 558 &unscaled_copy, &scaled_copy, &scale_factor_copy) && | |
| 559 TryCombine(selector, right, deeper_right, &constant_copy, | |
| 560 &unscaled_copy, &scaled_copy, &scale_factor_copy)) { | |
| 561 handled_right = true; | |
| 562 constant_ = constant_copy; | |
| 563 unscaled_ = unscaled_copy; | |
| 564 scaled_ = scaled_copy; | |
| 565 scale_factor_ = scale_factor_copy; | |
| 566 } | |
| 567 } | |
| 568 | |
| 569 // If the right operand was either not an add or the add couldn't be | |
| 570 // folded into the base node's memory operand, then just use the right | |
| 571 // operand as-is. | |
| 572 if (!handled_right) { | |
| 573 TryCombine(selector, node, right, &constant_, &unscaled_, &scaled_, | |
| 574 &scale_factor_); | |
| 575 } | |
| 576 } | |
| 577 | |
| 578 bool is_memory_operand_; | |
| 579 Node* scaled_; | |
| 580 Node* unscaled_; | |
| 581 Node* constant_; | |
| 582 int scale_factor_; | |
| 583 }; | |
| 584 | |
| 585 } // namespace | |
| 586 | |
| 587 | |
| 588 static bool TryLeaInt32Add(InstructionSelector* selector, Node* node) { | |
| 589 MemoryOperandMatcher m(selector, node); | |
| 590 if (!m.IsMemoryOperand()) return false; | |
| 591 | |
| 592 X64OperandGenerator g(selector); | |
| 593 InstructionOperand* inputs[4]; | |
| 594 size_t input_count = 0; | |
| 595 | |
| 596 AddressingMode mode = m.GenerateMemoryOperand(&g, inputs, &input_count); | |
| 597 | |
| 598 DCHECK_NE(0, static_cast<int>(input_count)); | |
| 599 DCHECK_GE(arraysize(inputs), input_count); | |
| 600 | |
| 601 InstructionOperand* outputs[1]; | |
| 602 outputs[0] = g.DefineAsRegister(node); | |
| 603 | |
| 604 InstructionCode opcode = AddressingModeField::encode(mode) | kX64Lea32; | |
| 605 | |
| 606 selector->Emit(opcode, 1, outputs, input_count, inputs); | |
| 607 | |
| 608 return true; | |
| 609 } | |
| 610 | |
| 367 | 611 |
| 368 void InstructionSelector::VisitInt32Add(Node* node) { | 612 void InstructionSelector::VisitInt32Add(Node* node) { |
| 613 if (TryLeaInt32Add(this, node)) return; | |

titzer (2014/11/05 13:16:09): I think this would be more readable if it was inli…

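For reference, the inlining the reviewer suggests might look roughly like the sketch below; it only moves the body of `TryLeaInt32Add` from this CL into the visitor, so no new names or APIs are involved.

```cpp
// Sketch of the inlined form; intended to behave exactly like calling
// TryLeaInt32Add and falling back to VisitBinop when no match is found.
void InstructionSelector::VisitInt32Add(Node* node) {
  MemoryOperandMatcher m(this, node);
  if (m.IsMemoryOperand()) {
    X64OperandGenerator g(this);
    InstructionOperand* inputs[4];
    size_t input_count = 0;
    AddressingMode mode = m.GenerateMemoryOperand(&g, inputs, &input_count);
    DCHECK_NE(0, static_cast<int>(input_count));
    DCHECK_GE(arraysize(inputs), input_count);
    InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
    Emit(AddressingModeField::encode(mode) | kX64Lea32, 1, outputs,
         input_count, inputs);
    return;
  }
  VisitBinop(this, node, kX64Add32);
}
```
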
| 369 VisitBinop(this, node, kX64Add32); | 614 VisitBinop(this, node, kX64Add32); |
| 370 } | 615 } |
| 371 | 616 |
| 372 | 617 |
| 373 void InstructionSelector::VisitInt64Add(Node* node) { | 618 void InstructionSelector::VisitInt64Add(Node* node) { |
| 374 VisitBinop(this, node, kX64Add); | 619 VisitBinop(this, node, kX64Add); |
| 375 } | 620 } |
| 376 | 621 |
| 377 | 622 |
| 378 void InstructionSelector::VisitInt32Sub(Node* node) { | 623 void InstructionSelector::VisitInt32Sub(Node* node) { |
| (...skipping 704 matching lines...) | |
| 1083 if (CpuFeatures::IsSupported(SSE4_1)) { | 1328 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 1084 return MachineOperatorBuilder::kFloat64Floor | | 1329 return MachineOperatorBuilder::kFloat64Floor | |
| 1085 MachineOperatorBuilder::kFloat64Ceil | | 1330 MachineOperatorBuilder::kFloat64Ceil | |
| 1086 MachineOperatorBuilder::kFloat64RoundTruncate; | 1331 MachineOperatorBuilder::kFloat64RoundTruncate; |
| 1087 } | 1332 } |
| 1088 return MachineOperatorBuilder::kNoFlags; | 1333 return MachineOperatorBuilder::kNoFlags; |
| 1089 } | 1334 } |
| 1090 } // namespace compiler | 1335 } // namespace compiler |
| 1091 } // namespace internal | 1336 } // namespace internal |
| 1092 } // namespace v8 | 1337 } // namespace v8 |