Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 11 matching lines...) | |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if V8_TARGET_ARCH_IA32 | 30 #if V8_TARGET_ARCH_IA32 |
| 31 | 31 |
| 32 #include "lithium-allocator-inl.h" | |
| 32 #include "ia32/lithium-codegen-ia32.h" | 33 #include "ia32/lithium-codegen-ia32.h" |
| 33 #include "ic.h" | 34 #include "ic.h" |
| 34 #include "code-stubs.h" | 35 #include "code-stubs.h" |
| 35 #include "deoptimizer.h" | 36 #include "deoptimizer.h" |
| 36 #include "stub-cache.h" | 37 #include "stub-cache.h" |
| 37 #include "codegen.h" | 38 #include "codegen.h" |
| 38 | 39 |
| 39 namespace v8 { | 40 namespace v8 { |
| 40 namespace internal { | 41 namespace internal { |
| 41 | 42 |
| (...skipping 286 matching lines...) | |
| 328 // incoming context. | 329 // incoming context. |
| 329 __ CallRuntime(Runtime::kTraceEnter, 0); | 330 __ CallRuntime(Runtime::kTraceEnter, 0); |
| 330 } | 331 } |
| 331 return !is_aborted(); | 332 return !is_aborted(); |
| 332 } | 333 } |
| 333 | 334 |
| 334 | 335 |
| 335 bool LCodeGen::GenerateBody() { | 336 bool LCodeGen::GenerateBody() { |
| 336 ASSERT(is_generating()); | 337 ASSERT(is_generating()); |
| 337 bool emit_instructions = true; | 338 bool emit_instructions = true; |
| 339 | |
| 340 if (!CpuFeatures::IsSupported(SSE2)) PreRecordX87StackUsage(); | |
| 341 | |
| 338 for (current_instruction_ = 0; | 342 for (current_instruction_ = 0; |
| 339 !is_aborted() && current_instruction_ < instructions_->length(); | 343 !is_aborted() && current_instruction_ < instructions_->length(); |
| 340 current_instruction_++) { | 344 current_instruction_++) { |
| 341 LInstruction* instr = instructions_->at(current_instruction_); | 345 LInstruction* instr = instructions_->at(current_instruction_); |
| 342 | 346 |
| 343 // Don't emit code for basic blocks with a replacement. | 347 // Don't emit code for basic blocks with a replacement. |
| 344 if (instr->IsLabel()) { | 348 if (instr->IsLabel()) { |
| 345 emit_instructions = !LLabel::cast(instr)->HasReplacement(); | 349 emit_instructions = !LLabel::cast(instr)->HasReplacement(); |
| 346 } | 350 } |
| 347 if (!emit_instructions) continue; | 351 if (!emit_instructions) continue; |
| 348 | 352 |
| 349 if (FLAG_code_comments && instr->HasInterestingComment(this)) { | 353 if (FLAG_code_comments && instr->HasInterestingComment(this)) { |
| 350 Comment(";;; <@%d,#%d> %s", | 354 Comment(";;; <@%d,#%d> %s", |
| 351 current_instruction_, | 355 current_instruction_, |
| 352 instr->hydrogen_value()->id(), | 356 instr->hydrogen_value()->id(), |
| 353 instr->Mnemonic()); | 357 instr->Mnemonic()); |
| 354 } | 358 } |
| 355 | 359 |
| 356 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); | 360 if (!CpuFeatures::IsSupported(SSE2)) { |
| 361 FlushX87StackIfNecessary(instr); | |
| 362 | |
| 363 if (instr->IsControl()) { | |
| 364 // TODO(olivf) sorry but if you entered this dark corner and | |
| 365 // triggered the assert below, you have to teach the x87 stack how to | |
| 366 // merge phi node inputs. | |
| 367 ASSERT(x87_stack_.depth() <= 1); | |
| 368 x87_stack_.leaving(current_block_, | |
| 369 static_cast<LControlInstruction<0, 0>*>(instr)->SuccessorAt(0)); | |
| 370 } | |
| 371 } | |
| 357 | 372 |
| 358 RecordAndUpdatePosition(instr->position()); | 373 RecordAndUpdatePosition(instr->position()); |
| 359 | 374 |
| 360 instr->CompileToNative(this); | 375 instr->CompileToNative(this); |
| 361 | 376 |
| 362 if (!CpuFeatures::IsSupported(SSE2) && | 377 if (!CpuFeatures::IsSupported(SSE2)) { |
| 363 FLAG_debug_code && FLAG_enable_slow_asserts) { | 378 RestoreX87StackIfNecessary(instr); |
| 379 if (!instr->IsLabel() && !instr->IsGap() && | |
| 380 FLAG_debug_code && FLAG_enable_slow_asserts) { | |
| 364 __ VerifyX87StackDepth(x87_stack_.depth()); | 381 __ VerifyX87StackDepth(x87_stack_.depth()); |
| 382 } | |
| 365 } | 383 } |
| 366 } | 384 } |
| 367 EnsureSpaceForLazyDeopt(); | 385 EnsureSpaceForLazyDeopt(); |
| 368 return !is_aborted(); | 386 return !is_aborted(); |
| 369 } | 387 } |
| 370 | 388 |
| 371 | 389 |
| 372 bool LCodeGen::GenerateJumpTable() { | 390 bool LCodeGen::GenerateJumpTable() { |
| 373 Label needs_frame; | 391 Label needs_frame; |
| 374 if (jump_table_.length() > 0) { | 392 if (jump_table_.length() > 0) { |
| (...skipping 113 matching lines...) | |
| 488 X87Register LCodeGen::ToX87Register(int index) const { | 506 X87Register LCodeGen::ToX87Register(int index) const { |
| 489 return X87Register::FromAllocationIndex(index); | 507 return X87Register::FromAllocationIndex(index); |
| 490 } | 508 } |
| 491 | 509 |
| 492 | 510 |
| 493 XMMRegister LCodeGen::ToDoubleRegister(int index) const { | 511 XMMRegister LCodeGen::ToDoubleRegister(int index) const { |
| 494 return XMMRegister::FromAllocationIndex(index); | 512 return XMMRegister::FromAllocationIndex(index); |
| 495 } | 513 } |
| 496 | 514 |
| 497 | 515 |
| 498 void LCodeGen::X87LoadForUsage(X87Register reg) { | |
| 499 ASSERT(x87_stack_.Contains(reg)); | |
| 500 x87_stack_.Fxch(reg); | |
| 501 x87_stack_.pop(); | |
| 502 } | |
| 503 | |
| 504 | |
| 505 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { | 516 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) { |
| 506 ASSERT(Contains(reg) && stack_depth_ > other_slot); | 517 ASSERT(Contains(reg) && stack_depth_ > other_slot); |
| 507 int i = ArrayIndex(reg); | 518 int i = ArrayIndex(reg); |
| 508 int st = st2idx(i); | 519 int st = st2idx(i); |
| 509 if (st != other_slot) { | 520 if (st != other_slot) { |
| 510 int other_i = st2idx(other_slot); | 521 int other_i = st2idx(other_slot); |
| 511 X87Register other = stack_[other_i]; | 522 X87Register other = stack_[other_i]; |
| 512 stack_[other_i] = reg; | 523 stack_[other_i] = reg; |
| 513 stack_[i] = other; | 524 stack_[i] = other; |
| 514 if (st == 0) { | 525 if (st == 0) { |
| 515 __ fxch(other_slot); | 526 __ fxch(other_slot); |
| 516 } else if (other_slot == 0) { | 527 } else if (other_slot == 0) { |
| 517 __ fxch(st); | 528 __ fxch(st); |
| 518 } else { | 529 } else { |
| 519 __ fxch(st); | 530 __ fxch(st); |
| 520 __ fxch(other_slot); | 531 __ fxch(other_slot); |
| 521 __ fxch(st); | 532 __ fxch(st); |
| 522 } | 533 } |
| 523 } | 534 } |
| 524 } | 535 } |
| 525 | 536 |
| 526 | 537 |
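Fxch above swaps two arbitrary stack slots even though the hardware `fxch` can only exchange a slot with st(0); when neither operand is on top, three exchanges do the job. A standalone sketch (not part of the patch) modeling the register stack as a vector whose back element is st(0):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// Hardware fxch(i) can only exchange st(0) with st(i). The back of the
// vector plays the role of st(0).
static void fxch(std::vector<int>& s, int i) {
  std::swap(s[s.size() - 1], s[s.size() - 1 - i]);
}

int main() {
  std::vector<int> s = {10, 20, 30, 40, 50};  // st(0) == 50, st(3) == 20
  int st = 3, other_slot = 1;                 // swap st(3) and st(1)
  fxch(s, st);          // bring st(3)'s value to the top
  fxch(s, other_slot);  // exchange it with st(1)
  fxch(s, st);          // drop the old st(1) value into st(3)
  // Only st(1) and st(3) changed places; everything else is untouched.
  assert(s == (std::vector<int>{10, 40, 30, 20, 50}));
  return 0;
}
```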
| 538 int LCodeGen::X87Stack::PhysicalPos(X87Register reg) { | |
| 539 return st2idx(ArrayIndex(reg)); | |
| 540 } | |
| 541 | |
| 542 | |
| 543 void LCodeGen::X87Stack::leaving(HBasicBlock* block, HBasicBlock* next) { | |
| 544 for (int i = 0; i < X87Register::kNumAllocatableRegisters; i++) { | |
| 545 X87Register reg = X87Register::FromAllocationIndex(i); | |
| 546 // If, by the end of the block, the register is marked as defined here | |
| 547 // but not yet on the stack, the block dominates all definitions. | |
| 548 // To assert a consistent state of the stack for all successors, we | |
| 549 // push a dummy value here. | |
| 550 if (dominated_[i] == block && first_defined_[i] != block) { | |
| 551 if (FLAG_trace_x87) { | |
| 552 PrintF(stdout, | |
| 553 "%s is not defined in dominator block %d, pushing dummy\n", | |
| 554 X87Register::AllocationIndexToString(i), block->block_id()); | |
| 555 } | |
| 556 push(reg); | |
| 557 masm()->fldz(); | |
| 558 } else if (Contains(reg) && !dominated_[reg.code()]->Dominates(next)) { | |
| 559 if (FLAG_trace_x87) { | |
| 560 PrintF(stdout, | |
| 561 "freeing %s: def dominated by %d, but are leaving %d for %d\n", | |
| 562 X87Register::AllocationIndexToString(i), | |
| 563 dominated_[i]->block_id(), block->block_id(), next->block_id()); | |
| 564 } | |
| 565 Free(reg); | |
| 566 } | |
| 567 } | |
| 568 } | |
| 569 | |
| 570 | |
| 571 void LCodeGen::X87Stack::record_usage(X87Register reg, HBasicBlock* block) { | |
| 572 ASSERT(first_defined_[reg.code()] != NULL); | |
| 573 record_definition(reg, block); | |
| 574 } | |
| 575 | |
| 576 | |
| 577 void LCodeGen::X87Stack::record_definition(X87Register reg, | |
| 578 HBasicBlock* block) { | |
| 579 int i = reg.code(); | |
| 580 ASSERT(i < X87Register::kNumAllocatableRegisters); | |
| 581 if (first_defined_[i] == NULL) { | |
| 582 first_defined_[i] = dominated_[i] = block; | |
| 583 if (FLAG_trace_x87) { | |
| 584 PrintF(stdout, "%s first defined in %d\n", | |
| 585 X87Register::AllocationIndexToString(i), | |
| 586 block->block_id()); | |
| 587 } | |
| 588 } else { | |
| 589 HBasicBlock* current = dominated_[i]; | |
| 590 if (current == block) return; | |
| 591 while (current != NULL) { | |
| 592 if (current->Dominates(block)) break; | |
| 593 current = current->dominator(); | |
| 594 } | |
| 595 ASSERT(current != NULL); | |
| 596 if (FLAG_trace_x87) { | |
| 597 PrintF(stdout, "%s reused in %d, update dominator to %d\n", | |
| 598 X87Register::AllocationIndexToString(i), | |
| 599 block->block_id(), current->block_id()); | |
| 600 } | |
| 601 dominated_[i] = current; | |
| 602 } | |
| 603 } | |
| 604 | |
| 605 | |
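record_definition keeps dominated_[i] pointing at the nearest block that dominates every definition and use of the register seen so far, climbing the dominator tree on each reuse. A minimal sketch of that walk, where Block and Dominates are hypothetical stand-ins for the slice of HBasicBlock's interface used here:

```cpp
#include <cassert>
#include <cstddef>

// Hypothetical stand-in: an immediate-dominator link plus a Dominates()
// query that walks that chain.
struct Block {
  int id;
  Block* dominator;  // immediate dominator; NULL for the entry block
  bool Dominates(const Block* other) const {
    for (const Block* b = other; b != NULL; b = b->dominator) {
      if (b == this) return true;
    }
    return false;
  }
};

// The walk record_definition performs on reuse: climb from the block
// recorded so far to the nearest ancestor dominating the new block.
static Block* HoistDominator(Block* current, Block* use_block) {
  while (current != NULL && !current->Dominates(use_block)) {
    current = current->dominator;
  }
  return current;
}

int main() {
  Block entry = {0, NULL};
  Block left = {1, &entry};   // entry branches to left and right
  Block right = {2, &entry};
  // Defined in `left`, reused in `right`: hoist to the common dominator.
  assert(HoistDominator(&left, &right) == &entry);
  // A block that already dominates the use stays put.
  assert(HoistDominator(&entry, &left) == &entry);
  return 0;
}
```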
| 527 int LCodeGen::X87Stack::st2idx(int pos) { | 606 int LCodeGen::X87Stack::st2idx(int pos) { |
| 528 return stack_depth_ - pos - 1; | 607 return stack_depth_ - pos - 1; |
| 529 } | 608 } |
| 530 | 609 |
| 531 | 610 |
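st2idx converts between the two numbering schemes in play: stack_ is indexed from the bottom (slot 0 holds the oldest value), while the FPU names slots st(0)..st(7) from the top, so physical position pos maps to array slot stack_depth_ - pos - 1. A quick check of that mapping:

```cpp
#include <cassert>

// Mirrors X87Stack::st2idx: array grows upward, st(0) is the newest slot.
static int st2idx(int stack_depth, int pos) { return stack_depth - pos - 1; }

int main() {
  assert(st2idx(3, 0) == 2);  // st(0), the top, is the last array slot
  assert(st2idx(3, 2) == 0);  // st(2), the deepest live value, is slot 0
  return 0;
}
```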
| 532 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { | 611 int LCodeGen::X87Stack::ArrayIndex(X87Register reg) { |
| 533 for (int i = 0; i < stack_depth_; i++) { | 612 for (int i = 0; i < stack_depth_; i++) { |
| 534 if (stack_[i].is(reg)) return i; | 613 if (stack_[i].is(reg)) return i; |
| 535 } | 614 } |
| 536 UNREACHABLE(); | 615 UNREACHABLE(); |
| (...skipping 16 matching lines...) | |
| 553 if (st > 0) { | 632 if (st > 0) { |
| 554 // keep track of how fstp(i) changes the order of elements | 633 // keep track of how fstp(i) changes the order of elements |
| 555 int tos_i = st2idx(0); | 634 int tos_i = st2idx(0); |
| 556 stack_[i] = stack_[tos_i]; | 635 stack_[i] = stack_[tos_i]; |
| 557 } | 636 } |
| 558 pop(); | 637 pop(); |
| 559 __ fstp(st); | 638 __ fstp(st); |
| 560 } | 639 } |
| 561 | 640 |
| 562 | 641 |
| 642 void LCodeGen::X87Mov(X87Register dst, X87Register src) { | |
| 643 if (x87_stack_.Contains(dst)) { | |
| 644 x87_stack_.Fxch(dst); | |
| 645 __ fstp(0); | |
| 646 } else { | |
| 647 x87_stack_.push(dst); | |
| 648 } | |
| 649 __ fld(x87_stack_.PhysicalPos(src)); | |
| 650 } | |
| 651 | |
| 652 | |
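The register-to-register X87Mov first frees dst if it is live (Fxch to the top, then fstp(0)), and finally duplicates src onto the top of the physical stack with fld at src's physical position; the virtual stack then names that new top slot dst. A small sketch of the final fld step (illustrative only; the vector's back is st(0)):

```cpp
#include <cassert>
#include <vector>

// fld st(i) pushes a copy of the i-th slot from the top.
static void fld(std::vector<char>& s, int i) {
  s.push_back(s[s.size() - 1 - i]);
}

int main() {
  std::vector<char> s = {'a', 'b'};  // st(0) == 'b', st(1) == 'a'
  int physical_pos_of_src = 1;       // PhysicalPos(src) for src == 'a'
  fld(s, physical_pos_of_src);       // the copy on top becomes dst
  assert((s == std::vector<char>{'a', 'b', 'a'}));
  return 0;
}
```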
| 563 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { | 653 void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) { |
| 564 if (x87_stack_.Contains(dst)) { | 654 if (x87_stack_.Contains(dst)) { |
| 565 x87_stack_.Fxch(dst); | 655 x87_stack_.Fxch(dst); |
| 566 __ fstp(0); | 656 __ fstp(0); |
| 567 } else { | 657 } else { |
| 568 x87_stack_.push(dst); | 658 x87_stack_.push(dst); |
| 569 } | 659 } |
| 570 X87Fld(src, opts); | 660 X87Fld(src, opts); |
| 571 } | 661 } |
| 572 | 662 |
| (...skipping 38 matching lines...) | |
| 611 } | 701 } |
| 612 // Mark this register as the next register to write to | 702 // Mark this register as the next register to write to |
| 613 stack_[stack_depth_] = reg; | 703 stack_[stack_depth_] = reg; |
| 614 } | 704 } |
| 615 | 705 |
| 616 | 706 |
| 617 void LCodeGen::X87Stack::CommitWrite(X87Register reg) { | 707 void LCodeGen::X87Stack::CommitWrite(X87Register reg) { |
| 618 // Assert the reg is prepared to write, but not on the virtual stack yet | 708 // Assert the reg is prepared to write, but not on the virtual stack yet |
| 619 ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) && | 709 ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) && |
| 620 stack_depth_ < X87Register::kNumAllocatableRegisters); | 710 stack_depth_ < X87Register::kNumAllocatableRegisters); |
| 621 stack_depth_++; | 711 push(reg); |
| 622 } | 712 } |
| 623 | 713 |
| 624 | 714 |
| 625 void LCodeGen::X87PrepareBinaryOp( | 715 void LCodeGen::X87PrepareBinaryOp( |
| 626 X87Register left, X87Register right, X87Register result) { | 716 X87Register left, X87Register right, X87Register result) { |
| 627 // You need to use DefineSameAsFirst for x87 instructions | 717 // You need to use DefineSameAsFirst for x87 instructions |
| 628 ASSERT(result.is(left)); | 718 ASSERT(result.is(left)); |
| 629 x87_stack_.Fxch(right, 1); | 719 x87_stack_.Fxch(right, 1); |
| 630 x87_stack_.Fxch(left); | 720 x87_stack_.Fxch(left); |
| 631 } | 721 } |
| 632 | 722 |
| 633 | 723 |
| 634 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { | 724 void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) { |
| 635 if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) { | 725 if (stack_depth_ > 0) { |
| 636 bool double_inputs = instr->HasDoubleRegisterInput(); | 726 if (instr->IsReturn()) { |
| 637 | 727 while (stack_depth_ > 0) { |
| 638 // Flush stack from tos down, since FreeX87() will mess with tos | 728 __ fstp(0); |
| 639 for (int i = stack_depth_-1; i >= 0; i--) { | 729 stack_depth_--; |
| 640 X87Register reg = stack_[i]; | |
| 641 // Skip registers which contain the inputs for the next instruction | |
| 642 // when flushing the stack | |
| 643 if (double_inputs && instr->IsDoubleInput(reg, cgen)) { | |
| 644 continue; | |
| 645 } | 730 } |
| 646 Free(reg); | 731 } else if (instr->ClobbersDoubleRegisters()) { |
| 647 if (i < stack_depth_-1) i++; | 732 bool double_inputs = instr->HasDoubleRegisterInput(); |
| 648 } | 733 int inp_arg = 0; |
| 649 } | 734 // Flush stack from tos down, to leave the inputs encountered first at |
| 650 if (instr->IsReturn()) { | 735 // the lower stack positions. |
| 651 while (stack_depth_ > 0) { | 736 for (int i = stack_depth_-1; i >= 0; i--) { |
| 652 __ fstp(0); | 737 X87Register reg = stack_[i]; |
| 653 stack_depth_--; | 738 // Skip registers which contain the inputs. By flushing from tos down, |
| 739 // we effectively leave them at the lower end of the physical stack. | |
| 740 if (double_inputs && instr->IsDoubleInput(reg, cgen)) { | |
| 741 int old_pos = ArrayIndex(reg); | |
| 742 stack_[old_pos] = stack_[inp_arg]; | |
| 743 stack_[inp_arg++] = reg; | |
| 744 continue; | |
Toon Verwaest, 2013/08/22 13:33:41: As discussed, this seems broken. If we swap input
| 745 } | |
| 746 __ fstp(st2idx(i)); | |
| 747 } | |
| 654 } | 748 } |
| 655 } | 749 } |
| 656 } | 750 } |
| 751 | |
| 752 | |
| 753 void LCodeGen::X87Stack::RestoreIfNecessary(LInstruction* instr, | |
| 754 LCodeGen* cgen) { | |
| 755 if (instr->ClobbersDoubleRegisters()) { | |
| 756 // Restoring the physical stack height is done after the instruction, which | |
| 757 // does not work if the instruction jumps somewhere else. | |
| 758 ASSERT(!instr->IsControl()); | |
| 759 int amount = instr->HasDoubleRegisterResult() | |
| 760 ? stack_depth_ - 1 : stack_depth_; | |
| 761 for (int i = 0; i < amount; i++) __ fld(0); | |
| 762 } | |
| 763 } | |
| 764 | |
| 765 | |
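Reading RestoreIfNecessary as written: after a clobbering instruction the physical stack must be re-inflated to match the virtual stack_depth_, and the fld(0) values are dummies since only the height matters; one slot fewer is needed when the instruction already left its double result on top. A sketch of that count, under this reading of the code:

```cpp
#include <cassert>

// How many fld(0) dummies RestoreIfNecessary emits to bring the
// physical depth back in line with the virtual depth.
static int RefillCount(int virtual_depth, bool has_double_result) {
  return has_double_result ? virtual_depth - 1 : virtual_depth;
}

int main() {
  assert(RefillCount(3, true) == 2);   // result already occupies one slot
  assert(RefillCount(3, false) == 3);  // stack was clobbered flat
  return 0;
}
```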
| 766 void LCodeGen::PreRecordX87StackUsage() { | |
| 767 HBasicBlock* current = NULL; | |
| 768 for (current_instruction_ = 0; | |
| 769 !is_aborted() && current_instruction_ < instructions_->length(); | |
| 770 current_instruction_++) { | |
| 771 LInstruction* instr = instructions_->at(current_instruction_); | |
| 772 if (instr->IsLabel()) current = LLabel::cast(instr)->block(); | |
| 773 if (instr->HasDoubleRegisterResult()) { | |
| 774 X87Register res = ToX87Register(instr->result()); | |
| 775 x87_stack_.record_definition(res, current); | |
| 776 } | |
| 777 for (UseIterator it(instr); !it.Done(); it.Advance()) { | |
| 778 LOperand* op = it.Current(); | |
| 779 if (op != NULL && op->IsDoubleRegister()) { | |
| 780 X87Register res = ToX87Register(op); | |
| 781 x87_stack_.record_usage(res, current); | |
| 782 } | |
| 783 } | |
| 784 } | |
| 785 } | |
| 657 | 786 |
| 658 | 787 |
| 659 void LCodeGen::EmitFlushX87ForDeopt() { | 788 void LCodeGen::EmitFlushX87ForDeopt() { |
| 660 // The deoptimizer does not support X87 Registers. But as long as we | 789 // The deoptimizer does not support X87 Registers. But as long as we |
| 661 // deopt from a stub it's not a problem, since we will re-materialize the | 790 // deopt from a stub it's not a problem, since we will re-materialize the |
| 662 // original stub inputs, which can't be double registers. | 791 // original stub inputs, which can't be double registers. |
| 663 ASSERT(info()->IsStub()); | 792 ASSERT(info()->IsStub()); |
| 664 if (FLAG_debug_code && FLAG_enable_slow_asserts) { | 793 if (FLAG_debug_code && FLAG_enable_slow_asserts) { |
| 665 __ pushfd(); | 794 __ pushfd(); |
| 666 __ VerifyX87StackDepth(x87_stack_.depth()); | 795 __ VerifyX87StackDepth(x87_stack_.depth()); |
| (...skipping 563 matching lines...) | |
| 1230 } | 1359 } |
| 1231 | 1360 |
| 1232 | 1361 |
| 1233 void LCodeGen::DoLabel(LLabel* label) { | 1362 void LCodeGen::DoLabel(LLabel* label) { |
| 1234 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", | 1363 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", |
| 1235 current_instruction_, | 1364 current_instruction_, |
| 1236 label->hydrogen_value()->id(), | 1365 label->hydrogen_value()->id(), |
| 1237 label->block_id(), | 1366 label->block_id(), |
| 1238 LabelType(label)); | 1367 LabelType(label)); |
| 1239 __ bind(label->label()); | 1368 __ bind(label->label()); |
| 1240 current_block_ = label->block_id(); | 1369 current_block_ = label->block(); |
| 1241 DoGap(label); | 1370 DoGap(label); |
| 1242 } | 1371 } |
| 1243 | 1372 |
| 1244 | 1373 |
| 1245 void LCodeGen::DoParallelMove(LParallelMove* move) { | 1374 void LCodeGen::DoParallelMove(LParallelMove* move) { |
| 1246 resolver_.Resolve(move); | 1375 resolver_.Resolve(move); |
| 1247 } | 1376 } |
| 1248 | 1377 |
| 1249 | 1378 |
| 1250 void LCodeGen::DoGap(LGap* gap) { | 1379 void LCodeGen::DoGap(LGap* gap) { |
| (...skipping 971 matching lines...) | |
| 2222 ASSERT(ToRegister(instr->right()).is(eax)); | 2351 ASSERT(ToRegister(instr->right()).is(eax)); |
| 2223 ASSERT(ToRegister(instr->result()).is(eax)); | 2352 ASSERT(ToRegister(instr->result()).is(eax)); |
| 2224 | 2353 |
| 2225 BinaryOpStub stub(instr->op(), NO_OVERWRITE); | 2354 BinaryOpStub stub(instr->op(), NO_OVERWRITE); |
| 2226 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | 2355 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 2227 __ nop(); // Signals no inlined code. | 2356 __ nop(); // Signals no inlined code. |
| 2228 } | 2357 } |
| 2229 | 2358 |
| 2230 | 2359 |
| 2231 int LCodeGen::GetNextEmittedBlock() const { | 2360 int LCodeGen::GetNextEmittedBlock() const { |
| 2232 for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { | 2361 for (int i = current_block_->block_id() + 1; |
| 2362 i < graph()->blocks()->length(); ++i) { | |
| 2233 if (!chunk_->GetLabel(i)->HasReplacement()) return i; | 2363 if (!chunk_->GetLabel(i)->HasReplacement()) return i; |
| 2234 } | 2364 } |
| 2235 return -1; | 2365 return -1; |
| 2236 } | 2366 } |
| 2237 | 2367 |
| 2238 | 2368 |
| 2239 template<class InstrType> | 2369 template<class InstrType> |
| 2240 void LCodeGen::EmitBranch(InstrType instr, Condition cc) { | 2370 void LCodeGen::EmitBranch(InstrType instr, Condition cc) { |
| 2241 int left_block = instr->TrueDestination(chunk_); | 2371 int left_block = instr->TrueDestination(chunk_); |
| 2242 int right_block = instr->FalseDestination(chunk_); | 2372 int right_block = instr->FalseDestination(chunk_); |
| (...skipping 283 matching lines...) | |
| 2526 | 2656 |
| 2527 bool use_sse2 = CpuFeatures::IsSupported(SSE2); | 2657 bool use_sse2 = CpuFeatures::IsSupported(SSE2); |
| 2528 if (use_sse2) { | 2658 if (use_sse2) { |
| 2529 CpuFeatureScope scope(masm(), SSE2); | 2659 CpuFeatureScope scope(masm(), SSE2); |
| 2530 XMMRegister input_reg = ToDoubleRegister(instr->object()); | 2660 XMMRegister input_reg = ToDoubleRegister(instr->object()); |
| 2531 __ ucomisd(input_reg, input_reg); | 2661 __ ucomisd(input_reg, input_reg); |
| 2532 EmitFalseBranch(instr, parity_odd); | 2662 EmitFalseBranch(instr, parity_odd); |
| 2533 } else { | 2663 } else { |
| 2534 // Put the value to the top of stack | 2664 // Put the value to the top of stack |
| 2535 X87Register src = ToX87Register(instr->object()); | 2665 X87Register src = ToX87Register(instr->object()); |
| 2536 X87LoadForUsage(src); | 2666 X87Fxch(src); |
| 2537 __ fld(0); | 2667 __ fld(0); |
| 2538 __ fld(0); | 2668 __ fld(0); |
| 2539 __ FCmp(); | 2669 __ FCmp(); |
| 2540 Label ok; | 2670 EmitFalseBranch(instr, parity_odd); |
| 2541 __ j(parity_even, &ok); | |
| 2542 __ fstp(0); | |
| 2543 EmitFalseBranch(instr, no_condition); | |
| 2544 __ bind(&ok); | |
| 2545 } | 2671 } |
| 2546 | 2672 |
| 2547 | 2673 |
| 2548 __ sub(esp, Immediate(kDoubleSize)); | 2674 __ sub(esp, Immediate(kDoubleSize)); |
| 2549 if (use_sse2) { | 2675 if (use_sse2) { |
| 2550 CpuFeatureScope scope(masm(), SSE2); | 2676 CpuFeatureScope scope(masm(), SSE2); |
| 2551 XMMRegister input_reg = ToDoubleRegister(instr->object()); | 2677 XMMRegister input_reg = ToDoubleRegister(instr->object()); |
| 2552 __ movdbl(MemOperand(esp, 0), input_reg); | 2678 __ movdbl(MemOperand(esp, 0), input_reg); |
| 2553 } else { | 2679 } else { |
| 2554 __ fstp_d(MemOperand(esp, 0)); | 2680 __ fst_d(MemOperand(esp, 0)); |
| 2555 } | 2681 } |
| 2556 | 2682 |
| 2557 __ add(esp, Immediate(kDoubleSize)); | 2683 __ add(esp, Immediate(kDoubleSize)); |
| 2558 int offset = sizeof(kHoleNanUpper32); | 2684 int offset = sizeof(kHoleNanUpper32); |
| 2559 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); | 2685 __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32)); |
| 2560 EmitBranch(instr, equal); | 2686 EmitBranch(instr, equal); |
| 2561 } | 2687 } |
| 2562 | 2688 |
| 2563 | 2689 |
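The sub/fst_d/cmp sequence above spills the double to the stack and compares only its upper 32 bits against kHoleNanUpper32: the hole marker is a NaN whose high word suffices to distinguish it. A self-contained sketch of the same test; the bit pattern below is a placeholder assumption, the real constant is defined in v8globals.h:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Placeholder for V8's kHoleNanUpper32 (assumed value, for illustration).
static const uint32_t kHoleNanUpper32Placeholder = 0x7FF7FFFFu;

static bool HighWordIsHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // reinterpret bits without UB
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Placeholder;
}

int main() {
  // Build a double whose high word matches the placeholder pattern.
  uint64_t hole_bits = static_cast<uint64_t>(kHoleNanUpper32Placeholder) << 32;
  double hole;
  std::memcpy(&hole, &hole_bits, sizeof(hole));
  assert(HighWordIsHole(hole));
  assert(!HighWordIsHole(1.0));  // an ordinary double is not the hole
  return 0;
}
```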
| 2564 Condition LCodeGen::EmitIsObject(Register input, | 2690 Condition LCodeGen::EmitIsObject(Register input, |
| (...skipping 2510 matching lines...) | |
| 5075 private: | 5201 private: |
| 5076 LNumberTagD* instr_; | 5202 LNumberTagD* instr_; |
| 5077 }; | 5203 }; |
| 5078 | 5204 |
| 5079 Register reg = ToRegister(instr->result()); | 5205 Register reg = ToRegister(instr->result()); |
| 5080 | 5206 |
| 5081 bool use_sse2 = CpuFeatures::IsSupported(SSE2); | 5207 bool use_sse2 = CpuFeatures::IsSupported(SSE2); |
| 5082 if (!use_sse2) { | 5208 if (!use_sse2) { |
| 5083 // Put the value to the top of stack | 5209 // Put the value to the top of stack |
| 5084 X87Register src = ToX87Register(instr->value()); | 5210 X87Register src = ToX87Register(instr->value()); |
| 5085 X87LoadForUsage(src); | 5211 X87Fxch(src); |
| 5086 } | 5212 } |
| 5087 | 5213 |
| 5088 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 5214 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
| 5089 if (FLAG_inline_new) { | 5215 if (FLAG_inline_new) { |
| 5090 Register tmp = ToRegister(instr->temp()); | 5216 Register tmp = ToRegister(instr->temp()); |
| 5091 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); | 5217 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); |
| 5092 } else { | 5218 } else { |
| 5093 __ jmp(deferred->entry()); | 5219 __ jmp(deferred->entry()); |
| 5094 } | 5220 } |
| 5095 __ bind(deferred->exit()); | 5221 __ bind(deferred->exit()); |
| 5096 if (use_sse2) { | 5222 if (use_sse2) { |
| 5097 CpuFeatureScope scope(masm(), SSE2); | 5223 CpuFeatureScope scope(masm(), SSE2); |
| 5098 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 5224 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 5099 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | 5225 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); |
| 5100 } else { | 5226 } else { |
| 5101 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); | 5227 __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
| 5102 } | 5228 } |
| 5103 } | 5229 } |
| 5104 | 5230 |
| 5105 | 5231 |
| 5106 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 5232 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
| 5107 // TODO(3095996): Get rid of this. For now, we need to make the | 5233 // TODO(3095996): Get rid of this. For now, we need to make the |
| 5108 // result register contain a valid pointer because it is already | 5234 // result register contain a valid pointer because it is already |
| 5109 // contained in the register pointer map. | 5235 // contained in the register pointer map. |
| 5110 Register reg = ToRegister(instr->result()); | 5236 Register reg = ToRegister(instr->result()); |
| 5111 __ Set(reg, Immediate(0)); | 5237 __ Set(reg, Immediate(0)); |
| (...skipping 1438 matching lines...) | |
| 6550 FixedArray::kHeaderSize - kPointerSize)); | 6676 FixedArray::kHeaderSize - kPointerSize)); |
| 6551 __ bind(&done); | 6677 __ bind(&done); |
| 6552 } | 6678 } |
| 6553 | 6679 |
| 6554 | 6680 |
| 6555 #undef __ | 6681 #undef __ |
| 6556 | 6682 |
| 6557 } } // namespace v8::internal | 6683 } } // namespace v8::internal |
| 6558 | 6684 |
| 6559 #endif // V8_TARGET_ARCH_IA32 | 6685 #endif // V8_TARGET_ARCH_IA32 |