| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 744 matching lines...) |
| 755 } else { | 755 } else { |
| 756 Comment(";;; B%d", label->block_id()); | 756 Comment(";;; B%d", label->block_id()); |
| 757 } | 757 } |
| 758 __ bind(label->label()); | 758 __ bind(label->label()); |
| 759 current_block_ = label->block_id(); | 759 current_block_ = label->block_id(); |
| 760 LCodeGen::DoGap(label); | 760 LCodeGen::DoGap(label); |
| 761 } | 761 } |
| 762 | 762 |
| 763 | 763 |
| 764 void LCodeGen::DoParallelMove(LParallelMove* move) { | 764 void LCodeGen::DoParallelMove(LParallelMove* move) { |
| 765 // xmm0 must always be a scratch register. | 765 resolver_.Resolve(move); |
| 766 XMMRegister xmm_scratch = xmm0; | |
| 767 LUnallocated marker_operand(LUnallocated::NONE); | |
| 768 | |
| 769 Register cpu_scratch = esi; | |
| 770 bool destroys_cpu_scratch = false; | |
| 771 | |
| 772 const ZoneList<LMoveOperands>* moves = | |
| 773 resolver_.Resolve(move->move_operands(), &marker_operand); | |
| 774 for (int i = moves->length() - 1; i >= 0; --i) { | |
| 775 LMoveOperands move = moves->at(i); | |
| 776 LOperand* from = move.from(); | |
| 777 LOperand* to = move.to(); | |
| 778 ASSERT(!from->IsDoubleRegister() || | |
| 779 !ToDoubleRegister(from).is(xmm_scratch)); | |
| 780 ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch)); | |
| 781 ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch)); | |
| 782 ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch)); | |
| 783 if (from->IsConstantOperand()) { | |
| 784 __ mov(ToOperand(to), ToImmediate(from)); | |
| 785 } else if (from == &marker_operand) { | |
| 786 if (to->IsRegister() || to->IsStackSlot()) { | |
| 787 __ mov(ToOperand(to), cpu_scratch); | |
| 788 ASSERT(destroys_cpu_scratch); | |
| 789 } else { | |
| 790 ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot()); | |
| 791 __ movdbl(ToOperand(to), xmm_scratch); | |
| 792 } | |
| 793 } else if (to == &marker_operand) { | |
| 794 if (from->IsRegister() || from->IsStackSlot()) { | |
| 795 __ mov(cpu_scratch, ToOperand(from)); | |
| 796 destroys_cpu_scratch = true; | |
| 797 } else { | |
| 798 ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot()); | |
| 799 __ movdbl(xmm_scratch, ToOperand(from)); | |
| 800 } | |
| 801 } else if (from->IsRegister()) { | |
| 802 __ mov(ToOperand(to), ToRegister(from)); | |
| 803 } else if (to->IsRegister()) { | |
| 804 __ mov(ToRegister(to), ToOperand(from)); | |
| 805 } else if (from->IsStackSlot()) { | |
| 806 ASSERT(to->IsStackSlot()); | |
| 807 __ push(eax); | |
| 808 __ mov(eax, ToOperand(from)); | |
| 809 __ mov(ToOperand(to), eax); | |
| 810 __ pop(eax); | |
| 811 } else if (from->IsDoubleRegister()) { | |
| 812 __ movdbl(ToOperand(to), ToDoubleRegister(from)); | |
| 813 } else if (to->IsDoubleRegister()) { | |
| 814 __ movdbl(ToDoubleRegister(to), ToOperand(from)); | |
| 815 } else { | |
| 816 ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot()); | |
| 817 __ movdbl(xmm_scratch, ToOperand(from)); | |
| 818 __ movdbl(ToOperand(to), xmm_scratch); | |
| 819 } | |
| 820 } | |
| 821 | |
| 822 if (destroys_cpu_scratch) { | |
| 823 __ mov(cpu_scratch, Operand(ebp, -kPointerSize)); | |
| 824 } | |
| 825 } | 766 } |
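The hunk above is the heart of this change: the left-hand `DoParallelMove` sequentialized the moves inline, pinning `esi` and `xmm0` as scratch locations, while the new code hands the whole problem to the gap resolver. For readers skimming the diff, below is a minimal sketch of the cycle-breaking idea behind `resolver_.Resolve()`: a DFS that emits each move only after the moves that read its destination, parking a value in a scratch location when a cycle is found. Locations are plain `int`s here, and `Resolve`, `ResolveOne`, and `scratch` are hypothetical names for illustration, not V8's actual `LGapResolver` interface.

```cpp
#include <cstdio>
#include <vector>

struct Move { int src, dst; };  // one move of a parallel move set

// Emit the chain of moves that must precede moves[i], then moves[i] itself.
// state: 0 = pending, 1 = on the current DFS stack, 2 = emitted.
void ResolveOne(std::vector<Move>& moves, std::vector<int>& state, int i,
                std::vector<Move>& out, int scratch) {
  if (moves[i].src == moves[i].dst) { state[i] = 2; return; }  // no-op move
  state[i] = 1;
  for (size_t j = 0; j < moves.size(); ++j) {
    if (moves[j].src != moves[i].dst) continue;  // j doesn't read our dst
    if (state[j] == 0) {
      ResolveOne(moves, state, j, out, scratch);  // j must be emitted first
    } else if (state[j] == 1) {
      // Cycle: park the blocked value in the scratch location and let the
      // in-progress move read it from there when its frame unwinds.
      out.push_back({moves[j].src, scratch});
      moves[j].src = scratch;
    }
  }
  out.push_back({moves[i].src, moves[i].dst});
  state[i] = 2;
}

// Orders a parallel move so each emitted move reads a still-live value.
std::vector<Move> Resolve(std::vector<Move> moves, int scratch) {
  std::vector<int> state(moves.size(), 0);
  std::vector<Move> out;
  for (size_t i = 0; i < moves.size(); ++i)
    if (state[i] == 0) ResolveOne(moves, state, i, out, scratch);
  return out;
}

int main() {
  // A swap plus an extra read: loc0 -> loc1, loc1 -> loc0, loc1 -> loc2.
  std::vector<Move> moves = {{0, 1}, {1, 0}, {1, 2}};
  for (const Move& m : Resolve(moves, /*scratch=*/99))
    std::printf("mov loc%d <- loc%d\n", m.dst, m.src);
}
```

Because each destination is written by at most one move, at most one cycle can be live at any point in the DFS, so a single scratch location per operand kind suffices; that is also why the deleted code could get by with one `cpu_scratch` register and one `xmm_scratch`.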
| 826 | 767 |
| 827 | 768 |
| 828 void LCodeGen::DoGap(LGap* gap) { | 769 void LCodeGen::DoGap(LGap* gap) { |
| 829 for (int i = LGap::FIRST_INNER_POSITION; | 770 for (int i = LGap::FIRST_INNER_POSITION; |
| 830 i <= LGap::LAST_INNER_POSITION; | 771 i <= LGap::LAST_INNER_POSITION; |
| 831 i++) { | 772 i++) { |
| 832 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); | 773 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); |
| 833 LParallelMove* move = gap->GetParallelMove(inner_pos); | 774 LParallelMove* move = gap->GetParallelMove(inner_pos); |
| 834 if (move != NULL) DoParallelMove(move); | 775 if (move != NULL) DoParallelMove(move); |
| (...skipping 2808 matching lines...) |
| 3643 ASSERT(osr_pc_offset_ == -1); | 3584 ASSERT(osr_pc_offset_ == -1); |
| 3644 osr_pc_offset_ = masm()->pc_offset(); | 3585 osr_pc_offset_ = masm()->pc_offset(); |
| 3645 } | 3586 } |
| 3646 | 3587 |
| 3647 | 3588 |
| 3648 #undef __ | 3589 #undef __ |
| 3649 | 3590 |
| 3650 } } // namespace v8::internal | 3591 } } // namespace v8::internal |
| 3651 | 3592 |
| 3652 #endif // V8_TARGET_ARCH_IA32 | 3593 #endif // V8_TARGET_ARCH_IA32 |