| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 648 matching lines...) |
| 659 GeneratePrologue(); | 659 GeneratePrologue(); |
| 660 CpuFeatures::Scope scope(SSE2); | 660 CpuFeatures::Scope scope(SSE2); |
| 661 | 661 |
| 662 Isolate* isolate = masm()->isolate(); | 662 Isolate* isolate = masm()->isolate(); |
| 663 | 663 |
| 664 // Save all general purpose registers before messing with them. | 664 // Save all general purpose registers before messing with them. |
| 665 const int kNumberOfRegisters = Register::kNumRegisters; | 665 const int kNumberOfRegisters = Register::kNumRegisters; |
| 666 | 666 |
| 667 const int kDoubleRegsSize = kDoubleSize * | 667 const int kDoubleRegsSize = kDoubleSize * |
| 668 XMMRegister::kNumAllocatableRegisters; | 668 XMMRegister::kNumAllocatableRegisters; |
| 669 __ sub(Operand(esp), Immediate(kDoubleRegsSize)); | 669 __ sub(esp, Immediate(kDoubleRegsSize)); |
| 670 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { | 670 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { |
| 671 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); | 671 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); |
| 672 int offset = i * kDoubleSize; | 672 int offset = i * kDoubleSize; |
| 673 __ movdbl(Operand(esp, offset), xmm_reg); | 673 __ movdbl(Operand(esp, offset), xmm_reg); |
| 674 } | 674 } |
| 675 | 675 |
| 676 __ pushad(); | 676 __ pushad(); |
| 677 | 677 |
| 678 const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize + | 678 const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize + |
| 679 kDoubleRegsSize; | 679 kDoubleRegsSize; |
| 680 | 680 |
| 681 // Get the bailout id from the stack. | 681 // Get the bailout id from the stack. |
| 682 __ mov(ebx, Operand(esp, kSavedRegistersAreaSize)); | 682 __ mov(ebx, Operand(esp, kSavedRegistersAreaSize)); |
| 683 | 683 |
| 684 // Get the address of the location in the code object if possible | 684 // Get the address of the location in the code object if possible |
| 685 // and compute the fp-to-sp delta in register edx. | 685 // and compute the fp-to-sp delta in register edx. |
| 686 if (type() == EAGER) { | 686 if (type() == EAGER) { |
| 687 __ Set(ecx, Immediate(0)); | 687 __ Set(ecx, Immediate(0)); |
| 688 __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize)); | 688 __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize)); |
| 689 } else { | 689 } else { |
| 690 __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize)); | 690 __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize)); |
| 691 __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize)); | 691 __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize)); |
| 692 } | 692 } |
| 693 __ sub(edx, Operand(ebp)); | 693 __ sub(edx, ebp); |
| 694 __ neg(edx); | 694 __ neg(edx); |
| 695 | 695 |
| 696 // Allocate a new deoptimizer object. | 696 // Allocate a new deoptimizer object. |
| 697 __ PrepareCallCFunction(6, eax); | 697 __ PrepareCallCFunction(6, eax); |
| 698 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); | 698 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); |
| 699 __ mov(Operand(esp, 0 * kPointerSize), eax); // Function. | 699 __ mov(Operand(esp, 0 * kPointerSize), eax); // Function. |
| 700 __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type. | 700 __ mov(Operand(esp, 1 * kPointerSize), Immediate(type())); // Bailout type. |
| 701 __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id. | 701 __ mov(Operand(esp, 2 * kPointerSize), ebx); // Bailout id. |
| 702 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0. | 702 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0. |
| 703 __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta. | 703 __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta. |
| (...skipping 18 matching lines...) |
| 722 int double_regs_offset = FrameDescription::double_registers_offset(); | 722 int double_regs_offset = FrameDescription::double_registers_offset(); |
| 723 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { | 723 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { |
| 724 int dst_offset = i * kDoubleSize + double_regs_offset; | 724 int dst_offset = i * kDoubleSize + double_regs_offset; |
| 725 int src_offset = i * kDoubleSize; | 725 int src_offset = i * kDoubleSize; |
| 726 __ movdbl(xmm0, Operand(esp, src_offset)); | 726 __ movdbl(xmm0, Operand(esp, src_offset)); |
| 727 __ movdbl(Operand(ebx, dst_offset), xmm0); | 727 __ movdbl(Operand(ebx, dst_offset), xmm0); |
| 728 } | 728 } |
| 729 | 729 |
| 730 // Remove the bailout id and the double registers from the stack. | 730 // Remove the bailout id and the double registers from the stack. |
| 731 if (type() == EAGER) { | 731 if (type() == EAGER) { |
| 732 __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize)); | 732 __ add(esp, Immediate(kDoubleRegsSize + kPointerSize)); |
| 733 } else { | 733 } else { |
| 734 __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize)); | 734 __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize)); |
| 735 } | 735 } |
| 736 | 736 |
| 737 // Compute a pointer to the unwinding limit in register ecx; that is | 737 // Compute a pointer to the unwinding limit in register ecx; that is |
| 738 // the first stack slot not part of the input frame. | 738 // the first stack slot not part of the input frame. |
| 739 __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); | 739 __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); |
| 740 __ add(ecx, Operand(esp)); | 740 __ add(ecx, esp); |
| 741 | 741 |
| 742 // Unwind the stack down to - but not including - the unwinding | 742 // Unwind the stack down to - but not including - the unwinding |
| 743 // limit and copy the contents of the activation frame to the input | 743 // limit and copy the contents of the activation frame to the input |
| 744 // frame description. | 744 // frame description. |
| 745 __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset())); | 745 __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset())); |
| 746 Label pop_loop; | 746 Label pop_loop; |
| 747 __ bind(&pop_loop); | 747 __ bind(&pop_loop); |
| 748 __ pop(Operand(edx, 0)); | 748 __ pop(Operand(edx, 0)); |
| 749 __ add(Operand(edx), Immediate(sizeof(uint32_t))); | 749 __ add(edx, Immediate(sizeof(uint32_t))); |
| 750 __ cmp(ecx, Operand(esp)); | 750 __ cmp(ecx, esp); |
| 751 __ j(not_equal, &pop_loop); | 751 __ j(not_equal, &pop_loop); |
| 752 | 752 |
| 753 // If frame was dynamically aligned, pop padding. | 753 // If frame was dynamically aligned, pop padding. |
| 754 Label sentinel, sentinel_done; | 754 Label sentinel, sentinel_done; |
| 755 __ pop(Operand(ecx)); | 755 __ pop(ecx); |
| 756 __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset())); | 756 __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset())); |
| 757 __ j(equal, &sentinel); | 757 __ j(equal, &sentinel); |
| 758 __ push(Operand(ecx)); | 758 __ push(ecx); |
| 759 __ jmp(&sentinel_done); | 759 __ jmp(&sentinel_done); |
| 760 __ bind(&sentinel); | 760 __ bind(&sentinel); |
| 761 __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()), | 761 __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()), |
| 762 Immediate(1)); | 762 Immediate(1)); |
| 763 __ bind(&sentinel_done); | 763 __ bind(&sentinel_done); |
| 764 // Compute the output frame in the deoptimizer. | 764 // Compute the output frame in the deoptimizer. |
| 765 __ push(eax); | 765 __ push(eax); |
| 766 __ PrepareCallCFunction(1, ebx); | 766 __ PrepareCallCFunction(1, ebx); |
| 767 __ mov(Operand(esp, 0 * kPointerSize), eax); | 767 __ mov(Operand(esp, 0 * kPointerSize), eax); |
| 768 { | 768 { |
| (...skipping 19 matching lines...) |
| 788 // Outer loop state: eax = current FrameDescription**, edx = one past the | 788 // Outer loop state: eax = current FrameDescription**, edx = one past the |
| 789 // last FrameDescription**. | 789 // last FrameDescription**. |
| 790 __ mov(edx, Operand(eax, Deoptimizer::output_count_offset())); | 790 __ mov(edx, Operand(eax, Deoptimizer::output_count_offset())); |
| 791 __ mov(eax, Operand(eax, Deoptimizer::output_offset())); | 791 __ mov(eax, Operand(eax, Deoptimizer::output_offset())); |
| 792 __ lea(edx, Operand(eax, edx, times_4, 0)); | 792 __ lea(edx, Operand(eax, edx, times_4, 0)); |
| 793 __ bind(&outer_push_loop); | 793 __ bind(&outer_push_loop); |
| 794 // Inner loop state: ebx = current FrameDescription*, ecx = loop index. | 794 // Inner loop state: ebx = current FrameDescription*, ecx = loop index. |
| 795 __ mov(ebx, Operand(eax, 0)); | 795 __ mov(ebx, Operand(eax, 0)); |
| 796 __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); | 796 __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); |
| 797 __ bind(&inner_push_loop); | 797 __ bind(&inner_push_loop); |
| 798 __ sub(Operand(ecx), Immediate(sizeof(uint32_t))); | 798 __ sub(ecx, Immediate(sizeof(uint32_t))); |
| 799 __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset())); | 799 __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset())); |
| 800 __ test(ecx, Operand(ecx)); | 800 __ test(ecx, ecx); |
| 801 __ j(not_zero, &inner_push_loop); | 801 __ j(not_zero, &inner_push_loop); |
| 802 __ add(Operand(eax), Immediate(kPointerSize)); | 802 __ add(eax, Immediate(kPointerSize)); |
| 803 __ cmp(eax, Operand(edx)); | 803 __ cmp(eax, edx); |
| 804 __ j(below, &outer_push_loop); | 804 __ j(below, &outer_push_loop); |
| 805 | 805 |
| 806 // In case of OSR, we have to restore the XMM registers. | 806 // In case of OSR, we have to restore the XMM registers. |
| 807 if (type() == OSR) { | 807 if (type() == OSR) { |
| 808 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { | 808 for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { |
| 809 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); | 809 XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); |
| 810 int src_offset = i * kDoubleSize + double_regs_offset; | 810 int src_offset = i * kDoubleSize + double_regs_offset; |
| 811 __ movdbl(xmm_reg, Operand(ebx, src_offset)); | 811 __ movdbl(xmm_reg, Operand(ebx, src_offset)); |
| 812 } | 812 } |
| 813 } | 813 } |
| (...skipping 32 matching lines...) |
| 846 } | 846 } |
| 847 __ bind(&done); | 847 __ bind(&done); |
| 848 } | 848 } |
| 849 | 849 |
| 850 #undef __ | 850 #undef __ |
| 851 | 851 |
| 852 | 852 |
| 853 } } // namespace v8::internal | 853 } } // namespace v8::internal |
| 854 | 854 |
| 855 #endif // V8_TARGET_ARCH_IA32 | 855 #endif // V8_TARGET_ARCH_IA32 |
| OLD | NEW |
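
Pattern behind the NEW column: where an instruction's operand is a plain register, the explicit Operand(...) wrapper is dropped, for example __ sub(Operand(esp), Immediate(kDoubleRegsSize)) becomes __ sub(esp, Immediate(kDoubleRegsSize)). This reads as relying on assembler overloads that accept a Register directly alongside the Operand form. The sketch below is a hypothetical, self-contained illustration of that overloading pattern; the type names, constructors, and printed output are stand-ins for illustration and are not V8's actual ia32 assembler API.

#include <cstdint>
#include <iostream>
#include <string>

// Hypothetical stand-ins; these do not mirror V8's real assembler types.
struct Register {
  std::string name;
};

struct Immediate {
  explicit Immediate(int32_t v) : value(v) {}
  int32_t value;
};

// An Operand can wrap a register or describe a memory reference.
struct Operand {
  explicit Operand(Register base) : text(base.name) {}
  Operand(Register base, int32_t disp)
      : text("[" + base.name + "+" + std::to_string(disp) + "]") {}
  std::string text;
};

struct Assembler {
  // Register overload: call sites may write add(esp, Immediate(8))
  // instead of add(Operand(esp), Immediate(8)).
  void add(Register dst, Immediate imm) {
    std::cout << "add " << dst.name << ", " << imm.value << "\n";
  }
  // Operand overload: still used for memory references.
  void add(const Operand& dst, Immediate imm) {
    std::cout << "add " << dst.text << ", " << imm.value << "\n";
  }
};

int main() {
  Assembler masm;
  Register esp{"esp"};
  masm.add(esp, Immediate(8));              // terser register form
  masm.add(Operand(esp, 0), Immediate(8));  // memory operand keeps the wrapper
  return 0;
}

Both calls resolve through ordinary overload resolution, which suggests the rewrite in the NEW column is a call-site cleanup rather than a behavioral change, though that ultimately depends on the real assembler's overloads.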