| OLD | NEW |
| 1 | 1 |
| 2 // Copyright 2011 the V8 project authors. All rights reserved. | 2 // Copyright 2011 the V8 project authors. All rights reserved. |
| 3 // Redistribution and use in source and binary forms, with or without | 3 // Redistribution and use in source and binary forms, with or without |
| 4 // modification, are permitted provided that the following conditions are | 4 // modification, are permitted provided that the following conditions are |
| 5 // met: | 5 // met: |
| 6 // | 6 // |
| 7 // * Redistributions of source code must retain the above copyright | 7 // * Redistributions of source code must retain the above copyright |
| 8 // notice, this list of conditions and the following disclaimer. | 8 // notice, this list of conditions and the following disclaimer. |
| 9 // * Redistributions in binary form must reproduce the above | 9 // * Redistributions in binary form must reproduce the above |
| 10 // copyright notice, this list of conditions and the following | 10 // copyright notice, this list of conditions and the following |
| (...skipping 513 matching lines...) |
| 524 output_frame->SetPc(pc_value); | 524 output_frame->SetPc(pc_value); |
| 525 | 525 |
| 526 FullCodeGenerator::State state = | 526 FullCodeGenerator::State state = |
| 527 FullCodeGenerator::StateField::decode(pc_and_state); | 527 FullCodeGenerator::StateField::decode(pc_and_state); |
| 528 output_frame->SetState(Smi::FromInt(state)); | 528 output_frame->SetState(Smi::FromInt(state)); |
| 529 | 529 |
| 530 | 530 |
| 531 // Set the continuation for the topmost frame. | 531 // Set the continuation for the topmost frame. |
| 532 if (is_topmost && bailout_type_ != DEBUGGER) { | 532 if (is_topmost && bailout_type_ != DEBUGGER) { |
| 533 Builtins* builtins = isolate_->builtins(); | 533 Builtins* builtins = isolate_->builtins(); |
| 534 Code* continuation = (bailout_type_ == EAGER) | 534 Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized); |
| 535 ? builtins->builtin(Builtins::kNotifyDeoptimized) | 535 if (bailout_type_ == LAZY) { |
| 536 : builtins->builtin(Builtins::kNotifyLazyDeoptimized); | 536 continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized); |
| 537 } else if (bailout_type_ == SOFT) { |
| 538 continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized); |
| 539 } else { |
| 540 ASSERT(bailout_type_ == EAGER); |
| 541 } |
| 537 output_frame->SetContinuation( | 542 output_frame->SetContinuation( |
| 538 reinterpret_cast<uint32_t>(continuation->entry())); | 543 reinterpret_cast<uint32_t>(continuation->entry())); |
| 539 } | 544 } |
| 540 } | 545 } |
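A note for readers skimming the hunk above: the old two-way EAGER/LAZY ternary becomes an if/else chain so that SOFT bailouts get their own notification builtin, with an ASSERT documenting that EAGER keeps the default. A minimal, standalone sketch of the same selection, using a mock enum and plain strings in place of V8's real Builtins table (every name below is illustrative, not the actual API):

```cpp
#include <cassert>
#include <cstdio>

enum BailoutType { EAGER, LAZY, SOFT, DEBUGGER, OSR };

// Returns the name of the notification builtin the deoptimizer would
// continue into for a given bailout type (strings stand in for V8's
// Builtins::k* entries).
const char* ContinuationBuiltinFor(BailoutType type) {
  switch (type) {
    case LAZY:  return "NotifyLazyDeoptimized";
    case SOFT:  return "NotifySoftDeoptimized";
    case EAGER: return "NotifyDeoptimized";
    default:
      // DEBUGGER frames never reach this point in the patched code
      // (the guard is `is_topmost && bailout_type_ != DEBUGGER`).
      assert(false && "no continuation for this bailout type");
      return nullptr;
  }
}

int main() {
  std::printf("%s\n", ContinuationBuiltinFor(SOFT));  // NotifySoftDeoptimized
  return 0;
}
```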
| 541 | 546 |
| 542 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { | 547 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { |
| 543 // Set the register values. The values are not important as there are no | 548 // Set the register values. The values are not important as there are no |
| 544 // callee saved registers in JavaScript frames, so all registers are | 549 // callee saved registers in JavaScript frames, so all registers are |
| 545 // spilled. Registers fp and sp are set to the correct values though. | 550 // spilled. Registers fp and sp are set to the correct values though. |
| 546 | 551 |
| (...skipping 73 matching lines...) |
| 620 | 625 |
| 621 const int kSavedRegistersAreaSize = | 626 const int kSavedRegistersAreaSize = |
| 622 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; | 627 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; |
| 623 | 628 |
| 624 // Get the bailout id from the stack. | 629 // Get the bailout id from the stack. |
| 625 __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize)); | 630 __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize)); |
| 626 | 631 |
| 627 // Get the address of the location in the code object if possible (a3) (return | 632 // Get the address of the location in the code object if possible (a3) (return |
| 628 // address for lazy deoptimization) and compute the fp-to-sp delta in | 633 // address for lazy deoptimization) and compute the fp-to-sp delta in |
| 629 // register t0. | 634 // register t0. |
| 630 if (type() == EAGER) { | 635 if (type() == EAGER || type() == SOFT) { |
| 631 __ mov(a3, zero_reg); | 636 __ mov(a3, zero_reg); |
| 632 // Correct one word for bailout id. | 637 // Correct one word for bailout id. |
| 633 __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 638 __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
| 634 } else if (type() == OSR) { | 639 } else if (type() == OSR) { |
| 635 __ mov(a3, ra); | 640 __ mov(a3, ra); |
| 636 // Correct one word for bailout id. | 641 // Correct one word for bailout id. |
| 637 __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 642 __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
| 638 } else { | 643 } else { |
| 639 __ mov(a3, ra); | 644 __ mov(a3, ra); |
| 640 // Correct two words for bailout id and return address. | 645 // Correct two words for bailout id and return address. |
| (...skipping 42 matching lines...) |
| 683 // double_registers_[DoubleRegister::kNumAllocatableRegisters] | 688 // double_registers_[DoubleRegister::kNumAllocatableRegisters] |
| 684 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) { | 689 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) { |
| 685 int dst_offset = i * kDoubleSize + double_regs_offset; | 690 int dst_offset = i * kDoubleSize + double_regs_offset; |
| 686 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; | 691 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; |
| 687 __ ldc1(f0, MemOperand(sp, src_offset)); | 692 __ ldc1(f0, MemOperand(sp, src_offset)); |
| 688 __ sdc1(f0, MemOperand(a1, dst_offset)); | 693 __ sdc1(f0, MemOperand(a1, dst_offset)); |
| 689 } | 694 } |
| 690 | 695 |
| 691 // Remove the bailout id, eventually return address, and the saved registers | 696 // Remove the bailout id, eventually return address, and the saved registers |
| 692 // from the stack. | 697 // from the stack. |
| 693 if (type() == EAGER || type() == OSR) { | 698 if (type() == EAGER || type() == SOFT || type() == OSR) { |
| 694 __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 699 __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
| 695 } else { | 700 } else { |
| 696 __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); | 701 __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); |
| 697 } | 702 } |
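The one-word versus two-word cleanup matches the prologue earlier in the diff: EAGER, SOFT and OSR entries push only the bailout id, while LAZY (and DEBUGGER) entries also push a return address, so one extra word must be popped. A hedged sketch of that arithmetic with stand-in constants (the real sizes come from V8's MIPS port, not from this snippet):

```cpp
// Illustrative only: recomputes the stack adjustment used above with
// stand-in constants; mips32 has 4-byte pointers and 8-byte doubles.
#include <cstdio>

enum BailoutType { EAGER, LAZY, SOFT, DEBUGGER, OSR };

constexpr int kPointerSize = 4;
constexpr int kDoubleSize = 8;
constexpr int kNumberOfRegisters = 32;        // GP registers saved on entry
constexpr int kNumberOfDoubleRegisters = 32;  // assumed count, for the sketch
constexpr int kDoubleRegsSize = kDoubleSize * kNumberOfDoubleRegisters;
constexpr int kSavedRegistersAreaSize =
    kNumberOfRegisters * kPointerSize + kDoubleRegsSize;

int StackAdjustment(BailoutType type) {
  // EAGER, SOFT and OSR pushed only the bailout id; LAZY and DEBUGGER also
  // pushed a return address, hence one extra word to pop.
  int extra_words = (type == EAGER || type == SOFT || type == OSR) ? 1 : 2;
  return kSavedRegistersAreaSize + extra_words * kPointerSize;
}

int main() {
  std::printf("SOFT: %d bytes, LAZY: %d bytes\n",
              StackAdjustment(SOFT), StackAdjustment(LAZY));
  return 0;
}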
| 698 | 703 |
| 699 // Compute a pointer to the unwinding limit in register a2; that is | 704 // Compute a pointer to the unwinding limit in register a2; that is |
| 700 // the first stack slot not part of the input frame. | 705 // the first stack slot not part of the input frame. |
| 701 __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset())); | 706 __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset())); |
| 702 __ Addu(a2, a2, sp); | 707 __ Addu(a2, a2, sp); |
| 703 | 708 |
| (...skipping 96 matching lines...) |
| 800 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 805 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
| 801 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); | 806 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); |
| 802 | 807 |
| 803 // Create a sequence of deoptimization entries. Note that any | 808 // Create a sequence of deoptimization entries. Note that any |
| 804 // registers may be still live. | 809 // registers may be still live. |
| 805 Label table_start; | 810 Label table_start; |
| 806 __ bind(&table_start); | 811 __ bind(&table_start); |
| 807 for (int i = 0; i < count(); i++) { | 812 for (int i = 0; i < count(); i++) { |
| 808 Label start; | 813 Label start; |
| 809 __ bind(&start); | 814 __ bind(&start); |
| 810 if (type() != EAGER) { | 815 if (type() != EAGER && type() != SOFT) { |
| 811 // Emulate ia32 like call by pushing return address to stack. | 816 // Emulate ia32 like call by pushing return address to stack. |
| 812 __ addiu(sp, sp, -2 * kPointerSize); | 817 __ addiu(sp, sp, -2 * kPointerSize); |
| 813 __ sw(ra, MemOperand(sp, 1 * kPointerSize)); | 818 __ sw(ra, MemOperand(sp, 1 * kPointerSize)); |
| 814 } else { | 819 } else { |
| 815 __ addiu(sp, sp, -1 * kPointerSize); | 820 __ addiu(sp, sp, -1 * kPointerSize); |
| 816 } | 821 } |
| 817 // Jump over the remaining deopt entries (including this one). | 822 // Jump over the remaining deopt entries (including this one). |
| 818 // This code is always reached by calling Jump, which puts the target (label | 823 // This code is always reached by calling Jump, which puts the target (label |
| 819 // start) into t9. | 824 // start) into t9. |
| 820 const int remaining_entries = (count() - i) * table_entry_size_; | 825 const int remaining_entries = (count() - i) * table_entry_size_; |
| (...skipping 12 matching lines...) |
| 833 } | 838 } |
| 834 | 839 |
| 835 ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), | 840 ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), |
| 836 count() * table_entry_size_); | 841 count() * table_entry_size_); |
| 837 } | 842 } |
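Each generated entry must occupy exactly table_entry_size_ bytes, which is what the ASSERT_EQ above verifies, because the deoptimizer later recovers a bailout index from an entry's offset within the table. A hedged sketch of that reverse mapping, with a made-up entry size (the real constant lives elsewhere in the MIPS port and is not shown in this diff):

```cpp
// Hedged sketch: how a fixed-size entry table maps a jump target back to a
// deoptimization index. kTableEntrySize is an illustrative placeholder.
#include <cassert>
#include <cstdint>

constexpr int kTableEntrySize = 32;  // bytes per entry (made-up value)

int DeoptIndexFromEntry(uintptr_t table_start, uintptr_t entry_address) {
  uintptr_t offset = entry_address - table_start;
  assert(offset % kTableEntrySize == 0);  // entries are fixed-size
  return static_cast<int>(offset / kTableEntrySize);
}

int main() {
  uintptr_t base = 0x1000;
  assert(DeoptIndexFromEntry(base, base + 3 * kTableEntrySize) == 3);
  return 0;
}
```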
| 838 | 843 |
| 839 #undef __ | 844 #undef __ |
| 840 | 845 |
| 841 | 846 |
| 842 } } // namespace v8::internal | 847 } } // namespace v8::internal |