OLD | NEW |
1 | 1 |
2 // Copyright 2011 the V8 project authors. All rights reserved. | 2 // Copyright 2011 the V8 project authors. All rights reserved. |
3 // Redistribution and use in source and binary forms, with or without | 3 // Redistribution and use in source and binary forms, with or without |
4 // modification, are permitted provided that the following conditions are | 4 // modification, are permitted provided that the following conditions are |
5 // met: | 5 // met: |
6 // | 6 // |
7 // * Redistributions of source code must retain the above copyright | 7 // * Redistributions of source code must retain the above copyright |
8 // notice, this list of conditions and the following disclaimer. | 8 // notice, this list of conditions and the following disclaimer. |
9 // * Redistributions in binary form must reproduce the above | 9 // * Redistributions in binary form must reproduce the above |
10 // copyright notice, this list of conditions and the following | 10 // copyright notice, this list of conditions and the following |
(...skipping 439 matching lines...)
450 __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i)); | 450 __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i)); |
451 } | 451 } |
452 } | 452 } |
453 | 453 |
454 const int kSavedRegistersAreaSize = | 454 const int kSavedRegistersAreaSize = |
455 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; | 455 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; |
456 | 456 |
457 // Get the bailout id from the stack. | 457 // Get the bailout id from the stack. |
458 __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize)); | 458 __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize)); |
459 | 459 |
460 // Get the address of the location in the code object if possible (a3) (return | 460 // Get the address of the location in the code object (a3) (return |
461 // address for lazy deoptimization) and compute the fp-to-sp delta in | 461 // address for lazy deoptimization) and compute the fp-to-sp delta in |
462 // register t0. | 462 // register t0. |
463 if (type() == EAGER || type() == SOFT) { | 463 __ mov(a3, ra); |
464 __ mov(a3, zero_reg); | 464 // Correct one word for bailout id. |
465 // Correct one word for bailout id. | 465 __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
466 __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | |
467 } else if (type() == OSR) { | |
468 __ mov(a3, ra); | |
469 // Correct one word for bailout id. | |
470 __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | |
471 } else { | |
472 __ mov(a3, ra); | |
473 // Correct two words for bailout id and return address. | |
474 __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); | |
475 } | |
476 | 466 |
477 __ Subu(t0, fp, t0); | 467 __ Subu(t0, fp, t0); |
478 | 468 |
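All bailout types now share the single-word layout (only the bailout id sits above the saved-register area), so the fp-to-sp delta computed into t0 reduces to one fixed expression. A minimal sketch of that arithmetic, assuming MIPS32 word size and 14 allocatable FPU registers (both assumptions, not taken from this patch):

    #include <cstdint>

    const int kPointerSize = 4;          // assumption: MIPS32 word size
    const int kNumberOfRegisters = 32;   // GP registers saved by the sw loop
    const int kDoubleRegsSize = 14 * 8;  // assumption: 14 allocatable FPU regs

    const int kSavedRegistersAreaSize =
        (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

    // t0 = fp - (sp + saved-register area + one word for the bailout id)
    uint32_t FpToSpDelta(uint32_t fp, uint32_t sp) {
      return fp - (sp + kSavedRegistersAreaSize + 1 * kPointerSize);
    }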
479 // Allocate a new deoptimizer object. | 469 // Allocate a new deoptimizer object. |
480 // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack. | 470 // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack. |
481 __ PrepareCallCFunction(6, t1); | 471 __ PrepareCallCFunction(6, t1); |
482 __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 472 __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
483 __ li(a1, Operand(type())); // bailout type, | 473 __ li(a1, Operand(type())); // bailout type, |
484 // a2: bailout id already loaded. | 474 // a2: bailout id already loaded. |
485 // a3: code address or 0 already loaded. | 475 // a3: code address or 0 already loaded. |
(...skipping 28 matching lines...)
514 int double_regs_offset = FrameDescription::double_registers_offset(); | 504 int double_regs_offset = FrameDescription::double_registers_offset(); |
515 // Copy FPU registers to | 505 // Copy FPU registers to |
516 // double_registers_[DoubleRegister::kNumAllocatableRegisters] | 506 // double_registers_[DoubleRegister::kNumAllocatableRegisters] |
517 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) { | 507 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) { |
518 int dst_offset = i * kDoubleSize + double_regs_offset; | 508 int dst_offset = i * kDoubleSize + double_regs_offset; |
519 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; | 509 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; |
520 __ ldc1(f0, MemOperand(sp, src_offset)); | 510 __ ldc1(f0, MemOperand(sp, src_offset)); |
521 __ sdc1(f0, MemOperand(a1, dst_offset)); | 511 __ sdc1(f0, MemOperand(a1, dst_offset)); |
522 } | 512 } |
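The copy loop mirrors the save layout: source offsets skip the kNumberOfRegisters general-purpose words at the bottom of the stack snapshot, while destination offsets index into the FrameDescription's double_registers_ array. The same offset arithmetic as a plain C++ sketch (hypothetical buffers and counts):

    #include <cstdint>
    #include <cstring>

    // sp_area: the stack snapshot (32 GP words first, then the doubles);
    // frame_desc: base of the FrameDescription being filled in.
    void CopyDoubles(const uint8_t* sp_area, uint8_t* frame_desc,
                     int double_regs_offset, int num_fpu_regs) {
      const int kDoubleSize = 8, kPointerSize = 4, kNumberOfRegisters = 32;
      for (int i = 0; i < num_fpu_regs; ++i) {
        int dst_offset = i * kDoubleSize + double_regs_offset;
        int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
        memcpy(frame_desc + dst_offset, sp_area + src_offset, kDoubleSize);
      }
    }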
523 | 513 |
524 // Remove the bailout id, eventually return address, and the saved registers | 514 // Remove the bailout id and the saved registers from the stack. |
525 // from the stack. | 515 __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
526 if (type() == EAGER || type() == SOFT || type() == OSR) { | |
527 __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | |
528 } else { | |
529 __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); | |
530 } | |
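What that single constant adjustment pops is everything pushed between sp and the optimized frame. A layout sketch with hypothetical names, ascending addresses from sp, again assuming 32 general-purpose and 14 FPU registers:

    #include <cstdint>

    struct SavedState {           // ascending addresses from sp
      uint32_t gp_regs[32];       // stored by the sw loop at the top
      uint32_t fpu_regs[14 * 2];  // 14 doubles (assumed count), as word pairs
      uint32_t bailout_id;        // the single word pushed by the table entry
    };
    // 32*4 + 14*8 + 4 == kSavedRegistersAreaSize + 1 * kPointerSize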
531 | 516 |
532 // Compute a pointer to the unwinding limit in register a2; that is | 517 // Compute a pointer to the unwinding limit in register a2; that is |
533 // the first stack slot not part of the input frame. | 518 // the first stack slot not part of the input frame. |
534 __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset())); | 519 __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset())); |
535 __ Addu(a2, a2, sp); | 520 __ Addu(a2, a2, sp); |
536 | 521 |
537 // Unwind the stack down to - but not including - the unwinding | 522 // Unwind the stack down to - but not including - the unwinding |
538 // limit and copy the contents of the activation frame to the input | 523 // limit and copy the contents of the activation frame to the input |
539 // frame description. | 524 // frame description. |
540 __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset())); | 525 __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset())); |
(...skipping 80 matching lines...)
621 __ InitializeRootRegister(); | 606 __ InitializeRootRegister(); |
622 | 607 |
623 __ pop(at); // Get continuation, leave pc on stack. | 608 __ pop(at); // Get continuation, leave pc on stack. |
624 __ pop(ra); | 609 __ pop(ra); |
625 __ Jump(at); | 610 __ Jump(at); |
626 __ stop("Unreachable."); | 611 __ stop("Unreachable."); |
627 } | 612 } |
628 | 613 |
629 | 614 |
630 // Maximum size of a table entry generated below. | 615 // Maximum size of a table entry generated below. |
631 const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize; | 616 const int Deoptimizer::table_entry_size_ = 6 * Assembler::kInstrSize; |
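Dropping the emulated-call push shrinks the worst-case entry from nine to six instruction slots. Entries are nop-padded to exactly this size, which is what lets an entry's address be derived from its bailout id; a sketch of that mapping (hypothetical helper, assuming 4-byte MIPS instructions):

    #include <cstdint>

    const int kInstrSize = 4;                    // MIPS instruction width
    const int kTableEntrySize = 6 * kInstrSize;  // mirrors table_entry_size_

    // Fixed-size, nop-padded entries make the table indexable by id.
    uintptr_t EntryAddress(uintptr_t table_start, int bailout_id) {
      return table_start + bailout_id * kTableEntrySize;
    }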
632 | 617 |
633 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 618 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
634 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); | 619 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); |
635 | 620 |
636 // Create a sequence of deoptimization entries. Note that any | 621 // Create a sequence of deoptimization entries. |
637 // registers may be still live. | 622 // Note that registers are still live when jumping to an entry. |
638 Label table_start; | 623 Label table_start; |
639 __ bind(&table_start); | 624 __ bind(&table_start); |
640 for (int i = 0; i < count(); i++) { | 625 for (int i = 0; i < count(); i++) { |
641 Label start; | 626 Label start; |
642 __ bind(&start); | 627 __ bind(&start); |
643 if (type() != EAGER && type() != SOFT) { | 628 __ addiu(sp, sp, -1 * kPointerSize); |
644 // Emulate ia32 like call by pushing return address to stack. | |
645 __ addiu(sp, sp, -2 * kPointerSize); | |
646 __ sw(ra, MemOperand(sp, 1 * kPointerSize)); | |
647 } else { | |
648 __ addiu(sp, sp, -1 * kPointerSize); | |
649 } | |
650 // Jump over the remaining deopt entries (including this one). | 629 // Jump over the remaining deopt entries (including this one). |
651 // This code is always reached by calling Jump, which puts the target (label | 630 // This code is always reached by calling Jump, which puts the target (label |
652 // start) into t9. | 631 // start) into t9. |
653 const int remaining_entries = (count() - i) * table_entry_size_; | 632 const int remaining_entries = (count() - i) * table_entry_size_; |
654 __ Addu(t9, t9, remaining_entries); | 633 __ Addu(t9, t9, remaining_entries); |
655 // 'at' was clobbered so we can only load the current entry value here. | 634 // 'at' was clobbered so we can only load the current entry value here. |
656 __ li(at, i); | 635 __ li(at, i); |
657 __ jr(t9); // Expose delay slot. | 636 __ jr(t9); // Expose delay slot. |
658 __ sw(at, MemOperand(sp, 0 * kPointerSize)); // In the delay slot. | 637 __ sw(at, MemOperand(sp, 0 * kPointerSize)); // In the delay slot. |
659 | 638 |
660 // Pad the rest of the code. | 639 // Pad the rest of the code. |
661 while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) { | 640 while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) { |
662 __ nop(); | 641 __ nop(); |
663 } | 642 } |
664 | 643 |
665 ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); | 644 ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start)); |
666 } | 645 } |
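The Addu on t9 works because Jump leaves each entry's own start address in t9: entry i starts at table_start + i * table_entry_size_, so adding the size of the remaining entries always yields the first instruction past the table. The same arithmetic as a hypothetical helper:

    #include <cstdint>

    // Entry i starts at table_start + i * size, so adding the size of the
    // remaining entries yields table_start + count * size for every i:
    // the first instruction past the table, shared by all entries.
    uintptr_t CommonTarget(uintptr_t table_start, int i, int count, int size) {
      uintptr_t entry_start = table_start + i * size;  // value Jump left in t9
      return entry_start + (count - i) * size;
    }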
667 | 646 |
668 ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), | 647 ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start), |
669 count() * table_entry_size_); | 648 count() * table_entry_size_); |
670 } | 649 } |
671 | 650 |
672 #undef __ | 651 #undef __ |
673 | 652 |
674 | 653 |
675 } } // namespace v8::internal | 654 } } // namespace v8::internal |