OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 581 matching lines...)
592 | 592 |
593 | 593 |
594 bool Serializer::serialization_enabled_ = false; | 594 bool Serializer::serialization_enabled_ = false; |
595 bool Serializer::too_late_to_enable_now_ = false; | 595 bool Serializer::too_late_to_enable_now_ = false; |
596 | 596 |
597 | 597 |
598 Deserializer::Deserializer(SnapshotByteSource* source) | 598 Deserializer::Deserializer(SnapshotByteSource* source) |
599 : isolate_(NULL), | 599 : isolate_(NULL), |
600 source_(source), | 600 source_(source), |
601 external_reference_decoder_(NULL) { | 601 external_reference_decoder_(NULL) { |
602 } | 602 for (int i = 0; i < LAST_SPACE + 1; i++) { |
603 | 603 reservations_[i] = kUninitializedReservation; |
604 | |
605 // This routine both allocates a new object, and also keeps | |
606 // track of where objects have been allocated so that we can | |
607 // fix back references when deserializing. | |
608 Address Deserializer::Allocate(int space_index, Space* space, int size) { | |
609 Address address; | |
610 if (!SpaceIsLarge(space_index)) { | |
611 ASSERT(!SpaceIsPaged(space_index) || | |
612 size <= Page::kPageSize - Page::kObjectStartOffset); | |
613 MaybeObject* maybe_new_allocation; | |
614 if (space_index == NEW_SPACE) { | |
615 maybe_new_allocation = | |
616 reinterpret_cast<NewSpace*>(space)->AllocateRaw(size); | |
617 } else { | |
618 maybe_new_allocation = | |
619 reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size); | |
620 } | |
621 ASSERT(!maybe_new_allocation->IsFailure()); | |
622 Object* new_allocation = maybe_new_allocation->ToObjectUnchecked(); | |
623 HeapObject* new_object = HeapObject::cast(new_allocation); | |
624 address = new_object->address(); | |
625 high_water_[space_index] = address + size; | |
626 } else { | |
627 ASSERT(SpaceIsLarge(space_index)); | |
628 LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space); | |
629 Object* new_allocation; | |
630 if (space_index == kLargeData || space_index == kLargeFixedArray) { | |
631 new_allocation = | |
632 lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked(); | |
633 } else { | |
634 ASSERT_EQ(kLargeCode, space_index); | |
635 new_allocation = | |
636 lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked(); | |
637 } | |
638 HeapObject* new_object = HeapObject::cast(new_allocation); | |
639 // Record all large objects in the same space. | |
640 address = new_object->address(); | |
641 pages_[LO_SPACE].Add(address); | |
642 } | 604 } |
643 last_object_address_ = address; | |
644 return address; | |
645 } | |
646 | |
647 | |
648 // This returns the address of an object that has been described in the | |
649 // snapshot as being offset bytes back in a particular space. | |
650 HeapObject* Deserializer::GetAddressFromEnd(int space) { | |
651 int offset = source_->GetInt(); | |
652 ASSERT(!SpaceIsLarge(space)); | |
653 offset <<= kObjectAlignmentBits; | |
654 return HeapObject::FromAddress(high_water_[space] - offset); | |
655 } | |
656 | |
657 | |
658 // This returns the address of an object that has been described in the | |
659 // snapshot as being offset bytes into a particular space. | |
660 HeapObject* Deserializer::GetAddressFromStart(int space) { | |
661 int offset = source_->GetInt(); | |
662 if (SpaceIsLarge(space)) { | |
663 // Large spaces have one object per 'page'. | |
664 return HeapObject::FromAddress(pages_[LO_SPACE][offset]); | |
665 } | |
666 offset <<= kObjectAlignmentBits; | |
667 if (space == NEW_SPACE) { | |
668 // New space has only one space - numbered 0. | |
669 return HeapObject::FromAddress(pages_[space][0] + offset); | |
670 } | |
671 ASSERT(SpaceIsPaged(space)); | |
672 int page_of_pointee = offset >> kPageSizeBits; | |
673 Address object_address = pages_[space][page_of_pointee] + | |
674 (offset & Page::kPageAlignmentMask); | |
675 return HeapObject::FromAddress(object_address); | |
676 } | 605 } |
677 | 606 |
678 | 607 |
679 void Deserializer::Deserialize() { | 608 void Deserializer::Deserialize() { |
680 isolate_ = Isolate::Current(); | 609 isolate_ = Isolate::Current(); |
681 ASSERT(isolate_ != NULL); | 610 ASSERT(isolate_ != NULL); |
682 { | 611 isolate_->heap()->ReserveSpace(&reservations_[0], &high_water_[0]); |
Yang 2012/09/13 08:47:59: why dereference and re-reference, and not simply:
Erik Corry 2012/09/13 12:13:35: Done.
683 // Don't GC while deserializing - just expand the heap. | 612 // No active threads. |
684 AlwaysAllocateScope always_allocate; | 613 ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); |
685 // Don't use the free lists while deserializing. | 614 // No active handles. |
686 LinearAllocationScope allocate_linearly; | 615 ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty()); |
687 // No active threads. | 616 ASSERT_EQ(NULL, external_reference_decoder_); |
688 ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); | 617 external_reference_decoder_ = new ExternalReferenceDecoder(); |
689 // No active handles. | 618 isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); |
690 ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty()); | 619 isolate_->heap()->RepairFreeListsAfterBoot(); |
691 ASSERT_EQ(NULL, external_reference_decoder_); | 620 isolate_->heap()->IterateWeakRoots(this, VISIT_ALL); |
692 external_reference_decoder_ = new ExternalReferenceDecoder(); | |
693 isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); | |
694 isolate_->heap()->IterateWeakRoots(this, VISIT_ALL); | |
695 | 621 |
696 isolate_->heap()->set_native_contexts_list( | 622 isolate_->heap()->set_native_contexts_list( |
697 isolate_->heap()->undefined_value()); | 623 isolate_->heap()->undefined_value()); |
698 | 624 |
699 // Update data pointers to the external strings containing natives sources. | 625 // Update data pointers to the external strings containing natives sources. |
700 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { | 626 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { |
701 Object* source = isolate_->heap()->natives_source_cache()->get(i); | 627 Object* source = isolate_->heap()->natives_source_cache()->get(i); |
702 if (!source->IsUndefined()) { | 628 if (!source->IsUndefined()) { |
703 ExternalAsciiString::cast(source)->update_data_cache(); | 629 ExternalAsciiString::cast(source)->update_data_cache(); |
704 } | |
705 } | 630 } |
706 } | 631 } |
707 | 632 |
708 // Issue code events for newly deserialized code objects. | 633 // Issue code events for newly deserialized code objects. |
709 LOG_CODE_EVENT(isolate_, LogCodeObjects()); | 634 LOG_CODE_EVENT(isolate_, LogCodeObjects()); |
710 LOG_CODE_EVENT(isolate_, LogCompiledFunctions()); | 635 LOG_CODE_EVENT(isolate_, LogCompiledFunctions()); |
711 } | 636 } |
712 | 637 |
713 | 638 |
714 void Deserializer::DeserializePartial(Object** root) { | 639 void Deserializer::DeserializePartial(Object** root) { |
715 isolate_ = Isolate::Current(); | 640 isolate_ = Isolate::Current(); |
716 // Don't GC while deserializing - just expand the heap. | 641 for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) { |
717 AlwaysAllocateScope always_allocate; | 642 ASSERT(reservations_[i] != kUninitializedReservation); |
718 // Don't use the free lists while deserializing. | 643 } |
719 LinearAllocationScope allocate_linearly; | 644 isolate_->heap()->ReserveSpace(&reservations_[0], &high_water_[0]); |
Yang 2012/09/13 08:47:59: Ditto.
Erik Corry 2012/09/13 12:13:35: Ditto.
720 if (external_reference_decoder_ == NULL) { | 645 if (external_reference_decoder_ == NULL) { |
721 external_reference_decoder_ = new ExternalReferenceDecoder(); | 646 external_reference_decoder_ = new ExternalReferenceDecoder(); |
722 } | 647 } |
723 | 648 |
724 // Keep track of the code space start and end pointers in case new | 649 // Keep track of the code space start and end pointers in case new |
725 // code objects were unserialized | 650 // code objects were unserialized |
726 OldSpace* code_space = isolate_->heap()->code_space(); | 651 OldSpace* code_space = isolate_->heap()->code_space(); |
727 Address start_address = code_space->top(); | 652 Address start_address = code_space->top(); |
728 VisitPointer(root); | 653 VisitPointer(root); |
729 | 654 |
(...skipping 21 matching lines...)
751 ReadChunk(start, end, NEW_SPACE, NULL); | 676 ReadChunk(start, end, NEW_SPACE, NULL); |
752 } | 677 } |
753 | 678 |
754 | 679 |
755 // This routine writes the new object into the pointer provided and then | 680 // This routine writes the new object into the pointer provided and then |
756 // returns true if the new object was in young space and false otherwise. | 681 // returns true if the new object was in young space and false otherwise. |
757 // The reason for this strange interface is that otherwise the object is | 682 // The reason for this strange interface is that otherwise the object is |
758 // written very late, which means the FreeSpace map is not set up by the | 683 // written very late, which means the FreeSpace map is not set up by the |
759 // time we need to use it to mark the space at the end of a page free. | 684 // time we need to use it to mark the space at the end of a page free. |
760 void Deserializer::ReadObject(int space_number, | 685 void Deserializer::ReadObject(int space_number, |
761 Space* space, | |
762 Object** write_back) { | 686 Object** write_back) { |
763 int size = source_->GetInt() << kObjectAlignmentBits; | 687 int size = source_->GetInt() << kObjectAlignmentBits; |
764 Address address = Allocate(space_number, space, size); | 688 Address address = Allocate(space_number, size); |
765 *write_back = HeapObject::FromAddress(address); | 689 *write_back = HeapObject::FromAddress(address); |
766 Object** current = reinterpret_cast<Object**>(address); | 690 Object** current = reinterpret_cast<Object**>(address); |
767 Object** limit = current + (size >> kPointerSizeLog2); | 691 Object** limit = current + (size >> kPointerSizeLog2); |
768 if (FLAG_log_snapshot_positions) { | 692 if (FLAG_log_snapshot_positions) { |
769 LOG(isolate_, SnapshotPositionEvent(address, source_->position())); | 693 LOG(isolate_, SnapshotPositionEvent(address, source_->position())); |
770 } | 694 } |
771 ReadChunk(current, limit, space_number, address); | 695 ReadChunk(current, limit, space_number, address); |
772 #ifdef DEBUG | 696 #ifdef DEBUG |
773 bool is_codespace = (space == HEAP->code_space()) || | 697 bool is_codespace = (space_number == CODE_SPACE); |
774 ((space == HEAP->lo_space()) && (space_number == kLargeCode)); | |
775 ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace); | 698 ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace); |
776 #endif | 699 #endif |
777 } | 700 } |
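
The new ReadObject drops the Space* parameter because allocation is resolved against the reservations made up front; the body of the new Deserializer::Allocate is not part of this hunk. A minimal, hypothetical sketch of what a bump allocation against the per-space high-water marks could look like (standalone names, not the CL's code):

    // Hypothetical sketch only; the CL's real Allocate body is outside this hunk.
    // With all space reserved up front by Heap::ReserveSpace, allocation during
    // deserialization can reduce to bumping the per-space high-water mark.
    #include <stdint.h>
    typedef uint8_t* Address;  // Stand-in for V8's Address typedef.

    static Address BumpAllocate(Address* high_water, int size) {
      Address address = *high_water;  // Next free byte in the reserved chunk.
      *high_water = address + size;   // Advance past the newly placed object.
      return address;
    }
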
778 | 701 |
779 | |
780 // This macro is always used with a constant argument so it should all fold | |
781 // away to almost nothing in the generated code. It might be nicer to do this | |
782 // with the ternary operator but there are type issues with that. | |
783 #define ASSIGN_DEST_SPACE(space_number) \ | |
784 Space* dest_space; \ | |
785 if (space_number == NEW_SPACE) { \ | |
786 dest_space = isolate->heap()->new_space(); \ | |
787 } else if (space_number == OLD_POINTER_SPACE) { \ | |
788 dest_space = isolate->heap()->old_pointer_space(); \ | |
789 } else if (space_number == OLD_DATA_SPACE) { \ | |
790 dest_space = isolate->heap()->old_data_space(); \ | |
791 } else if (space_number == CODE_SPACE) { \ | |
792 dest_space = isolate->heap()->code_space(); \ | |
793 } else if (space_number == MAP_SPACE) { \ | |
794 dest_space = isolate->heap()->map_space(); \ | |
795 } else if (space_number == CELL_SPACE) { \ | |
796 dest_space = isolate->heap()->cell_space(); \ | |
797 } else { \ | |
798 ASSERT(space_number >= LO_SPACE); \ | |
799 dest_space = isolate->heap()->lo_space(); \ | |
800 } | |
801 | |
802 | |
803 static const int kUnknownOffsetFromStart = -1; | |
804 | |
805 | |
806 void Deserializer::ReadChunk(Object** current, | 702 void Deserializer::ReadChunk(Object** current, |
807 Object** limit, | 703 Object** limit, |
808 int source_space, | 704 int source_space, |
809 Address current_object_address) { | 705 Address current_object_address) { |
810 Isolate* const isolate = isolate_; | 706 Isolate* const isolate = isolate_; |
707 // Write barrier support costs around 1% in startup time. In fact there | |
708 // are no new space objects in current boot snapshots, so it's not needed, | |
709 // but that may change. | |
811 bool write_barrier_needed = (current_object_address != NULL && | 710 bool write_barrier_needed = (current_object_address != NULL && |
812 source_space != NEW_SPACE && | 711 source_space != NEW_SPACE && |
813 source_space != CELL_SPACE && | 712 source_space != CELL_SPACE && |
814 source_space != CODE_SPACE && | 713 source_space != CODE_SPACE && |
815 source_space != OLD_DATA_SPACE); | 714 source_space != OLD_DATA_SPACE); |
816 while (current < limit) { | 715 while (current < limit) { |
817 int data = source_->Get(); | 716 int data = source_->Get(); |
818 switch (data) { | 717 switch (data) { |
819 #define CASE_STATEMENT(where, how, within, space_number) \ | 718 #define CASE_STATEMENT(where, how, within, space_number) \ |
820 case where + how + within + space_number: \ | 719 case where + how + within + space_number: \ |
821 ASSERT((where & ~kPointedToMask) == 0); \ | 720 ASSERT((where & ~kPointedToMask) == 0); \ |
822 ASSERT((how & ~kHowToCodeMask) == 0); \ | 721 ASSERT((how & ~kHowToCodeMask) == 0); \ |
823 ASSERT((within & ~kWhereToPointMask) == 0); \ | 722 ASSERT((within & ~kWhereToPointMask) == 0); \ |
824 ASSERT((space_number & ~kSpaceMask) == 0); | 723 ASSERT((space_number & ~kSpaceMask) == 0); |
825 | 724 |
826 #define CASE_BODY(where, how, within, space_number_if_any, offset_from_start) \ | 725 #define CASE_BODY(where, how, within, space_number_if_any) \ |
827 { \ | 726 { \ |
828 bool emit_write_barrier = false; \ | 727 bool emit_write_barrier = false; \ |
829 bool current_was_incremented = false; \ | 728 bool current_was_incremented = false; \ |
830 int space_number = space_number_if_any == kAnyOldSpace ? \ | 729 int space_number = space_number_if_any == kAnyOldSpace ? \ |
831 (data & kSpaceMask) : space_number_if_any; \ | 730 (data & kSpaceMask) : space_number_if_any; \ |
832 if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ | 731 if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ |
833 ASSIGN_DEST_SPACE(space_number) \ | 732 ReadObject(space_number, current); \ |
834 ReadObject(space_number, dest_space, current); \ | |
835 emit_write_barrier = (space_number == NEW_SPACE); \ | 733 emit_write_barrier = (space_number == NEW_SPACE); \ |
836 } else { \ | 734 } else { \ |
837 Object* new_object = NULL; /* May not be a real Object pointer. */ \ | 735 Object* new_object = NULL; /* May not be a real Object pointer. */ \ |
838 if (where == kNewObject) { \ | 736 if (where == kNewObject) { \ |
839 ASSIGN_DEST_SPACE(space_number) \ | 737 ReadObject(space_number, &new_object); \ |
840 ReadObject(space_number, dest_space, &new_object); \ | |
841 } else if (where == kRootArray) { \ | 738 } else if (where == kRootArray) { \ |
842 int root_id = source_->GetInt(); \ | 739 int root_id = source_->GetInt(); \ |
843 new_object = isolate->heap()->roots_array_start()[root_id]; \ | 740 new_object = isolate->heap()->roots_array_start()[root_id]; \ |
844 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ | 741 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ |
845 } else if (where == kPartialSnapshotCache) { \ | 742 } else if (where == kPartialSnapshotCache) { \ |
846 int cache_index = source_->GetInt(); \ | 743 int cache_index = source_->GetInt(); \ |
847 new_object = isolate->serialize_partial_snapshot_cache() \ | 744 new_object = isolate->serialize_partial_snapshot_cache() \ |
848 [cache_index]; \ | 745 [cache_index]; \ |
849 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ | 746 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ |
850 } else if (where == kExternalReference) { \ | 747 } else if (where == kExternalReference) { \ |
748 int skip = source_->GetInt(); \ | |
749 current = reinterpret_cast<Object**>(reinterpret_cast<intptr_t>( \ | |
750 current) + skip); \ | |
Yang 2012/09/13 08:47:59: It seems easier readable to use Address instead of
Erik Corry 2012/09/13 12:13:35: Done.
851 int reference_id = source_->GetInt(); \ | 751 int reference_id = source_->GetInt(); \ |
852 Address address = external_reference_decoder_-> \ | 752 Address address = external_reference_decoder_-> \ |
853 Decode(reference_id); \ | 753 Decode(reference_id); \ |
854 new_object = reinterpret_cast<Object*>(address); \ | 754 new_object = reinterpret_cast<Object*>(address); \ |
855 } else if (where == kBackref) { \ | 755 } else if (where == kBackref) { \ |
856 emit_write_barrier = (space_number == NEW_SPACE); \ | 756 emit_write_barrier = (space_number == NEW_SPACE); \ |
857 new_object = GetAddressFromEnd(data & kSpaceMask); \ | 757 new_object = GetAddressFromEnd(data & kSpaceMask); \ |
858 } else { \ | 758 } else { \ |
859 ASSERT(where == kFromStart); \ | 759 ASSERT(where == kBackrefWithSkip); \ |
860 if (offset_from_start == kUnknownOffsetFromStart) { \ | 760 int skip = source_->GetInt(); \ |
861 emit_write_barrier = (space_number == NEW_SPACE); \ | 761 current = reinterpret_cast<Object**>( \ |
862 new_object = GetAddressFromStart(data & kSpaceMask); \ | 762 reinterpret_cast<intptr_t>(current) + skip); \ |
Yang 2012/09/13 08:47:59: Ditto.
Erik Corry 2012/09/13 12:13:35: Ditto
863 } else { \ | 763 emit_write_barrier = (space_number == NEW_SPACE); \ |
864 Address object_address = pages_[space_number][0] + \ | 764 new_object = GetAddressFromEnd(data & kSpaceMask); \ |
865 (offset_from_start << kObjectAlignmentBits); \ | |
866 new_object = HeapObject::FromAddress(object_address); \ | |
867 } \ | |
868 } \ | 765 } \ |
869 if (within == kInnerPointer) { \ | 766 if (within == kInnerPointer) { \ |
870 if (space_number != CODE_SPACE || new_object->IsCode()) { \ | 767 if (space_number != CODE_SPACE || new_object->IsCode()) { \ |
871 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ | 768 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ |
872 new_object = reinterpret_cast<Object*>( \ | 769 new_object = reinterpret_cast<Object*>( \ |
873 new_code_object->instruction_start()); \ | 770 new_code_object->instruction_start()); \ |
874 } else { \ | 771 } else { \ |
875 ASSERT(space_number == CODE_SPACE || space_number == kLargeCode);\ | 772 ASSERT(space_number == CODE_SPACE); \ |
876 JSGlobalPropertyCell* cell = \ | 773 JSGlobalPropertyCell* cell = \ |
877 JSGlobalPropertyCell::cast(new_object); \ | 774 JSGlobalPropertyCell::cast(new_object); \ |
878 new_object = reinterpret_cast<Object*>( \ | 775 new_object = reinterpret_cast<Object*>( \ |
879 cell->ValueAddress()); \ | 776 cell->ValueAddress()); \ |
880 } \ | 777 } \ |
881 } \ | 778 } \ |
882 if (how == kFromCode) { \ | 779 if (how == kFromCode) { \ |
883 Address location_of_branch_data = \ | 780 Address location_of_branch_data = \ |
884 reinterpret_cast<Address>(current); \ | 781 reinterpret_cast<Address>(current); \ |
885 Assembler::deserialization_set_special_target_at( \ | 782 Assembler::deserialization_set_special_target_at( \ |
(...skipping 11 matching lines...)
897 isolate->heap()->RecordWrite( \ | 794 isolate->heap()->RecordWrite( \ |
898 current_object_address, \ | 795 current_object_address, \ |
899 static_cast<int>(current_address - current_object_address)); \ | 796 static_cast<int>(current_address - current_object_address)); \ |
900 } \ | 797 } \ |
901 if (!current_was_incremented) { \ | 798 if (!current_was_incremented) { \ |
902 current++; \ | 799 current++; \ |
903 } \ | 800 } \ |
904 break; \ | 801 break; \ |
905 } \ | 802 } \ |
906 | 803 |
907 // This generates a case and a body for each space. The large object spaces are | |
908 // very rare in snapshots so they are grouped in one body. | |
909 #define ONE_PER_SPACE(where, how, within) \ | |
910 CASE_STATEMENT(where, how, within, NEW_SPACE) \ | |
911 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ | |
912 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ | |
913 CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart) \ | |
914 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ | |
915 CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart) \ | |
916 CASE_STATEMENT(where, how, within, CODE_SPACE) \ | |
917 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ | |
918 CASE_STATEMENT(where, how, within, CELL_SPACE) \ | |
919 CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart) \ | |
920 CASE_STATEMENT(where, how, within, MAP_SPACE) \ | |
921 CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart) \ | |
922 CASE_STATEMENT(where, how, within, kLargeData) \ | |
923 CASE_STATEMENT(where, how, within, kLargeCode) \ | |
924 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ | |
925 CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart) | |
926 | |
927 // This generates a case and a body for the new space (which has to do extra | 804 // This generates a case and a body for the new space (which has to do extra |
928 // write barrier handling) and handles the other spaces with 8 fall-through | 805 // write barrier handling) and handles the other spaces with 8 fall-through |
929 // cases and one body. | 806 // cases and one body. |
930 #define ALL_SPACES(where, how, within) \ | 807 #define ALL_SPACES(where, how, within) \ |
931 CASE_STATEMENT(where, how, within, NEW_SPACE) \ | 808 CASE_STATEMENT(where, how, within, NEW_SPACE) \ |
932 CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart) \ | 809 CASE_BODY(where, how, within, NEW_SPACE) \ |
933 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ | 810 CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \ |
934 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ | 811 CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \ |
935 CASE_STATEMENT(where, how, within, CODE_SPACE) \ | 812 CASE_STATEMENT(where, how, within, CODE_SPACE) \ |
936 CASE_STATEMENT(where, how, within, CELL_SPACE) \ | 813 CASE_STATEMENT(where, how, within, CELL_SPACE) \ |
937 CASE_STATEMENT(where, how, within, MAP_SPACE) \ | 814 CASE_STATEMENT(where, how, within, MAP_SPACE) \ |
938 CASE_STATEMENT(where, how, within, kLargeData) \ | 815 CASE_BODY(where, how, within, kAnyOldSpace) |
939 CASE_STATEMENT(where, how, within, kLargeCode) \ | |
940 CASE_STATEMENT(where, how, within, kLargeFixedArray) \ | |
941 CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart) | |
942 | |
943 #define ONE_PER_CODE_SPACE(where, how, within) \ | |
944 CASE_STATEMENT(where, how, within, CODE_SPACE) \ | |
945 CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \ | |
946 CASE_STATEMENT(where, how, within, kLargeCode) \ | |
947 CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart) | |
948 | 816 |
949 #define FOUR_CASES(byte_code) \ | 817 #define FOUR_CASES(byte_code) \ |
950 case byte_code: \ | 818 case byte_code: \ |
951 case byte_code + 1: \ | 819 case byte_code + 1: \ |
952 case byte_code + 2: \ | 820 case byte_code + 2: \ |
953 case byte_code + 3: | 821 case byte_code + 3: |
954 | 822 |
955 #define SIXTEEN_CASES(byte_code) \ | 823 #define SIXTEEN_CASES(byte_code) \ |
956 FOUR_CASES(byte_code) \ | 824 FOUR_CASES(byte_code) \ |
957 FOUR_CASES(byte_code + 4) \ | 825 FOUR_CASES(byte_code + 4) \ |
(...skipping 11 matching lines...)
969 } | 837 } |
970 COMMON_RAW_LENGTHS(RAW_CASE) | 838 COMMON_RAW_LENGTHS(RAW_CASE) |
971 #undef RAW_CASE | 839 #undef RAW_CASE |
972 | 840 |
973 // Deserialize a chunk of raw data that doesn't have one of the popular | 841 // Deserialize a chunk of raw data that doesn't have one of the popular |
974 // lengths. | 842 // lengths. |
975 case kRawData: { | 843 case kRawData: { |
976 int size = source_->GetInt(); | 844 int size = source_->GetInt(); |
977 byte* raw_data_out = reinterpret_cast<byte*>(current); | 845 byte* raw_data_out = reinterpret_cast<byte*>(current); |
978 source_->CopyRaw(raw_data_out, size); | 846 source_->CopyRaw(raw_data_out, size); |
979 current = reinterpret_cast<Object**>(raw_data_out + size); | |
980 break; | 847 break; |
981 } | 848 } |
982 | 849 |
983 SIXTEEN_CASES(kRootArrayLowConstants) | 850 SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance) |
984 SIXTEEN_CASES(kRootArrayHighConstants) { | 851 SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) { |
985 int root_id = RootArrayConstantFromByteCode(data); | 852 int root_id = RootArrayConstantFromByteCode(data); |
986 Object* object = isolate->heap()->roots_array_start()[root_id]; | 853 Object* object = isolate->heap()->roots_array_start()[root_id]; |
987 ASSERT(!isolate->heap()->InNewSpace(object)); | 854 ASSERT(!isolate->heap()->InNewSpace(object)); |
988 *current++ = object; | 855 *current++ = object; |
989 break; | 856 break; |
990 } | 857 } |
991 | 858 |
859 SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance) | |
860 SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) { | |
861 int root_id = RootArrayConstantFromByteCode(data); | |
862 int skip = source_->GetInt(); | |
863 current = reinterpret_cast<Object**>( | |
864 reinterpret_cast<intptr_t>(current) + skip); | |
865 Object* object = isolate->heap()->roots_array_start()[root_id]; | |
866 ASSERT(!isolate->heap()->InNewSpace(object)); | |
867 *current++ = object; | |
868 break; | |
869 } | |
870 | |
992 case kRepeat: { | 871 case kRepeat: { |
993 int repeats = source_->GetInt(); | 872 int repeats = source_->GetInt(); |
994 Object* object = current[-1]; | 873 Object* object = current[-1]; |
995 ASSERT(!isolate->heap()->InNewSpace(object)); | 874 ASSERT(!isolate->heap()->InNewSpace(object)); |
996 for (int i = 0; i < repeats; i++) current[i] = object; | 875 for (int i = 0; i < repeats; i++) current[i] = object; |
997 current += repeats; | 876 current += repeats; |
998 break; | 877 break; |
999 } | 878 } |
1000 | 879 |
1001 STATIC_ASSERT(kRootArrayNumberOfConstantEncodings == | 880 STATIC_ASSERT(kRootArrayNumberOfConstantEncodings == |
1002 Heap::kOldSpaceRoots); | 881 Heap::kOldSpaceRoots); |
1003 STATIC_ASSERT(kMaxRepeats == 12); | 882 STATIC_ASSERT(kMaxRepeats == 13); |
1004 FOUR_CASES(kConstantRepeat) | 883 case kConstantRepeat: |
1005 FOUR_CASES(kConstantRepeat + 4) | 884 FOUR_CASES(kConstantRepeat + 1) |
1006 FOUR_CASES(kConstantRepeat + 8) { | 885 FOUR_CASES(kConstantRepeat + 5) |
886 FOUR_CASES(kConstantRepeat + 9) { | |
1007 int repeats = RepeatsForCode(data); | 887 int repeats = RepeatsForCode(data); |
1008 Object* object = current[-1]; | 888 Object* object = current[-1]; |
1009 ASSERT(!isolate->heap()->InNewSpace(object)); | 889 ASSERT(!isolate->heap()->InNewSpace(object)); |
1010 for (int i = 0; i < repeats; i++) current[i] = object; | 890 for (int i = 0; i < repeats; i++) current[i] = object; |
1011 current += repeats; | 891 current += repeats; |
1012 break; | 892 break; |
1013 } | 893 } |
1014 | 894 |
1015 // Deserialize a new object and write a pointer to it to the current | 895 // Deserialize a new object and write a pointer to it to the current |
1016 // object. | 896 // object. |
1017 ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject) | 897 ALL_SPACES(kNewObject, kPlain, kStartOfObject) |
1018 // Support for direct instruction pointers in functions. It's an inner | 898 // Support for direct instruction pointers in functions. It's an inner |
1019 // pointer because it points at the entry point, not at the start of the | 899 // pointer because it points at the entry point, not at the start of the |
1020 // code object. | 900 // code object. |
1021 ONE_PER_CODE_SPACE(kNewObject, kPlain, kInnerPointer) | 901 CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE) |
902 CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE) | |
1022 // Deserialize a new code object and write a pointer to its first | 903 // Deserialize a new code object and write a pointer to its first |
1023 // instruction to the current code object. | 904 // instruction to the current code object. |
1024 ONE_PER_SPACE(kNewObject, kFromCode, kInnerPointer) | 905 ALL_SPACES(kNewObject, kFromCode, kInnerPointer) |
1025 // Find a recently deserialized object using its offset from the current | 906 // Find a recently deserialized object using its offset from the current |
1026 // allocation point and write a pointer to it to the current object. | 907 // allocation point and write a pointer to it to the current object. |
1027 ALL_SPACES(kBackref, kPlain, kStartOfObject) | 908 ALL_SPACES(kBackref, kPlain, kStartOfObject) |
909 ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject) | |
1028 #if V8_TARGET_ARCH_MIPS | 910 #if V8_TARGET_ARCH_MIPS |
1029 // Deserialize a new object from pointer found in code and write | 911 // Deserialize a new object from pointer found in code and write |
1030 // a pointer to it to the current object. Required only for MIPS, and | 912 // a pointer to it to the current object. Required only for MIPS, and |
1031 // omitted on the other architectures because it is fully unrolled and | 913 // omitted on the other architectures because it is fully unrolled and |
1032 // would cause bloat. | 914 // would cause bloat. |
1033 ONE_PER_SPACE(kNewObject, kFromCode, kStartOfObject) | 915 ALL_SPACES(kNewObject, kFromCode, kStartOfObject) |
1034 // Find a recently deserialized code object using its offset from the | 916 // Find a recently deserialized code object using its offset from the |
1035 // current allocation point and write a pointer to it to the current | 917 // current allocation point and write a pointer to it to the current |
1036 // object. Required only for MIPS. | 918 // object. Required only for MIPS. |
1037 ALL_SPACES(kBackref, kFromCode, kStartOfObject) | 919 ALL_SPACES(kBackref, kFromCode, kStartOfObject) |
1038 // Find an already deserialized code object using its offset from | 920 ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject) |
1039 // the start and write a pointer to it to the current object. | |
1040 // Required only for MIPS. | |
1041 ALL_SPACES(kFromStart, kFromCode, kStartOfObject) | |
1042 #endif | 921 #endif |
1043 // Find a recently deserialized code object using its offset from the | 922 // Find a recently deserialized code object using its offset from the |
1044 // current allocation point and write a pointer to its first instruction | 923 // current allocation point and write a pointer to its first instruction |
1045 // to the current code object or the instruction pointer in a function | 924 // to the current code object or the instruction pointer in a function |
1046 // object. | 925 // object. |
1047 ALL_SPACES(kBackref, kFromCode, kInnerPointer) | 926 ALL_SPACES(kBackref, kFromCode, kInnerPointer) |
927 ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer) | |
1048 ALL_SPACES(kBackref, kPlain, kInnerPointer) | 928 ALL_SPACES(kBackref, kPlain, kInnerPointer) |
1049 // Find an already deserialized object using its offset from the start | 929 ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer) |
1050 // and write a pointer to it to the current object. | |
1051 ALL_SPACES(kFromStart, kPlain, kStartOfObject) | |
1052 ALL_SPACES(kFromStart, kPlain, kInnerPointer) | |
1053 // Find an already deserialized code object using its offset from the | |
1054 // start and write a pointer to its first instruction to the current code | |
1055 // object. | |
1056 ALL_SPACES(kFromStart, kFromCode, kInnerPointer) | |
1057 // Find an object in the roots array and write a pointer to it to the | 930 // Find an object in the roots array and write a pointer to it to the |
1058 // current object. | 931 // current object. |
1059 CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0) | 932 CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0) |
1060 CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart) | 933 CASE_BODY(kRootArray, kPlain, kStartOfObject, 0) |
1061 // Find an object in the partial snapshots cache and write a pointer to it | 934 // Find an object in the partial snapshots cache and write a pointer to it |
1062 // to the current object. | 935 // to the current object. |
1063 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0) | 936 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0) |
1064 CASE_BODY(kPartialSnapshotCache, | 937 CASE_BODY(kPartialSnapshotCache, |
1065 kPlain, | 938 kPlain, |
1066 kStartOfObject, | 939 kStartOfObject, |
1067 0, | 940 0) |
1068 kUnknownOffsetFromStart) | |
1069 // Find an code entry in the partial snapshots cache and | 941 // Find an code entry in the partial snapshots cache and |
1070 // write a pointer to it to the current object. | 942 // write a pointer to it to the current object. |
1071 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0) | 943 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0) |
1072 CASE_BODY(kPartialSnapshotCache, | 944 CASE_BODY(kPartialSnapshotCache, |
1073 kPlain, | 945 kPlain, |
1074 kInnerPointer, | 946 kInnerPointer, |
1075 0, | 947 0) |
1076 kUnknownOffsetFromStart) | |
1077 // Find an external reference and write a pointer to it to the current | 948 // Find an external reference and write a pointer to it to the current |
1078 // object. | 949 // object. |
1079 CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0) | 950 CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0) |
1080 CASE_BODY(kExternalReference, | 951 CASE_BODY(kExternalReference, |
1081 kPlain, | 952 kPlain, |
1082 kStartOfObject, | 953 kStartOfObject, |
1083 0, | 954 0) |
1084 kUnknownOffsetFromStart) | |
1085 // Find an external reference and write a pointer to it in the current | 955 // Find an external reference and write a pointer to it in the current |
1086 // code object. | 956 // code object. |
1087 CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0) | 957 CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0) |
1088 CASE_BODY(kExternalReference, | 958 CASE_BODY(kExternalReference, |
1089 kFromCode, | 959 kFromCode, |
1090 kStartOfObject, | 960 kStartOfObject, |
1091 0, | 961 0) |
1092 kUnknownOffsetFromStart) | |
1093 | 962 |
1094 #undef CASE_STATEMENT | 963 #undef CASE_STATEMENT |
1095 #undef CASE_BODY | 964 #undef CASE_BODY |
1096 #undef ONE_PER_SPACE | |
1097 #undef ALL_SPACES | 965 #undef ALL_SPACES |
1098 #undef ASSIGN_DEST_SPACE | |
1099 | 966 |
1100 case kNewPage: { | 967 case kSkip: { |
1101 int space = source_->Get(); | 968 int size = source_->GetInt(); |
1102 pages_[space].Add(last_object_address_); | 969 current = reinterpret_cast<Object**>( |
1103 if (space == CODE_SPACE) { | 970 reinterpret_cast<intptr_t>(current) + size); |
1104 CPU::FlushICache(last_object_address_, Page::kPageSize); | |
1105 } | |
1106 break; | 971 break; |
1107 } | 972 } |
1108 | 973 |
1109 case kSkip: { | |
1110 current++; | |
1111 break; | |
1112 } | |
1113 | |
1114 case kNativesStringResource: { | 974 case kNativesStringResource: { |
1115 int index = source_->Get(); | 975 int index = source_->Get(); |
1116 Vector<const char> source_vector = Natives::GetRawScriptSource(index); | 976 Vector<const char> source_vector = Natives::GetRawScriptSource(index); |
1117 NativesExternalStringResource* resource = | 977 NativesExternalStringResource* resource = |
1118 new NativesExternalStringResource(isolate->bootstrapper(), | 978 new NativesExternalStringResource(isolate->bootstrapper(), |
1119 source_vector.start(), | 979 source_vector.start(), |
1120 source_vector.length()); | 980 source_vector.length()); |
1121 *current++ = reinterpret_cast<Object*>(resource); | 981 *current++ = reinterpret_cast<Object*>(resource); |
1122 break; | 982 break; |
1123 } | 983 } |
1124 | 984 |
1125 case kSynchronize: { | 985 case kSynchronize: { |
1126 // If we get here then that indicates that you have a mismatch between | 986 // If we get here then that indicates that you have a mismatch between |
1127 // the number of GC roots when serializing and deserializing. | 987 // the number of GC roots when serializing and deserializing. |
1128 UNREACHABLE(); | 988 UNREACHABLE(); |
1129 } | 989 } |
1130 | 990 |
1131 default: | 991 default: |
1132 UNREACHABLE(); | 992 UNREACHABLE(); |
1133 } | 993 } |
1134 } | 994 } |
1135 ASSERT_EQ(current, limit); | 995 ASSERT_EQ(limit, current); |
1136 } | 996 } |
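
Several byte codes above now carry an explicit skip: kSkip itself, the kBackrefWithSkip variants, and the kHasSkipDistance root constants all read a byte distance and advance the write cursor without emitting data. A minimal standalone sketch of that cursor arithmetic (void** stands in for Object**):

    // Minimal sketch of the skip arithmetic used in ReadChunk: advance the write
    // cursor by a raw byte distance rather than by whole pointer-sized slots.
    #include <stdint.h>

    static void** SkipBytes(void** current, int byte_distance) {
      return reinterpret_cast<void**>(
          reinterpret_cast<intptr_t>(current) + byte_distance);
    }
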
1137 | 997 |
1138 | 998 |
1139 void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) { | 999 void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) { |
1140 const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7; | 1000 ASSERT(integer <= 0x3fffff); |
Yang 2012/09/13 08:47:59: I'd prefer integer < (1 << 22)
Erik Corry 2012/09/13 12:13:35: Done.
1141 for (int shift = max_shift; shift > 0; shift -= 7) { | 1001 integer <<= 2; |
1142 if (integer >= static_cast<uintptr_t>(1u) << shift) { | 1002 int bytes = 1; |
1143 Put((static_cast<int>((integer >> shift)) & 0x7f) | 0x80, "IntPart"); | 1003 if (integer > 0xff) bytes = 2; |
1144 } | 1004 if (integer > 0xffff) bytes = 3; |
1145 } | 1005 integer |= bytes; |
1146 PutSection(static_cast<int>(integer & 0x7f), "IntLastPart"); | 1006 Put(static_cast<int>(integer & 0xff), "IntPart1"); |
1007 if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2"); | |
1008 if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3"); | |
1147 } | 1009 } |
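
The rewritten PutInt bounds values to 22 bits, shifts them left by two, stores the byte count (1 to 3) in the low two bits, and emits the bytes low-order first. A self-contained sketch of the format follows; the encoder mirrors PutInt above, while the decoder is inferred from the format rather than taken from the CL:

    // Self-contained sketch of the new variable-length integer format.
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <vector>

    static void EncodeInt(std::vector<uint8_t>* out, uint32_t value) {
      assert(value < (1 << 22));           // At most 22 payload bits.
      uint32_t encoded = value << 2;       // Make room for the length tag.
      int bytes = 1;
      if (encoded > 0xff) bytes = 2;
      if (encoded > 0xffff) bytes = 3;
      encoded |= bytes;                    // Length tag lives in the two low bits.
      for (int i = 0; i < bytes; i++) {
        out->push_back(static_cast<uint8_t>((encoded >> (8 * i)) & 0xff));
      }
    }

    static uint32_t DecodeInt(const std::vector<uint8_t>& in, size_t* pos) {
      uint32_t encoded = in[*pos];
      int bytes = encoded & 3;             // The first byte carries the length tag.
      for (int i = 1; i < bytes; i++) encoded |= in[*pos + i] << (8 * i);
      *pos += bytes;
      return encoded >> 2;                 // Drop the tag to recover the value.
    }
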
1148 | 1010 |
1149 | 1011 |
1150 Serializer::Serializer(SnapshotByteSink* sink) | 1012 Serializer::Serializer(SnapshotByteSink* sink) |
1151 : sink_(sink), | 1013 : sink_(sink), |
1152 current_root_index_(0), | 1014 current_root_index_(0), |
1153 external_reference_encoder_(new ExternalReferenceEncoder), | 1015 external_reference_encoder_(new ExternalReferenceEncoder), |
1154 large_object_total_(0), | |
1155 root_index_wave_front_(0) { | 1016 root_index_wave_front_(0) { |
1156 isolate_ = Isolate::Current(); | 1017 isolate_ = Isolate::Current(); |
1157 // The serializer is meant to be used only to generate initial heap images | 1018 // The serializer is meant to be used only to generate initial heap images |
1158 // from a context in which there is only one isolate. | 1019 // from a context in which there is only one isolate. |
1159 ASSERT(isolate_->IsDefaultIsolate()); | 1020 ASSERT(isolate_->IsDefaultIsolate()); |
1160 for (int i = 0; i <= LAST_SPACE; i++) { | 1021 for (int i = 0; i <= LAST_SPACE; i++) { |
1161 fullness_[i] = 0; | 1022 fullness_[i] = 0; |
1162 } | 1023 } |
1163 } | 1024 } |
1164 | 1025 |
(...skipping 12 matching lines...)
1177 CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles()); | 1038 CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles()); |
1178 // We don't support serializing installed extensions. | 1039 // We don't support serializing installed extensions. |
1179 CHECK(!isolate->has_installed_extensions()); | 1040 CHECK(!isolate->has_installed_extensions()); |
1180 | 1041 |
1181 HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG); | 1042 HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG); |
1182 } | 1043 } |
1183 | 1044 |
1184 | 1045 |
1185 void PartialSerializer::Serialize(Object** object) { | 1046 void PartialSerializer::Serialize(Object** object) { |
1186 this->VisitPointer(object); | 1047 this->VisitPointer(object); |
1048 Pad(); | |
1187 } | 1049 } |
1188 | 1050 |
1189 | 1051 |
1190 void Serializer::VisitPointers(Object** start, Object** end) { | 1052 void Serializer::VisitPointers(Object** start, Object** end) { |
1191 Isolate* isolate = Isolate::Current(); | 1053 Isolate* isolate = Isolate::Current(); |
1192 | 1054 |
1193 for (Object** current = start; current < end; current++) { | 1055 for (Object** current = start; current < end; current++) { |
1194 if (start == isolate->heap()->roots_array_start()) { | 1056 if (start == isolate->heap()->roots_array_start()) { |
1195 root_index_wave_front_ = | 1057 root_index_wave_front_ = |
1196 Max(root_index_wave_front_, static_cast<intptr_t>(current - start)); | 1058 Max(root_index_wave_front_, static_cast<intptr_t>(current - start)); |
1197 } | 1059 } |
1198 if (reinterpret_cast<Address>(current) == | 1060 if (reinterpret_cast<Address>(current) == |
1199 isolate->heap()->store_buffer()->TopAddress()) { | 1061 isolate->heap()->store_buffer()->TopAddress()) { |
1200 sink_->Put(kSkip, "Skip"); | 1062 sink_->Put(kSkip, "Skip"); |
1063 sink_->PutInt(kPointerSize, "SkipOneWord"); | |
1201 } else if ((*current)->IsSmi()) { | 1064 } else if ((*current)->IsSmi()) { |
1202 sink_->Put(kRawData, "RawData"); | 1065 sink_->Put(kRawData + kPointerSize / 4, "Smi"); |
Yang 2012/09/13 08:47:59: The 4 in here obviously refers to the factor 4 in
Erik Corry 2012/09/13 12:13:35: Now the 4 is gone.
1203 sink_->PutInt(kPointerSize, "length"); | |
1204 for (int i = 0; i < kPointerSize; i++) { | 1066 for (int i = 0; i < kPointerSize; i++) { |
1205 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); | 1067 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); |
1206 } | 1068 } |
1207 } else { | 1069 } else { |
1208 SerializeObject(*current, kPlain, kStartOfObject); | 1070 SerializeObject(*current, kPlain, kStartOfObject, 0); |
1209 } | 1071 } |
1210 } | 1072 } |
1211 } | 1073 } |
1212 | 1074 |
1213 | 1075 |
1214 // This ensures that the partial snapshot cache keeps things alive during GC and | 1076 // This ensures that the partial snapshot cache keeps things alive during GC and |
1215 // tracks their movement. When it is called during serialization of the startup | 1077 // tracks their movement. When it is called during serialization of the startup |
1216 // snapshot nothing happens. When the partial (context) snapshot is created, | 1078 // snapshot nothing happens. When the partial (context) snapshot is created, |
1217 // this array is populated with the pointers that the partial snapshot will | 1079 // this array is populated with the pointers that the partial snapshot will |
1218 // need. As that happens we emit serialized objects to the startup snapshot | 1080 // need. As that happens we emit serialized objects to the startup snapshot |
(...skipping 66 matching lines...)
1285 | 1147 |
1286 | 1148 |
1287 // Encode the location of an already deserialized object in order to write its | 1149 // Encode the location of an already deserialized object in order to write its |
1288 // location into a later object. We can encode the location as an offset from | 1150 // location into a later object. We can encode the location as an offset from |
1289 // the start of the deserialized objects or as an offset backwards from the | 1151 // the start of the deserialized objects or as an offset backwards from the |
1290 // current allocation pointer. | 1152 // current allocation pointer. |
1291 void Serializer::SerializeReferenceToPreviousObject( | 1153 void Serializer::SerializeReferenceToPreviousObject( |
1292 int space, | 1154 int space, |
1293 int address, | 1155 int address, |
1294 HowToCode how_to_code, | 1156 HowToCode how_to_code, |
1295 WhereToPoint where_to_point) { | 1157 WhereToPoint where_to_point, |
1158 int skip) { | |
1296 int offset = CurrentAllocationAddress(space) - address; | 1159 int offset = CurrentAllocationAddress(space) - address; |
1297 bool from_start = true; | 1160 // Shift out the bits that are always 0. |
1298 if (SpaceIsPaged(space)) { | 1161 offset >>= kObjectAlignmentBits; |
1299 // For paged space it is simple to encode back from current allocation if | 1162 if (skip == 0) { |
1300 // the object is on the same page as the current allocation pointer. | 1163 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer"); |
1301 if ((CurrentAllocationAddress(space) >> kPageSizeBits) == | 1164 } else { |
1302 (address >> kPageSizeBits)) { | 1165 sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space, |
1303 from_start = false; | 1166 "BackRefSerWithSkip"); |
1304 address = offset; | 1167 sink_->PutInt(skip, "BackRefSkipDistance"); |
1305 } | |
1306 } else if (space == NEW_SPACE) { | |
1307 // For new space it is always simple to encode back from current allocation. | |
1308 if (offset < address) { | |
1309 from_start = false; | |
1310 address = offset; | |
1311 } | |
1312 } | 1168 } |
1313 // If we are actually dealing with real offsets (and not a numbering of | 1169 sink_->PutInt(offset, "offset"); |
1314 // all objects) then we should shift out the bits that are always 0. | |
1315 if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits; | |
1316 if (from_start) { | |
1317 sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer"); | |
1318 sink_->PutInt(address, "address"); | |
1319 } else { | |
1320 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer"); | |
1321 sink_->PutInt(address, "address"); | |
1322 } | |
1323 } | 1170 } |
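
With kFromStart gone, every reference to an already-serialized object is encoded as a distance back from the current allocation point of its space. A hedged sketch of the round trip with hypothetical helper names (kObjectAlignmentBits is assumed to be pointer-size alignment), pairing the encoder above with the GetAddressFromEnd arithmetic the deserializer keeps:

    // Hedged sketch, hypothetical helper names: how a back reference round-trips.
    #include <stdint.h>
    typedef uint8_t* Address;
    static const int kObjectAlignmentBits = 2;  // Assumed 32-bit-style alignment.

    // Serializer side: distance behind the current allocation point, with the
    // always-zero alignment bits shifted out (mirrors the code above).
    static int EncodeBackref(int current_allocation_offset, int object_offset) {
      return (current_allocation_offset - object_offset) >> kObjectAlignmentBits;
    }

    // Deserializer side: subtract the decoded distance from the space's
    // high-water mark (mirrors Deserializer::GetAddressFromEnd).
    static Address DecodeBackref(Address high_water, int encoded_offset) {
      return high_water - (encoded_offset << kObjectAlignmentBits);
    }
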
1324 | 1171 |
1325 | 1172 |
1326 void StartupSerializer::SerializeObject( | 1173 void StartupSerializer::SerializeObject( |
1327 Object* o, | 1174 Object* o, |
1328 HowToCode how_to_code, | 1175 HowToCode how_to_code, |
1329 WhereToPoint where_to_point) { | 1176 WhereToPoint where_to_point, |
1177 int skip) { | |
1330 CHECK(o->IsHeapObject()); | 1178 CHECK(o->IsHeapObject()); |
1331 HeapObject* heap_object = HeapObject::cast(o); | 1179 HeapObject* heap_object = HeapObject::cast(o); |
1332 | 1180 |
1333 int root_index; | 1181 int root_index; |
1334 if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { | 1182 if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { |
1335 PutRoot(root_index, heap_object, how_to_code, where_to_point); | 1183 PutRoot(root_index, heap_object, how_to_code, where_to_point, skip); |
1336 return; | 1184 return; |
1337 } | 1185 } |
1338 | 1186 |
1339 if (address_mapper_.IsMapped(heap_object)) { | 1187 if (address_mapper_.IsMapped(heap_object)) { |
1340 int space = SpaceOfAlreadySerializedObject(heap_object); | 1188 int space = SpaceOfObject(heap_object); |
1341 int address = address_mapper_.MappedTo(heap_object); | 1189 int address = address_mapper_.MappedTo(heap_object); |
1342 SerializeReferenceToPreviousObject(space, | 1190 SerializeReferenceToPreviousObject(space, |
1343 address, | 1191 address, |
1344 how_to_code, | 1192 how_to_code, |
1345 where_to_point); | 1193 where_to_point, |
1194 skip); | |
1346 } else { | 1195 } else { |
1196 if (skip != 0) { | |
1197 sink_->Put(kSkip, "FlushPendingSkip"); | |
1198 sink_->PutInt(skip, "SkipDistance"); | |
1199 } | |
1200 | |
1347 // Object has not yet been serialized. Serialize it here. | 1201 // Object has not yet been serialized. Serialize it here. |
1348 ObjectSerializer object_serializer(this, | 1202 ObjectSerializer object_serializer(this, |
1349 heap_object, | 1203 heap_object, |
1350 sink_, | 1204 sink_, |
1351 how_to_code, | 1205 how_to_code, |
1352 where_to_point); | 1206 where_to_point); |
1353 object_serializer.Serialize(); | 1207 object_serializer.Serialize(); |
1354 } | 1208 } |
1355 } | 1209 } |
1356 | 1210 |
1357 | 1211 |
1358 void StartupSerializer::SerializeWeakReferences() { | 1212 void StartupSerializer::SerializeWeakReferences() { |
1359 // This phase comes right after the partial serialization (of the snapshot). | 1213 // This phase comes right after the partial serialization (of the snapshot). |
1360 // After we have done the partial serialization the partial snapshot cache | 1214 // After we have done the partial serialization the partial snapshot cache |
1361 // will contain some references needed to decode the partial snapshot. We | 1215 // will contain some references needed to decode the partial snapshot. We |
1362 // add one entry with 'undefined' which is the sentinel that the deserializer | 1216 // add one entry with 'undefined' which is the sentinel that the deserializer |
1363 // uses to know it is done deserializing the array. | 1217 // uses to know it is done deserializing the array. |
1364 Isolate* isolate = Isolate::Current(); | 1218 Isolate* isolate = Isolate::Current(); |
1365 Object* undefined = isolate->heap()->undefined_value(); | 1219 Object* undefined = isolate->heap()->undefined_value(); |
1366 VisitPointer(&undefined); | 1220 VisitPointer(&undefined); |
1367 HEAP->IterateWeakRoots(this, VISIT_ALL); | 1221 HEAP->IterateWeakRoots(this, VISIT_ALL); |
1368 } | 1222 } |
1369 | 1223 |
1370 | 1224 |
1371 void Serializer::PutRoot(int root_index, | 1225 void Serializer::PutRoot(int root_index, |
1372 HeapObject* object, | 1226 HeapObject* object, |
1373 SerializerDeserializer::HowToCode how_to_code, | 1227 SerializerDeserializer::HowToCode how_to_code, |
1374 SerializerDeserializer::WhereToPoint where_to_point) { | 1228 SerializerDeserializer::WhereToPoint where_to_point, |
1229 int skip) { | |
1375 if (how_to_code == kPlain && | 1230 if (how_to_code == kPlain && |
1376 where_to_point == kStartOfObject && | 1231 where_to_point == kStartOfObject && |
1377 root_index < kRootArrayNumberOfConstantEncodings && | 1232 root_index < kRootArrayNumberOfConstantEncodings && |
1378 !HEAP->InNewSpace(object)) { | 1233 !HEAP->InNewSpace(object)) { |
1379 if (root_index < kRootArrayNumberOfLowConstantEncodings) { | 1234 if (skip == 0) { |
1380 sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant"); | 1235 sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index, |
1236 "RootConstant"); | |
1381 } else { | 1237 } else { |
1382 sink_->Put(kRootArrayHighConstants + root_index - | 1238 sink_->Put(kRootArrayConstants + kHasSkipDistance + root_index, |
1383 kRootArrayNumberOfLowConstantEncodings, | 1239 "RootConstant"); |
1384 "RootHiConstant"); | 1240 sink_->PutInt(skip, "SkipInPutRoot"); |
1385 } | 1241 } |
1386 } else { | 1242 } else { |
1243 if (skip != 0) { | |
1244 sink_->Put(kSkip, "SkipFromPutRoot"); | |
1245 sink_->PutInt(skip, "SkipFromPutRootDistance"); | |
1246 } | |
1387 sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization"); | 1247 sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization"); |
1388 sink_->PutInt(root_index, "root_index"); | 1248 sink_->PutInt(root_index, "root_index"); |
1389 } | 1249 } |
1390 } | 1250 } |
1391 | 1251 |
1392 | 1252 |
1393 void PartialSerializer::SerializeObject( | 1253 void PartialSerializer::SerializeObject( |
1394 Object* o, | 1254 Object* o, |
1395 HowToCode how_to_code, | 1255 HowToCode how_to_code, |
1396 WhereToPoint where_to_point) { | 1256 WhereToPoint where_to_point, |
1257 int skip) { | |
1397 CHECK(o->IsHeapObject()); | 1258 CHECK(o->IsHeapObject()); |
1398 HeapObject* heap_object = HeapObject::cast(o); | 1259 HeapObject* heap_object = HeapObject::cast(o); |
1399 | 1260 |
1400 if (heap_object->IsMap()) { | 1261 if (heap_object->IsMap()) { |
1401 // The code-caches link to context-specific code objects, which | 1262 // The code-caches link to context-specific code objects, which |
1402 // the startup and context serializes cannot currently handle. | 1263 // the startup and context serializes cannot currently handle. |
1403 ASSERT(Map::cast(heap_object)->code_cache() == | 1264 ASSERT(Map::cast(heap_object)->code_cache() == |
1404 heap_object->GetHeap()->raw_unchecked_empty_fixed_array()); | 1265 heap_object->GetHeap()->raw_unchecked_empty_fixed_array()); |
1405 } | 1266 } |
1406 | 1267 |
1407 int root_index; | 1268 int root_index; |
1408 if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { | 1269 if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) { |
1409 PutRoot(root_index, heap_object, how_to_code, where_to_point); | 1270 PutRoot(root_index, heap_object, how_to_code, where_to_point, skip); |
1410 return; | 1271 return; |
1411 } | 1272 } |
1412 | 1273 |
1413 if (ShouldBeInThePartialSnapshotCache(heap_object)) { | 1274 if (ShouldBeInThePartialSnapshotCache(heap_object)) { |
1275 if (skip != 0) { | |
1276 sink_->Put(kSkip, "SkipFromSerializeObject"); | |
1277 sink_->PutInt(skip, "SkipDistanceFromSerializeObject"); | |
1278 } | |
1279 | |
1414 int cache_index = PartialSnapshotCacheIndex(heap_object); | 1280 int cache_index = PartialSnapshotCacheIndex(heap_object); |
1415 sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point, | 1281 sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point, |
1416 "PartialSnapshotCache"); | 1282 "PartialSnapshotCache"); |
1417 sink_->PutInt(cache_index, "partial_snapshot_cache_index"); | 1283 sink_->PutInt(cache_index, "partial_snapshot_cache_index"); |
1418 return; | 1284 return; |
1419 } | 1285 } |
1420 | 1286 |
1421 // Pointers from the partial snapshot to the objects in the startup snapshot | 1287 // Pointers from the partial snapshot to the objects in the startup snapshot |
1422 // should go through the root array or through the partial snapshot cache. | 1288 // should go through the root array or through the partial snapshot cache. |
1423 // If this is not the case you may have to add something to the root array. | 1289 // If this is not the case you may have to add something to the root array. |
1424 ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object)); | 1290 ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object)); |
1425 // All the symbols that the partial snapshot needs should be either in the | 1291 // All the symbols that the partial snapshot needs should be either in the |
1426 // root table or in the partial snapshot cache. | 1292 // root table or in the partial snapshot cache. |
1427 ASSERT(!heap_object->IsSymbol()); | 1293 ASSERT(!heap_object->IsSymbol()); |
1428 | 1294 |
1429 if (address_mapper_.IsMapped(heap_object)) { | 1295 if (address_mapper_.IsMapped(heap_object)) { |
1430 int space = SpaceOfAlreadySerializedObject(heap_object); | 1296 int space = SpaceOfObject(heap_object); |
1431 int address = address_mapper_.MappedTo(heap_object); | 1297 int address = address_mapper_.MappedTo(heap_object); |
1432 SerializeReferenceToPreviousObject(space, | 1298 SerializeReferenceToPreviousObject(space, |
1433 address, | 1299 address, |
1434 how_to_code, | 1300 how_to_code, |
1435 where_to_point); | 1301 where_to_point, |
1302 skip); | |
1436 } else { | 1303 } else { |
1304 if (skip != 0) { | |
1305 sink_->Put(kSkip, "SkipFromSerializeObject"); | |
1306 sink_->PutInt(skip, "SkipDistanceFromSerializeObject"); | |
1307 } | |
1437 // Object has not yet been serialized. Serialize it here. | 1308 // Object has not yet been serialized. Serialize it here. |
1438 ObjectSerializer serializer(this, | 1309 ObjectSerializer serializer(this, |
1439 heap_object, | 1310 heap_object, |
1440 sink_, | 1311 sink_, |
1441 how_to_code, | 1312 how_to_code, |
1442 where_to_point); | 1313 where_to_point); |
1443 serializer.Serialize(); | 1314 serializer.Serialize(); |
1444 } | 1315 } |
1445 } | 1316 } |
1446 | 1317 |
1447 | 1318 |
1448 void Serializer::ObjectSerializer::Serialize() { | 1319 void Serializer::ObjectSerializer::Serialize() { |
1449 int space = Serializer::SpaceOfObject(object_); | 1320 int space = Serializer::SpaceOfObject(object_); |
1450 int size = object_->Size(); | 1321 int size = object_->Size(); |
1451 | 1322 |
1452 sink_->Put(kNewObject + reference_representation_ + space, | 1323 sink_->Put(kNewObject + reference_representation_ + space, |
1453 "ObjectSerialization"); | 1324 "ObjectSerialization"); |
1454 sink_->PutInt(size >> kObjectAlignmentBits, "Size in words"); | 1325 sink_->PutInt(size >> kObjectAlignmentBits, "Size in words"); |
1455 | 1326 |
1456 LOG(i::Isolate::Current(), | 1327 LOG(i::Isolate::Current(), |
1457 SnapshotPositionEvent(object_->address(), sink_->Position())); | 1328 SnapshotPositionEvent(object_->address(), sink_->Position())); |
1458 | 1329 |
1459 // Mark this object as already serialized. | 1330 // Mark this object as already serialized. |
1460 bool start_new_page; | 1331 int offset = serializer_->Allocate(space, size); |
1461 int offset = serializer_->Allocate(space, size, &start_new_page); | |
1462 serializer_->address_mapper()->AddMapping(object_, offset); | 1332 serializer_->address_mapper()->AddMapping(object_, offset); |
1463 if (start_new_page) { | |
1464 sink_->Put(kNewPage, "NewPage"); | |
1465 sink_->PutSection(space, "NewPageSpace"); | |
1466 } | |
1467 | 1333 |
1468 // Serialize the map (first word of the object). | 1334 // Serialize the map (first word of the object). |
1469 serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject); | 1335 serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0); |
1470 | 1336 |
1471 // Serialize the rest of the object. | 1337 // Serialize the rest of the object. |
1472 CHECK_EQ(0, bytes_processed_so_far_); | 1338 CHECK_EQ(0, bytes_processed_so_far_); |
1473 bytes_processed_so_far_ = kPointerSize; | 1339 bytes_processed_so_far_ = kPointerSize; |
1474 object_->IterateBody(object_->map()->instance_type(), size, this); | 1340 object_->IterateBody(object_->map()->instance_type(), size, this); |
1475 OutputRawData(object_->address() + size); | 1341 OutputRawData(object_->address() + size); |
1476 } | 1342 } |
1477 | 1343 |
1478 | 1344 |
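Serialize() writes the object size in words (size >> kObjectAlignmentBits) rather than bytes: heap object sizes are pointer aligned, so the low bits are always zero and dropping them keeps the encoded integer short. A small round-trip check of that encoding; the 4-byte pointer and 2-bit shift assume a 32-bit configuration and are illustrative only:

// Round-trip check for the "size in words" encoding used above.
// kPointerSize/kObjectAlignmentBits values are assumptions for a
// 32-bit configuration, purely for illustration.
#include <cassert>

int main() {
  const int kPointerSize = 4;
  const int kObjectAlignmentBits = 2;   // log2(kPointerSize)
  int size_in_bytes = 24;               // Any pointer-aligned object size.
  assert(size_in_bytes % kPointerSize == 0);
  int size_in_words = size_in_bytes >> kObjectAlignmentBits;   // 6
  int decoded = size_in_words << kObjectAlignmentBits;         // 24 again
  assert(decoded == size_in_bytes);
  return 0;
}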
1479 void Serializer::ObjectSerializer::VisitPointers(Object** start, | 1345 void Serializer::ObjectSerializer::VisitPointers(Object** start, |
(...skipping 20 matching lines...) | |
1500 } | 1366 } |
1501 current += repeat_count; | 1367 current += repeat_count; |
1502 bytes_processed_so_far_ += repeat_count * kPointerSize; | 1368 bytes_processed_so_far_ += repeat_count * kPointerSize; |
1503 if (repeat_count > kMaxRepeats) { | 1369 if (repeat_count > kMaxRepeats) { |
1504 sink_->Put(kRepeat, "SerializeRepeats"); | 1370 sink_->Put(kRepeat, "SerializeRepeats"); |
1505 sink_->PutInt(repeat_count, "SerializeRepeats"); | 1371 sink_->PutInt(repeat_count, "SerializeRepeats"); |
1506 } else { | 1372 } else { |
1507 sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats"); | 1373 sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats"); |
1508 } | 1374 } |
1509 } else { | 1375 } else { |
1510 serializer_->SerializeObject(current_contents, kPlain, kStartOfObject); | 1376 serializer_->SerializeObject( |
1377 current_contents, kPlain, kStartOfObject, 0); | |
1511 bytes_processed_so_far_ += kPointerSize; | 1378 bytes_processed_so_far_ += kPointerSize; |
1512 current++; | 1379 current++; |
1513 } | 1380 } |
1514 } | 1381 } |
1515 } | 1382 } |
1516 } | 1383 } |
1517 | 1384 |
1518 | 1385 |
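VisitPointers run-length encodes consecutive identical pointers: runs of up to kMaxRepeats get a single dedicated opcode via CodeForRepeats, while longer runs get the generic kRepeat opcode followed by the count. A sketch of that two-tier scheme; the opcode numbers, the kMaxRepeats value and the one-byte count are assumptions (the real sink writes the count with PutInt):

// Two-tier run-length encoding: short runs become one dedicated opcode,
// long runs become kRepeat + count. Opcode numbers and kMaxRepeats are
// hypothetical; only the shape of the scheme mirrors the code above.
#include <cstdint>
#include <vector>

const int kMaxRepeats = 13;             // Assumed limit for one-byte codes.
const uint8_t kRepeat = 0x20;           // Generic "repeat N times" opcode.
const uint8_t kFirstRepeatCode = 0x21;  // kFirstRepeatCode + (n - 1) means "repeat n".

uint8_t CodeForRepeats(int repeat_count) {
  return static_cast<uint8_t>(kFirstRepeatCode + repeat_count - 1);
}

void EmitRepeats(std::vector<uint8_t>* sink, int repeat_count) {
  if (repeat_count > kMaxRepeats) {
    sink->push_back(kRepeat);
    sink->push_back(static_cast<uint8_t>(repeat_count));  // Varint in reality.
  } else {
    sink->push_back(CodeForRepeats(repeat_count));
  }
}

int main() {
  std::vector<uint8_t> sink;
  EmitRepeats(&sink, 3);    // One byte: kFirstRepeatCode + 2.
  EmitRepeats(&sink, 200);  // Two bytes: kRepeat, 200.
  return static_cast<int>(sink.size()) == 3 ? 0 : 1;
}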
1519 void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) { | 1386 void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) { |
1520 Object** current = rinfo->target_object_address(); | 1387 Object** current = rinfo->target_object_address(); |
1521 | 1388 |
1522 OutputRawData(rinfo->target_address_address()); | 1389 int skip = OutputRawData(rinfo->target_address_address(), |
1390 kCanReturnSkipInsteadOfSkipping); | |
1523 HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain; | 1391 HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain; |
1524 serializer_->SerializeObject(*current, representation, kStartOfObject); | 1392 serializer_->SerializeObject(*current, representation, kStartOfObject, skip); |
1525 bytes_processed_so_far_ += rinfo->target_address_size(); | 1393 bytes_processed_so_far_ += rinfo->target_address_size(); |
1526 } | 1394 } |
1527 | 1395 |
1528 | 1396 |
1529 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, | 1397 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, |
1530 Address* end) { | 1398 Address* end) { |
1531 Address references_start = reinterpret_cast<Address>(start); | 1399 Address references_start = reinterpret_cast<Address>(start); |
1532 OutputRawData(references_start); | 1400 int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping); |
1533 | 1401 |
1534 for (Address* current = start; current < end; current++) { | 1402 for (Address* current = start; current < end; current++) { |
1535 sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef"); | 1403 sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef"); |
1404 sink_->PutInt(skip, "SkipB4ExternalRef"); | |
1405 skip = 0; | |
1536 int reference_id = serializer_->EncodeExternalReference(*current); | 1406 int reference_id = serializer_->EncodeExternalReference(*current); |
1537 sink_->PutInt(reference_id, "reference id"); | 1407 sink_->PutInt(reference_id, "reference id"); |
1538 } | 1408 } |
1539 bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize); | 1409 bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize); |
1540 } | 1410 } |
1541 | 1411 |
1542 | 1412 |
1543 void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) { | 1413 void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) { |
1544 Address references_start = rinfo->target_address_address(); | 1414 Address references_start = rinfo->target_address_address(); |
1545 OutputRawData(references_start); | 1415 int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping); |
1546 | 1416 |
1547 Address* current = rinfo->target_reference_address(); | 1417 Address* current = rinfo->target_reference_address(); |
1548 int representation = rinfo->IsCodedSpecially() ? | 1418 int representation = rinfo->IsCodedSpecially() ? |
1549 kFromCode + kStartOfObject : kPlain + kStartOfObject; | 1419 kFromCode + kStartOfObject : kPlain + kStartOfObject; |
1550 sink_->Put(kExternalReference + representation, "ExternalRef"); | 1420 sink_->Put(kExternalReference + representation, "ExternalRef"); |
1421 sink_->PutInt(skip, "SkipB4ExternalRef"); | |
1551 int reference_id = serializer_->EncodeExternalReference(*current); | 1422 int reference_id = serializer_->EncodeExternalReference(*current); |
1552 sink_->PutInt(reference_id, "reference id"); | 1423 sink_->PutInt(reference_id, "reference id"); |
1553 bytes_processed_so_far_ += rinfo->target_address_size(); | 1424 bytes_processed_so_far_ += rinfo->target_address_size(); |
1554 } | 1425 } |
1555 | 1426 |
1556 | 1427 |
1557 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) { | 1428 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) { |
1558 Address target_start = rinfo->target_address_address(); | 1429 Address target_start = rinfo->target_address_address(); |
1559 OutputRawData(target_start); | 1430 int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping); |
1560 Address target = rinfo->target_address(); | 1431 Address target = rinfo->target_address(); |
1561 uint32_t encoding = serializer_->EncodeExternalReference(target); | 1432 uint32_t encoding = serializer_->EncodeExternalReference(target); |
1562 CHECK(target == NULL ? encoding == 0 : encoding != 0); | 1433 CHECK(target == NULL ? encoding == 0 : encoding != 0); |
1563 int representation; | 1434 int representation; |
1564 // Can't use a ternary operator because of gcc. | 1435 // Can't use a ternary operator because of gcc. |
1565 if (rinfo->IsCodedSpecially()) { | 1436 if (rinfo->IsCodedSpecially()) { |
1566 representation = kStartOfObject + kFromCode; | 1437 representation = kStartOfObject + kFromCode; |
1567 } else { | 1438 } else { |
1568 representation = kStartOfObject + kPlain; | 1439 representation = kStartOfObject + kPlain; |
1569 } | 1440 } |
1570 sink_->Put(kExternalReference + representation, "ExternalReference"); | 1441 sink_->Put(kExternalReference + representation, "ExternalReference"); |
1442 sink_->PutInt(skip, "SkipB4ExternalRef"); | |
1571 sink_->PutInt(encoding, "reference id"); | 1443 sink_->PutInt(encoding, "reference id"); |
1572 bytes_processed_so_far_ += rinfo->target_address_size(); | 1444 bytes_processed_so_far_ += rinfo->target_address_size(); |
1573 } | 1445 } |
1574 | 1446 |
1575 | 1447 |
1576 void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { | 1448 void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { |
1577 CHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); | 1449 CHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); |
1578 Address target_start = rinfo->target_address_address(); | 1450 Address target_start = rinfo->target_address_address(); |
1579 OutputRawData(target_start); | 1451 int skip = OutputRawData(target_start, kCanReturnSkipInsteadOfSkipping); |
1580 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); | 1452 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
1581 serializer_->SerializeObject(target, kFromCode, kInnerPointer); | 1453 serializer_->SerializeObject(target, kFromCode, kInnerPointer, skip); |
1582 bytes_processed_so_far_ += rinfo->target_address_size(); | 1454 bytes_processed_so_far_ += rinfo->target_address_size(); |
1583 } | 1455 } |
1584 | 1456 |
1585 | 1457 |
1586 void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) { | 1458 void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) { |
1587 Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); | 1459 Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); |
1588 OutputRawData(entry_address); | 1460 int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping); |
1589 serializer_->SerializeObject(target, kPlain, kInnerPointer); | 1461 serializer_->SerializeObject(target, kPlain, kInnerPointer, skip); |
1590 bytes_processed_so_far_ += kPointerSize; | 1462 bytes_processed_so_far_ += kPointerSize; |
1591 } | 1463 } |
1592 | 1464 |
1593 | 1465 |
1594 void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) { | 1466 void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) { |
1595 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); | 1467 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); |
1596 JSGlobalPropertyCell* cell = | 1468 JSGlobalPropertyCell* cell = |
1597 JSGlobalPropertyCell::cast(rinfo->target_cell()); | 1469 JSGlobalPropertyCell::cast(rinfo->target_cell()); |
1598 OutputRawData(rinfo->pc()); | 1470 int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping); |
1599 serializer_->SerializeObject(cell, kPlain, kInnerPointer); | 1471 serializer_->SerializeObject(cell, kPlain, kInnerPointer, skip); |
1600 } | 1472 } |
1601 | 1473 |
1602 | 1474 |
1603 void Serializer::ObjectSerializer::VisitExternalAsciiString( | 1475 void Serializer::ObjectSerializer::VisitExternalAsciiString( |
1604 v8::String::ExternalAsciiStringResource** resource_pointer) { | 1476 v8::String::ExternalAsciiStringResource** resource_pointer) { |
1605 Address references_start = reinterpret_cast<Address>(resource_pointer); | 1477 Address references_start = reinterpret_cast<Address>(resource_pointer); |
1606 OutputRawData(references_start); | 1478 OutputRawData(references_start); |
1607 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { | 1479 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { |
1608 Object* source = HEAP->natives_source_cache()->get(i); | 1480 Object* source = HEAP->natives_source_cache()->get(i); |
1609 if (!source->IsUndefined()) { | 1481 if (!source->IsUndefined()) { |
1610 ExternalAsciiString* string = ExternalAsciiString::cast(source); | 1482 ExternalAsciiString* string = ExternalAsciiString::cast(source); |
1611 typedef v8::String::ExternalAsciiStringResource Resource; | 1483 typedef v8::String::ExternalAsciiStringResource Resource; |
1612 const Resource* resource = string->resource(); | 1484 const Resource* resource = string->resource(); |
1613 if (resource == *resource_pointer) { | 1485 if (resource == *resource_pointer) { |
1614 sink_->Put(kNativesStringResource, "NativesStringResource"); | 1486 sink_->Put(kNativesStringResource, "NativesStringResource"); |
1615 sink_->PutSection(i, "NativesStringResourceEnd"); | 1487 sink_->PutSection(i, "NativesStringResourceEnd"); |
1616 bytes_processed_so_far_ += sizeof(resource); | 1488 bytes_processed_so_far_ += sizeof(resource); |
1617 return; | 1489 return; |
1618 } | 1490 } |
1619 } | 1491 } |
1620 } | 1492 } |
1621 // One of the strings in the natives cache should match the resource. We | 1493 // One of the strings in the natives cache should match the resource. We |
1622 // can't serialize any other kinds of external strings. | 1494 // can't serialize any other kinds of external strings. |
1623 UNREACHABLE(); | 1495 UNREACHABLE(); |
1624 } | 1496 } |
1625 | 1497 |
1626 | 1498 |
1627 void Serializer::ObjectSerializer::OutputRawData(Address up_to) { | 1499 int Serializer::ObjectSerializer::OutputRawData( |
1500 Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) { | |
1628 Address object_start = object_->address(); | 1501 Address object_start = object_->address(); |
1502 Address base = object_start + bytes_processed_so_far_; | |
1629 int up_to_offset = static_cast<int>(up_to - object_start); | 1503 int up_to_offset = static_cast<int>(up_to - object_start); |
1630 int skipped = up_to_offset - bytes_processed_so_far_; | 1504 int to_skip = up_to_offset - bytes_processed_so_far_; |
1505 int bytes_to_output = to_skip; | |
1506 bytes_processed_so_far_ += to_skip; | |
1631 // This assert will fail if the reloc info gives us the target_address_address | 1507 // This assert will fail if the reloc info gives us the target_address_address |
1632 // locations in a non-ascending order. Luckily that doesn't happen. | 1508 // locations in a non-ascending order. Luckily that doesn't happen. |
1633 ASSERT(skipped >= 0); | 1509 ASSERT(to_skip >= 0); |
1634 if (skipped != 0) { | 1510 bool outputting_code = false; |
1635 Address base = object_start + bytes_processed_so_far_; | 1511 if (to_skip != 0 && code_object_ && !code_has_been_output_) { |
1512 // Output the code all at once and fix later. | |
1513 bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_; | |
1514 outputting_code = true; | |
1515 code_has_been_output_ = true; | |
1516 } | |
1517 if (bytes_to_output != 0 && | |
1518 (!code_object_ || outputting_code)) { | |
1636 #define RAW_CASE(index, length) \ | 1519 #define RAW_CASE(index, length) \ |
1637 if (skipped == length) { \ | 1520 if (!outputting_code && bytes_to_output == length && length == to_skip) { \ |
1638 sink_->PutSection(kRawData + index, "RawDataFixed"); \ | 1521 sink_->PutSection(kRawData + index, "RawDataFixed"); \ |
1522 to_skip = 0; /* This insn already skips. */ \ | |
1639 } else /* NOLINT */ | 1523 } else /* NOLINT */ |
1640 COMMON_RAW_LENGTHS(RAW_CASE) | 1524 COMMON_RAW_LENGTHS(RAW_CASE) |
1641 #undef RAW_CASE | 1525 #undef RAW_CASE |
1642 { /* NOLINT */ | 1526 { /* NOLINT */ |
1527 // We always end up here if we are outputting the code of a code object. | |
1643 sink_->Put(kRawData, "RawData"); | 1528 sink_->Put(kRawData, "RawData"); |
1644 sink_->PutInt(skipped, "length"); | 1529 sink_->PutInt(bytes_to_output, "length"); |
1645 } | 1530 } |
1646 for (int i = 0; i < skipped; i++) { | 1531 for (int i = 0; i < bytes_to_output; i++) { |
1647 unsigned int data = base[i]; | 1532 unsigned int data = base[i]; |
1648 sink_->PutSection(data, "Byte"); | 1533 sink_->PutSection(data, "Byte"); |
1649 } | 1534 } |
1650 bytes_processed_so_far_ += skipped; | |
1651 } | 1535 } |
1536 if (to_skip != 0 && return_skip == kIgnoringReturn) { | |
1537 sink_->Put(kSkip, "Skip"); | |
1538 sink_->PutInt(to_skip, "SkipDistance"); | |
1539 to_skip = 0; | |
1540 } | |
1541 return to_skip; | |
1652 } | 1542 } |
1653 | 1543 |
1654 | 1544 |
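OutputRawData now does double duty: it frames the raw bytes of the object (a single dedicated opcode for a few common lengths, otherwise kRawData followed by an explicit length) and, when the caller passes kCanReturnSkipInsteadOfSkipping, returns a remaining skip distance instead of emitting kSkip itself, so the caller can fold it into its next record (the SkipB4ExternalRef and SerializeObject skip arguments above). A condensed standalone sketch of that branching; the opcode values, the common-length table and the plain byte-vector sink are made up:

// Control-flow sketch of raw-data framing plus the optional skip return.
// Opcode values and the common-length table are assumptions; only the
// branching mirrors OutputRawData above.
#include <cstdint>
#include <vector>

enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };

const uint8_t kRawData = 0x30;                 // Generic: followed by a length.
const uint8_t kSkip = 0x0f;                    // Generic: followed by a distance.
const int kCommonRawLengths[] = {1, 2, 4, 8};  // Fixed codes kRawData + 1 + index.

// Writes `length` bytes from `base`; returns how much skip the caller
// still has to account for (zero unless it asked to consume the skip).
int OutputRaw(std::vector<uint8_t>* sink, const uint8_t* base, int length,
              int trailing_skip, ReturnSkip return_skip) {
  bool used_fixed_code = false;
  for (int i = 0; i < 4; i++) {
    if (length == kCommonRawLengths[i]) {
      sink->push_back(static_cast<uint8_t>(kRawData + 1 + i));
      used_fixed_code = true;
      break;
    }
  }
  if (!used_fixed_code) {
    sink->push_back(kRawData);
    sink->push_back(static_cast<uint8_t>(length));  // Varint in reality.
  }
  sink->insert(sink->end(), base, base + length);
  if (trailing_skip != 0 && return_skip == kIgnoringReturn) {
    sink->push_back(kSkip);
    sink->push_back(static_cast<uint8_t>(trailing_skip));
    trailing_skip = 0;
  }
  return trailing_skip;
}

int main() {
  std::vector<uint8_t> sink;
  uint8_t payload[6] = {1, 2, 3, 4, 5, 6};
  // Caller will fold the 4-byte skip into its own following record.
  int skip = OutputRaw(&sink, payload, 6, 4, kCanReturnSkipInsteadOfSkipping);
  return skip == 4 ? 0 : 1;
}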
1655 int Serializer::SpaceOfObject(HeapObject* object) { | 1545 int Serializer::SpaceOfObject(HeapObject* object) { |
1656 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { | 1546 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { |
1657 AllocationSpace s = static_cast<AllocationSpace>(i); | 1547 AllocationSpace s = static_cast<AllocationSpace>(i); |
1658 if (HEAP->InSpace(object, s)) { | 1548 if (HEAP->InSpace(object, s)) { |
1659 if (i == LO_SPACE) { | 1549 ASSERT(i < kNumberOfSpaces); |
1660 if (object->IsCode()) { | |
1661 return kLargeCode; | |
1662 } else if (object->IsFixedArray()) { | |
1663 return kLargeFixedArray; | |
1664 } else { | |
1665 return kLargeData; | |
1666 } | |
1667 } | |
1668 return i; | 1550 return i; |
1669 } | 1551 } |
1670 } | 1552 } |
1671 UNREACHABLE(); | 1553 UNREACHABLE(); |
1672 return 0; | 1554 return 0; |
1673 } | 1555 } |
1674 | 1556 |
1675 | 1557 |
1676 int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) { | 1558 int Serializer::Allocate(int space, int size) { |
1677 for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { | |
1678 AllocationSpace s = static_cast<AllocationSpace>(i); | |
1679 if (HEAP->InSpace(object, s)) { | |
1680 return i; | |
1681 } | |
1682 } | |
1683 UNREACHABLE(); | |
1684 return 0; | |
1685 } | |
1686 | |
1687 | |
1688 int Serializer::Allocate(int space, int size, bool* new_page) { | |
1689 CHECK(space >= 0 && space < kNumberOfSpaces); | 1559 CHECK(space >= 0 && space < kNumberOfSpaces); |
1690 if (SpaceIsLarge(space)) { | |
1691 // In large object space we merely number the objects instead of trying to | |
1692 // determine some sort of address. | |
1693 *new_page = true; | |
1694 large_object_total_ += size; | |
1695 return fullness_[LO_SPACE]++; | |
1696 } | |
1697 *new_page = false; | |
1698 if (fullness_[space] == 0) { | |
1699 *new_page = true; | |
1700 } | |
1701 if (SpaceIsPaged(space)) { | |
1702 // Paged spaces are a little special. We encode their addresses as if the | |
1703 // pages were all contiguous and each page were filled up in the range | |
1704 // 0 - Page::kObjectAreaSize. In practice the pages may not be contiguous | |
1705 // and allocation does not start at offset 0 in the page, but this scheme | |
1706 // means the deserializer can get the page number quickly by shifting the | |
1707 // serialized address. | |
1708 CHECK(IsPowerOf2(Page::kPageSize)); | |
1709 int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1)); | |
1710 CHECK(size <= SpaceAreaSize(space)); | |
1711 if (used_in_this_page + size > SpaceAreaSize(space)) { | |
1712 *new_page = true; | |
1713 fullness_[space] = RoundUp(fullness_[space], Page::kPageSize); | |
1714 } | |
1715 } | |
1716 int allocation_address = fullness_[space]; | 1560 int allocation_address = fullness_[space]; |
1717 fullness_[space] = allocation_address + size; | 1561 fullness_[space] = allocation_address + size; |
1718 return allocation_address; | 1562 return allocation_address; |
1719 } | 1563 } |
1720 | 1564 |
1721 | 1565 |
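With the page-rounding and large-object numbering removed, Allocate is just a bump counter per space: the "address" recorded in the address mapper for back references is the running byte total serialized into that space. A standalone sketch of that bookkeeping; the space count and object sizes are placeholders:

// Bump-counter bookkeeping per space: the offset handed out for an
// object doubles as its back-reference address. The space count is a
// placeholder; real sizes would come from object_->Size().
#include <cassert>

class OffsetAllocator {
 public:
  static const int kNumberOfSpaces = 4;   // Hypothetical space count.
  OffsetAllocator() {
    for (int i = 0; i < kNumberOfSpaces; i++) fullness_[i] = 0;
  }
  // Returns the offset at which an object of `size` bytes is recorded.
  int Allocate(int space, int size) {
    assert(space >= 0 && space < kNumberOfSpaces);
    int allocation_address = fullness_[space];
    fullness_[space] = allocation_address + size;
    return allocation_address;
  }
  int CurrentAllocationSize(int space) const { return fullness_[space]; }

 private:
  int fullness_[kNumberOfSpaces];
};

int main() {
  OffsetAllocator allocator;
  int first = allocator.Allocate(0, 32);    // offset 0
  int second = allocator.Allocate(0, 16);   // offset 32
  assert(first == 0 && second == 32);
  assert(allocator.CurrentAllocationSize(0) == 48);
  return 0;
}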
1722 int Serializer::SpaceAreaSize(int space) { | 1566 int Serializer::SpaceAreaSize(int space) { |
1723 if (space == CODE_SPACE) { | 1567 if (space == CODE_SPACE) { |
1724 return isolate_->memory_allocator()->CodePageAreaSize(); | 1568 return isolate_->memory_allocator()->CodePageAreaSize(); |
1725 } else { | 1569 } else { |
1726 return Page::kPageSize - Page::kObjectStartOffset; | 1570 return Page::kPageSize - Page::kObjectStartOffset; |
1727 } | 1571 } |
1728 } | 1572 } |
1729 | 1573 |
1730 | 1574 |
1575 void Serializer::Pad() { | |
1576 // The non-branching GetInt will read up to 3 bytes too far, so we need | |
1577 // to pad the snapshot to make sure we don't read over the end. | |
1578 for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) { | |
1579 sink_->Put(kNop, "Padding"); | |
1580 } | |
1581 } | |
1582 | |
1583 | |
1584 bool SnapshotByteSource::AtEOF() { | |
1585 if (0u + length_ - position_ > sizeof(uint32_t)) return false; | |
1586 for (int x = position_; x < length_; x++) { | |
1587 if (data_[x] != SerializerDeserializer::nop()) return false; | |
1588 } | |
1589 return true; | |
1590 } | |
1591 | |
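Pad() appends sizeof(int32_t) - 1 nop bytes because a branch-free integer read always loads a full 32-bit window, so decoding the last real byte can touch up to three bytes beyond it; AtEOF() then reports end-of-stream once no more than a word remains and every remaining byte is a nop. A standalone sketch of both halves; the nop value, the fixed four-byte load and the one-byte payload are assumptions rather than V8's actual encoding:

// Why three padding bytes are enough: the reader below always copies a
// full 4-byte window, so the last real byte needs at most three bytes
// of slack after it. The nop value and the payload bytes are made up.
#include <cstdint>
#include <cstring>
#include <vector>

const uint8_t kNop = 0xff;   // Hypothetical "does nothing" opcode.

// Branch-free style read: always load 4 bytes, use only the first one here.
uint8_t ReadByteWithWideLoad(const uint8_t* data, int position) {
  uint32_t window;
  std::memcpy(&window, data + position, sizeof(window));  // Reads 4 bytes.
  return static_cast<uint8_t>(window & 0xff);
}

bool AtEOF(const std::vector<uint8_t>& data, int position) {
  int length = static_cast<int>(data.size());
  if (length - position > static_cast<int>(sizeof(uint32_t))) return false;
  for (int x = position; x < length; x++) {
    if (data[x] != kNop) return false;
  }
  return true;
}

int main() {
  std::vector<uint8_t> stream = {7, 9, 11};              // "Real" payload.
  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {   // Pad(): 3 nops.
    stream.push_back(kNop);
  }
  // Reading the last real byte (index 2) loads indices 2..5: still in bounds.
  uint8_t last = ReadByteWithWideLoad(stream.data(), 2);
  bool eof_after_payload = AtEOF(stream, 3);             // Only nops remain.
  return (last == 11 && eof_after_payload) ? 0 : 1;
}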
1731 } } // namespace v8::internal | 1592 } } // namespace v8::internal |