OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/base/platform/platform.h" | 9 #include "src/base/platform/platform.h" |
10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
(...skipping 580 matching lines...)
591 Isolate* isolate_; | 591 Isolate* isolate_; |
592 }; | 592 }; |
593 | 593 |
594 | 594 |
595 Deserializer::Deserializer(SnapshotByteSource* source) | 595 Deserializer::Deserializer(SnapshotByteSource* source) |
596 : isolate_(NULL), | 596 : isolate_(NULL), |
597 attached_objects_(NULL), | 597 attached_objects_(NULL), |
598 source_(source), | 598 source_(source), |
599 external_reference_decoder_(NULL), | 599 external_reference_decoder_(NULL), |
600 deserialized_large_objects_(0) { | 600 deserialized_large_objects_(0) { |
601 for (int i = 0; i < kNumberOfSpaces; i++) { | 601 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0; |
602 reservations_[i] = kUninitializedReservation; | |
603 } | |
604 } | 602 } |
605 | 603 |
606 | 604 |
607 void Deserializer::FlushICacheForNewCodeObjects() { | 605 void Deserializer::FlushICacheForNewCodeObjects() { |
608 PageIterator it(isolate_->heap()->code_space()); | 606 PageIterator it(isolate_->heap()->code_space()); |
609 while (it.has_next()) { | 607 while (it.has_next()) { |
610 Page* p = it.next(); | 608 Page* p = it.next(); |
611 CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start()); | 609 CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start()); |
612 } | 610 } |
613 } | 611 } |
614 | 612 |
615 | 613 |
| 614 bool Deserializer::ReserveSpace() { |
| 615 if (!isolate_->heap()->ReserveSpace(reservations_)) return false; |
| 616 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) { |
| 617 high_water_[i] = reservations_[i][0].start; |
| 618 } |
| 619 return true; |
| 620 } |
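The reservation bookkeeping that ReserveSpace() initializes is easier to follow as a standalone model. The following is a minimal sketch, not V8 code: Chunk and SpaceReservation are hypothetical names standing in for Heap::Reservation and the deserializer's per-space reservations_, current_chunk_ and high_water_ arrays, as implied by the hunk above.

    // Hypothetical model of the per-space reservation state (illustration only).
    #include <cstdint>
    #include <vector>

    using Address = uint8_t*;

    struct Chunk {       // one contiguous reserved range, fitting on one page
      Address start;
      Address end;
    };

    struct SpaceReservation {
      std::vector<Chunk> chunks;      // reservations_[space]
      int current_chunk = 0;          // current_chunk_[space]
      Address high_water = nullptr;   // high_water_[space]

      // Mirrors ReserveSpace(): once the heap has granted the reservation,
      // allocation starts at the beginning of the first chunk.
      void Begin() {
        current_chunk = 0;
        high_water = chunks[0].start;
      }
    };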
| 621 |
| 622 |
616 void Deserializer::Deserialize(Isolate* isolate) { | 623 void Deserializer::Deserialize(Isolate* isolate) { |
617 isolate_ = isolate; | 624 isolate_ = isolate; |
618 DCHECK(isolate_ != NULL); | 625 DCHECK(isolate_ != NULL); |
619 isolate_->heap()->ReserveSpace(reservations_, high_water_); | 626 if (!ReserveSpace()) FatalProcessOutOfMemory("deserializing context"); |
620 // No active threads. | 627 // No active threads. |
621 DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); | 628 DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse()); |
622 // No active handles. | 629 // No active handles. |
623 DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty()); | 630 DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty()); |
624 DCHECK_EQ(NULL, external_reference_decoder_); | 631 DCHECK_EQ(NULL, external_reference_decoder_); |
625 external_reference_decoder_ = new ExternalReferenceDecoder(isolate); | 632 external_reference_decoder_ = new ExternalReferenceDecoder(isolate); |
626 isolate_->heap()->IterateSmiRoots(this); | 633 isolate_->heap()->IterateSmiRoots(this); |
627 isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); | 634 isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); |
628 isolate_->heap()->RepairFreeListsAfterBoot(); | 635 isolate_->heap()->RepairFreeListsAfterBoot(); |
629 isolate_->heap()->IterateWeakRoots(this, VISIT_ALL); | 636 isolate_->heap()->IterateWeakRoots(this, VISIT_ALL); |
(...skipping 21 matching lines...)
651 } | 658 } |
652 | 659 |
653 FlushICacheForNewCodeObjects(); | 660 FlushICacheForNewCodeObjects(); |
654 | 661 |
655 // Issue code events for newly deserialized code objects. | 662 // Issue code events for newly deserialized code objects. |
656 LOG_CODE_EVENT(isolate_, LogCodeObjects()); | 663 LOG_CODE_EVENT(isolate_, LogCodeObjects()); |
657 LOG_CODE_EVENT(isolate_, LogCompiledFunctions()); | 664 LOG_CODE_EVENT(isolate_, LogCompiledFunctions()); |
658 } | 665 } |
659 | 666 |
660 | 667 |
661 void Deserializer::DeserializePartial(Isolate* isolate, Object** root) { | 668 void Deserializer::DeserializePartial(Isolate* isolate, Object** root, |
| 669 OnOOM on_oom) { |
662 isolate_ = isolate; | 670 isolate_ = isolate; |
663 for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) { | 671 for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) { |
664 DCHECK(reservations_[i] != kUninitializedReservation); | 672 DCHECK(reservations_[i].length() > 0); |
665 } | 673 } |
666 Heap* heap = isolate->heap(); | 674 if (!ReserveSpace()) { |
667 heap->ReserveSpace(reservations_, high_water_); | 675 if (on_oom == FATAL_ON_OOM) FatalProcessOutOfMemory("deserialize context"); |
| 676 *root = NULL; |
| 677 return; |
| 678 } |
668 if (external_reference_decoder_ == NULL) { | 679 if (external_reference_decoder_ == NULL) { |
669 external_reference_decoder_ = new ExternalReferenceDecoder(isolate); | 680 external_reference_decoder_ = new ExternalReferenceDecoder(isolate); |
670 } | 681 } |
671 | 682 |
672 DisallowHeapAllocation no_gc; | 683 DisallowHeapAllocation no_gc; |
673 | 684 |
674 // Keep track of the code space start and end pointers in case new | 685 // Keep track of the code space start and end pointers in case new |
675 // code objects were unserialized | 686 // code objects were unserialized |
676 OldSpace* code_space = isolate_->heap()->code_space(); | 687 OldSpace* code_space = isolate_->heap()->code_space(); |
677 Address start_address = code_space->top(); | 688 Address start_address = code_space->top(); |
(...skipping 15 matching lines...)
693 } | 704 } |
694 if (attached_objects_) attached_objects_->Dispose(); | 705 if (attached_objects_) attached_objects_->Dispose(); |
695 } | 706 } |
696 | 707 |
697 | 708 |
698 // This is called on the roots. It is the driver of the deserialization | 709 // This is called on the roots. It is the driver of the deserialization |
699 // process. It is also called on the body of each function. | 710 // process. It is also called on the body of each function. |
700 void Deserializer::VisitPointers(Object** start, Object** end) { | 711 void Deserializer::VisitPointers(Object** start, Object** end) { |
701 // The space must be new space. Any other space would cause ReadChunk to try | 712 // The space must be new space. Any other space would cause ReadData to try |
702 // to update the remembered using NULL as the address. | 713 // to update the remembered set using NULL as the address. |
703 ReadChunk(start, end, NEW_SPACE, NULL); | 714 ReadData(start, end, NEW_SPACE, NULL); |
704 } | 715 } |
705 | 716 |
706 | 717 |
707 void Deserializer::RelinkAllocationSite(AllocationSite* site) { | 718 void Deserializer::RelinkAllocationSite(AllocationSite* site) { |
708 if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) { | 719 if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) { |
709 site->set_weak_next(isolate_->heap()->undefined_value()); | 720 site->set_weak_next(isolate_->heap()->undefined_value()); |
710 } else { | 721 } else { |
711 site->set_weak_next(isolate_->heap()->allocation_sites_list()); | 722 site->set_weak_next(isolate_->heap()->allocation_sites_list()); |
712 } | 723 } |
713 isolate_->heap()->set_allocation_sites_list(site); | 724 isolate_->heap()->set_allocation_sites_list(site); |
(...skipping 67 matching lines...)
781 Object** write_back) { | 792 Object** write_back) { |
782 int size = source_->GetInt() << kObjectAlignmentBits; | 793 int size = source_->GetInt() << kObjectAlignmentBits; |
783 Address address = Allocate(space_number, size); | 794 Address address = Allocate(space_number, size); |
784 HeapObject* obj = HeapObject::FromAddress(address); | 795 HeapObject* obj = HeapObject::FromAddress(address); |
785 isolate_->heap()->OnAllocationEvent(obj, size); | 796 isolate_->heap()->OnAllocationEvent(obj, size); |
786 Object** current = reinterpret_cast<Object**>(address); | 797 Object** current = reinterpret_cast<Object**>(address); |
787 Object** limit = current + (size >> kPointerSizeLog2); | 798 Object** limit = current + (size >> kPointerSizeLog2); |
788 if (FLAG_log_snapshot_positions) { | 799 if (FLAG_log_snapshot_positions) { |
789 LOG(isolate_, SnapshotPositionEvent(address, source_->position())); | 800 LOG(isolate_, SnapshotPositionEvent(address, source_->position())); |
790 } | 801 } |
791 ReadChunk(current, limit, space_number, address); | 802 ReadData(current, limit, space_number, address); |
792 | 803 |
793 // TODO(mvstanton): consider treating the heap()->allocation_sites_list() | 804 // TODO(mvstanton): consider treating the heap()->allocation_sites_list() |
794 // as a (weak) root. If this root is relocated correctly, | 805 // as a (weak) root. If this root is relocated correctly, |
795 // RelinkAllocationSite() isn't necessary. | 806 // RelinkAllocationSite() isn't necessary. |
796 if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj)); | 807 if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj)); |
797 | 808 |
798 // Fix up strings from serialized user code. | 809 // Fix up strings from serialized user code. |
799 if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj); | 810 if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj); |
800 | 811 |
801 *write_back = obj; | 812 *write_back = obj; |
802 #ifdef DEBUG | 813 #ifdef DEBUG |
803 if (obj->IsCode()) { | 814 if (obj->IsCode()) { |
804 DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE); | 815 DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE); |
805 } else { | 816 } else { |
806 DCHECK(space_number != CODE_SPACE); | 817 DCHECK(space_number != CODE_SPACE); |
807 } | 818 } |
808 #endif | 819 #endif |
809 } | 820 } |
810 | 821 |
811 | 822 |
812 // We know the space requirements before deserialization and can | 823 // We know the space requirements before deserialization and can |
813 // pre-allocate that reserved space. During deserialization, all we need | 824 // pre-allocate that reserved space. During deserialization, all we need |
814 // to do is to bump up the pointer for each space in the reserved | 825 // to do is to bump up the pointer for each space in the reserved |
815 // space. This is also used for fixing back references. | 826 // space. This is also used for fixing back references. |
| 827 // We may have to split up the pre-allocation into several chunks |
| 828 // because it would not fit onto a single page. In that case we have to |
| 829 // keep track of when to move on to the next chunk. |
816 // Since multiple large objects cannot be folded into one large object | 830 // Since multiple large objects cannot be folded into one large object |
817 // space allocation, we have to do an actual allocation when deserializing | 831 // space allocation, we have to do an actual allocation when deserializing |
818 // each large object. Instead of tracking offset for back references, we | 832 // each large object. Instead of tracking offset for back references, we |
819 // reference large objects by index. | 833 // reference large objects by index. |
820 Address Deserializer::Allocate(int space_index, int size) { | 834 Address Deserializer::Allocate(int space_index, int size) { |
821 if (space_index == LO_SPACE) { | 835 if (space_index == LO_SPACE) { |
822 AlwaysAllocateScope scope(isolate_); | 836 AlwaysAllocateScope scope(isolate_); |
823 LargeObjectSpace* lo_space = isolate_->heap()->lo_space(); | 837 LargeObjectSpace* lo_space = isolate_->heap()->lo_space(); |
824 Executability exec = static_cast<Executability>(source_->GetInt()); | 838 Executability exec = static_cast<Executability>(source_->Get()); |
825 AllocationResult result = lo_space->AllocateRaw(size, exec); | 839 AllocationResult result = lo_space->AllocateRaw(size, exec); |
826 HeapObject* obj = HeapObject::cast(result.ToObjectChecked()); | 840 HeapObject* obj = HeapObject::cast(result.ToObjectChecked()); |
827 deserialized_large_objects_.Add(obj); | 841 deserialized_large_objects_.Add(obj); |
828 return obj->address(); | 842 return obj->address(); |
829 } else { | 843 } else { |
830 DCHECK(space_index < kNumberOfPreallocatedSpaces); | 844 DCHECK(space_index < kNumberOfPreallocatedSpaces); |
831 Address address = high_water_[space_index]; | 845 Address address = high_water_[space_index]; |
| 846 DCHECK_NE(NULL, address); |
| 847 const Heap::Reservation& reservation = reservations_[space_index]; |
| 848 int chunk_index = current_chunk_[space_index]; |
| 849 if (address + size > reservation[chunk_index].end) { |
| 850 // The current chunk must have been filled exactly by the deserialized data. |
| 851 DCHECK_EQ(address, reservation[chunk_index].end); |
| 852 // Move to next reserved chunk. |
| 853 chunk_index = ++current_chunk_[space_index]; |
| 854 DCHECK_LT(chunk_index, reservation.length()); |
| 855 // Prepare for next allocation in the next chunk. |
| 856 address = reservation[chunk_index].start; |
| 857 } |
| 858 // In either case, bump the allocation pointer past the newly |
| 859 // allocated object so the next allocation continues from there. |
832 high_water_[space_index] = address + size; | 860 high_water_[space_index] = address + size; |
833 return address; | 861 return address; |
834 } | 862 } |
835 } | 863 } |
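Continuing the hypothetical SpaceReservation model sketched after the ReserveSpace() hunk above, the non-large-object branch of Allocate() reduces to a bump allocator that may hop to the next reserved chunk. This is an illustration under those stated assumptions, not the V8 implementation; the LO_SPACE path is omitted.

    // Bump allocation across reserved chunks, per the hypothetical model above.
    #include <cassert>

    Address Allocate(SpaceReservation* space, int size) {
      Address address = space->high_water;
      const Chunk& chunk = space->chunks[space->current_chunk];
      if (address + size > chunk.end) {
        // The chunk we are leaving must have been filled exactly.
        assert(address == chunk.end);
        space->current_chunk++;
        assert(space->current_chunk < static_cast<int>(space->chunks.size()));
        address = space->chunks[space->current_chunk].start;
      }
      space->high_water = address + size;  // the next object starts here
      return address;
    }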
836 | 864 |
837 | 865 |
838 void Deserializer::ReadChunk(Object** current, | 866 void Deserializer::ReadData(Object** current, Object** limit, int source_space, |
839 Object** limit, | 867 Address current_object_address) { |
840 int source_space, | |
841 Address current_object_address) { | |
842 Isolate* const isolate = isolate_; | 868 Isolate* const isolate = isolate_; |
843 // Write barrier support costs around 1% in startup time. In fact there | 869 // Write barrier support costs around 1% in startup time. In fact there |
844 // are no new space objects in current boot snapshots, so it's not needed, | 870 // are no new space objects in current boot snapshots, so it's not needed, |
845 // but that may change. | 871 // but that may change. |
846 bool write_barrier_needed = (current_object_address != NULL && | 872 bool write_barrier_needed = (current_object_address != NULL && |
847 source_space != NEW_SPACE && | 873 source_space != NEW_SPACE && |
848 source_space != CELL_SPACE && | 874 source_space != CELL_SPACE && |
849 source_space != PROPERTY_CELL_SPACE && | 875 source_space != PROPERTY_CELL_SPACE && |
850 source_space != CODE_SPACE && | 876 source_space != CODE_SPACE && |
851 source_space != OLD_DATA_SPACE); | 877 source_space != OLD_DATA_SPACE); |
(...skipping 31 matching lines...)
883 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ | 909 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ |
884 } else if (where == kExternalReference) { \ | 910 } else if (where == kExternalReference) { \ |
885 int skip = source_->GetInt(); \ | 911 int skip = source_->GetInt(); \ |
886 current = reinterpret_cast<Object**>( \ | 912 current = reinterpret_cast<Object**>( \ |
887 reinterpret_cast<Address>(current) + skip); \ | 913 reinterpret_cast<Address>(current) + skip); \ |
888 int reference_id = source_->GetInt(); \ | 914 int reference_id = source_->GetInt(); \ |
889 Address address = external_reference_decoder_->Decode(reference_id); \ | 915 Address address = external_reference_decoder_->Decode(reference_id); \ |
890 new_object = reinterpret_cast<Object*>(address); \ | 916 new_object = reinterpret_cast<Object*>(address); \ |
891 } else if (where == kBackref) { \ | 917 } else if (where == kBackref) { \ |
892 emit_write_barrier = (space_number == NEW_SPACE); \ | 918 emit_write_barrier = (space_number == NEW_SPACE); \ |
893 new_object = GetAddressFromEnd(data & kSpaceMask); \ | 919 new_object = GetBackReferencedObject(data & kSpaceMask); \ |
894 if (deserializing_user_code()) { \ | 920 if (deserializing_user_code()) { \ |
895 new_object = ProcessBackRefInSerializedCode(new_object); \ | 921 new_object = ProcessBackRefInSerializedCode(new_object); \ |
896 } \ | 922 } \ |
897 } else if (where == kBuiltin) { \ | 923 } else if (where == kBuiltin) { \ |
898 DCHECK(deserializing_user_code()); \ | 924 DCHECK(deserializing_user_code()); \ |
899 int builtin_id = source_->GetInt(); \ | 925 int builtin_id = source_->GetInt(); \ |
900 DCHECK_LE(0, builtin_id); \ | 926 DCHECK_LE(0, builtin_id); \ |
901 DCHECK_LT(builtin_id, Builtins::builtin_count); \ | 927 DCHECK_LT(builtin_id, Builtins::builtin_count); \ |
902 Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \ | 928 Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \ |
903 new_object = isolate->builtins()->builtin(name); \ | 929 new_object = isolate->builtins()->builtin(name); \ |
904 emit_write_barrier = false; \ | 930 emit_write_barrier = false; \ |
905 } else if (where == kAttachedReference) { \ | 931 } else if (where == kAttachedReference) { \ |
906 DCHECK(deserializing_user_code()); \ | 932 DCHECK(deserializing_user_code()); \ |
907 int index = source_->GetInt(); \ | 933 int index = source_->GetInt(); \ |
908 new_object = *attached_objects_->at(index); \ | 934 new_object = *attached_objects_->at(index); \ |
909 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ | 935 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ |
910 } else { \ | 936 } else { \ |
911 DCHECK(where == kBackrefWithSkip); \ | 937 DCHECK(where == kBackrefWithSkip); \ |
912 int skip = source_->GetInt(); \ | 938 int skip = source_->GetInt(); \ |
913 current = reinterpret_cast<Object**>( \ | 939 current = reinterpret_cast<Object**>( \ |
914 reinterpret_cast<Address>(current) + skip); \ | 940 reinterpret_cast<Address>(current) + skip); \ |
915 emit_write_barrier = (space_number == NEW_SPACE); \ | 941 emit_write_barrier = (space_number == NEW_SPACE); \ |
916 new_object = GetAddressFromEnd(data & kSpaceMask); \ | 942 new_object = GetBackReferencedObject(data & kSpaceMask); \ |
917 if (deserializing_user_code()) { \ | 943 if (deserializing_user_code()) { \ |
918 new_object = ProcessBackRefInSerializedCode(new_object); \ | 944 new_object = ProcessBackRefInSerializedCode(new_object); \ |
919 } \ | 945 } \ |
920 } \ | 946 } \ |
921 if (within == kInnerPointer) { \ | 947 if (within == kInnerPointer) { \ |
922 if (space_number != CODE_SPACE || new_object->IsCode()) { \ | 948 if (space_number != CODE_SPACE || new_object->IsCode()) { \ |
923 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ | 949 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ |
924 new_object = \ | 950 new_object = \ |
925 reinterpret_cast<Object*>(new_code_object->instruction_start()); \ | 951 reinterpret_cast<Object*>(new_code_object->instruction_start()); \ |
926 } else { \ | 952 } else { \ |
(...skipping 287 matching lines...)
1214 | 1240 |
1215 Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink) | 1241 Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink) |
1216 : isolate_(isolate), | 1242 : isolate_(isolate), |
1217 sink_(sink), | 1243 sink_(sink), |
1218 external_reference_encoder_(new ExternalReferenceEncoder(isolate)), | 1244 external_reference_encoder_(new ExternalReferenceEncoder(isolate)), |
1219 root_index_wave_front_(0), | 1245 root_index_wave_front_(0), |
1220 code_address_map_(NULL), | 1246 code_address_map_(NULL), |
1221 seen_large_objects_index_(0) { | 1247 seen_large_objects_index_(0) { |
1222 // The serializer is meant to be used only to generate initial heap images | 1248 // The serializer is meant to be used only to generate initial heap images |
1223 // from a context in which there is only one isolate. | 1249 // from a context in which there is only one isolate. |
1224 for (int i = 0; i < kNumberOfSpaces; i++) fullness_[i] = 0; | 1250 for (int i = 0; i < kNumberOfSpaces; i++) pending_chunk_[i] = 0; |
1225 } | 1251 } |
1226 | 1252 |
1227 | 1253 |
1228 Serializer::~Serializer() { | 1254 Serializer::~Serializer() { |
1229 delete external_reference_encoder_; | 1255 delete external_reference_encoder_; |
1230 if (code_address_map_ != NULL) delete code_address_map_; | 1256 if (code_address_map_ != NULL) delete code_address_map_; |
1231 } | 1257 } |
1232 | 1258 |
1233 | 1259 |
1234 void StartupSerializer::SerializeStrongReferences() { | 1260 void StartupSerializer::SerializeStrongReferences() { |
(...skipping 41 matching lines...)
1276 for (int i = 0; i < kPointerSize; i++) { | 1302 for (int i = 0; i < kPointerSize; i++) { |
1277 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); | 1303 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); |
1278 } | 1304 } |
1279 } else { | 1305 } else { |
1280 SerializeObject(*current, kPlain, kStartOfObject, 0); | 1306 SerializeObject(*current, kPlain, kStartOfObject, 0); |
1281 } | 1307 } |
1282 } | 1308 } |
1283 } | 1309 } |
1284 | 1310 |
1285 | 1311 |
| 1312 void Serializer::FinalizeAllocation() { |
| 1313 DCHECK_EQ(0, completed_chunks_[LO_SPACE].length()); // Not yet finalized. |
| 1314 for (int i = 0; i < kNumberOfSpaces; i++) { |
| 1315 // Complete the last pending chunk if it is non-empty; otherwise make sure |
| 1316 // that there is at least one (possibly empty) chunk for this space. |
| 1317 if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) { |
| 1318 completed_chunks_[i].Add(pending_chunk_[i]); |
| 1319 pending_chunk_[i] = 0; |
| 1320 } |
| 1321 } |
| 1322 } |
| 1323 |
| 1324 |
1286 // This ensures that the partial snapshot cache keeps things alive during GC and | 1325 // This ensures that the partial snapshot cache keeps things alive during GC and |
1287 // tracks their movement. When it is called during serialization of the startup | 1326 // tracks their movement. When it is called during serialization of the startup |
1288 // snapshot nothing happens. When the partial (context) snapshot is created, | 1327 // snapshot nothing happens. When the partial (context) snapshot is created, |
1289 // this array is populated with the pointers that the partial snapshot will | 1328 // this array is populated with the pointers that the partial snapshot will |
1290 // need. As that happens we emit serialized objects to the startup snapshot | 1329 // need. As that happens we emit serialized objects to the startup snapshot |
1291 // that correspond to the elements of this cache array. On deserialization we | 1330 // that correspond to the elements of this cache array. On deserialization we |
1292 // therefore need to visit the cache array. This fills it up with pointers to | 1331 // therefore need to visit the cache array. This fills it up with pointers to |
1293 // deserialized objects. | 1332 // deserialized objects. |
1294 void SerializerDeserializer::Iterate(Isolate* isolate, | 1333 void SerializerDeserializer::Iterate(Isolate* isolate, |
1295 ObjectVisitor* visitor) { | 1334 ObjectVisitor* visitor) { |
(...skipping 66 matching lines...)
1362 } else { | 1401 } else { |
1363 sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space, | 1402 sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space, |
1364 "BackRefSerWithSkip"); | 1403 "BackRefSerWithSkip"); |
1365 sink_->PutInt(skip, "BackRefSkipDistance"); | 1404 sink_->PutInt(skip, "BackRefSkipDistance"); |
1366 } | 1405 } |
1367 | 1406 |
1368 if (space == LO_SPACE) { | 1407 if (space == LO_SPACE) { |
1369 int index = address_mapper_.MappedTo(heap_object); | 1408 int index = address_mapper_.MappedTo(heap_object); |
1370 sink_->PutInt(index, "large object index"); | 1409 sink_->PutInt(index, "large object index"); |
1371 } else { | 1410 } else { |
1372 int address = address_mapper_.MappedTo(heap_object); | 1411 uint32_t existing_allocation = address_mapper_.MappedTo(heap_object); |
1373 int offset = CurrentAllocationAddress(space) - address; | |
1374 // Shift out the bits that are always 0. | 1412 // Shift out the bits that are always 0. |
1375 offset >>= kObjectAlignmentBits; | 1413 existing_allocation >>= kObjectAlignmentBits; |
1376 sink_->PutInt(offset, "offset"); | 1414 sink_->PutInt(existing_allocation, "allocation"); |
1377 } | 1415 } |
1378 } | 1416 } |
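The "allocation" value emitted above packs a chunk index and an offset inside that chunk (see Serializer::Allocate further down), and the shift by kObjectAlignmentBits relies on the offset occupying the low bits of the word so that its always-zero alignment bits can be dropped. The following is only a sketch of that arithmetic with made-up field widths; the real widths come from ChunkIndexBits and OffsetBits, which are defined in the header and not shown in this diff.

    // Illustration only: back-reference packing with hypothetical field widths.
    // Assumes the offset occupies the low bits and is object-aligned, i.e. its
    // low kObjectAlignmentBits are zero.
    #include <cstdint>

    constexpr int kObjectAlignmentBits = 3;  // assumption: 8-byte alignment
    constexpr int kOffsetBitWidth = 27;      // hypothetical OffsetBits width

    uint32_t PackBackReference(uint32_t chunk_index, uint32_t offset) {
      uint32_t allocation = (chunk_index << kOffsetBitWidth) | offset;
      return allocation >> kObjectAlignmentBits;  // drop always-zero bits
    }

    void UnpackBackReference(uint32_t packed, uint32_t* chunk_index,
                             uint32_t* offset) {
      uint32_t allocation = packed << kObjectAlignmentBits;
      *chunk_index = allocation >> kOffsetBitWidth;
      *offset = allocation & ((1u << kOffsetBitWidth) - 1);
    }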
1379 | 1417 |
1380 | 1418 |
1381 void StartupSerializer::SerializeObject( | 1419 void StartupSerializer::SerializeObject( |
1382 Object* o, | 1420 Object* o, |
1383 HowToCode how_to_code, | 1421 HowToCode how_to_code, |
1384 WhereToPoint where_to_point, | 1422 WhereToPoint where_to_point, |
1385 int skip) { | 1423 int skip) { |
1386 CHECK(o->IsHeapObject()); | 1424 CHECK(o->IsHeapObject()); |
(...skipping 139 matching lines...)
1526 serializer_->code_address_map_->Lookup(object_->address()); | 1564 serializer_->code_address_map_->Lookup(object_->address()); |
1527 LOG(serializer_->isolate_, | 1565 LOG(serializer_->isolate_, |
1528 CodeNameEvent(object_->address(), sink_->Position(), code_name)); | 1566 CodeNameEvent(object_->address(), sink_->Position(), code_name)); |
1529 LOG(serializer_->isolate_, | 1567 LOG(serializer_->isolate_, |
1530 SnapshotPositionEvent(object_->address(), sink_->Position())); | 1568 SnapshotPositionEvent(object_->address(), sink_->Position())); |
1531 } | 1569 } |
1532 | 1570 |
1533 // Mark this object as already serialized. | 1571 // Mark this object as already serialized. |
1534 if (space == LO_SPACE) { | 1572 if (space == LO_SPACE) { |
1535 if (object_->IsCode()) { | 1573 if (object_->IsCode()) { |
1536 sink_->PutInt(EXECUTABLE, "executable large object"); | 1574 sink_->Put(EXECUTABLE, "executable large object"); |
1537 } else { | 1575 } else { |
1538 sink_->PutInt(NOT_EXECUTABLE, "not executable large object"); | 1576 sink_->Put(NOT_EXECUTABLE, "not executable large object"); |
1539 } | 1577 } |
1540 int index = serializer_->AllocateLargeObject(size); | 1578 int index = serializer_->AllocateLargeObject(size); |
1541 serializer_->address_mapper()->AddMapping(object_, index); | 1579 serializer_->address_mapper()->AddMapping(object_, index); |
1542 } else { | 1580 } else { |
1543 int offset = serializer_->Allocate(space, size); | 1581 int allocation = serializer_->Allocate(space, size); |
1544 serializer_->address_mapper()->AddMapping(object_, offset); | 1582 serializer_->address_mapper()->AddMapping(object_, allocation); |
1545 } | 1583 } |
1546 | 1584 |
1547 // Serialize the map (first word of the object). | 1585 // Serialize the map (first word of the object). |
1548 serializer_->SerializeObject(map, kPlain, kStartOfObject, 0); | 1586 serializer_->SerializeObject(map, kPlain, kStartOfObject, 0); |
1549 } | 1587 } |
1550 | 1588 |
1551 | 1589 |
1552 void Serializer::ObjectSerializer::SerializeExternalString() { | 1590 void Serializer::ObjectSerializer::SerializeExternalString() { |
1553 // Instead of serializing this as an external string, we serialize | 1591 // Instead of serializing this as an external string, we serialize |
1554 // an imaginary sequential string with the same content. | 1592 // an imaginary sequential string with the same content. |
(...skipping 305 matching lines...)
1860 if (object->GetHeap()->InSpace(object, s)) { | 1898 if (object->GetHeap()->InSpace(object, s)) { |
1861 DCHECK(i < kNumberOfSpaces); | 1899 DCHECK(i < kNumberOfSpaces); |
1862 return i; | 1900 return i; |
1863 } | 1901 } |
1864 } | 1902 } |
1865 UNREACHABLE(); | 1903 UNREACHABLE(); |
1866 return 0; | 1904 return 0; |
1867 } | 1905 } |
1868 | 1906 |
1869 | 1907 |
1870 int Serializer::AllocateLargeObject(int size) { | 1908 uint32_t Serializer::AllocateLargeObject(int size) { |
1871 fullness_[LO_SPACE] += size; | 1909 // Large objects are allocated one-by-one when deserializing. We do not |
| 1910 // have to keep track of multiple chunks. |
| 1911 pending_chunk_[LO_SPACE] += size; |
1872 return seen_large_objects_index_++; | 1912 return seen_large_objects_index_++; |
1873 } | 1913 } |
1874 | 1914 |
1875 | 1915 |
1876 int Serializer::Allocate(int space, int size) { | 1916 uint32_t Serializer::Allocate(int space, int size) { |
1877 CHECK(space >= 0 && space < kNumberOfPreallocatedSpaces); | 1917 CHECK(space >= 0 && space < kNumberOfPreallocatedSpaces); |
1878 int allocation_address = fullness_[space]; | 1918 DCHECK(size > 0 && size < Page::kMaxRegularHeapObjectSize); |
1879 fullness_[space] = allocation_address + size; | 1919 uint32_t new_chunk_size = pending_chunk_[space] + size; |
1880 return allocation_address; | 1920 uint32_t allocation; |
| 1921 if (new_chunk_size > Page::kMaxRegularHeapObjectSize) { |
| 1922 // The new chunk size would not fit onto a single page. Complete the |
| 1923 // current chunk and start a new one. |
| 1924 completed_chunks_[space].Add(pending_chunk_[space]); |
| 1925 pending_chunk_[space] = 0; |
| 1926 new_chunk_size = size; |
| 1927 } |
| 1928 // For back-referencing, each allocation is encoded as a combination |
| 1929 // of chunk index and offset inside the chunk. |
| 1930 allocation = ChunkIndexBits::encode(completed_chunks_[space].length()) | |
| 1931 OffsetBits::encode(pending_chunk_[space]); |
| 1932 pending_chunk_[space] = new_chunk_size; |
| 1933 return allocation; |
1881 } | 1934 } |
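On the serializer side, Allocate() and FinalizeAllocation() together maintain, per space, one pending chunk plus a list of completed chunk sizes. The sketch below restates that bookkeeping in isolation; it is illustrative only, with kMaxChunkSize standing in for Page::kMaxRegularHeapObjectSize and all other names hypothetical.

    // Illustration of the per-space pending/completed chunk bookkeeping.
    #include <cstdint>
    #include <utility>
    #include <vector>

    constexpr uint32_t kMaxChunkSize = 500 * 1024;  // placeholder page budget

    struct SpaceAllocations {
      uint32_t pending_chunk = 0;              // bytes in the chunk being filled
      std::vector<uint32_t> completed_chunks;  // sizes of chunks already closed

      // Returns {chunk index, offset in chunk}; starts a new chunk whenever the
      // current one would no longer fit on a single page.
      std::pair<uint32_t, uint32_t> Allocate(uint32_t size) {
        if (pending_chunk + size > kMaxChunkSize) {
          completed_chunks.push_back(pending_chunk);
          pending_chunk = 0;
        }
        std::pair<uint32_t, uint32_t> result(
            static_cast<uint32_t>(completed_chunks.size()), pending_chunk);
        pending_chunk += size;
        return result;
      }

      // Mirrors FinalizeAllocation(): flush the pending chunk so that every
      // space ends up with at least one (possibly empty) chunk.
      void Finalize() {
        if (pending_chunk > 0 || completed_chunks.empty()) {
          completed_chunks.push_back(pending_chunk);
          pending_chunk = 0;
        }
      }
    };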
1882 | 1935 |
1883 | 1936 |
1884 int Serializer::SpaceAreaSize(int space) { | 1937 int Serializer::SpaceAreaSize(int space) { |
1885 if (space == CODE_SPACE) { | 1938 if (space == CODE_SPACE) { |
1886 return isolate_->memory_allocator()->CodePageAreaSize(); | 1939 return isolate_->memory_allocator()->CodePageAreaSize(); |
1887 } else { | 1940 } else { |
1888 return Page::kPageSize - Page::kObjectStartOffset; | 1941 return Page::kPageSize - Page::kObjectStartOffset; |
1889 } | 1942 } |
1890 } | 1943 } |
(...skipping 25 matching lines...)
1916 ListSnapshotSink list_sink(&payload); | 1969 ListSnapshotSink list_sink(&payload); |
1917 DebugSnapshotSink debug_sink(&list_sink); | 1970 DebugSnapshotSink debug_sink(&list_sink); |
1918 SnapshotByteSink* sink = FLAG_trace_code_serializer | 1971 SnapshotByteSink* sink = FLAG_trace_code_serializer |
1919 ? static_cast<SnapshotByteSink*>(&debug_sink) | 1972 ? static_cast<SnapshotByteSink*>(&debug_sink) |
1920 : static_cast<SnapshotByteSink*>(&list_sink); | 1973 : static_cast<SnapshotByteSink*>(&list_sink); |
1921 CodeSerializer cs(isolate, sink, *source, info->code()); | 1974 CodeSerializer cs(isolate, sink, *source, info->code()); |
1922 DisallowHeapAllocation no_gc; | 1975 DisallowHeapAllocation no_gc; |
1923 Object** location = Handle<Object>::cast(info).location(); | 1976 Object** location = Handle<Object>::cast(info).location(); |
1924 cs.VisitPointer(location); | 1977 cs.VisitPointer(location); |
1925 cs.Pad(); | 1978 cs.Pad(); |
| 1979 cs.FinalizeAllocation(); |
1926 | 1980 |
1927 SerializedCodeData data(&payload, &cs); | 1981 SerializedCodeData data(&payload, &cs); |
1928 ScriptData* script_data = data.GetScriptData(); | 1982 ScriptData* script_data = data.GetScriptData(); |
1929 | 1983 |
1930 if (FLAG_profile_deserialization) { | 1984 if (FLAG_profile_deserialization) { |
1931 double ms = timer.Elapsed().InMillisecondsF(); | 1985 double ms = timer.Elapsed().InMillisecondsF(); |
1932 int length = script_data->length(); | 1986 int length = script_data->length(); |
1933 PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms); | 1987 PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms); |
1934 } | 1988 } |
1935 | 1989 |
(...skipping 150 matching lines...)
2086 void CodeSerializer::SerializeSourceObject(HowToCode how_to_code, | 2140 void CodeSerializer::SerializeSourceObject(HowToCode how_to_code, |
2087 WhereToPoint where_to_point) { | 2141 WhereToPoint where_to_point) { |
2088 if (FLAG_trace_code_serializer) PrintF("Encoding source object\n"); | 2142 if (FLAG_trace_code_serializer) PrintF("Encoding source object\n"); |
2089 | 2143 |
2090 DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject); | 2144 DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject); |
2091 sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source"); | 2145 sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source"); |
2092 sink_->PutInt(kSourceObjectIndex, "kSourceObjectIndex"); | 2146 sink_->PutInt(kSourceObjectIndex, "kSourceObjectIndex"); |
2093 } | 2147 } |
2094 | 2148 |
2095 | 2149 |
2096 Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate, | 2150 MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize( |
2097 ScriptData* data, | 2151 Isolate* isolate, ScriptData* data, Handle<String> source) { |
2098 Handle<String> source) { | |
2099 base::ElapsedTimer timer; | 2152 base::ElapsedTimer timer; |
2100 if (FLAG_profile_deserialization) timer.Start(); | 2153 if (FLAG_profile_deserialization) timer.Start(); |
2101 | 2154 |
2102 Object* root; | 2155 Object* root; |
2103 | 2156 |
2104 { | 2157 { |
2105 HandleScope scope(isolate); | 2158 HandleScope scope(isolate); |
2106 | 2159 |
2107 SerializedCodeData scd(data, *source); | 2160 SerializedCodeData scd(data, *source); |
2108 SnapshotByteSource payload(scd.Payload(), scd.PayloadLength()); | 2161 SnapshotByteSource payload(scd.Payload(), scd.PayloadLength()); |
2109 Deserializer deserializer(&payload); | 2162 Deserializer deserializer(&payload); |
| 2163 |
2110 STATIC_ASSERT(NEW_SPACE == 0); | 2164 STATIC_ASSERT(NEW_SPACE == 0); |
2111 for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) { | 2165 int current_space = NEW_SPACE; |
2112 deserializer.set_reservation(i, scd.GetReservation(i)); | 2166 Vector<const SerializedCodeData::Reservation> res = scd.Reservations(); |
| 2167 for (const auto& r : res) { |
| 2168 deserializer.AddReservation(current_space, r.chunk_size()); |
| 2169 if (r.is_last_chunk()) current_space++; |
2113 } | 2170 } |
| 2171 DCHECK_EQ(kNumberOfSpaces, current_space); |
2114 | 2172 |
2115 // Prepare and register list of attached objects. | 2173 // Prepare and register list of attached objects. |
2116 Vector<const uint32_t> code_stub_keys = scd.CodeStubKeys(); | 2174 Vector<const uint32_t> code_stub_keys = scd.CodeStubKeys(); |
2117 Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New( | 2175 Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New( |
2118 code_stub_keys.length() + kCodeStubsBaseIndex); | 2176 code_stub_keys.length() + kCodeStubsBaseIndex); |
2119 attached_objects[kSourceObjectIndex] = source; | 2177 attached_objects[kSourceObjectIndex] = source; |
2120 for (int i = 0; i < code_stub_keys.length(); i++) { | 2178 for (int i = 0; i < code_stub_keys.length(); i++) { |
2121 attached_objects[i + kCodeStubsBaseIndex] = | 2179 attached_objects[i + kCodeStubsBaseIndex] = |
2122 CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked(); | 2180 CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked(); |
2123 } | 2181 } |
2124 deserializer.SetAttachedObjects(&attached_objects); | 2182 deserializer.SetAttachedObjects(&attached_objects); |
2125 | 2183 |
2126 // Deserialize. | 2184 // Deserialize. |
2127 deserializer.DeserializePartial(isolate, &root); | 2185 deserializer.DeserializePartial(isolate, &root, Deserializer::NULL_ON_OOM); |
| 2186 if (root == NULL) { |
| 2187 // Deserializing may fail if the reservations cannot be fulfilled. |
| 2188 if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n"); |
| 2189 return MaybeHandle<SharedFunctionInfo>(); |
| 2190 } |
2128 deserializer.FlushICacheForNewCodeObjects(); | 2191 deserializer.FlushICacheForNewCodeObjects(); |
2129 } | 2192 } |
2130 | 2193 |
2131 if (FLAG_profile_deserialization) { | 2194 if (FLAG_profile_deserialization) { |
2132 double ms = timer.Elapsed().InMillisecondsF(); | 2195 double ms = timer.Elapsed().InMillisecondsF(); |
2133 int length = data->length(); | 2196 int length = data->length(); |
2134 PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms); | 2197 PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms); |
2135 } | 2198 } |
2136 Handle<SharedFunctionInfo> result(SharedFunctionInfo::cast(root), isolate); | 2199 Handle<SharedFunctionInfo> result(SharedFunctionInfo::cast(root), isolate); |
2137 result->set_deserialized(true); | 2200 result->set_deserialized(true); |
2138 return result; | 2201 return result; |
2139 } | 2202 } |
2140 | 2203 |
2141 | 2204 |
2142 SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs) | 2205 SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs) |
2143 : owns_script_data_(true) { | 2206 : owns_script_data_(true) { |
2144 DisallowHeapAllocation no_gc; | 2207 DisallowHeapAllocation no_gc; |
2145 List<uint32_t>* stub_keys = cs->stub_keys(); | 2208 List<uint32_t>* stub_keys = cs->stub_keys(); |
2146 | 2209 |
| 2210 // Gather reservation chunk sizes. |
| 2211 List<uint32_t> reservations(SerializerDeserializer::kNumberOfSpaces); |
| 2212 STATIC_ASSERT(NEW_SPACE == 0); |
| 2213 for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) { |
| 2214 Vector<const uint32_t> chunks = cs->FinalAllocationChunks(i); |
| 2215 for (int j = 0; j < chunks.length(); j++) { |
| 2216 DCHECK(i == LO_SPACE || chunks[j] < Page::kMaxRegularHeapObjectSize); |
| 2217 uint32_t chunk = ChunkSizeBits::encode(chunks[j]) | |
| 2218 IsLastChunkBits::encode(j == chunks.length() - 1); |
| 2219 reservations.Add(chunk); |
| 2220 } |
| 2221 } |
| 2222 |
2147 // Calculate sizes. | 2223 // Calculate sizes. |
| 2224 int reservation_size = reservations.length() * kInt32Size; |
2148 int num_stub_keys = stub_keys->length(); | 2225 int num_stub_keys = stub_keys->length(); |
2149 int stub_keys_size = stub_keys->length() * kInt32Size; | 2226 int stub_keys_size = stub_keys->length() * kInt32Size; |
2150 int data_length = kHeaderSize + stub_keys_size + payload->length(); | 2227 int data_length = |
| 2228 kHeaderSize + reservation_size + stub_keys_size + payload->length(); |
2151 | 2229 |
2152 // Allocate backing store and create result data. | 2230 // Allocate backing store and create result data. |
2153 byte* data = NewArray<byte>(data_length); | 2231 byte* data = NewArray<byte>(data_length); |
2154 DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)); | 2232 DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)); |
2155 script_data_ = new ScriptData(data, data_length); | 2233 script_data_ = new ScriptData(data, data_length); |
2156 script_data_->AcquireDataOwnership(); | 2234 script_data_->AcquireDataOwnership(); |
2157 | 2235 |
2158 // Set header values. | 2236 // Set header values. |
2159 SetHeaderValue(kCheckSumOffset, CheckSum(cs->source())); | 2237 SetHeaderValue(kCheckSumOffset, CheckSum(cs->source())); |
| 2238 SetHeaderValue(kReservationsOffset, reservations.length()); |
2160 SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys); | 2239 SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys); |
2161 SetHeaderValue(kPayloadLengthOffset, payload->length()); | 2240 SetHeaderValue(kPayloadLengthOffset, payload->length()); |
2162 STATIC_ASSERT(NEW_SPACE == 0); | 2241 |
2163 for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) { | 2242 // Copy reservation chunk sizes. |
2164 SetHeaderValue(kReservationsOffset + i, cs->CurrentAllocationAddress(i)); | 2243 CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()), |
2165 } | 2244 reservation_size); |
2166 | 2245 |
2167 // Copy code stub keys. | 2246 // Copy code stub keys. |
2168 CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(stub_keys->begin()), | 2247 CopyBytes(data + kHeaderSize + reservation_size, |
2169 stub_keys_size); | 2248 reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size); |
2170 | 2249 |
2171 // Copy serialized data. | 2250 // Copy serialized data. |
2172 CopyBytes(data + kHeaderSize + stub_keys_size, payload->begin(), | 2251 CopyBytes(data + kHeaderSize + reservation_size + stub_keys_size, |
2173 static_cast<size_t>(payload->length())); | 2252 payload->begin(), static_cast<size_t>(payload->length())); |
2174 } | 2253 } |
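Taken together, the constructor above implies roughly the following layout for the serialized code data blob. Offsets are symbolic: the actual kHeaderSize and k*Offset constants and the ChunkSizeBits/IsLastChunkBits field definitions live in the header file and are not part of this diff, so treat this as a reading aid rather than a specification.

    // Approximate layout of the SerializedCodeData blob (illustrative):
    //
    //   header (kHeaderSize bytes)
    //     kCheckSumOffset         checksum over the V8 version (+ source in debug)
    //     kReservationsOffset     number of reservation words that follow
    //     kNumCodeStubKeysOffset  number of code stub keys
    //     kPayloadLengthOffset    payload length in bytes
    //   reservations              one uint32 per chunk:
    //                               ChunkSizeBits   = chunk size in bytes
    //                               IsLastChunkBits = 1 for the last chunk of a space
    //                             spaces appear in order, starting with NEW_SPACE
    //   code stub keys            num_stub_keys * kInt32Size bytes
    //   payload                   the serialized object stream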
2175 | 2254 |
2176 | 2255 |
2177 bool SerializedCodeData::IsSane(String* source) { | 2256 bool SerializedCodeData::IsSane(String* source) { |
2178 return GetHeaderValue(kCheckSumOffset) == CheckSum(source) && | 2257 return GetHeaderValue(kCheckSumOffset) == CheckSum(source) && |
2179 PayloadLength() >= SharedFunctionInfo::kSize; | 2258 PayloadLength() >= SharedFunctionInfo::kSize; |
2180 } | 2259 } |
2181 | 2260 |
2182 | 2261 |
2183 int SerializedCodeData::CheckSum(String* string) { | 2262 int SerializedCodeData::CheckSum(String* string) { |
2184 int checksum = Version::Hash(); | 2263 int checksum = Version::Hash(); |
2185 #ifdef DEBUG | 2264 #ifdef DEBUG |
2186 uint32_t seed = static_cast<uint32_t>(checksum); | 2265 uint32_t seed = static_cast<uint32_t>(checksum); |
2187 checksum = static_cast<int>(IteratingStringHasher::Hash(string, seed)); | 2266 checksum = static_cast<int>(IteratingStringHasher::Hash(string, seed)); |
2188 #endif // DEBUG | 2267 #endif // DEBUG |
2189 return checksum; | 2268 return checksum; |
2190 } | 2269 } |
2191 } } // namespace v8::internal | 2270 } } // namespace v8::internal |