| OLD | NEW | 
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #ifndef V8_SERIALIZE_H_ | 5 #ifndef V8_SERIALIZE_H_ | 
| 6 #define V8_SERIALIZE_H_ | 6 #define V8_SERIALIZE_H_ | 
| 7 | 7 | 
| 8 #include "src/compiler.h" | 8 #include "src/compiler.h" | 
| 9 #include "src/hashmap.h" | 9 #include "src/hashmap.h" | 
| 10 #include "src/heap-profiler.h" | 10 #include "src/heap-profiler.h" | 
| (...skipping 386 matching lines...) |  | 
| 397   enum OnOOM { FATAL_ON_OOM, NULL_ON_OOM }; | 397   enum OnOOM { FATAL_ON_OOM, NULL_ON_OOM }; | 
| 398 | 398 | 
| 399   // Deserialize a single object and the objects reachable from it. | 399   // Deserialize a single object and the objects reachable from it. | 
| 400   // We may want to abort gracefully even if deserialization fails. | 400   // We may want to abort gracefully even if deserialization fails. | 
| 401   void DeserializePartial(Isolate* isolate, Object** root, | 401   void DeserializePartial(Isolate* isolate, Object** root, | 
| 402                           OnOOM on_oom = FATAL_ON_OOM); | 402                           OnOOM on_oom = FATAL_ON_OOM); | 
| 403 | 403 | 
| 404   void AddReservation(int space, uint32_t chunk) { | 404   void AddReservation(int space, uint32_t chunk) { | 
| 405     DCHECK(space >= 0); | 405     DCHECK(space >= 0); | 
| 406     DCHECK(space < kNumberOfSpaces); | 406     DCHECK(space < kNumberOfSpaces); | 
| 407     DCHECK(space == LO_SPACE || |  | 
| 408            chunk <= static_cast<uint32_t>(Page::kMaxRegularHeapObjectSize)); |  | 
| 409     reservations_[space].Add({chunk, NULL, NULL}); | 407     reservations_[space].Add({chunk, NULL, NULL}); | 
| 410   } | 408   } | 
| 411 | 409 | 
| 412   void FlushICacheForNewCodeObjects(); | 410   void FlushICacheForNewCodeObjects(); | 
| 413 | 411 | 
| 414   // Serialized user code references certain objects that are provided in a list. | 412   // Serialized user code references certain objects that are provided in a list. | 
| 415   // By calling this method, we assume that we are deserializing user code. | 413   // By calling this method, we assume that we are deserializing user code. | 
| 416   void SetAttachedObjects(Vector<Handle<Object> >* attached_objects) { | 414   void SetAttachedObjects(Vector<Handle<Object> >* attached_objects) { | 
| 417     attached_objects_ = attached_objects; | 415     attached_objects_ = attached_objects; | 
| 418   } | 416   } | 
| (...skipping 72 matching lines...) |  | 
| 491 // There can be only one serializer per V8 process. | 489 // There can be only one serializer per V8 process. | 
| 492 class Serializer : public SerializerDeserializer { | 490 class Serializer : public SerializerDeserializer { | 
| 493  public: | 491  public: | 
| 494   Serializer(Isolate* isolate, SnapshotByteSink* sink); | 492   Serializer(Isolate* isolate, SnapshotByteSink* sink); | 
| 495   ~Serializer(); | 493   ~Serializer(); | 
| 496   virtual void VisitPointers(Object** start, Object** end) OVERRIDE; | 494   virtual void VisitPointers(Object** start, Object** end) OVERRIDE; | 
| 497 | 495 | 
| 498   void FinalizeAllocation(); | 496   void FinalizeAllocation(); | 
| 499 | 497 | 
| 500   Vector<const uint32_t> FinalAllocationChunks(int space) const { | 498   Vector<const uint32_t> FinalAllocationChunks(int space) const { | 
| 501     DCHECK_EQ(1, completed_chunks_[LO_SPACE].length());  // Already finalized. | 499     if (space == LO_SPACE) { | 
| 502     DCHECK_EQ(0, pending_chunk_[space]);                 // No pending chunks. | 500       return Vector<const uint32_t>(&large_objects_total_size_, 1); | 
| 503     return completed_chunks_[space].ToConstVector(); | 501     } else { | 
|  | 502       DCHECK_EQ(0, pending_chunk_[space]);  // No pending chunks. | 
|  | 503       return completed_chunks_[space].ToConstVector(); | 
|  | 504     } | 
| 504   } | 505   } | 
| 505 | 506 | 
| 506   Isolate* isolate() const { return isolate_; } | 507   Isolate* isolate() const { return isolate_; } | 
| 507 | 508 | 
| 508   BackReferenceMap* back_reference_map() { return &back_reference_map_; } | 509   BackReferenceMap* back_reference_map() { return &back_reference_map_; } | 
| 509   RootIndexMap* root_index_map() { return &root_index_map_; } | 510   RootIndexMap* root_index_map() { return &root_index_map_; } | 
| 510 | 511 | 
| 511  protected: | 512  protected: | 
| 512   class ObjectSerializer : public ObjectVisitor { | 513   class ObjectSerializer : public ObjectVisitor { | 
| 513    public: | 514    public: | 
| (...skipping 59 matching lines...) |  | 
| 573                               WhereToPoint where_to_point, int skip); | 574                               WhereToPoint where_to_point, int skip); | 
| 574   void InitializeAllocators(); | 575   void InitializeAllocators(); | 
| 575   // This will return the space for an object. | 576   // This will return the space for an object. | 
| 576   static AllocationSpace SpaceOfObject(HeapObject* object); | 577   static AllocationSpace SpaceOfObject(HeapObject* object); | 
| 577   BackReference AllocateLargeObject(int size); | 578   BackReference AllocateLargeObject(int size); | 
| 578   BackReference Allocate(AllocationSpace space, int size); | 579   BackReference Allocate(AllocationSpace space, int size); | 
| 579   int EncodeExternalReference(Address addr) { | 580   int EncodeExternalReference(Address addr) { | 
| 580     return external_reference_encoder_->Encode(addr); | 581     return external_reference_encoder_->Encode(addr); | 
| 581   } | 582   } | 
| 582 | 583 | 
| 583   int SpaceAreaSize(int space); | 584   // GetInt reads 4 bytes at once, requiring padding at the end. | 
|  | 585   void Pad(); | 
| 584 | 586 | 
| 585   // Some roots should not be serialized, because their actual value depends on | 587   // Some roots should not be serialized, because their actual value depends on | 
| 586   // absolute addresses and they are reset after deserialization, anyway. | 588   // absolute addresses and they are reset after deserialization, anyway. | 
| 587   bool ShouldBeSkipped(Object** current); | 589   bool ShouldBeSkipped(Object** current); | 
| 588 | 590 | 
|  | 591   // We may not need the code address map for logging for every instance | 
|  | 592   // of the serializer.  Initialize it on demand. | 
|  | 593   void InitializeCodeAddressMap(); | 
|  | 594 | 
|  | 595   inline uint32_t max_chunk_size(int space) const { | 
|  | 596     DCHECK_LE(0, space); | 
|  | 597     DCHECK_LT(space, kNumberOfSpaces); | 
|  | 598     return max_chunk_size_[space]; | 
|  | 599   } | 
|  | 600 | 
| 589   Isolate* isolate_; | 601   Isolate* isolate_; | 
| 590 | 602 | 
| 591   // Objects from the same space are put into chunks for bulk-allocation |  | 
| 592   // when deserializing. We have to make sure that each chunk fits into a |  | 
| 593   // page. So we track the chunk size in pending_chunk_ of a space, but |  | 
| 594   // when it exceeds a page, we complete the current chunk and start a new one. |  | 
| 595   uint32_t pending_chunk_[kNumberOfSpaces]; |  | 
| 596   List<uint32_t> completed_chunks_[kNumberOfSpaces]; |  | 
| 597 |  | 
| 598   SnapshotByteSink* sink_; | 603   SnapshotByteSink* sink_; | 
| 599   ExternalReferenceEncoder* external_reference_encoder_; | 604   ExternalReferenceEncoder* external_reference_encoder_; | 
| 600 | 605 | 
| 601   BackReferenceMap back_reference_map_; | 606   BackReferenceMap back_reference_map_; | 
| 602   RootIndexMap root_index_map_; | 607   RootIndexMap root_index_map_; | 
| 603   void Pad(); |  | 
| 604 | 608 | 
| 605   friend class ObjectSerializer; | 609   friend class ObjectSerializer; | 
| 606   friend class Deserializer; | 610   friend class Deserializer; | 
| 607 | 611 | 
| 608   // We may not need the code address map for logging for every instance |  | 
| 609   // of the serializer.  Initialize it on demand. |  | 
| 610   void InitializeCodeAddressMap(); |  | 
| 611 |  | 
| 612  private: | 612  private: | 
| 613   CodeAddressMap* code_address_map_; | 613   CodeAddressMap* code_address_map_; | 
|  | 614   // Objects from the same space are put into chunks for bulk-allocation | 
|  | 615   // when deserializing. We have to make sure that each chunk fits into a | 
|  | 616   // page. So we track the chunk size in pending_chunk_ of a space, but | 
|  | 617   // when it exceeds a page, we complete the current chunk and start a new one. | 
|  | 618   uint32_t pending_chunk_[kNumberOfPreallocatedSpaces]; | 
|  | 619   List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces]; | 
|  | 620   uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces]; | 
|  | 621 | 
| 614   // We map serialized large objects to indexes for back-referencing. | 622   // We map serialized large objects to indexes for back-referencing. | 
|  | 623   uint32_t large_objects_total_size_; | 
| 615   uint32_t seen_large_objects_index_; | 624   uint32_t seen_large_objects_index_; | 
|  | 625 | 
| 616   DISALLOW_COPY_AND_ASSIGN(Serializer); | 626   DISALLOW_COPY_AND_ASSIGN(Serializer); | 
| 617 }; | 627 }; | 
| 618 | 628 | 
| 619 | 629 | 
| 620 class PartialSerializer : public Serializer { | 630 class PartialSerializer : public Serializer { | 
| 621  public: | 631  public: | 
| 622   PartialSerializer(Isolate* isolate, | 632   PartialSerializer(Isolate* isolate, | 
| 623                     Serializer* startup_snapshot_serializer, | 633                     Serializer* startup_snapshot_serializer, | 
| 624                     SnapshotByteSink* sink) | 634                     SnapshotByteSink* sink) | 
| 625     : Serializer(isolate, sink), | 635     : Serializer(isolate, sink), | 
| (...skipping 218 matching lines...) |  | 
| 844   // Following the header, we store, in sequential order | 854   // Following the header, we store, in sequential order | 
| 845   // - code stub keys | 855   // - code stub keys | 
| 846   // - serialization payload | 856   // - serialization payload | 
| 847 | 857 | 
| 848   ScriptData* script_data_; | 858   ScriptData* script_data_; | 
| 849   bool owns_script_data_; | 859   bool owns_script_data_; | 
| 850 }; | 860 }; | 
| 851 } }  // namespace v8::internal | 861 } }  // namespace v8::internal | 
| 852 | 862 | 
| 853 #endif  // V8_SERIALIZE_H_ | 863 #endif  // V8_SERIALIZE_H_ | 
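
A few reviewer-style notes with illustrative sketches follow; all code below is editorial commentary, not part of the patch.

The comment now sitting above `pending_chunk_` (new lines 614-620) describes the chunking scheme: objects bound for the same space are packed into chunks so the deserializer can reserve and bulk-allocate them, and a chunk is completed as soon as the next object would push it past a page. Below is a minimal, self-contained sketch of that bookkeeping; the `ChunkTracker` name and the simplified types are invented for illustration and are not the real serializer state.

```cpp
#include <cstdint>
#include <utility>
#include <vector>

// Standalone sketch of the chunk bookkeeping described above pending_chunk_
// (new lines 614-620). Not the actual V8 implementation.
class ChunkTracker {
 public:
  explicit ChunkTracker(uint32_t max_chunk_size)
      : max_chunk_size_(max_chunk_size), pending_chunk_(0) {}

  // Reserve |size| bytes for one object (assumed <= max_chunk_size_). If the
  // current chunk would grow past a page, complete it and start a new one.
  // Returns {chunk index, offset within that chunk}, the two pieces a
  // back-reference needs to encode.
  std::pair<size_t, uint32_t> Allocate(uint32_t size) {
    if (pending_chunk_ + size > max_chunk_size_) {
      completed_chunks_.push_back(pending_chunk_);
      pending_chunk_ = 0;
    }
    uint32_t offset = pending_chunk_;
    pending_chunk_ += size;
    return {completed_chunks_.size(), offset};
  }

  // Flush the last, partially filled chunk; afterwards every reservation the
  // deserializer needs is in completed_chunks_ (cf. FinalizeAllocation() and
  // the DCHECK_EQ(0, pending_chunk_[space]) in FinalAllocationChunks()).
  void Finalize() {
    if (pending_chunk_ > 0) {
      completed_chunks_.push_back(pending_chunk_);
      pending_chunk_ = 0;
    }
  }

  const std::vector<uint32_t>& completed_chunks() const {
    return completed_chunks_;
  }

 private:
  const uint32_t max_chunk_size_;
  uint32_t pending_chunk_;
  std::vector<uint32_t> completed_chunks_;
};
```

Large-object space is the exception: `FinalAllocationChunks` (new lines 498-505) returns a single pseudo-chunk holding `large_objects_total_size_` rather than a per-object list, since large objects are not packed into shared pages, and the `DCHECK_EQ(0, pending_chunk_[space])` there implies `FinalizeAllocation()` must run before the chunks are read.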
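
The new `OnOOM` argument to `DeserializePartial` (new lines 399-402) gives callers a graceful failure mode when the heap cannot satisfy the reservations. A hypothetical call site is sketched below; the names `deserializer` and `isolate`, the fallback branch, and the assumption that `*root` is left NULL on failure come from the enum name and comment, not from anything spelled out in this header.

```cpp
// Hypothetical usage sketch only; the NULL-on-failure contract is assumed
// from the NULL_ON_OOM enum name rather than documented in this header.
Object* root = NULL;
deserializer.DeserializePartial(isolate, &root, Deserializer::NULL_ON_OOM);
if (root == NULL) {
  // Reserving heap space for the snapshot failed; fall back to compiling
  // from source instead of aborting the process (the FATAL_ON_OOM behavior).
}
```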
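
Lastly, the comment moved onto `Pad()` (new lines 584-585) records why padding exists at all: the deserializer's `GetInt` reads four bytes at a time, so the stream must not end mid-word. A generic, self-contained illustration of that idea, not V8's actual `Pad()` implementation:

```cpp
#include <cstdint>
#include <vector>

// Append filler bytes until the payload length is a multiple of 4, so a
// reader that consumes 4 bytes at a time never runs off the end of the data.
void PadTo4ByteBoundary(std::vector<uint8_t>* sink, uint8_t filler) {
  while (sink->size() % sizeof(uint32_t) != 0u) {
    sink->push_back(filler);
  }
}
```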