| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_SERIALIZE_H_ | 5 #ifndef V8_SERIALIZE_H_ |
| 6 #define V8_SERIALIZE_H_ | 6 #define V8_SERIALIZE_H_ |
| 7 | 7 |
| 8 #include "src/compiler.h" | 8 #include "src/compiler.h" |
| 9 #include "src/hashmap.h" | 9 #include "src/hashmap.h" |
| 10 #include "src/heap-profiler.h" | 10 #include "src/heap-profiler.h" |
| (...skipping 134 matching lines...) |
| 145 class SerializerDeserializer: public ObjectVisitor { | 145 class SerializerDeserializer: public ObjectVisitor { |
| 146 public: | 146 public: |
| 147 static void Iterate(Isolate* isolate, ObjectVisitor* visitor); | 147 static void Iterate(Isolate* isolate, ObjectVisitor* visitor); |
| 148 | 148 |
| 149 static int nop() { return kNop; } | 149 static int nop() { return kNop; } |
| 150 | 150 |
| 151 // No reservation for large object space necessary. | 151 // No reservation for large object space necessary. |
| 152 static const int kNumberOfPreallocatedSpaces = LO_SPACE; | 152 static const int kNumberOfPreallocatedSpaces = LO_SPACE; |
| 153 static const int kNumberOfSpaces = INVALID_SPACE; | 153 static const int kNumberOfSpaces = INVALID_SPACE; |
| 154 | 154 |
| 155 // Used to encode objects as back-references (chunk index and offset). |
| 156 class OffsetBits : public BitField<uint32_t, 0, kPageSizeBits> {}; |
| 157 class ChunkIndexBits |
| 158 : public BitField<uint32_t, kPageSizeBits, 32 - kPageSizeBits> {}; |
| 159 |
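The two bit fields above pack a back-reference into one 32-bit word: the low kPageSizeBits bits hold the offset within a chunk, the remaining high bits hold the chunk index. A minimal standalone sketch of the same packing, assuming kPageSizeBits is 20 (1 MB pages); the helper names are illustrative and not part of this header:

  #include <cassert>
  #include <cstdint>

  static const int kPageSizeBits = 20;  // assumed value
  static const uint32_t kOffsetMask = (1u << kPageSizeBits) - 1;

  // Mirrors ChunkIndexBits/OffsetBits: offset in the low bits, chunk index above.
  uint32_t EncodeBackRef(uint32_t chunk_index, uint32_t offset) {
    assert(offset <= kOffsetMask);
    return (chunk_index << kPageSizeBits) | offset;
  }
  uint32_t ChunkIndexOf(uint32_t encoded) { return encoded >> kPageSizeBits; }
  uint32_t OffsetOf(uint32_t encoded) { return encoded & kOffsetMask; }

  int main() {
    uint32_t ref = EncodeBackRef(3, 0x1234);
    assert(ChunkIndexOf(ref) == 3 && OffsetOf(ref) == 0x1234);
    return 0;
  }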
| 155 protected: | 160 protected: |
| 156 // Where the pointed-to object can be found: | 161 // Where the pointed-to object can be found: |
| 157 enum Where { | 162 enum Where { |
| 158 kNewObject = 0, // Object is next in snapshot. | 163 kNewObject = 0, // Object is next in snapshot. |
| 159 // 1-7 One per space. | 164 // 1-7 One per space. |
| 160 kRootArray = 0x9, // Object is found in root array. | 165 kRootArray = 0x9, // Object is found in root array. |
| 161 kPartialSnapshotCache = 0xa, // Object is in the cache. | 166 kPartialSnapshotCache = 0xa, // Object is in the cache. |
| 162 kExternalReference = 0xb, // Pointer to an external reference. | 167 kExternalReference = 0xb, // Pointer to an external reference. |
| 163 kSkip = 0xc, // Skip n bytes. | 168 kSkip = 0xc, // Skip n bytes. |
| 164 kBuiltin = 0xd, // Builtin code object. | 169 kBuiltin = 0xd, // Builtin code object. |
| (...skipping 76 matching lines...) |
| 241 class Deserializer: public SerializerDeserializer { | 246 class Deserializer: public SerializerDeserializer { |
| 242 public: | 247 public: |
| 243 // Create a deserializer from a snapshot byte source. | 248 // Create a deserializer from a snapshot byte source. |
| 244 explicit Deserializer(SnapshotByteSource* source); | 249 explicit Deserializer(SnapshotByteSource* source); |
| 245 | 250 |
| 246 virtual ~Deserializer(); | 251 virtual ~Deserializer(); |
| 247 | 252 |
| 248 // Deserialize the snapshot into an empty heap. | 253 // Deserialize the snapshot into an empty heap. |
| 249 void Deserialize(Isolate* isolate); | 254 void Deserialize(Isolate* isolate); |
| 250 | 255 |
| 256 enum OnOOM { FATAL_ON_OOM, NULL_ON_OOM }; |
| 257 |
| 251 // Deserialize a single object and the objects reachable from it. | 258 // Deserialize a single object and the objects reachable from it. |
| 252 void DeserializePartial(Isolate* isolate, Object** root); | 259 // If deserialization fails, we may want to abort gracefully instead of crashing. |
| 260 void DeserializePartial(Isolate* isolate, Object** root, |
| 261 OnOOM on_oom = FATAL_ON_OOM); |
| 253 | 262 |
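A hypothetical call site for the new on_oom parameter; only the DeserializePartial signature comes from this header, the setup of the byte source and the reservations is assumed to happen elsewhere. With NULL_ON_OOM the caller checks the root instead of relying on a fatal error (assumed failure signaling):

  // Sketch only: 'data', 'length' and 'isolate' are assumed to exist, and
  // AddReservation() is assumed to have been called for every space already.
  SnapshotByteSource source(data, length);
  Deserializer deserializer(&source);
  Object* root = NULL;
  deserializer.DeserializePartial(isolate, &root, Deserializer::NULL_ON_OOM);
  if (root == NULL) {
    // Reserving or allocating space failed; recover instead of crashing.
  }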
| 254 void set_reservation(int space_number, int reservation) { | 263 void AddReservation(int space, uint32_t chunk) { |
| 255 DCHECK(space_number >= 0); | 264 DCHECK(space >= 0); |
| 256 DCHECK(space_number < kNumberOfSpaces); | 265 DCHECK(space < kNumberOfSpaces); |
| 257 reservations_[space_number] = reservation; | 266 DCHECK(space == LO_SPACE || chunk < Page::kMaxRegularHeapObjectSize); |
| 267 reservations_[space].Add({chunk, NULL, NULL}); |
| 258 } | 268 } |
| 259 | 269 |
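Before deserializing, each space's chunk sizes have to be replayed into the deserializer; a sketch under the assumption that chunk_sizes_for_space() is a made-up helper returning the sizes recorded in the snapshot for one space:

  // Illustrative: chunk_sizes_for_space() stands in for however the embedding
  // code stores the per-space chunk sizes recorded by the serializer.
  for (int space = 0; space < SerializerDeserializer::kNumberOfSpaces; space++) {
    Vector<const uint32_t> sizes = chunk_sizes_for_space(space);
    for (int i = 0; i < sizes.length(); i++) {
      deserializer.AddReservation(space, sizes[i]);
    }
  }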
| 260 void FlushICacheForNewCodeObjects(); | 270 void FlushICacheForNewCodeObjects(); |
| 261 | 271 |
| 262 // Serialized user code references certain objects that are provided in a list. | 272 // Serialized user code references certain objects that are provided in a list. |
| 263 // By calling this method, we assume that we are deserializing user code. | 273 // By calling this method, we assume that we are deserializing user code. |
| 264 void SetAttachedObjects(Vector<Handle<Object> >* attached_objects) { | 274 void SetAttachedObjects(Vector<Handle<Object> >* attached_objects) { |
| 265 attached_objects_ = attached_objects; | 275 attached_objects_ = attached_objects; |
| 266 } | 276 } |
| 267 | 277 |
| 268 bool deserializing_user_code() { return attached_objects_ != NULL; } | 278 bool deserializing_user_code() { return attached_objects_ != NULL; } |
| 269 | 279 |
| 270 private: | 280 private: |
| 271 virtual void VisitPointers(Object** start, Object** end); | 281 virtual void VisitPointers(Object** start, Object** end); |
| 272 | 282 |
| 273 virtual void VisitRuntimeEntry(RelocInfo* rinfo) { | 283 virtual void VisitRuntimeEntry(RelocInfo* rinfo) { |
| 274 UNREACHABLE(); | 284 UNREACHABLE(); |
| 275 } | 285 } |
| 276 | 286 |
| 287 bool ReserveSpace(); |
| 288 |
| 277 // Allocation sites are present in the snapshot, and must be linked into | 289 // Allocation sites are present in the snapshot, and must be linked into |
| 278 // a list at deserialization time. | 290 // a list at deserialization time. |
| 279 void RelinkAllocationSite(AllocationSite* site); | 291 void RelinkAllocationSite(AllocationSite* site); |
| 280 | 292 |
| 281 // Fills in some heap data in an area from start to end (non-inclusive). The | 293 // Fills in some heap data in an area from start to end (non-inclusive). The |
| 282 // space id is used for the write barrier. The object_address is the address | 294 // space id is used for the write barrier. The object_address is the address |
| 283 // of the object we are writing into, or NULL if we are not writing into an | 295 // of the object we are writing into, or NULL if we are not writing into an |
| 284 // object, i.e. if we are writing a series of tagged values that are not on | 296 // object, i.e. if we are writing a series of tagged values that are not on |
| 285 // the heap. | 297 // the heap. |
| 286 void ReadChunk( | 298 void ReadData(Object** start, Object** end, int space, |
| 287 Object** start, Object** end, int space, Address object_address); | 299 Address object_address); |
| 288 void ReadObject(int space_number, Object** write_back); | 300 void ReadObject(int space_number, Object** write_back); |
| 289 Address Allocate(int space_index, int size); | 301 Address Allocate(int space_index, int size); |
| 290 | 302 |
| 291 // Special handling for serialized code like hooking up internalized strings. | 303 // Special handling for serialized code like hooking up internalized strings. |
| 292 HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj); | 304 HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj); |
| 293 Object* ProcessBackRefInSerializedCode(Object* obj); | 305 Object* ProcessBackRefInSerializedCode(Object* obj); |
| 294 | 306 |
| 295 // This returns the address of an object that has been described in the | 307 // This returns the address of an object that has been described in the |
| 296 // snapshot as being offset bytes back in a particular space. | 308 // snapshot by chunk index and offset. |
| 297 HeapObject* GetAddressFromEnd(int space) { | 309 HeapObject* GetBackReferencedObject(int space) { |
| 298 int offset = source_->GetInt(); | 310 if (space == LO_SPACE) { |
| 299 if (space == LO_SPACE) return deserialized_large_objects_[offset]; | 311 uint32_t index = source_->GetInt(); |
| 300 DCHECK(space < kNumberOfPreallocatedSpaces); | 312 return deserialized_large_objects_[index]; |
| 301 offset <<= kObjectAlignmentBits; | 313 } else { |
| 302 return HeapObject::FromAddress(high_water_[space] - offset); | 314 uint32_t allocation = source_->GetInt() << kObjectAlignmentBits; |
| 315 DCHECK(space < kNumberOfPreallocatedSpaces); |
| 316 uint32_t chunk_index = ChunkIndexBits::decode(allocation); |
| 317 uint32_t offset = OffsetBits::decode(allocation); |
| 318 DCHECK_LE(chunk_index, current_chunk_[space]); |
| 319 return HeapObject::FromAddress(reservations_[space][chunk_index].start + |
| 320 offset); |
| 321 } |
| 303 } | 322 } |
| 304 | 323 |
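A worked decode of the arithmetic above, using the same assumed kPageSizeBits of 20: once the stream value has been shifted left by kObjectAlignmentBits, a word of 0x00301234 splits into chunk index 3 and offset 0x1234, so the object starts at reservations_[space][3].start + 0x1234. The split itself, as a standalone check:

  #include <cassert>
  #include <cstdint>

  int main() {
    const int kPageSizeBits = 20;            // assumed value
    const uint32_t allocation = 0x00301234;  // example packed back-reference
    uint32_t chunk_index = allocation >> kPageSizeBits;          // 3
    uint32_t offset = allocation & ((1u << kPageSizeBits) - 1);  // 0x1234
    assert(chunk_index == 3 && offset == 0x1234);
    return 0;
  }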
| 305 // Cached current isolate. | 324 // Cached current isolate. |
| 306 Isolate* isolate_; | 325 Isolate* isolate_; |
| 307 | 326 |
| 308 // Objects from the attached object descriptions in the serialized user code. | 327 // Objects from the attached object descriptions in the serialized user code. |
| 309 Vector<Handle<Object> >* attached_objects_; | 328 Vector<Handle<Object> >* attached_objects_; |
| 310 | 329 |
| 311 SnapshotByteSource* source_; | 330 SnapshotByteSource* source_; |
| 312 // This is the address of the next object that will be allocated in each | 331 // The address of the next object that will be allocated in each space. |
| 313 // space. It is used to calculate the addresses of back-references. | 332 // Each space has a number of chunks reserved by the GC, with each chunk |
| 333 // fitting into a page. Deserialized objects are allocated into the |
| 334 // current chunk of the target space by bumping up the high water mark. |
| 335 Heap::Reservation reservations_[kNumberOfSpaces]; |
| 336 uint32_t current_chunk_[kNumberOfPreallocatedSpaces]; |
| 314 Address high_water_[kNumberOfPreallocatedSpaces]; | 337 Address high_water_[kNumberOfPreallocatedSpaces]; |
| 315 | 338 |
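The comment above describes per-chunk bump allocation: objects go into the current chunk of their space until it is exhausted, then allocation moves on to the next reserved chunk. A simplified, self-contained model of that policy (not the actual Deserializer::Allocate, whose reservation type comes from the heap):

  // Simplified chunked bump allocation (illustrative).
  struct Chunk { char* start; char* end; };

  char* BumpAllocate(Chunk* chunks, int num_chunks, int* current_chunk,
                     char** high_water, int size) {
    if (*high_water + size > chunks[*current_chunk].end) {
      // Current chunk exhausted: advance to the next reserved chunk.
      if (++*current_chunk >= num_chunks) return 0;  // out of reserved space
      *high_water = chunks[*current_chunk].start;
    }
    char* result = *high_water;
    *high_water += size;
    return result;
  }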
| 316 int reservations_[kNumberOfSpaces]; | |
| 317 static const intptr_t kUninitializedReservation = -1; | |
| 318 | |
| 319 ExternalReferenceDecoder* external_reference_decoder_; | 339 ExternalReferenceDecoder* external_reference_decoder_; |
| 320 | 340 |
| 321 List<HeapObject*> deserialized_large_objects_; | 341 List<HeapObject*> deserialized_large_objects_; |
| 322 | 342 |
| 323 DISALLOW_COPY_AND_ASSIGN(Deserializer); | 343 DISALLOW_COPY_AND_ASSIGN(Deserializer); |
| 324 }; | 344 }; |
| 325 | 345 |
| 326 | 346 |
| 327 // Mapping objects to their location after deserialization. | 347 // Mapping objects to their location after deserialization. |
| 328 // This is used during building, but not at runtime by V8. | 348 // This is used during building, but not at runtime by V8. |
| (...skipping 44 matching lines...) |
| 373 | 393 |
| 374 | 394 |
| 375 class CodeAddressMap; | 395 class CodeAddressMap; |
| 376 | 396 |
| 377 // There can be only one serializer per V8 process. | 397 // There can be only one serializer per V8 process. |
| 378 class Serializer : public SerializerDeserializer { | 398 class Serializer : public SerializerDeserializer { |
| 379 public: | 399 public: |
| 380 Serializer(Isolate* isolate, SnapshotByteSink* sink); | 400 Serializer(Isolate* isolate, SnapshotByteSink* sink); |
| 381 ~Serializer(); | 401 ~Serializer(); |
| 382 void VisitPointers(Object** start, Object** end); | 402 void VisitPointers(Object** start, Object** end); |
| 383 // You can call this after serialization to find out how much space was used | 403 |
| 384 // in each space. | 404 void FinalizeAllocation(); |
| 385 int CurrentAllocationAddress(int space) const { | 405 |
| 386 DCHECK(space < kNumberOfSpaces); | 406 Vector<const uint32_t> FinalAllocationChunks(int space) const { |
| 387 return fullness_[space]; | 407 DCHECK_EQ(1, completed_chunks_[LO_SPACE].length()); // Already finalized. |
| 408 DCHECK_EQ(0, pending_chunk_[space]); // No pending chunks. |
| 409 return completed_chunks_[space].ToConstVector(); |
| 388 } | 410 } |
| 389 | 411 |
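A hypothetical serializer-side sequence showing where the two new members fit when a snapshot is written out; FinalizeAllocation() and FinalAllocationChunks() come from this header, WriteUint32ToSnapshot() is a made-up writer:

  // Illustrative: 'ser' has already visited all roots and emitted its payload.
  ser.FinalizeAllocation();  // close the pending chunk of every space
  for (int space = 0; space < SerializerDeserializer::kNumberOfSpaces; space++) {
    Vector<const uint32_t> chunks = ser.FinalAllocationChunks(space);
    for (int i = 0; i < chunks.length(); i++) {
      WriteUint32ToSnapshot(chunks[i]);  // assumed helper
    }
  }

On the other end, these are the values a Deserializer expects to receive via AddReservation().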
| 390 Isolate* isolate() const { return isolate_; } | 412 Isolate* isolate() const { return isolate_; } |
| 391 | 413 |
| 392 SerializationAddressMapper* address_mapper() { return &address_mapper_; } | 414 SerializationAddressMapper* address_mapper() { return &address_mapper_; } |
| 393 void PutRoot(int index, | 415 void PutRoot(int index, |
| 394 HeapObject* object, | 416 HeapObject* object, |
| 395 HowToCode how, | 417 HowToCode how, |
| 396 WhereToPoint where, | 418 WhereToPoint where, |
| 397 int skip); | 419 int skip); |
| (...skipping 65 matching lines...) |
| 463 HowToCode how_to_code, | 485 HowToCode how_to_code, |
| 464 WhereToPoint where_to_point, | 486 WhereToPoint where_to_point, |
| 465 int skip) = 0; | 487 int skip) = 0; |
| 466 void SerializeReferenceToPreviousObject(HeapObject* heap_object, | 488 void SerializeReferenceToPreviousObject(HeapObject* heap_object, |
| 467 HowToCode how_to_code, | 489 HowToCode how_to_code, |
| 468 WhereToPoint where_to_point, | 490 WhereToPoint where_to_point, |
| 469 int skip); | 491 int skip); |
| 470 void InitializeAllocators(); | 492 void InitializeAllocators(); |
| 471 // This will return the space for an object. | 493 // This will return the space for an object. |
| 472 static int SpaceOfObject(HeapObject* object); | 494 static int SpaceOfObject(HeapObject* object); |
| 473 int AllocateLargeObject(int size); | 495 uint32_t AllocateLargeObject(int size); |
| 474 int Allocate(int space, int size); | 496 uint32_t Allocate(int space, int size); |
| 475 int EncodeExternalReference(Address addr) { | 497 int EncodeExternalReference(Address addr) { |
| 476 return external_reference_encoder_->Encode(addr); | 498 return external_reference_encoder_->Encode(addr); |
| 477 } | 499 } |
| 478 | 500 |
| 479 int SpaceAreaSize(int space); | 501 int SpaceAreaSize(int space); |
| 480 | 502 |
| 481 // Some roots should not be serialized, because their actual value depends on | 503 // Some roots should not be serialized, because their actual value depends on |
| 482 // absolute addresses and they are reset after deserialization, anyway. | 504 // absolute addresses and they are reset after deserialization, anyway. |
| 483 bool ShouldBeSkipped(Object** current); | 505 bool ShouldBeSkipped(Object** current); |
| 484 | 506 |
| 485 Isolate* isolate_; | 507 Isolate* isolate_; |
| 486 // Keep track of the fullness of each space in order to generate | 508 |
| 487 // relative addresses for back references. | 509 // Objects from the same space are put into chunks for bulk-allocation |
| 488 int fullness_[kNumberOfSpaces]; | 510 // when deserializing. We have to make sure that each chunk fits into a |
| 511 // page. We track the current chunk size of each space in pending_chunk_; |
| 512 // when it would exceed a page, we complete the current chunk and start a new one. |
| 513 uint32_t pending_chunk_[kNumberOfSpaces]; |
| 514 List<uint32_t> completed_chunks_[kNumberOfSpaces]; |
| 515 |
| 489 SnapshotByteSink* sink_; | 516 SnapshotByteSink* sink_; |
| 490 ExternalReferenceEncoder* external_reference_encoder_; | 517 ExternalReferenceEncoder* external_reference_encoder_; |
| 491 | 518 |
| 492 SerializationAddressMapper address_mapper_; | 519 SerializationAddressMapper address_mapper_; |
| 493 intptr_t root_index_wave_front_; | 520 intptr_t root_index_wave_front_; |
| 494 void Pad(); | 521 void Pad(); |
| 495 | 522 |
| 496 friend class ObjectSerializer; | 523 friend class ObjectSerializer; |
| 497 friend class Deserializer; | 524 friend class Deserializer; |
| 498 | 525 |
| 499 // We may not need the code address map for logging for every instance | 526 // We may not need the code address map for logging for every instance |
| 500 // of the serializer. Initialize it on demand. | 527 // of the serializer. Initialize it on demand. |
| 501 void InitializeCodeAddressMap(); | 528 void InitializeCodeAddressMap(); |
| 502 | 529 |
| 503 private: | 530 private: |
| 504 CodeAddressMap* code_address_map_; | 531 CodeAddressMap* code_address_map_; |
| 505 // We map serialized large objects to indexes for back-referencing. | 532 // We map serialized large objects to indexes for back-referencing. |
| 506 int seen_large_objects_index_; | 533 uint32_t seen_large_objects_index_; |
| 507 DISALLOW_COPY_AND_ASSIGN(Serializer); | 534 DISALLOW_COPY_AND_ASSIGN(Serializer); |
| 508 }; | 535 }; |
| 509 | 536 |
| 510 | 537 |
| 511 class PartialSerializer : public Serializer { | 538 class PartialSerializer : public Serializer { |
| 512 public: | 539 public: |
| 513 PartialSerializer(Isolate* isolate, | 540 PartialSerializer(Isolate* isolate, |
| 514 Serializer* startup_snapshot_serializer, | 541 Serializer* startup_snapshot_serializer, |
| 515 SnapshotByteSink* sink) | 542 SnapshotByteSink* sink) |
| 516 : Serializer(isolate, sink), | 543 : Serializer(isolate, sink), |
| (...skipping 61 matching lines...) |
| 578 DISALLOW_COPY_AND_ASSIGN(StartupSerializer); | 605 DISALLOW_COPY_AND_ASSIGN(StartupSerializer); |
| 579 }; | 606 }; |
| 580 | 607 |
| 581 | 608 |
| 582 class CodeSerializer : public Serializer { | 609 class CodeSerializer : public Serializer { |
| 583 public: | 610 public: |
| 584 static ScriptData* Serialize(Isolate* isolate, | 611 static ScriptData* Serialize(Isolate* isolate, |
| 585 Handle<SharedFunctionInfo> info, | 612 Handle<SharedFunctionInfo> info, |
| 586 Handle<String> source); | 613 Handle<String> source); |
| 587 | 614 |
| 588 static Handle<SharedFunctionInfo> Deserialize(Isolate* isolate, | 615 MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize( |
| 589 ScriptData* data, | 616 Isolate* isolate, ScriptData* data, Handle<String> source); |
| 590 Handle<String> source); | |
| 591 | 617 |
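Since Deserialize() now returns a MaybeHandle, failure has to be checked explicitly at the call site; a hypothetical caller (only the Deserialize signature comes from this header, the fallback is assumed):

  Handle<SharedFunctionInfo> info;
  if (!CodeSerializer::Deserialize(isolate, cached_data, source)
           .ToHandle(&info)) {
    // Deserialization failed (for example, reservations could not be made);
    // fall back to compiling 'source' from scratch instead of crashing.
  }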
| 592 static const int kSourceObjectIndex = 0; | 618 static const int kSourceObjectIndex = 0; |
| 593 static const int kCodeStubsBaseIndex = 1; | 619 static const int kCodeStubsBaseIndex = 1; |
| 594 | 620 |
| 595 String* source() { | 621 String* source() { |
| 596 DCHECK(!AllowHeapAllocation::IsAllowed()); | 622 DCHECK(!AllowHeapAllocation::IsAllowed()); |
| 597 return source_; | 623 return source_; |
| 598 } | 624 } |
| 599 | 625 |
| 600 List<uint32_t>* stub_keys() { return &stub_keys_; } | 626 List<uint32_t>* stub_keys() { return &stub_keys_; } |
| (...skipping 46 matching lines...) |
| 647 | 673 |
| 648 // Return ScriptData object and relinquish ownership over it to the caller. | 674 // Return ScriptData object and relinquish ownership over it to the caller. |
| 649 ScriptData* GetScriptData() { | 675 ScriptData* GetScriptData() { |
| 650 ScriptData* result = script_data_; | 676 ScriptData* result = script_data_; |
| 651 script_data_ = NULL; | 677 script_data_ = NULL; |
| 652 DCHECK(owns_script_data_); | 678 DCHECK(owns_script_data_); |
| 653 owns_script_data_ = false; | 679 owns_script_data_ = false; |
| 654 return result; | 680 return result; |
| 655 } | 681 } |
| 656 | 682 |
| 683 class Reservation { |
| 684 public: |
| 685 uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation); } |
| 686 bool is_last_chunk() const { return IsLastChunkBits::decode(reservation); } |
| 687 |
| 688 private: |
| 689 uint32_t reservation; |
| 690 |
| 691 DISALLOW_COPY_AND_ASSIGN(Reservation); |
| 692 }; |
| 693 |
| 694 Vector<const Reservation> Reservations() const { |
| 695 return Vector<const Reservation>(reinterpret_cast<const Reservation*>( |
| 696 script_data_->data() + kHeaderSize), |
| 697 GetHeaderValue(kReservationsOffset)); |
| 698 } |
| 699 |
| 657 Vector<const uint32_t> CodeStubKeys() const { | 700 Vector<const uint32_t> CodeStubKeys() const { |
| 658 return Vector<const uint32_t>( | 701 int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size; |
| 659 reinterpret_cast<const uint32_t*>(script_data_->data() + kHeaderSize), | 702 const byte* start = script_data_->data() + kHeaderSize + reservations_size; |
| 660 GetHeaderValue(kNumCodeStubKeysOffset)); | 703 return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start), |
| 704 GetHeaderValue(kNumCodeStubKeysOffset)); |
| 661 } | 705 } |
| 662 | 706 |
| 663 const byte* Payload() const { | 707 const byte* Payload() const { |
| 708 int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size; |
| 664 int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size; | 709 int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size; |
| 665 return script_data_->data() + kHeaderSize + code_stubs_size; | 710 return script_data_->data() + kHeaderSize + reservations_size + |
| 711 code_stubs_size; |
| 666 } | 712 } |
| 667 | 713 |
| 668 int PayloadLength() const { | 714 int PayloadLength() const { |
| 669 int payload_length = GetHeaderValue(kPayloadLengthOffset); | 715 int payload_length = GetHeaderValue(kPayloadLengthOffset); |
| 670 DCHECK_EQ(script_data_->data() + script_data_->length(), | 716 DCHECK_EQ(script_data_->data() + script_data_->length(), |
| 671 Payload() + payload_length); | 717 Payload() + payload_length); |
| 672 return payload_length; | 718 return payload_length; |
| 673 } | 719 } |
| 674 | 720 |
| 675 int GetReservation(int space) const { | |
| 676 return GetHeaderValue(kReservationsOffset + space); | |
| 677 } | |
| 678 | |
| 679 private: | 721 private: |
| 680 void SetHeaderValue(int offset, int value) { | 722 void SetHeaderValue(int offset, int value) { |
| 681 reinterpret_cast<int*>(const_cast<byte*>(script_data_->data()))[offset] = | 723 reinterpret_cast<int*>(const_cast<byte*>(script_data_->data()))[offset] = |
| 682 value; | 724 value; |
| 683 } | 725 } |
| 684 | 726 |
| 685 int GetHeaderValue(int offset) const { | 727 int GetHeaderValue(int offset) const { |
| 686 return reinterpret_cast<const int*>(script_data_->data())[offset]; | 728 return reinterpret_cast<const int*>(script_data_->data())[offset]; |
| 687 } | 729 } |
| 688 | 730 |
| 689 bool IsSane(String* source); | 731 bool IsSane(String* source); |
| 690 | 732 |
| 691 int CheckSum(String* source); | 733 int CheckSum(String* source); |
| 692 | 734 |
| 693 // The data header consists of int-sized entries: | 735 // The data header consists of int-sized entries: |
| 694 // [0] version hash | 736 // [0] version hash |
| 695 // [1] number of code stub keys | 737 // [1] number of reservation size entries |
| 696 // [2] payload length | 738 // [2] number of code stub keys |
| 697 // [3..9] reservation sizes for spaces from NEW_SPACE to PROPERTY_CELL_SPACE. | 739 // [3] payload length |
| 698 static const int kCheckSumOffset = 0; | 740 static const int kCheckSumOffset = 0; |
| 699 static const int kNumCodeStubKeysOffset = 1; | 741 static const int kReservationsOffset = 1; |
| 700 static const int kPayloadLengthOffset = 2; | 742 static const int kNumCodeStubKeysOffset = 2; |
| 701 static const int kReservationsOffset = 3; | 743 static const int kPayloadLengthOffset = 3; |
| 744 static const int kHeaderSize = (kPayloadLengthOffset + 1) * kIntSize; |
| 702 | 745 |
| 703 static const int kHeaderEntries = | 746 class ChunkSizeBits : public BitField<uint32_t, 0, 31> {}; |
| 704 kReservationsOffset + SerializerDeserializer::kNumberOfSpaces; | 747 class IsLastChunkBits : public BitField<bool, 31, 1> {}; |
| 705 static const int kHeaderSize = kHeaderEntries * kIntSize; | |
| 706 | 748 |
| 707 // Following the header, we store, in sequential order | 749 // Following the header, we store, in sequential order |
| 708 // - code stub keys | 750 // - reservation sizes, then code stub keys |
| 709 // - serialization payload | 751 // - serialization payload |
| 710 | 752 |
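The resulting byte layout is: a four-entry header, then the reservation words, then the code stub keys, then the payload, which is exactly the arithmetic CodeStubKeys() and Payload() perform. A standalone check with made-up counts:

  #include <cassert>

  int main() {
    const int kIntSize = 4;                // assumed 32-bit header entries
    const int kHeaderSize = 4 * kIntSize;  // checksum, reservations, stub keys, length
    int num_reservations = 9;              // example value
    int num_stub_keys = 5;                 // example value
    int code_stub_offset = kHeaderSize + num_reservations * kIntSize;
    int payload_offset = code_stub_offset + num_stub_keys * kIntSize;
    assert(code_stub_offset == 52);
    assert(payload_offset == 72);
    return 0;
  }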
| 711 ScriptData* script_data_; | 753 ScriptData* script_data_; |
| 712 bool owns_script_data_; | 754 bool owns_script_data_; |
| 713 }; | 755 }; |
| 714 } } // namespace v8::internal | 756 } } // namespace v8::internal |
| 715 | 757 |
| 716 #endif // V8_SERIALIZE_H_ | 758 #endif // V8_SERIALIZE_H_ |