Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_SERIALIZE_H_ | 5 #ifndef V8_SERIALIZE_H_ |
| 6 #define V8_SERIALIZE_H_ | 6 #define V8_SERIALIZE_H_ |
| 7 | 7 |
| 8 #include "src/compiler.h" | 8 #include "src/compiler.h" |
| 9 #include "src/hashmap.h" | 9 #include "src/hashmap.h" |
| 10 #include "src/heap-profiler.h" | 10 #include "src/heap-profiler.h" |
| (...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 132 } | 132 } |
| 133 | 133 |
| 134 void Put(uint32_t key, Address value) { | 134 void Put(uint32_t key, Address value) { |
| 135 *Lookup(key) = value; | 135 *Lookup(key) = value; |
| 136 } | 136 } |
| 137 | 137 |
| 138 Isolate* isolate_; | 138 Isolate* isolate_; |
| 139 }; | 139 }; |
| 140 | 140 |
| 141 | 141 |
| 142 class AddressMapBase { | |
| 143 protected: | |
| 144 static void SetValue(HashMap::Entry* entry, uint32_t v) { | |
| 145 entry->value = reinterpret_cast<void*>(v); | |
| 146 } | |
| 147 | |
| 148 static uint32_t GetValue(HashMap::Entry* entry) { | |
| 149 return reinterpret_cast<uint32_t>(entry->value); | |
| 150 } | |
| 151 | |
| 152 static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj, | |
| 153 bool insert) { | |
| 154 return map->Lookup(Key(obj), Hash(obj), insert); | |
| 155 } | |
| 156 | |
| 157 private: | |
| 158 static uint32_t Hash(HeapObject* obj) { | |
| 159 return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address())); | |
| 160 } | |
| 161 | |
| 162 static void* Key(HeapObject* obj) { | |
| 163 return reinterpret_cast<void*>(obj->address()); | |
| 164 } | |
| 165 }; | |
| 166 | |
| 167 | |
| 168 class RootIndexMap : public AddressMapBase { | |
| 169 public: | |
| 170 explicit RootIndexMap(Isolate* isolate); | |
| 171 | |
| 172 static const int kInvalidRootIndex = -1; | |
| 173 int Lookup(HeapObject* obj) { | |
| 174 HashMap::Entry* entry = LookupEntry(map_, obj, false); | |
| 175 if (entry) return GetValue(entry); | |
| 176 return kInvalidRootIndex; | |
| 177 } | |
| 178 | |
| 179 private: | |
| 180 HashMap* map_; | |
| 181 | |
| 182 DISALLOW_COPY_AND_ASSIGN(RootIndexMap); | |
| 183 }; | |
| 184 | |
| 185 | |
| 186 class BackReference { | |
| 187 public: | |
| 188 explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {} | |
| 189 | |
| 190 BackReference(AllocationSpace space, uint32_t chunk_index, | |
| 191 uint32_t chunk_offset) { | |
| 192 DCHECK(IsAligned(chunk_offset, kObjectAlignment)); | |
| 193 bitfield_ = SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) | | |
| 194 ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits); | |
| 195 } | |
| 196 | |
| 197 BackReference() : bitfield_(kInvalidValue) {} | |
| 198 | |
| 199 bool is_valid() const { return bitfield_ != kInvalidValue; } | |
| 200 | |
| 201 AllocationSpace space() const { | |
| 202 DCHECK(is_valid()); | |
| 203 return SpaceBits::decode(bitfield_); | |
| 204 } | |
| 205 | |
| 206 uint32_t chunk_offset() const { | |
| 207 DCHECK(is_valid()); | |
| 208 return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits; | |
| 209 } | |
| 210 | |
| 211 uint32_t chunk_index() const { | |
| 212 DCHECK(is_valid()); | |
| 213 return ChunkIndexBits::decode(bitfield_); | |
| 214 } | |
| 215 | |
| 216 uint32_t reference() const { | |
| 217 DCHECK(is_valid()); | |
| 218 return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask); | |
| 219 } | |
| 220 | |
| 221 uint32_t bitfield() const { return bitfield_; } | |
| 222 | |
| 223 private: | |
| 224 static const uint32_t kInvalidValue = 0xFFFFFFFF; | |
| 225 static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits; | |
| 226 static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize; | |
| 227 | |
| 228 public: | |
| 229 static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1; | |
| 230 | |
| 231 private: | |
| 232 class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {}; | |
| 233 class ChunkIndexBits | |
| 234 : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {}; | |
| 235 class SpaceBits | |
| 236 : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> { | |
| 237 }; | |
| 238 | |
| 239 uint32_t bitfield_; | |
| 240 }; | |
| 241 | |
| 242 | |
| 243 // Mapping objects to their location after deserialization. | |
| 244 // This is used during building, but not at runtime by V8. | |
| 245 class BackReferenceMap : public AddressMapBase { | |
| 246 public: | |
| 247 BackReferenceMap() | |
| 248 : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {} | |
| 249 | |
| 250 ~BackReferenceMap() { delete map_; } | |
| 251 | |
| 252 BackReference Lookup(HeapObject* obj) { | |
| 253 HashMap::Entry* entry = LookupEntry(map_, obj, false); | |
| 254 return entry ? BackReference(GetValue(entry)) : BackReference(); | |
| 255 } | |
| 256 | |
| 257 void Add(HeapObject* obj, BackReference b) { | |
| 258 DCHECK(b.is_valid()); | |
| 259 DCHECK_EQ(NULL, LookupEntry(map_, obj, false)); | |
| 260 HashMap::Entry* entry = LookupEntry(map_, obj, true); | |
| 261 SetValue(entry, b.bitfield()); | |
| 262 } | |
| 263 | |
| 264 private: | |
| 265 DisallowHeapAllocation no_allocation_; | |
|
mvstanton
2014/10/23 08:02:15
I like this use of DisallowHeapAllocation, neat!
| |
| 266 HashMap* map_; | |
| 267 DISALLOW_COPY_AND_ASSIGN(BackReferenceMap); | |
| 268 }; | |
| 269 | |
| 270 | |
| 142 // The Serializer/Deserializer class is a common superclass for Serializer and | 271 // The Serializer/Deserializer class is a common superclass for Serializer and |
| 143 // Deserializer which is used to store common constants and methods used by | 272 // Deserializer which is used to store common constants and methods used by |
| 144 // both. | 273 // both. |
| 145 class SerializerDeserializer: public ObjectVisitor { | 274 class SerializerDeserializer: public ObjectVisitor { |
| 146 public: | 275 public: |
| 147 static void Iterate(Isolate* isolate, ObjectVisitor* visitor); | 276 static void Iterate(Isolate* isolate, ObjectVisitor* visitor); |
| 148 | 277 |
| 149 static int nop() { return kNop; } | 278 static int nop() { return kNop; } |
| 150 | 279 |
| 151 // No reservation for large object space necessary. | 280 // No reservation for large object space necessary. |
| 152 static const int kNumberOfPreallocatedSpaces = LO_SPACE; | 281 static const int kNumberOfPreallocatedSpaces = LO_SPACE; |
| 153 static const int kNumberOfSpaces = INVALID_SPACE; | 282 static const int kNumberOfSpaces = INVALID_SPACE; |
| 154 | 283 |
| 155 // To encode object for back-references. | |
| 156 class OffsetBits : public BitField<uint32_t, 0, kPageSizeBits> {}; | |
| 157 class ChunkIndexBits | |
| 158 : public BitField<uint32_t, kPageSizeBits, 32 - kPageSizeBits> {}; | |
| 159 | |
| 160 protected: | 284 protected: |
| 161 // Where the pointed-to object can be found: | 285 // Where the pointed-to object can be found: |
| 162 enum Where { | 286 enum Where { |
| 163 kNewObject = 0, // Object is next in snapshot. | 287 kNewObject = 0, // Object is next in snapshot. |
| 164 // 1-7 One per space. | 288 // 1-7 One per space. |
| 165 kRootArray = 0x9, // Object is found in root array. | 289 kRootArray = 0x9, // Object is found in root array. |
| 166 kPartialSnapshotCache = 0xa, // Object is in the cache. | 290 kPartialSnapshotCache = 0xa, // Object is in the cache. |
| 167 kExternalReference = 0xb, // Pointer to an external reference. | 291 kExternalReference = 0xb, // Pointer to an external reference. |
| 168 kSkip = 0xc, // Skip n bytes. | 292 kSkip = 0xc, // Skip n bytes. |
| 169 kBuiltin = 0xd, // Builtin code object. | 293 kBuiltin = 0xd, // Builtin code object. |
| (...skipping 135 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 305 HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj); | 429 HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj); |
| 306 Object* ProcessBackRefInSerializedCode(Object* obj); | 430 Object* ProcessBackRefInSerializedCode(Object* obj); |
| 307 | 431 |
| 308 // This returns the address of an object that has been described in the | 432 // This returns the address of an object that has been described in the |
| 309 // snapshot by chunk index and offset. | 433 // snapshot by chunk index and offset. |
| 310 HeapObject* GetBackReferencedObject(int space) { | 434 HeapObject* GetBackReferencedObject(int space) { |
| 311 if (space == LO_SPACE) { | 435 if (space == LO_SPACE) { |
| 312 uint32_t index = source_->GetInt(); | 436 uint32_t index = source_->GetInt(); |
| 313 return deserialized_large_objects_[index]; | 437 return deserialized_large_objects_[index]; |
| 314 } else { | 438 } else { |
| 315 uint32_t allocation = source_->GetInt() << kObjectAlignmentBits; | 439 BackReference back_reference(source_->GetInt()); |
| 316 DCHECK(space < kNumberOfPreallocatedSpaces); | 440 DCHECK(space < kNumberOfPreallocatedSpaces); |
| 317 uint32_t chunk_index = ChunkIndexBits::decode(allocation); | 441 uint32_t chunk_index = back_reference.chunk_index(); |
| 318 uint32_t offset = OffsetBits::decode(allocation); | |
| 319 DCHECK_LE(chunk_index, current_chunk_[space]); | 442 DCHECK_LE(chunk_index, current_chunk_[space]); |
| 443 uint32_t chunk_offset = back_reference.chunk_offset(); | |
| 320 return HeapObject::FromAddress(reservations_[space][chunk_index].start + | 444 return HeapObject::FromAddress(reservations_[space][chunk_index].start + |
| 321 offset); | 445 chunk_offset); |
| 322 } | 446 } |
| 323 } | 447 } |
| 324 | 448 |
| 325 // Cached current isolate. | 449 // Cached current isolate. |
| 326 Isolate* isolate_; | 450 Isolate* isolate_; |
| 327 | 451 |
| 328 // Objects from the attached object descriptions in the serialized user code. | 452 // Objects from the attached object descriptions in the serialized user code. |
| 329 Vector<Handle<Object> >* attached_objects_; | 453 Vector<Handle<Object> >* attached_objects_; |
| 330 | 454 |
| 331 SnapshotByteSource* source_; | 455 SnapshotByteSource* source_; |
| 332 // The address of the next object that will be allocated in each space. | 456 // The address of the next object that will be allocated in each space. |
| 333 // Each space has a number of chunks reserved by the GC, with each chunk | 457 // Each space has a number of chunks reserved by the GC, with each chunk |
| 334 // fitting into a page. Deserialized objects are allocated into the | 458 // fitting into a page. Deserialized objects are allocated into the |
| 335 // current chunk of the target space by bumping up high water mark. | 459 // current chunk of the target space by bumping up high water mark. |
| 336 Heap::Reservation reservations_[kNumberOfSpaces]; | 460 Heap::Reservation reservations_[kNumberOfSpaces]; |
| 337 uint32_t current_chunk_[kNumberOfPreallocatedSpaces]; | 461 uint32_t current_chunk_[kNumberOfPreallocatedSpaces]; |
| 338 Address high_water_[kNumberOfPreallocatedSpaces]; | 462 Address high_water_[kNumberOfPreallocatedSpaces]; |
| 339 | 463 |
| 340 ExternalReferenceDecoder* external_reference_decoder_; | 464 ExternalReferenceDecoder* external_reference_decoder_; |
| 341 | 465 |
| 342 List<HeapObject*> deserialized_large_objects_; | 466 List<HeapObject*> deserialized_large_objects_; |
| 343 | 467 |
| 344 DISALLOW_COPY_AND_ASSIGN(Deserializer); | 468 DISALLOW_COPY_AND_ASSIGN(Deserializer); |
| 345 }; | 469 }; |
| 346 | 470 |
| 347 | 471 |
| 348 // Mapping objects to their location after deserialization. | |
| 349 // This is used during building, but not at runtime by V8. | |
| 350 class SerializationAddressMapper { | |
| 351 public: | |
| 352 SerializationAddressMapper() | |
| 353 : no_allocation_(), | |
| 354 serialization_map_(new HashMap(HashMap::PointersMatch)) { } | |
| 355 | |
| 356 ~SerializationAddressMapper() { | |
| 357 delete serialization_map_; | |
| 358 } | |
| 359 | |
| 360 bool IsMapped(HeapObject* obj) { | |
| 361 return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL; | |
| 362 } | |
| 363 | |
| 364 int MappedTo(HeapObject* obj) { | |
| 365 DCHECK(IsMapped(obj)); | |
| 366 return static_cast<int>(reinterpret_cast<intptr_t>( | |
| 367 serialization_map_->Lookup(Key(obj), Hash(obj), false)->value)); | |
| 368 } | |
| 369 | |
| 370 void AddMapping(HeapObject* obj, int to) { | |
| 371 DCHECK(!IsMapped(obj)); | |
| 372 HashMap::Entry* entry = | |
| 373 serialization_map_->Lookup(Key(obj), Hash(obj), true); | |
| 374 entry->value = Value(to); | |
| 375 } | |
| 376 | |
| 377 private: | |
| 378 static uint32_t Hash(HeapObject* obj) { | |
| 379 return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address())); | |
| 380 } | |
| 381 | |
| 382 static void* Key(HeapObject* obj) { | |
| 383 return reinterpret_cast<void*>(obj->address()); | |
| 384 } | |
| 385 | |
| 386 static void* Value(int v) { | |
| 387 return reinterpret_cast<void*>(v); | |
| 388 } | |
| 389 | |
| 390 DisallowHeapAllocation no_allocation_; | |
| 391 HashMap* serialization_map_; | |
| 392 DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper); | |
| 393 }; | |
| 394 | |
| 395 | |
| 396 class CodeAddressMap; | 472 class CodeAddressMap; |
| 397 | 473 |
| 398 // There can be only one serializer per V8 process. | 474 // There can be only one serializer per V8 process. |
| 399 class Serializer : public SerializerDeserializer { | 475 class Serializer : public SerializerDeserializer { |
| 400 public: | 476 public: |
| 401 Serializer(Isolate* isolate, SnapshotByteSink* sink); | 477 Serializer(Isolate* isolate, SnapshotByteSink* sink); |
| 402 ~Serializer(); | 478 ~Serializer(); |
| 403 void VisitPointers(Object** start, Object** end); | 479 virtual void VisitPointers(Object** start, Object** end) OVERRIDE; |
| 404 | 480 |
| 405 void FinalizeAllocation(); | 481 void FinalizeAllocation(); |
| 406 | 482 |
| 407 Vector<const uint32_t> FinalAllocationChunks(int space) const { | 483 Vector<const uint32_t> FinalAllocationChunks(int space) const { |
| 408 DCHECK_EQ(1, completed_chunks_[LO_SPACE].length()); // Already finalized. | 484 DCHECK_EQ(1, completed_chunks_[LO_SPACE].length()); // Already finalized. |
| 409 DCHECK_EQ(0, pending_chunk_[space]); // No pending chunks. | 485 DCHECK_EQ(0, pending_chunk_[space]); // No pending chunks. |
| 410 return completed_chunks_[space].ToConstVector(); | 486 return completed_chunks_[space].ToConstVector(); |
| 411 } | 487 } |
| 412 | 488 |
| 413 Isolate* isolate() const { return isolate_; } | 489 Isolate* isolate() const { return isolate_; } |
| 414 | 490 |
| 415 SerializationAddressMapper* address_mapper() { return &address_mapper_; } | 491 BackReferenceMap* back_reference_map() { return &back_reference_map_; } |
| 416 void PutRoot(int index, | 492 RootIndexMap* root_index_map() { return &root_index_map_; } |
| 417 HeapObject* object, | |
| 418 HowToCode how, | |
| 419 WhereToPoint where, | |
| 420 int skip); | |
| 421 | 493 |
| 422 protected: | 494 protected: |
| 423 static const int kInvalidRootIndex = -1; | |
| 424 | |
| 425 int RootIndex(HeapObject* heap_object, HowToCode from); | |
| 426 intptr_t root_index_wave_front() { return root_index_wave_front_; } | |
| 427 void set_root_index_wave_front(intptr_t value) { | |
| 428 DCHECK(value >= root_index_wave_front_); | |
| 429 root_index_wave_front_ = value; | |
| 430 } | |
| 431 | |
| 432 class ObjectSerializer : public ObjectVisitor { | 495 class ObjectSerializer : public ObjectVisitor { |
| 433 public: | 496 public: |
| 434 ObjectSerializer(Serializer* serializer, | 497 ObjectSerializer(Serializer* serializer, |
| 435 Object* o, | 498 Object* o, |
| 436 SnapshotByteSink* sink, | 499 SnapshotByteSink* sink, |
| 437 HowToCode how_to_code, | 500 HowToCode how_to_code, |
| 438 WhereToPoint where_to_point) | 501 WhereToPoint where_to_point) |
| 439 : serializer_(serializer), | 502 : serializer_(serializer), |
| 440 object_(HeapObject::cast(o)), | 503 object_(HeapObject::cast(o)), |
| 441 sink_(sink), | 504 sink_(sink), |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 455 // Used for serializing the external strings that hold the natives source. | 518 // Used for serializing the external strings that hold the natives source. |
| 456 void VisitExternalOneByteString( | 519 void VisitExternalOneByteString( |
| 457 v8::String::ExternalOneByteStringResource** resource); | 520 v8::String::ExternalOneByteStringResource** resource); |
| 458 // We can't serialize a heap with external two byte strings. | 521 // We can't serialize a heap with external two byte strings. |
| 459 void VisitExternalTwoByteString( | 522 void VisitExternalTwoByteString( |
| 460 v8::String::ExternalStringResource** resource) { | 523 v8::String::ExternalStringResource** resource) { |
| 461 UNREACHABLE(); | 524 UNREACHABLE(); |
| 462 } | 525 } |
| 463 | 526 |
| 464 private: | 527 private: |
| 465 void SerializePrologue(int space, int size, Map* map); | 528 void SerializePrologue(AllocationSpace space, int size, Map* map); |
|
mvstanton
2014/10/23 08:02:15
+1 for use of the enum instead of int.
| |
| 466 | 529 |
| 467 enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn }; | 530 enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn }; |
| 468 // This function outputs or skips the raw data between the last pointer and | 531 // This function outputs or skips the raw data between the last pointer and |
| 469 // up to the current position. It optionally can just return the number of | 532 // up to the current position. It optionally can just return the number of |
| 470 // bytes to skip instead of performing a skip instruction, in case the skip | 533 // bytes to skip instead of performing a skip instruction, in case the skip |
| 471 // can be merged into the next instruction. | 534 // can be merged into the next instruction. |
| 472 int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn); | 535 int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn); |
| 473 // External strings are serialized in a way to resemble sequential strings. | 536 // External strings are serialized in a way to resemble sequential strings. |
| 474 void SerializeExternalString(); | 537 void SerializeExternalString(); |
| 475 | 538 |
| 476 Serializer* serializer_; | 539 Serializer* serializer_; |
| 477 HeapObject* object_; | 540 HeapObject* object_; |
| 478 SnapshotByteSink* sink_; | 541 SnapshotByteSink* sink_; |
| 479 int reference_representation_; | 542 int reference_representation_; |
| 480 int bytes_processed_so_far_; | 543 int bytes_processed_so_far_; |
| 481 bool code_object_; | 544 bool code_object_; |
| 482 bool code_has_been_output_; | 545 bool code_has_been_output_; |
| 483 }; | 546 }; |
| 484 | 547 |
| 485 virtual void SerializeObject(Object* o, | 548 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code, |
| 486 HowToCode how_to_code, | 549 WhereToPoint where_to_point, int skip) = 0; |
| 487 WhereToPoint where_to_point, | 550 |
| 488 int skip) = 0; | 551 void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where, |
| 489 void SerializeReferenceToPreviousObject(HeapObject* heap_object, | 552 int skip); |
| 490 HowToCode how_to_code, | 553 |
| 491 WhereToPoint where_to_point, | 554 void SerializeBackReference(BackReference back_reference, |
| 492 int skip); | 555 HowToCode how_to_code, |
| 556 WhereToPoint where_to_point, int skip); | |
| 493 void InitializeAllocators(); | 557 void InitializeAllocators(); |
| 494 // This will return the space for an object. | 558 // This will return the space for an object. |
| 495 static int SpaceOfObject(HeapObject* object); | 559 static AllocationSpace SpaceOfObject(HeapObject* object); |
| 496 uint32_t AllocateLargeObject(int size); | 560 BackReference AllocateLargeObject(int size); |
| 497 uint32_t Allocate(int space, int size); | 561 BackReference Allocate(AllocationSpace space, int size); |
| 498 int EncodeExternalReference(Address addr) { | 562 int EncodeExternalReference(Address addr) { |
| 499 return external_reference_encoder_->Encode(addr); | 563 return external_reference_encoder_->Encode(addr); |
| 500 } | 564 } |
| 501 | 565 |
| 502 int SpaceAreaSize(int space); | 566 int SpaceAreaSize(int space); |
| 503 | 567 |
| 504 // Some roots should not be serialized, because their actual value depends on | 568 // Some roots should not be serialized, because their actual value depends on |
| 505 // absolute addresses and they are reset after deserialization, anyway. | 569 // absolute addresses and they are reset after deserialization, anyway. |
| 506 bool ShouldBeSkipped(Object** current); | 570 bool ShouldBeSkipped(Object** current); |
| 507 | 571 |
| 508 Isolate* isolate_; | 572 Isolate* isolate_; |
| 509 | 573 |
| 510 // Objects from the same space are put into chunks for bulk-allocation | 574 // Objects from the same space are put into chunks for bulk-allocation |
| 511 // when deserializing. We have to make sure that each chunk fits into a | 575 // when deserializing. We have to make sure that each chunk fits into a |
| 512 // page. So we track the chunk size in pending_chunk_ of a space, but | 576 // page. So we track the chunk size in pending_chunk_ of a space, but |
| 513 // when it exceeds a page, we complete the current chunk and start a new one. | 577 // when it exceeds a page, we complete the current chunk and start a new one. |
| 514 uint32_t pending_chunk_[kNumberOfSpaces]; | 578 uint32_t pending_chunk_[kNumberOfSpaces]; |
| 515 List<uint32_t> completed_chunks_[kNumberOfSpaces]; | 579 List<uint32_t> completed_chunks_[kNumberOfSpaces]; |
| 516 | 580 |
| 517 SnapshotByteSink* sink_; | 581 SnapshotByteSink* sink_; |
| 518 ExternalReferenceEncoder* external_reference_encoder_; | 582 ExternalReferenceEncoder* external_reference_encoder_; |
| 519 | 583 |
| 520 SerializationAddressMapper address_mapper_; | 584 BackReferenceMap back_reference_map_; |
| 521 intptr_t root_index_wave_front_; | 585 RootIndexMap root_index_map_; |
| 522 void Pad(); | 586 void Pad(); |
| 523 | 587 |
| 524 friend class ObjectSerializer; | 588 friend class ObjectSerializer; |
| 525 friend class Deserializer; | 589 friend class Deserializer; |
| 526 | 590 |
| 527 // We may not need the code address map for logging for every instance | 591 // We may not need the code address map for logging for every instance |
| 528 // of the serializer. Initialize it on demand. | 592 // of the serializer. Initialize it on demand. |
| 529 void InitializeCodeAddressMap(); | 593 void InitializeCodeAddressMap(); |
| 530 | 594 |
| 531 private: | 595 private: |
| 532 CodeAddressMap* code_address_map_; | 596 CodeAddressMap* code_address_map_; |
| 533 // We map serialized large objects to indexes for back-referencing. | 597 // We map serialized large objects to indexes for back-referencing. |
| 534 uint32_t seen_large_objects_index_; | 598 uint32_t seen_large_objects_index_; |
| 535 DISALLOW_COPY_AND_ASSIGN(Serializer); | 599 DISALLOW_COPY_AND_ASSIGN(Serializer); |
| 536 }; | 600 }; |
| 537 | 601 |
| 538 | 602 |
| 539 class PartialSerializer : public Serializer { | 603 class PartialSerializer : public Serializer { |
| 540 public: | 604 public: |
| 541 PartialSerializer(Isolate* isolate, | 605 PartialSerializer(Isolate* isolate, |
| 542 Serializer* startup_snapshot_serializer, | 606 Serializer* startup_snapshot_serializer, |
| 543 SnapshotByteSink* sink) | 607 SnapshotByteSink* sink) |
| 544 : Serializer(isolate, sink), | 608 : Serializer(isolate, sink), |
| 545 startup_serializer_(startup_snapshot_serializer) { | 609 startup_serializer_(startup_snapshot_serializer) { |
| 546 set_root_index_wave_front(Heap::kStrongRootListLength); | |
| 547 InitializeCodeAddressMap(); | 610 InitializeCodeAddressMap(); |
| 548 } | 611 } |
| 549 | 612 |
| 550 // Serialize the objects reachable from a single object pointer. | 613 // Serialize the objects reachable from a single object pointer. |
| 551 void Serialize(Object** o); | 614 void Serialize(Object** o); |
| 552 virtual void SerializeObject(Object* o, | 615 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code, |
| 553 HowToCode how_to_code, | 616 WhereToPoint where_to_point, int skip); |
| 554 WhereToPoint where_to_point, | |
| 555 int skip); | |
| 556 | 617 |
| 557 private: | 618 private: |
| 558 int PartialSnapshotCacheIndex(HeapObject* o); | 619 int PartialSnapshotCacheIndex(HeapObject* o); |
| 559 bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { | 620 bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { |
| 560 // Scripts should be referred only through shared function infos. We can't | 621 // Scripts should be referred only through shared function infos. We can't |
| 561 // allow them to be part of the partial snapshot because they contain a | 622 // allow them to be part of the partial snapshot because they contain a |
| 562 // unique ID, and deserializing several partial snapshots containing script | 623 // unique ID, and deserializing several partial snapshots containing script |
| 563 // would cause dupes. | 624 // would cause dupes. |
| 564 DCHECK(!o->IsScript()); | 625 DCHECK(!o->IsScript()); |
| 565 return o->IsName() || o->IsSharedFunctionInfo() || | 626 return o->IsName() || o->IsSharedFunctionInfo() || |
| 566 o->IsHeapNumber() || o->IsCode() || | 627 o->IsHeapNumber() || o->IsCode() || |
| 567 o->IsScopeInfo() || | 628 o->IsScopeInfo() || |
| 568 o->map() == | 629 o->map() == |
| 569 startup_serializer_->isolate()->heap()->fixed_cow_array_map(); | 630 startup_serializer_->isolate()->heap()->fixed_cow_array_map(); |
| 570 } | 631 } |
| 571 | 632 |
| 572 | 633 |
| 573 Serializer* startup_serializer_; | 634 Serializer* startup_serializer_; |
| 574 DISALLOW_COPY_AND_ASSIGN(PartialSerializer); | 635 DISALLOW_COPY_AND_ASSIGN(PartialSerializer); |
| 575 }; | 636 }; |
| 576 | 637 |
| 577 | 638 |
| 578 class StartupSerializer : public Serializer { | 639 class StartupSerializer : public Serializer { |
| 579 public: | 640 public: |
| 580 StartupSerializer(Isolate* isolate, SnapshotByteSink* sink) | 641 StartupSerializer(Isolate* isolate, SnapshotByteSink* sink) |
| 581 : Serializer(isolate, sink) { | 642 : Serializer(isolate, sink), root_index_wave_front_(0) { |
| 582 // Clear the cache of objects used by the partial snapshot. After the | 643 // Clear the cache of objects used by the partial snapshot. After the |
| 583 // strong roots have been serialized we can create a partial snapshot | 644 // strong roots have been serialized we can create a partial snapshot |
| 584 // which will repopulate the cache with objects needed by that partial | 645 // which will repopulate the cache with objects needed by that partial |
| 585 // snapshot. | 646 // snapshot. |
| 586 isolate->set_serialize_partial_snapshot_cache_length(0); | 647 isolate->set_serialize_partial_snapshot_cache_length(0); |
| 587 InitializeCodeAddressMap(); | 648 InitializeCodeAddressMap(); |
| 588 } | 649 } |
| 650 | |
| 651 // The StartupSerializer has to serialize the root array, which is slightly | |
| 652 // different. | |
| 653 virtual void VisitPointers(Object** start, Object** end) OVERRIDE; | |
| 654 | |
| 589 // Serialize the current state of the heap. The order is: | 655 // Serialize the current state of the heap. The order is: |
| 590 // 1) Strong references. | 656 // 1) Strong references. |
| 591 // 2) Partial snapshot cache. | 657 // 2) Partial snapshot cache. |
| 592 // 3) Weak references (e.g. the string table). | 658 // 3) Weak references (e.g. the string table). |
| 593 virtual void SerializeStrongReferences(); | 659 virtual void SerializeStrongReferences(); |
| 594 virtual void SerializeObject(Object* o, | 660 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code, |
| 595 HowToCode how_to_code, | 661 WhereToPoint where_to_point, int skip); |
| 596 WhereToPoint where_to_point, | |
| 597 int skip); | |
| 598 void SerializeWeakReferences(); | 662 void SerializeWeakReferences(); |
| 599 void Serialize() { | 663 void Serialize() { |
| 600 SerializeStrongReferences(); | 664 SerializeStrongReferences(); |
| 601 SerializeWeakReferences(); | 665 SerializeWeakReferences(); |
| 602 Pad(); | 666 Pad(); |
| 603 } | 667 } |
| 604 | 668 |
| 605 private: | 669 private: |
| 670 int root_index_wave_front_; | |
| 606 DISALLOW_COPY_AND_ASSIGN(StartupSerializer); | 671 DISALLOW_COPY_AND_ASSIGN(StartupSerializer); |
| 607 }; | 672 }; |
| 608 | 673 |
| 609 | 674 |
| 610 class CodeSerializer : public Serializer { | 675 class CodeSerializer : public Serializer { |
| 611 public: | 676 public: |
| 612 static ScriptData* Serialize(Isolate* isolate, | 677 static ScriptData* Serialize(Isolate* isolate, |
| 613 Handle<SharedFunctionInfo> info, | 678 Handle<SharedFunctionInfo> info, |
| 614 Handle<String> source); | 679 Handle<String> source); |
| 615 | 680 |
| 616 MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize( | 681 MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize( |
| 617 Isolate* isolate, ScriptData* data, Handle<String> source); | 682 Isolate* isolate, ScriptData* data, Handle<String> source); |
| 618 | 683 |
| 619 static const int kSourceObjectIndex = 0; | 684 static const int kSourceObjectIndex = 0; |
| 620 static const int kCodeStubsBaseIndex = 1; | 685 static const int kCodeStubsBaseIndex = 1; |
| 621 | 686 |
| 622 String* source() { | 687 String* source() { |
| 623 DCHECK(!AllowHeapAllocation::IsAllowed()); | 688 DCHECK(!AllowHeapAllocation::IsAllowed()); |
| 624 return source_; | 689 return source_; |
| 625 } | 690 } |
| 626 | 691 |
| 627 List<uint32_t>* stub_keys() { return &stub_keys_; } | 692 List<uint32_t>* stub_keys() { return &stub_keys_; } |
| 628 | 693 |
| 629 private: | 694 private: |
| 630 CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source, | 695 CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source, |
| 631 Code* main_code) | 696 Code* main_code) |
| 632 : Serializer(isolate, sink), source_(source), main_code_(main_code) { | 697 : Serializer(isolate, sink), source_(source), main_code_(main_code) { |
| 633 set_root_index_wave_front(Heap::kStrongRootListLength); | |
| 634 InitializeCodeAddressMap(); | 698 InitializeCodeAddressMap(); |
| 635 } | 699 } |
| 636 | 700 |
| 637 virtual void SerializeObject(Object* o, HowToCode how_to_code, | 701 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code, |
| 638 WhereToPoint where_to_point, int skip); | 702 WhereToPoint where_to_point, int skip); |
| 639 | 703 |
| 640 void SerializeBuiltin(int builtin_index, HowToCode how_to_code, | 704 void SerializeBuiltin(int builtin_index, HowToCode how_to_code, |
| 641 WhereToPoint where_to_point); | 705 WhereToPoint where_to_point); |
| 642 void SerializeIC(Code* ic, HowToCode how_to_code, | 706 void SerializeIC(Code* ic, HowToCode how_to_code, |
| 643 WhereToPoint where_to_point); | 707 WhereToPoint where_to_point); |
| 644 void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code, | 708 void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code, |
| 645 WhereToPoint where_to_point); | 709 WhereToPoint where_to_point); |
| 646 void SerializeSourceObject(HowToCode how_to_code, | 710 void SerializeSourceObject(HowToCode how_to_code, |
| 647 WhereToPoint where_to_point); | 711 WhereToPoint where_to_point); |
| 648 void SerializeHeapObject(HeapObject* heap_object, HowToCode how_to_code, | 712 void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code, |
| 649 WhereToPoint where_to_point); | 713 WhereToPoint where_to_point); |
| 650 int AddCodeStubKey(uint32_t stub_key); | 714 int AddCodeStubKey(uint32_t stub_key); |
| 651 | 715 |
| 652 DisallowHeapAllocation no_gc_; | 716 DisallowHeapAllocation no_gc_; |
| 653 String* source_; | 717 String* source_; |
| 654 Code* main_code_; | 718 Code* main_code_; |
| 655 List<uint32_t> stub_keys_; | 719 List<uint32_t> stub_keys_; |
| 656 DISALLOW_COPY_AND_ASSIGN(CodeSerializer); | 720 DISALLOW_COPY_AND_ASSIGN(CodeSerializer); |
| 657 }; | 721 }; |
| 658 | 722 |
| 659 | 723 |
| (...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 752 // Following the header, we store, in sequential order | 816 // Following the header, we store, in sequential order |
| 753 // - code stub keys | 817 // - code stub keys |
| 754 // - serialization payload | 818 // - serialization payload |
| 755 | 819 |
| 756 ScriptData* script_data_; | 820 ScriptData* script_data_; |
| 757 bool owns_script_data_; | 821 bool owns_script_data_; |
| 758 }; | 822 }; |
| 759 } } // namespace v8::internal | 823 } } // namespace v8::internal |
| 760 | 824 |
| 761 #endif // V8_SERIALIZE_H_ | 825 #endif // V8_SERIALIZE_H_ |
| OLD | NEW |