OLD | NEW |
1 // Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #ifndef RUNTIME_VM_CLUSTERED_SNAPSHOT_H_ | 5 #ifndef RUNTIME_VM_CLUSTERED_SNAPSHOT_H_ |
6 #define RUNTIME_VM_CLUSTERED_SNAPSHOT_H_ | 6 #define RUNTIME_VM_CLUSTERED_SNAPSHOT_H_ |
7 | 7 |
8 #include "platform/assert.h" | 8 #include "platform/assert.h" |
9 #include "vm/allocation.h" | 9 #include "vm/allocation.h" |
10 #include "vm/bitfield.h" | 10 #include "vm/bitfield.h" |
(...skipping 28 matching lines...) Expand all Loading... |
39 // is then read for each cluster, filling the reference array. Then the | 39 // is then read for each cluster, filling the reference array. Then the |
40 // initialization/fill section is read for each cluster, using the indices into | 40 // initialization/fill section is read for each cluster, using the indices into |
41 // the reference array to fill pointers. At this point, every object has been | 41 // the reference array to fill pointers. At this point, every object has been |
42 // touched exactly once and in order, making this approach very cache friendly. | 42 // touched exactly once and in order, making this approach very cache friendly. |
43 // Finally, each cluster is given an opportunity to perform some fix-ups that | 43 // Finally, each cluster is given an opportunity to perform some fix-ups that |
44 // require the graph has been fully loaded, such as rehashing, though most | 44 // require the graph has been fully loaded, such as rehashing, though most |
45 // clusters do not require fixups. | 45 // clusters do not require fixups. |
46 | 46 |
47 class SerializationCluster : public ZoneAllocated { | 47 class SerializationCluster : public ZoneAllocated { |
48 public: | 48 public: |
49 virtual ~SerializationCluster() { } | 49 virtual ~SerializationCluster() {} |
50 | 50 |
51 // Add [object] to the cluster and push its outgoing references. | 51 // Add [object] to the cluster and push its outgoing references. |
52 virtual void Trace(Serializer* serializer, RawObject* object) = 0; | 52 virtual void Trace(Serializer* serializer, RawObject* object) = 0; |
53 | 53 |
54 // Write the cluster type and information needed to allocate the cluster's | 54 // Write the cluster type and information needed to allocate the cluster's |
55 // objects. For fixed sized objects, this is just the object count. For | 55 // objects. For fixed sized objects, this is just the object count. For |
56 // variable sized objects, this is the object count and length of each object. | 56 // variable sized objects, this is the object count and length of each object. |
57 virtual void WriteAlloc(Serializer* serializer) = 0; | 57 virtual void WriteAlloc(Serializer* serializer) = 0; |
58 | 58 |
59 // Write the byte and reference data of the cluster's objects. | 59 // Write the byte and reference data of the cluster's objects. |
60 virtual void WriteFill(Serializer* serializer) = 0; | 60 virtual void WriteFill(Serializer* serializer) = 0; |
61 }; | 61 }; |
62 | 62 |
63 | 63 |
64 class DeserializationCluster : public ZoneAllocated { | 64 class DeserializationCluster : public ZoneAllocated { |
65 public: | 65 public: |
66 DeserializationCluster() : start_index_(-1), stop_index_(-1) { } | 66 DeserializationCluster() : start_index_(-1), stop_index_(-1) {} |
67 virtual ~DeserializationCluster() { } | 67 virtual ~DeserializationCluster() {} |
68 | 68 |
69 // Allocate memory for all objects in the cluster and write their addresses | 69 // Allocate memory for all objects in the cluster and write their addresses |
70 // into the ref array. Do not touch this memory. | 70 // into the ref array. Do not touch this memory. |
71 virtual void ReadAlloc(Deserializer* deserializer) = 0; | 71 virtual void ReadAlloc(Deserializer* deserializer) = 0; |
72 | 72 |
73 // Initialize the cluster's objects. Do not touch the memory of other objects. | 73 // Initialize the cluster's objects. Do not touch the memory of other objects. |
74 virtual void ReadFill(Deserializer* deserializer) = 0; | 74 virtual void ReadFill(Deserializer* deserializer) = 0; |
75 | 75 |
76 // Complete any action that requires the full graph to be deserialized, such | 76 // Complete any action that requires the full graph to be deserialized, such |
77 // as rehashing. | 77 // as rehashing. |
78 virtual void PostLoad(const Array& refs, Snapshot::Kind kind, Zone* zone) { } | 78 virtual void PostLoad(const Array& refs, Snapshot::Kind kind, Zone* zone) {} |
79 | 79 |
80 protected: | 80 protected: |
81 // The range of the ref array that belongs to this cluster. | 81 // The range of the ref array that belongs to this cluster. |
82 intptr_t start_index_; | 82 intptr_t start_index_; |
83 intptr_t stop_index_; | 83 intptr_t stop_index_; |
84 }; | 84 }; |
85 | 85 |
86 | 86 |
87 class SmiObjectIdPair { | 87 class SmiObjectIdPair { |
88 public: | 88 public: |
89 SmiObjectIdPair() : smi_(NULL), id_(0) { } | 89 SmiObjectIdPair() : smi_(NULL), id_(0) {} |
90 RawSmi* smi_; | 90 RawSmi* smi_; |
91 intptr_t id_; | 91 intptr_t id_; |
92 | 92 |
93 bool operator==(const SmiObjectIdPair& other) const { | 93 bool operator==(const SmiObjectIdPair& other) const { |
94 return (smi_ == other.smi_) && (id_ == other.id_); | 94 return (smi_ == other.smi_) && (id_ == other.id_); |
95 } | 95 } |
96 }; | 96 }; |
97 | 97 |
98 | 98 |
99 class SmiObjectIdPairTrait { | 99 class SmiObjectIdPairTrait { |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
179 | 179 |
180 intptr_t id = heap_->GetObjectId(object); | 180 intptr_t id = heap_->GetObjectId(object); |
181 if (id == 0) { | 181 if (id == 0) { |
182 heap_->SetObjectId(object, 1); | 182 heap_->SetObjectId(object, 1); |
183 ASSERT(heap_->GetObjectId(object) != 0); | 183 ASSERT(heap_->GetObjectId(object) != 0); |
184 stack_.Add(object); | 184 stack_.Add(object); |
185 num_written_objects_++; | 185 num_written_objects_++; |
186 } | 186 } |
187 } | 187 } |
188 | 188 |
189 void AddUntracedRef() { | 189 void AddUntracedRef() { num_written_objects_++; } |
190 num_written_objects_++; | |
191 } | |
192 | 190 |
193 void Trace(RawObject* object); | 191 void Trace(RawObject* object); |
194 | 192 |
195 SerializationCluster* NewClusterForClass(intptr_t cid); | 193 SerializationCluster* NewClusterForClass(intptr_t cid); |
196 | 194 |
197 void ReserveHeader() { | 195 void ReserveHeader() { |
198 // Make room for recording snapshot buffer size. | 196 // Make room for recording snapshot buffer size. |
199 stream_.set_current(stream_.buffer() + Snapshot::kHeaderSize); | 197 stream_.set_current(stream_.buffer() + Snapshot::kHeaderSize); |
200 } | 198 } |
201 | 199 |
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
307 bool is_vm_isolate, | 305 bool is_vm_isolate, |
308 bool is_canonical = false); | 306 bool is_canonical = false); |
309 | 307 |
310 // Reads raw data (for basic types). | 308 // Reads raw data (for basic types). |
311 // sizeof(T) must be in {1,2,4,8}. | 309 // sizeof(T) must be in {1,2,4,8}. |
312 template <typename T> | 310 template <typename T> |
313 T Read() { | 311 T Read() { |
314 return ReadStream::Raw<sizeof(T), T>::Read(&stream_); | 312 return ReadStream::Raw<sizeof(T), T>::Read(&stream_); |
315 } | 313 } |
316 | 314 |
317 void ReadBytes(uint8_t* addr, intptr_t len) { | 315 void ReadBytes(uint8_t* addr, intptr_t len) { stream_.ReadBytes(addr, len); } |
318 stream_.ReadBytes(addr, len); | |
319 } | |
320 | 316 |
321 const uint8_t* CurrentBufferAddress() const { | 317 const uint8_t* CurrentBufferAddress() const { |
322 return stream_.AddressOfCurrentPosition(); | 318 return stream_.AddressOfCurrentPosition(); |
323 } | 319 } |
324 | 320 |
325 void Advance(intptr_t value) { | 321 void Advance(intptr_t value) { stream_.Advance(value); } |
326 stream_.Advance(value); | |
327 } | |
328 | 322 |
329 intptr_t PendingBytes() const { | 323 intptr_t PendingBytes() const { return stream_.PendingBytes(); } |
330 return stream_.PendingBytes(); | |
331 } | |
332 | 324 |
333 void AddBaseObject(RawObject* base_object) { | 325 void AddBaseObject(RawObject* base_object) { AssignRef(base_object); } |
334 AssignRef(base_object); | |
335 } | |
336 | 326 |
337 void AssignRef(RawObject* object) { | 327 void AssignRef(RawObject* object) { |
338 ASSERT(next_ref_index_ <= num_objects_); | 328 ASSERT(next_ref_index_ <= num_objects_); |
339 refs_->ptr()->data()[next_ref_index_] = object; | 329 refs_->ptr()->data()[next_ref_index_] = object; |
340 next_ref_index_++; | 330 next_ref_index_++; |
341 } | 331 } |
342 | 332 |
343 RawObject* Ref(intptr_t index) const { | 333 RawObject* Ref(intptr_t index) const { |
344 ASSERT(index > 0); | 334 ASSERT(index > 0); |
345 ASSERT(index <= num_objects_); | 335 ASSERT(index <= num_objects_); |
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
400 uint8_t** vm_isolate_snapshot_buffer, | 390 uint8_t** vm_isolate_snapshot_buffer, |
401 uint8_t** isolate_snapshot_buffer, | 391 uint8_t** isolate_snapshot_buffer, |
402 ReAlloc alloc, | 392 ReAlloc alloc, |
403 InstructionsWriter* instructions_writer); | 393 InstructionsWriter* instructions_writer); |
404 ~FullSnapshotWriter(); | 394 ~FullSnapshotWriter(); |
405 | 395 |
406 uint8_t** vm_isolate_snapshot_buffer() const { | 396 uint8_t** vm_isolate_snapshot_buffer() const { |
407 return vm_isolate_snapshot_buffer_; | 397 return vm_isolate_snapshot_buffer_; |
408 } | 398 } |
409 | 399 |
410 uint8_t** isolate_snapshot_buffer() const { | 400 uint8_t** isolate_snapshot_buffer() const { return isolate_snapshot_buffer_; } |
411 return isolate_snapshot_buffer_; | |
412 } | |
413 | 401 |
414 Thread* thread() const { return thread_; } | 402 Thread* thread() const { return thread_; } |
415 Zone* zone() const { return thread_->zone(); } | 403 Zone* zone() const { return thread_->zone(); } |
416 Isolate* isolate() const { return thread_->isolate(); } | 404 Isolate* isolate() const { return thread_->isolate(); } |
417 Heap* heap() const { return isolate()->heap(); } | 405 Heap* heap() const { return isolate()->heap(); } |
418 | 406 |
419 // Writes a full snapshot of the Isolate. | 407 // Writes a full snapshot of the Isolate. |
420 void WriteFullSnapshot(); | 408 void WriteFullSnapshot(); |
421 | 409 |
422 intptr_t VmIsolateSnapshotSize() const { | 410 intptr_t VmIsolateSnapshotSize() const { return vm_isolate_snapshot_size_; } |
423 return vm_isolate_snapshot_size_; | 411 intptr_t IsolateSnapshotSize() const { return isolate_snapshot_size_; } |
424 } | |
425 intptr_t IsolateSnapshotSize() const { | |
426 return isolate_snapshot_size_; | |
427 } | |
428 | 412 |
429 private: | 413 private: |
430 // Writes a snapshot of the VM Isolate. | 414 // Writes a snapshot of the VM Isolate. |
431 intptr_t WriteVmIsolateSnapshot(); | 415 intptr_t WriteVmIsolateSnapshot(); |
432 | 416 |
433 // Writes a full snapshot of a regular Dart Isolate. | 417 // Writes a full snapshot of a regular Dart Isolate. |
434 void WriteIsolateFullSnapshot(intptr_t num_base_objects); | 418 void WriteIsolateFullSnapshot(intptr_t num_base_objects); |
435 | 419 |
436 Thread* thread_; | 420 Thread* thread_; |
437 Snapshot::Kind kind_; | 421 Snapshot::Kind kind_; |
(...skipping 12 matching lines...) Expand all Loading... |
450 }; | 434 }; |
451 | 435 |
452 | 436 |
453 class VmIsolateSnapshotReader { | 437 class VmIsolateSnapshotReader { |
454 public: | 438 public: |
455 VmIsolateSnapshotReader(Snapshot::Kind kind, | 439 VmIsolateSnapshotReader(Snapshot::Kind kind, |
456 const uint8_t* buffer, | 440 const uint8_t* buffer, |
457 intptr_t size, | 441 intptr_t size, |
458 const uint8_t* instructions_buffer, | 442 const uint8_t* instructions_buffer, |
459 const uint8_t* data_buffer, | 443 const uint8_t* data_buffer, |
460 Thread* thread) : | 444 Thread* thread) |
461 kind_(kind), | 445 : kind_(kind), |
462 thread_(thread), | 446 thread_(thread), |
463 buffer_(buffer), | 447 buffer_(buffer), |
464 size_(size), | 448 size_(size), |
465 instructions_buffer_(instructions_buffer), | 449 instructions_buffer_(instructions_buffer), |
466 data_buffer_(data_buffer) { | 450 data_buffer_(data_buffer) { |
467 thread->isolate()->set_compilation_allowed(kind != Snapshot::kAppNoJIT); | 451 thread->isolate()->set_compilation_allowed(kind != Snapshot::kAppNoJIT); |
468 } | 452 } |
469 | 453 |
470 ~VmIsolateSnapshotReader() { } | 454 ~VmIsolateSnapshotReader() {} |
471 | 455 |
472 RawApiError* ReadVmIsolateSnapshot(); | 456 RawApiError* ReadVmIsolateSnapshot(); |
473 | 457 |
474 private: | 458 private: |
475 Snapshot::Kind kind_; | 459 Snapshot::Kind kind_; |
476 Thread* thread_; | 460 Thread* thread_; |
477 const uint8_t* buffer_; | 461 const uint8_t* buffer_; |
478 intptr_t size_; | 462 intptr_t size_; |
479 const uint8_t* instructions_buffer_; | 463 const uint8_t* instructions_buffer_; |
480 const uint8_t* data_buffer_; | 464 const uint8_t* data_buffer_; |
481 | 465 |
482 DISALLOW_COPY_AND_ASSIGN(VmIsolateSnapshotReader); | 466 DISALLOW_COPY_AND_ASSIGN(VmIsolateSnapshotReader); |
483 }; | 467 }; |
484 | 468 |
485 | 469 |
486 class IsolateSnapshotReader { | 470 class IsolateSnapshotReader { |
487 public: | 471 public: |
488 IsolateSnapshotReader(Snapshot::Kind kind, | 472 IsolateSnapshotReader(Snapshot::Kind kind, |
489 const uint8_t* buffer, | 473 const uint8_t* buffer, |
490 intptr_t size, | 474 intptr_t size, |
491 const uint8_t* instructions_buffer, | 475 const uint8_t* instructions_buffer, |
492 const uint8_t* data_buffer, | 476 const uint8_t* data_buffer, |
493 Thread* thread) : | 477 Thread* thread) |
494 kind_(kind), | 478 : kind_(kind), |
495 thread_(thread), | 479 thread_(thread), |
496 buffer_(buffer), | 480 buffer_(buffer), |
497 size_(size), | 481 size_(size), |
498 instructions_buffer_(instructions_buffer), | 482 instructions_buffer_(instructions_buffer), |
499 data_buffer_(data_buffer) { | 483 data_buffer_(data_buffer) { |
500 thread->isolate()->set_compilation_allowed(kind != Snapshot::kAppNoJIT); | 484 thread->isolate()->set_compilation_allowed(kind != Snapshot::kAppNoJIT); |
501 } | 485 } |
502 | 486 |
503 ~IsolateSnapshotReader() {} | 487 ~IsolateSnapshotReader() {} |
504 | 488 |
505 RawApiError* ReadFullSnapshot(); | 489 RawApiError* ReadFullSnapshot(); |
506 | 490 |
507 private: | 491 private: |
508 Snapshot::Kind kind_; | 492 Snapshot::Kind kind_; |
509 Thread* thread_; | 493 Thread* thread_; |
510 const uint8_t* buffer_; | 494 const uint8_t* buffer_; |
511 intptr_t size_; | 495 intptr_t size_; |
512 const uint8_t* instructions_buffer_; | 496 const uint8_t* instructions_buffer_; |
513 const uint8_t* data_buffer_; | 497 const uint8_t* data_buffer_; |
514 | 498 |
515 DISALLOW_COPY_AND_ASSIGN(IsolateSnapshotReader); | 499 DISALLOW_COPY_AND_ASSIGN(IsolateSnapshotReader); |
516 }; | 500 }; |
517 | 501 |
518 } // namespace dart | 502 } // namespace dart |
519 | 503 |
520 #endif // RUNTIME_VM_CLUSTERED_SNAPSHOT_H_ | 504 #endif // RUNTIME_VM_CLUSTERED_SNAPSHOT_H_ |
OLD | NEW |