OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/snapshot/deserializer.h" | 5 #include "src/snapshot/deserializer.h" |
6 | 6 |
7 #include "src/bootstrapper.h" | 7 #include "src/bootstrapper.h" |
8 #include "src/external-reference-table.h" | 8 #include "src/external-reference-table.h" |
9 #include "src/heap/heap.h" | 9 #include "src/heap/heap.h" |
10 #include "src/isolate.h" | 10 #include "src/isolate.h" |
(...skipping 37 matching lines...)
48 code->instruction_size()); | 48 code->instruction_size()); |
49 } | 49 } |
50 } | 50 } |
51 | 51 |
52 bool Deserializer::ReserveSpace() { | 52 bool Deserializer::ReserveSpace() { |
53 #ifdef DEBUG | 53 #ifdef DEBUG |
54 for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) { | 54 for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) { |
55 CHECK(reservations_[i].length() > 0); | 55 CHECK(reservations_[i].length() > 0); |
56 } | 56 } |
57 #endif // DEBUG | 57 #endif // DEBUG |
58 if (!isolate_->heap()->ReserveSpace(reservations_)) return false; | 58 DCHECK(allocated_maps_.is_empty()); |
| 59 if (!isolate_->heap()->ReserveSpace(reservations_, &allocated_maps_)) |
| 60 return false; |
59 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) { | 61 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) { |
60 high_water_[i] = reservations_[i][0].start; | 62 high_water_[i] = reservations_[i][0].start; |
61 } | 63 } |
62 return true; | 64 return true; |
63 } | 65 } |
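
The new second argument lets the heap hand back one pre-allocated, map-sized address per map in the snapshot, so later map allocation becomes a simple index walk over allocated_maps_. A minimal standalone sketch of that idea, using invented names (FakeHeap, kMapSize) rather than the real V8 Heap API:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Invented stand-ins; the real sizes and types live in the V8 heap.
constexpr size_t kMapSize = 88;
using Address = uintptr_t;

// FakeHeap::ReserveSpace mirrors the new contract: besides reserving the
// regular chunks, it fills |allocated_maps| with one address per map.
class FakeHeap {
 public:
  bool ReserveSpace(size_t map_count, std::vector<Address>* allocated_maps) {
    backing_.resize(map_count * kMapSize);
    for (size_t i = 0; i < map_count; ++i) {
      allocated_maps->push_back(
          reinterpret_cast<Address>(backing_.data() + i * kMapSize));
    }
    return true;
  }

 private:
  std::vector<uint8_t> backing_;
};

int main() {
  FakeHeap heap;
  std::vector<Address> allocated_maps;
  assert(allocated_maps.empty());  // same invariant as the new DCHECK
  if (!heap.ReserveSpace(3, &allocated_maps)) return 1;
  // Deserialization will later consume these addresses in order.
  assert(allocated_maps.size() == 3);
  assert(allocated_maps[1] == allocated_maps[0] + kMapSize);
  return 0;
}
```
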
64 | 66 |
65 void Deserializer::Initialize(Isolate* isolate) { | 67 void Deserializer::Initialize(Isolate* isolate) { |
66 DCHECK_NULL(isolate_); | 68 DCHECK_NULL(isolate_); |
67 DCHECK_NOT_NULL(isolate); | 69 DCHECK_NOT_NULL(isolate); |
68 isolate_ = isolate; | 70 isolate_ = isolate; |
(...skipping 86 matching lines...)
155 isolate->heap()->RegisterReservationsForBlackAllocation(reservations_); | 157 isolate->heap()->RegisterReservationsForBlackAllocation(reservations_); |
156 } | 158 } |
157 CommitPostProcessedObjects(isolate); | 159 CommitPostProcessedObjects(isolate); |
158 return scope.CloseAndEscape(result); | 160 return scope.CloseAndEscape(result); |
159 } | 161 } |
160 } | 162 } |
161 | 163 |
162 Deserializer::~Deserializer() { | 164 Deserializer::~Deserializer() { |
163 // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed. | 165 // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed. |
164 // DCHECK(source_.AtEOF()); | 166 // DCHECK(source_.AtEOF()); |
| 167 for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) { |
| 168 int chunk_index = current_chunk_[space]; |
| 169 CHECK_EQ(reservations_[space].length(), chunk_index + 1); |
| 170 CHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]); |
| 171 } |
| 172 CHECK_EQ(allocated_maps_.length(), next_map_index_); |
165 } | 173 } |
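
The added checks encode the invariant that deserialization consumed exactly what was reserved: in every preallocated space the final allocation pointer must sit at the end of the last reserved chunk, and every pre-allocated map address must have been handed out. A small self-contained illustration of that check, with invented names (not the V8 types):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

using Address = uintptr_t;
struct Chunk { Address start; Address end; };

// Returns true iff the reservation for one space was used up exactly:
// we are on the last chunk and its high-water mark reached the chunk end.
bool ReservationFullyUsed(const std::vector<Chunk>& chunks,
                          size_t current_chunk, Address high_water) {
  return current_chunk + 1 == chunks.size() &&
         chunks[current_chunk].end == high_water;
}

int main() {
  std::vector<Chunk> chunks = {{0x1000, 0x2000}, {0x4000, 0x5000}};
  // Simulate a deserializer that bump-allocated to the end of chunk 1.
  assert(ReservationFullyUsed(chunks, 1, 0x5000));
  // A leftover gap (high water short of the end) would trip the CHECK.
  assert(!ReservationFullyUsed(chunks, 1, 0x4f00));
  return 0;
}
```
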
166 | 174 |
167 // This is called on the roots. It is the driver of the deserialization | 175 // This is called on the roots. It is the driver of the deserialization |
168 // process. It is also called on the body of each function. | 176 // process. It is also called on the body of each function. |
169 void Deserializer::VisitPointers(Object** start, Object** end) { | 177 void Deserializer::VisitPointers(Object** start, Object** end) { |
170 // The space must be new space. Any other space would cause ReadChunk to try | 178 // The space must be new space. Any other space would cause ReadChunk to try |
171 // to update the remembered set using NULL as the address. | 179 // to update the remembered set using NULL as the address. |
172 ReadData(start, end, NEW_SPACE, NULL); | 180 ReadData(start, end, NEW_SPACE, NULL); |
173 } | 181 } |
174 | 182 |
(...skipping 130 matching lines...)
305 Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script); | 313 Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script); |
306 heap->SetRootScriptList(*list); | 314 heap->SetRootScriptList(*list); |
307 } | 315 } |
308 } | 316 } |
309 | 317 |
310 HeapObject* Deserializer::GetBackReferencedObject(int space) { | 318 HeapObject* Deserializer::GetBackReferencedObject(int space) { |
311 HeapObject* obj; | 319 HeapObject* obj; |
312 SerializerReference back_reference = | 320 SerializerReference back_reference = |
313 SerializerReference::FromBitfield(source_.GetInt()); | 321 SerializerReference::FromBitfield(source_.GetInt()); |
314 if (space == LO_SPACE) { | 322 if (space == LO_SPACE) { |
315 CHECK(back_reference.chunk_index() == 0); | |
316 uint32_t index = back_reference.large_object_index(); | 323 uint32_t index = back_reference.large_object_index(); |
317 obj = deserialized_large_objects_[index]; | 324 obj = deserialized_large_objects_[index]; |
| 325 } else if (space == MAP_SPACE) { |
| 326 int index = back_reference.map_index(); |
| 327 DCHECK(index < next_map_index_); |
| 328 obj = HeapObject::FromAddress(allocated_maps_[index]); |
318 } else { | 329 } else { |
319 DCHECK(space < kNumberOfPreallocatedSpaces); | 330 DCHECK(space < kNumberOfPreallocatedSpaces); |
320 uint32_t chunk_index = back_reference.chunk_index(); | 331 uint32_t chunk_index = back_reference.chunk_index(); |
321 DCHECK_LE(chunk_index, current_chunk_[space]); | 332 DCHECK_LE(chunk_index, current_chunk_[space]); |
322 uint32_t chunk_offset = back_reference.chunk_offset(); | 333 uint32_t chunk_offset = back_reference.chunk_offset(); |
323 Address address = reservations_[space][chunk_index].start + chunk_offset; | 334 Address address = reservations_[space][chunk_index].start + chunk_offset; |
324 if (next_alignment_ != kWordAligned) { | 335 if (next_alignment_ != kWordAligned) { |
325 int padding = Heap::GetFillToAlign(address, next_alignment_); | 336 int padding = Heap::GetFillToAlign(address, next_alignment_); |
326 next_alignment_ = kWordAligned; | 337 next_alignment_ = kWordAligned; |
327 DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller()); | 338 DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller()); |
(...skipping 70 matching lines...)
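
The new MAP_SPACE branch resolves a back-reference by index into the pre-allocated map list instead of by chunk/offset arithmetic. A compact sketch of the two lookup paths, with an invented BackReference type (not the V8 SerializerReference):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

using Address = uintptr_t;
struct Chunk { Address start; Address end; };

// Hypothetical back-reference: either a map index or a chunk/offset pair.
struct BackReference {
  bool is_map;
  uint32_t map_index;     // valid if is_map
  uint32_t chunk_index;   // valid otherwise
  uint32_t chunk_offset;  // valid otherwise
};

Address Resolve(const BackReference& ref,
                const std::vector<Address>& allocated_maps,
                const std::vector<Chunk>& chunks) {
  if (ref.is_map) {
    // Maps were handed out sequentially, so the index addresses them directly.
    assert(ref.map_index < allocated_maps.size());
    return allocated_maps[ref.map_index];
  }
  // Other preallocated spaces still use chunk start + offset.
  return chunks[ref.chunk_index].start + ref.chunk_offset;
}

int main() {
  std::vector<Address> maps = {0x9000, 0x9058};
  std::vector<Chunk> chunks = {{0x1000, 0x2000}};
  assert(Resolve({true, 1, 0, 0}, maps, chunks) == 0x9058);
  assert(Resolve({false, 0, 0, 0x10}, maps, chunks) == 0x1010);
  return 0;
}
```
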
398 // reference large objects by index. | 409 // reference large objects by index. |
399 Address Deserializer::Allocate(int space_index, int size) { | 410 Address Deserializer::Allocate(int space_index, int size) { |
400 if (space_index == LO_SPACE) { | 411 if (space_index == LO_SPACE) { |
401 AlwaysAllocateScope scope(isolate_); | 412 AlwaysAllocateScope scope(isolate_); |
402 LargeObjectSpace* lo_space = isolate_->heap()->lo_space(); | 413 LargeObjectSpace* lo_space = isolate_->heap()->lo_space(); |
403 Executability exec = static_cast<Executability>(source_.Get()); | 414 Executability exec = static_cast<Executability>(source_.Get()); |
404 AllocationResult result = lo_space->AllocateRaw(size, exec); | 415 AllocationResult result = lo_space->AllocateRaw(size, exec); |
405 HeapObject* obj = HeapObject::cast(result.ToObjectChecked()); | 416 HeapObject* obj = HeapObject::cast(result.ToObjectChecked()); |
406 deserialized_large_objects_.Add(obj); | 417 deserialized_large_objects_.Add(obj); |
407 return obj->address(); | 418 return obj->address(); |
| 419 } else if (space_index == MAP_SPACE) { |
| 420 DCHECK_EQ(Map::kSize, size); |
| 421 return allocated_maps_[next_map_index_++]; |
408 } else { | 422 } else { |
409 DCHECK(space_index < kNumberOfPreallocatedSpaces); | 423 DCHECK(space_index < kNumberOfPreallocatedSpaces); |
410 Address address = high_water_[space_index]; | 424 Address address = high_water_[space_index]; |
411 DCHECK_NOT_NULL(address); | 425 DCHECK_NOT_NULL(address); |
412 high_water_[space_index] += size; | 426 high_water_[space_index] += size; |
413 #ifdef DEBUG | 427 #ifdef DEBUG |
414 // Assert that the current reserved chunk is still big enough. | 428 // Assert that the current reserved chunk is still big enough. |
415 const Heap::Reservation& reservation = reservations_[space_index]; | 429 const Heap::Reservation& reservation = reservations_[space_index]; |
416 int chunk_index = current_chunk_[space_index]; | 430 int chunk_index = current_chunk_[space_index]; |
417 CHECK_LE(high_water_[space_index], reservation[chunk_index].end); | 431 CHECK_LE(high_water_[space_index], reservation[chunk_index].end); |
(...skipping 391 matching lines...)
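
Outside map and large-object space, Allocate stays a plain bump pointer over the reserved chunk, with a debug check that the current chunk still covers the request; the new MAP_SPACE branch simply returns the next pre-reserved map address. A minimal sketch of the bump-pointer path, with invented names (not the V8 Heap::Reservation type):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

using Address = uintptr_t;
struct Chunk { Address start; Address end; };

class BumpAllocator {
 public:
  explicit BumpAllocator(std::vector<Chunk> chunks)
      : chunks_(std::move(chunks)), high_water_(chunks_[0].start) {}

  // Hands out |size| bytes from the current chunk, mirroring the
  // high_water_ bookkeeping in Deserializer::Allocate.
  Address Allocate(size_t size) {
    Address result = high_water_;
    high_water_ += size;
    // Debug-style check: the reserved chunk must still be big enough.
    assert(high_water_ <= chunks_[current_chunk_].end);
    return result;
  }

 private:
  std::vector<Chunk> chunks_;
  size_t current_chunk_ = 0;
  Address high_water_;
};

int main() {
  BumpAllocator alloc({{0x1000, 0x1100}});
  assert(alloc.Allocate(0x40) == 0x1000);
  assert(alloc.Allocate(0x40) == 0x1040);
  return 0;
}
```
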
809 | 823 |
810 default: | 824 default: |
811 CHECK(false); | 825 CHECK(false); |
812 } | 826 } |
813 } | 827 } |
814 CHECK_EQ(limit, current); | 828 CHECK_EQ(limit, current); |
815 return true; | 829 return true; |
816 } | 830 } |
817 } // namespace internal | 831 } // namespace internal |
818 } // namespace v8 | 832 } // namespace v8 |