// Copyright 2016 the V8 project authors. All rights reserved.

Michael Achenbach (2016/03/01 20:02:03): This has no proper license header
and blocks the c[…]

#include "src/snapshot/deserializer.h"

#include "src/bootstrapper.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/snapshot/natives.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

void Deserializer::DecodeReservation(
    Vector<const SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
  STATIC_ASSERT(NEW_SPACE == 0);
  int current_space = NEW_SPACE;
  for (auto& r : res) {
    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}
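
// Illustration (a sketch with standard C++ types, not part of this file): the
// reservation stream is a flat list of chunk sizes in which is_last() closes
// one space and moves decoding on to the next:
//
//   struct Reservation { uint32_t chunk_size; bool is_last; };
//   std::vector<std::vector<uint32_t>> chunks(kNumberOfSpaces);
//   int space = 0;  // NEW_SPACE == 0, as the STATIC_ASSERT above relies on.
//   for (const Reservation& r : stream) {
//     chunks[space].push_back(r.chunk_size);
//     if (r.is_last) space++;  // last chunk of this space; advance
//   }
//   // Afterwards space == kNumberOfSpaces, which the DCHECK verifies.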

void Deserializer::FlushICacheForNewIsolate() {
  DCHECK(!deserializing_user_code_);
  // The entire isolate is newly deserialized. Simply flush all code pages.
  PageIterator it(isolate_->heap()->code_space());
  while (it.has_next()) {
    Page* p = it.next();
    Assembler::FlushICache(isolate_, p->area_start(),
                           p->area_end() - p->area_start());
  }
}

void Deserializer::FlushICacheForNewCodeObjects() {
  DCHECK(deserializing_user_code_);
  for (Code* code : new_code_objects_) {
    Assembler::FlushICache(isolate_, code->instruction_start(),
                           code->instruction_size());
  }
}

bool Deserializer::ReserveSpace() {
#ifdef DEBUG
  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
    CHECK(reservations_[i].length() > 0);
  }
#endif  // DEBUG
  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

void Deserializer::Initialize(Isolate* isolate) {
  DCHECK_NULL(isolate_);
  DCHECK_NOT_NULL(isolate);
  isolate_ = isolate;
  DCHECK_NULL(external_reference_table_);
  external_reference_table_ = ExternalReferenceTable::instance(isolate);
  CHECK_EQ(magic_number_,
           SerializedData::ComputeMagicNumber(external_reference_table_));
}

void Deserializer::Deserialize(Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
  // No active threads.
  DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());

  {
    DisallowHeapAllocation no_gc;
    isolate_->heap()->IterateSmiRoots(this);
    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
    isolate_->heap()->RepairFreeListsAfterDeserialization();
    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
    DeserializeDeferredObjects();
    FlushICacheForNewIsolate();
  }

  isolate_->heap()->set_native_contexts_list(
      isolate_->heap()->undefined_value());
  // The allocation site list is built during root iteration, but if no sites
  // were encountered then it needs to be initialized to undefined.
  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
    isolate_->heap()->set_allocation_sites_list(
        isolate_->heap()->undefined_value());
  }

  // Update data pointers to the external strings containing natives sources.
  Natives::UpdateSourceCache(isolate_->heap());
  ExtraNatives::UpdateSourceCache(isolate_->heap());

  // Issue code events for newly deserialized code objects.
  LOG_CODE_EVENT(isolate_, LogCodeObjects());
  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
}

MaybeHandle<Object> Deserializer::DeserializePartial(
    Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    V8::FatalProcessOutOfMemory("deserialize context");
    return MaybeHandle<Object>();
  }

  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
  attached_objects[kGlobalProxyReference] = global_proxy;
  SetAttachedObjects(attached_objects);

  DisallowHeapAllocation no_gc;
  // Keep track of the code space start and end pointers in case new
  // code objects were deserialized.
  OldSpace* code_space = isolate_->heap()->code_space();
  Address start_address = code_space->top();
  Object* root;
  VisitPointer(&root);
  DeserializeDeferredObjects();

  // No code is deserialized here. If this assert fires, that has changed and
  // logging should be added to notify the profiler et al. of the new code,
  // which also has to be flushed from the instruction cache.
  CHECK_EQ(start_address, code_space->top());
  return Handle<Object>(root, isolate);
}

MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
    Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    return Handle<SharedFunctionInfo>();
  } else {
    deserializing_user_code_ = true;
    HandleScope scope(isolate);
    Handle<SharedFunctionInfo> result;
    {
      DisallowHeapAllocation no_gc;
      Object* root;
      VisitPointer(&root);
      DeserializeDeferredObjects();
      FlushICacheForNewCodeObjects();
      result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
    }
    CommitPostProcessedObjects(isolate);
    return scope.CloseAndEscape(result);
  }
}

Deserializer::~Deserializer() {
  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
  // DCHECK(source_.AtEOF());
  attached_objects_.Dispose();
}

// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space. Any other space would cause ReadChunk to try
  // to update the remembered set using NULL as the address.
  ReadData(start, end, NEW_SPACE, NULL);
}

void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
  static const byte expected = kSynchronize;
  CHECK_EQ(expected, source_.Get());
}

void Deserializer::DeserializeDeferredObjects() {
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    switch (code) {
      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(code);
        break;
      default: {
        int space = code & kSpaceMask;
        DCHECK(space <= kNumberOfSpaces);
        DCHECK(code - space == kNewObject);
        HeapObject* object = GetBackReferencedObject(space);
        int size = source_.GetInt() << kPointerSizeLog2;
        Address obj_address = object->address();
        Object** start =
            reinterpret_cast<Object**>(obj_address + kPointerSize);
        Object** end = reinterpret_cast<Object**>(obj_address + size);
        bool filled = ReadData(start, end, space, obj_address);
        CHECK(filled);
        DCHECK(CanBeDeferred(object));
        PostProcessNewObject(object, space);
      }
    }
  }
}

// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey : public HashTableKey {
 public:
  explicit StringTableInsertionKey(String* string)
      : string_(string), hash_(HashForObject(string)) {
    DCHECK(string->IsInternalizedString());
  }

  bool IsMatch(Object* string) override {
    // We know that all entries in a hash table had their hash keys created.
    // Use that knowledge to have fast failure.
    if (hash_ != HashForObject(string)) return false;
    // We want to compare the content of two internalized strings here.
    return string_->SlowEquals(String::cast(string));
  }

  uint32_t Hash() override { return hash_; }

  uint32_t HashForObject(Object* key) override {
    return String::cast(key)->Hash();
  }

  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
    return handle(string_, isolate);
  }

 private:
  String* string_;
  uint32_t hash_;
  DisallowHeapAllocation no_gc;
};
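
// Note: the key above is the usual two-step hash-table probe. The cheap
// 32-bit hash comparison rejects almost all non-matches before the expensive
// character-wise SlowEquals runs. A minimal sketch of the same fast-reject
// pattern, with hypothetical names:
//
//   bool Matches(const Entry& entry, uint32_t hash, const std::string& s) {
//     if (entry.hash != hash) return false;  // cheap reject, no content scan
//     return entry.value == s;               // full compare only on hash hit
//   }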

HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
  if (deserializing_user_code()) {
    if (obj->IsString()) {
      String* string = String::cast(obj);
      // Uninitialize hash field as the hash seed may have changed.
      string->set_hash_field(String::kEmptyHashField);
      if (string->IsInternalizedString()) {
        // Canonicalize the internalized string. If it already exists in the
        // string table, set it to forward to the existing one.
        StringTableInsertionKey key(string);
        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
        if (canonical == NULL) {
          new_internalized_strings_.Add(handle(string));
          return string;
        } else {
          string->SetForwardedInternalizedString(canonical);
          return canonical;
        }
      }
    } else if (obj->IsScript()) {
      new_scripts_.Add(handle(Script::cast(obj)));
    } else {
      DCHECK(CanBeDeferred(obj));
    }
  }
  if (obj->IsAllocationSite()) {
    // Allocation sites are present in the snapshot, and must be linked into
    // a list at deserialization time.
    AllocationSite* site = AllocationSite::cast(obj);
    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
    // as a (weak) root. If this root is relocated correctly, this becomes
    // unnecessary.
    if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
      site->set_weak_next(isolate_->heap()->undefined_value());
    } else {
      site->set_weak_next(isolate_->heap()->allocation_sites_list());
    }
    isolate_->heap()->set_allocation_sites_list(site);
  } else if (obj->IsCode()) {
    // We flush all code pages after deserializing the startup snapshot. In
    // that case, we only need to remember code objects in the large object
    // space. When deserializing user code, remember each individual code
    // object.
    if (deserializing_user_code() || space == LO_SPACE) {
      new_code_objects_.Add(Code::cast(obj));
    }
  }
  // Check alignment.
  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
  return obj;
}

void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
  StringTable::EnsureCapacityForDeserialization(
      isolate, new_internalized_strings_.length());
  for (Handle<String> string : new_internalized_strings_) {
    StringTableInsertionKey key(*string);
    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
    StringTable::LookupKey(isolate, &key);
  }

  Heap* heap = isolate->heap();
  Factory* factory = isolate->factory();
  for (Handle<Script> script : new_scripts_) {
    // Assign a new script id to avoid collision.
    script->set_id(isolate_->heap()->NextScriptId());
    // Add script to list.
    Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
    heap->SetRootScriptList(*list);
  }
}

HeapObject* Deserializer::GetBackReferencedObject(int space) {
  HeapObject* obj;
  BackReference back_reference(source_.GetInt());
  if (space == LO_SPACE) {
    CHECK(back_reference.chunk_index() == 0);
    uint32_t index = back_reference.large_object_index();
    obj = deserialized_large_objects_[index];
  } else {
    DCHECK(space < kNumberOfPreallocatedSpaces);
    uint32_t chunk_index = back_reference.chunk_index();
    DCHECK_LE(chunk_index, current_chunk_[space]);
    uint32_t chunk_offset = back_reference.chunk_offset();
    Address address = reservations_[space][chunk_index].start + chunk_offset;
    if (next_alignment_ != kWordAligned) {
      int padding = Heap::GetFillToAlign(address, next_alignment_);
      next_alignment_ = kWordAligned;
      DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
      address += padding;
    }
    obj = HeapObject::FromAddress(address);
  }
  if (deserializing_user_code() && obj->IsInternalizedString()) {
    obj = String::cast(obj)->GetForwardedInternalizedString();
  }
  hot_objects_.Add(obj);
  return obj;
}
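
// A back reference into a preallocated space is thus a (chunk_index,
// chunk_offset) pair resolved against the reservation table, while large
// objects are referenced by their index in deserialization order. Assuming
// the BackReference fields decode as used above, the common case is just:
//
//   Address Resolve(int space, uint32_t chunk, uint32_t offset) {
//     return reservations_[space][chunk].start + offset;
//   }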

// This routine writes the new object into the pointer provided. The reason
// for this strange interface is that otherwise the object is written very
// late, which means the FreeSpace map is not set up by the time we need to
// use it to mark the space at the end of a page free.
void Deserializer::ReadObject(int space_number, Object** write_back) {
  Address address;
  HeapObject* obj;
  int size = source_.GetInt() << kObjectAlignmentBits;

  if (next_alignment_ != kWordAligned) {
    int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = Allocate(space_number, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    Heap* heap = isolate_->heap();
    DCHECK(heap->free_space_map()->IsMap());
    DCHECK(heap->one_pointer_filler_map()->IsMap());
    DCHECK(heap->two_pointer_filler_map()->IsMap());
    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj->address();
    next_alignment_ = kWordAligned;
  } else {
    address = Allocate(space_number, size);
    obj = HeapObject::FromAddress(address);
  }

  isolate_->heap()->OnAllocationEvent(obj, size);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);
  if (FLAG_log_snapshot_positions) {
    LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
  }

  if (ReadData(current, limit, space_number, address)) {
    // Only post process if object content has not been deferred.
    obj = PostProcessNewObject(obj, space_number);
  }

  Object* write_back_obj = obj;
  UnalignedCopy(write_back, &write_back_obj);
#ifdef DEBUG
  if (obj->IsCode()) {
    DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
  } else {
    DCHECK(space_number != CODE_SPACE);
  }
#endif  // DEBUG
}
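
// Sizes in the byte stream are stored in units of the object alignment
// (words), hence the << kObjectAlignmentBits when decoding. For the
// over-aligned path above, GetFillToAlign amounts to the usual align-up
// arithmetic; a generic sketch, assuming a power-of-two alignment in bytes:
//
//   int FillToAlign(uintptr_t addr, uintptr_t align) {
//     return static_cast<int>((align - (addr & (align - 1))) & (align - 1));
//   }
//
// ReadObject reserves size plus the maximum possible fill, then lets
// AlignWithFiller put filler maps into the padding, which is why the filler
// maps must already be deserialized (the DCHECKs above).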

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address Deserializer::Allocate(int space_index, int size) {
  if (space_index == LO_SPACE) {
    AlwaysAllocateScope scope(isolate_);
    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
    Executability exec = static_cast<Executability>(source_.Get());
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
    deserialized_large_objects_.Add(obj);
    return obj->address();
  } else {
    DCHECK(space_index < kNumberOfPreallocatedSpaces);
    Address address = high_water_[space_index];
    DCHECK_NOT_NULL(address);
    high_water_[space_index] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space_index];
    int chunk_index = current_chunk_[space_index];
    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
#endif
    return address;
  }
}
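
// Everything outside the large object space is therefore a pure bump-pointer
// allocation into memory reserved up front: allocation cannot fail in the
// middle of deserialization, and a back reference is simply the position of
// the bump pointer when the object was written. A standalone sketch:
//
//   char* Bump(char** high_water, char* chunk_end, int size) {
//     char* result = *high_water;
//     *high_water += size;                // the only state update needed
//     assert(*high_water <= chunk_end);   // the reservation must cover it
//     return result;
//   }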

Object** Deserializer::CopyInNativesSource(Vector<const char> source_vector,
                                           Object** current) {
  DCHECK(!isolate_->heap()->deserialization_complete());
  NativesExternalStringResource* resource = new NativesExternalStringResource(
      source_vector.start(), source_vector.length());
  Object* resource_obj = reinterpret_cast<Object*>(resource);
  UnalignedCopy(current++, &resource_obj);
  return current;
}

bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
                            Address current_object_address) {
  Isolate* const isolate = isolate_;
  // Write barrier support costs around 1% in startup time. In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
  // but that may change.
  bool write_barrier_needed =
      (current_object_address != NULL && source_space != NEW_SPACE &&
       source_space != CODE_SPACE);
  while (current < limit) {
    byte data = source_.Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
  case where + how + within + space_number: \
    STATIC_ASSERT((where & ~kWhereMask) == 0); \
    STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \
    STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \
    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any) \
  { \
    bool emit_write_barrier = false; \
    bool current_was_incremented = false; \
    int space_number = space_number_if_any == kAnyOldSpace \
                           ? (data & kSpaceMask) \
                           : space_number_if_any; \
    if (where == kNewObject && how == kPlain && within == kStartOfObject) { \
      ReadObject(space_number, current); \
      emit_write_barrier = (space_number == NEW_SPACE); \
    } else { \
      Object* new_object = NULL; /* May not be a real Object pointer. */ \
      if (where == kNewObject) { \
        ReadObject(space_number, &new_object); \
      } else if (where == kBackref) { \
        emit_write_barrier = (space_number == NEW_SPACE); \
        new_object = GetBackReferencedObject(data & kSpaceMask); \
      } else if (where == kBackrefWithSkip) { \
        int skip = source_.GetInt(); \
        current = reinterpret_cast<Object**>( \
            reinterpret_cast<Address>(current) + skip); \
        emit_write_barrier = (space_number == NEW_SPACE); \
        new_object = GetBackReferencedObject(data & kSpaceMask); \
      } else if (where == kRootArray) { \
        int id = source_.GetInt(); \
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
        new_object = isolate->heap()->root(root_index); \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
      } else if (where == kPartialSnapshotCache) { \
        int cache_index = source_.GetInt(); \
        new_object = isolate->partial_snapshot_cache()->at(cache_index); \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
      } else if (where == kExternalReference) { \
        int skip = source_.GetInt(); \
        current = reinterpret_cast<Object**>( \
            reinterpret_cast<Address>(current) + skip); \
        int reference_id = source_.GetInt(); \
        Address address = external_reference_table_->address(reference_id); \
        new_object = reinterpret_cast<Object*>(address); \
      } else if (where == kAttachedReference) { \
        int index = source_.GetInt(); \
        DCHECK(deserializing_user_code() || index == kGlobalProxyReference); \
        new_object = *attached_objects_[index]; \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
      } else { \
        DCHECK(where == kBuiltin); \
        DCHECK(deserializing_user_code()); \
        int builtin_id = source_.GetInt(); \
        DCHECK_LE(0, builtin_id); \
        DCHECK_LT(builtin_id, Builtins::builtin_count); \
        Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \
        new_object = isolate->builtins()->builtin(name); \
        emit_write_barrier = false; \
      } \
      if (within == kInnerPointer) { \
        if (space_number != CODE_SPACE || new_object->IsCode()) { \
          Code* new_code_object = reinterpret_cast<Code*>(new_object); \
          new_object = \
              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
        } else { \
          DCHECK(space_number == CODE_SPACE); \
          Cell* cell = Cell::cast(new_object); \
          new_object = reinterpret_cast<Object*>(cell->ValueAddress()); \
        } \
      } \
      if (how == kFromCode) { \
        Address location_of_branch_data = reinterpret_cast<Address>(current); \
        Assembler::deserialization_set_special_target_at( \
            isolate, location_of_branch_data, \
            Code::cast(HeapObject::FromAddress(current_object_address)), \
            reinterpret_cast<Address>(new_object)); \
        location_of_branch_data += Assembler::kSpecialTargetSize; \
        current = reinterpret_cast<Object**>(location_of_branch_data); \
        current_was_incremented = true; \
      } else { \
        UnalignedCopy(current, &new_object); \
      } \
    } \
    if (emit_write_barrier && write_barrier_needed) { \
      Address current_address = reinterpret_cast<Address>(current); \
      SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address)); \
      isolate->heap()->RecordWrite( \
          HeapObject::FromAddress(current_object_address), \
          static_cast<int>(current_address - current_object_address), \
          *reinterpret_cast<Object**>(current_address)); \
    } \
    if (!current_was_incremented) { \
      current++; \
    } \
    break; \
  }
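
// Each serializer opcode is a single byte that packs four orthogonal fields,
// which is what the STATIC_ASSERTs in CASE_STATEMENT enforce: where the
// referenced object comes from (kWhereMask), how the pointer is written
// (kHowToCodeMask), whether it points at the start of the object or inside it
// (kWhereToPointMask), and the target space (kSpaceMask). Case labels are
// generated by summing the four components; decoding is roughly the
// corresponding mask-and-compare (a sketch; the exact bit layout lives with
// the opcode definitions in the serializer headers):
//
//   int space  = data & kSpaceMask;         // target space, as used above
//   int where  = data & kWhereMask;         // source of the reference
//   int how    = data & kHowToCodeMask;     // plain pointer vs. from code
//   int within = data & kWhereToPointMask;  // start vs. inner pointer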

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with fall-through cases
// and one body.
#define ALL_SPACES(where, how, within) \
  CASE_STATEMENT(where, how, within, NEW_SPACE) \
  CASE_BODY(where, how, within, NEW_SPACE) \
  CASE_STATEMENT(where, how, within, OLD_SPACE) \
  CASE_STATEMENT(where, how, within, CODE_SPACE) \
  CASE_STATEMENT(where, how, within, MAP_SPACE) \
  CASE_STATEMENT(where, how, within, LO_SPACE) \
  CASE_BODY(where, how, within, kAnyOldSpace)

#define FOUR_CASES(byte_code) \
  case byte_code: \
  case byte_code + 1: \
  case byte_code + 2: \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code) \
  FOUR_CASES(byte_code) \
  FOUR_CASES(byte_code + 4) \
  FOUR_CASES(byte_code + 8) \
  FOUR_CASES(byte_code + 12)

#define SINGLE_CASE(where, how, within, space) \
  CASE_STATEMENT(where, how, within, space) \
  CASE_BODY(where, how, within, space)

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions. It's an inner
      // pointer because it points at the entry point, not at the start of the
      // code object.
      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
    defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
      // Deserialize a new object from pointer found in code and write
      // a pointer to it to the current object. Required only for MIPS, PPC or
      // ARM with embedded constant pool, and omitted on the other
      // architectures because it is fully unrolled and would cause bloat.
      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to it to the current
      // object. Required only for MIPS, PPC or ARM with embedded constant
      // pool.
      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
      ALL_SPACES(kBackref, kPlain, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
    defined(V8_TARGET_ARCH_PPC) || V8_EMBEDDED_CONSTANT_POOL
      // Find an object in the roots array and write a pointer to it in code.
      SINGLE_CASE(kRootArray, kFromCode, kStartOfObject, 0)
#endif
      // Find an object in the partial snapshot cache and write a pointer to
      // it to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      // Find a code entry in the partial snapshot cache and write a pointer
      // to it to the current object.
      SINGLE_CASE(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
      // Find an external reference and write a pointer to it to the current
      // object.
      SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
      // Find an external reference and write a pointer to it in the current
      // code object.
      SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
      // Find an object in the attached references and write a pointer to it
      // to the current object.
      SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
      // Find a builtin and write a pointer to it to the current object.
      SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
      SINGLE_CASE(kBuiltin, kPlain, kInnerPointer, 0)
      SINGLE_CASE(kBuiltin, kFromCode, kInnerPointer, 0)

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ALL_SPACES

      case kSkip: {
        int size = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + size);
        break;
      }

      case kInternalReferenceEncoded:
      case kInternalReference: {
        // Internal reference address is not encoded via skip, but by offset
        // from code entry.
        int pc_offset = source_.GetInt();
        int target_offset = source_.GetInt();
        Code* code =
            Code::cast(HeapObject::FromAddress(current_object_address));
        DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
        DCHECK(0 <= target_offset &&
               target_offset <= code->instruction_size());
        Address pc = code->entry() + pc_offset;
        Address target = code->entry() + target_offset;
        Assembler::deserialization_set_target_internal_reference_at(
            isolate, pc, target, data == kInternalReference
                                     ? RelocInfo::INTERNAL_REFERENCE
                                     : RelocInfo::INTERNAL_REFERENCE_ENCODED);
        break;
      }

      case kNop:
        break;

      case kNextChunk: {
        int space = source_.Get();
        DCHECK(space < kNumberOfPreallocatedSpaces);
        int chunk_index = current_chunk_[space];
        const Heap::Reservation& reservation = reservations_[space];
        // Make sure the current chunk is indeed exhausted.
        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
        // Move to next reserved chunk.
        chunk_index = ++current_chunk_[space];
        CHECK_LT(chunk_index, reservation.length());
        high_water_[space] = reservation[chunk_index].start;
        break;
      }

      case kDeferred: {
        // Deferred can only occur right after the heap object header.
        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
                                                     kPointerSize));
        HeapObject* obj = HeapObject::FromAddress(current_object_address);
        // If the deferred object is a map, its instance type may be used
        // during deserialization. Initialize it with a temporary value.
        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
        current = limit;
        return false;
      }

      case kSynchronize:
        // If we get here, the number of GC roots when serializing and
        // deserializing does not match.
        CHECK(false);
        break;

      case kNativesStringResource:
        current = CopyInNativesSource(Natives::GetScriptSource(source_.Get()),
                                      current);
        break;

      case kExtraNativesStringResource:
        current = CopyInNativesSource(
            ExtraNatives::GetScriptSource(source_.Get()), current);
        break;

      // Deserialize raw data of variable length.
      case kVariableRawData: {
        int size_in_bytes = source_.GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_.CopyRaw(raw_data_out, size_in_bytes);
        break;
      }

      case kVariableRepeat: {
        int repeats = source_.GetInt();
        Object* object = current[-1];
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

      case kAlignmentPrefix:
      case kAlignmentPrefix + 1:
      case kAlignmentPrefix + 2:
        SetAlignment(data);
        break;

      STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
      STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
      SIXTEEN_CASES(kRootArrayConstantsWithSkip)
      SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + skip);
        // Fall through.
      }

      SIXTEEN_CASES(kRootArrayConstants)
      SIXTEEN_CASES(kRootArrayConstants + 16) {
        int id = data & kRootArrayConstantsMask;
        Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
        Object* object = isolate->heap()->root(root_index);
        DCHECK(!isolate->heap()->InNewSpace(object));
        UnalignedCopy(current++, &object);
        break;
      }

      STATIC_ASSERT(kNumberOfHotObjects == 8);
      FOUR_CASES(kHotObjectWithSkip)
      FOUR_CASES(kHotObjectWithSkip + 4) {
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<Address>(current) + skip);
        // Fall through.
      }

      FOUR_CASES(kHotObject)
      FOUR_CASES(kHotObject + 4) {
        int index = data & kHotObjectMask;
        Object* hot_object = hot_objects_.Get(index);
        UnalignedCopy(current, &hot_object);
        if (write_barrier_needed) {
          Address current_address = reinterpret_cast<Address>(current);
          SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
          isolate->heap()->RecordWrite(
              HeapObject::FromAddress(current_object_address),
              static_cast<int>(current_address - current_object_address),
              hot_object);
        }
        current++;
        break;
      }

      // Deserialize raw data of fixed length from 1 to 32 words.
      STATIC_ASSERT(kNumberOfFixedRawData == 32);
      SIXTEEN_CASES(kFixedRawData)
      SIXTEEN_CASES(kFixedRawData + 16) {
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
        source_.CopyRaw(raw_data_out, size_in_bytes);
        current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
        break;
      }

      STATIC_ASSERT(kNumberOfFixedRepeat == 16);
      SIXTEEN_CASES(kFixedRepeat) {
        int repeats = data - kFixedRepeatStart;
        Object* object;
        UnalignedCopy(&object, current - 1);
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

#undef SIXTEEN_CASES
#undef FOUR_CASES
#undef SINGLE_CASE

      default:
        CHECK(false);
    }
  }
  CHECK_EQ(limit, current);
  return true;
}
}  // namespace internal
}  // namespace v8