Chromium Code Reviews

Side by Side Diff: src/snapshot/serializer.cc

Issue 1751863002: [serializer] split up src/snapshot/serialize.* (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: fix | Created 4 years, 9 months ago
1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/snapshot/serializer.h"
6
7 #include "src/macro-assembler.h"
8 #include "src/snapshot/natives.h"
9
10 namespace v8 {
11 namespace internal {
12
13 Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
14 : isolate_(isolate),
15 sink_(sink),
16 external_reference_encoder_(isolate),
17 root_index_map_(isolate),
18 recursion_depth_(0),
19 code_address_map_(NULL),
20 large_objects_total_size_(0),
21 seen_large_objects_index_(0) {
22 // The serializer is meant to be used only to generate initial heap images
23 // from a context in which there is only one isolate.
24 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
25 pending_chunk_[i] = 0;
26 max_chunk_size_[i] = static_cast<uint32_t>(
27 MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
28 }
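// Bookkeeping sketch (hypothetical sizes): pending_chunk_[i] is the byte
// offset reached in the currently open chunk for space i, and
// max_chunk_size_[i] caps a chunk at one page area. With, say, a 500 KB page
// area, allocations pack into the open chunk until one would cross 500 KB;
// see Allocate() below.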
29
30 #ifdef OBJECT_PRINT
31 if (FLAG_serialization_statistics) {
32 instance_type_count_ = NewArray<int>(kInstanceTypes);
33 instance_type_size_ = NewArray<size_t>(kInstanceTypes);
34 for (int i = 0; i < kInstanceTypes; i++) {
35 instance_type_count_[i] = 0;
36 instance_type_size_[i] = 0;
37 }
38 } else {
39 instance_type_count_ = NULL;
40 instance_type_size_ = NULL;
41 }
42 #endif // OBJECT_PRINT
43 }
44
45 Serializer::~Serializer() {
46 if (code_address_map_ != NULL) delete code_address_map_;
47 #ifdef OBJECT_PRINT
48 if (instance_type_count_ != NULL) {
49 DeleteArray(instance_type_count_);
50 DeleteArray(instance_type_size_);
51 }
52 #endif // OBJECT_PRINT
53 }
54
55 #ifdef OBJECT_PRINT
56 void Serializer::CountInstanceType(Map* map, int size) {
57 int instance_type = map->instance_type();
58 instance_type_count_[instance_type]++;
59 instance_type_size_[instance_type] += size;
60 }
61 #endif // OBJECT_PRINT
62
63 void Serializer::OutputStatistics(const char* name) {
64 if (!FLAG_serialization_statistics) return;
65 PrintF("%s:\n", name);
66 PrintF(" Spaces (bytes):\n");
67 for (int space = 0; space < kNumberOfSpaces; space++) {
68 PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
69 }
70 PrintF("\n");
71 for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
72 size_t s = pending_chunk_[space];
73 for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
74 PrintF("%16" V8_PTR_PREFIX "d", s);
75 }
76 PrintF("%16d\n", large_objects_total_size_);
77 #ifdef OBJECT_PRINT
78 PrintF(" Instance types (count and bytes):\n");
79 #define PRINT_INSTANCE_TYPE(Name) \
80 if (instance_type_count_[Name]) { \
81 PrintF("%10d %10" V8_PTR_PREFIX "d %s\n", instance_type_count_[Name], \
82 instance_type_size_[Name], #Name); \
83 }
84 INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
85 #undef PRINT_INSTANCE_TYPE
86 PrintF("\n");
87 #endif // OBJECT_PRINT
88 }
89
90 void Serializer::SerializeDeferredObjects() {
91 while (deferred_objects_.length() > 0) {
92 HeapObject* obj = deferred_objects_.RemoveLast();
93 ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
94 obj_serializer.SerializeDeferred();
95 }
96 sink_->Put(kSynchronize, "Finished with deferred objects");
97 }
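// Note on the loop above: deferred_objects_ drains LIFO via RemoveLast(), and
// serializing one deferred object may queue further ones, so growth during
// iteration is handled naturally. The trailing kSynchronize byte marks the
// end of the deferred section in the stream.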
98
99 bool Serializer::ShouldBeSkipped(Object** current) {
100 Object** roots = isolate()->heap()->roots_array_start();
101 return current == &roots[Heap::kStoreBufferTopRootIndex] ||
102 current == &roots[Heap::kStackLimitRootIndex] ||
103 current == &roots[Heap::kRealStackLimitRootIndex];
104 }
105
106 void Serializer::VisitPointers(Object** start, Object** end) {
107 for (Object** current = start; current < end; current++) {
108 if ((*current)->IsSmi()) {
109 sink_->Put(kOnePointerRawData, "Smi");
110 for (int i = 0; i < kPointerSize; i++) {
111 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
112 }
113 } else {
114 SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
115 }
116 }
117 }
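// Encoding sketch for the Smi case above: a Smi slot is emitted verbatim as
// kOnePointerRawData followed by kPointerSize raw bytes (8 on a hypothetical
// 64-bit build), since a Smi is an immediate and needs no relocation when
// deserialized.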
118
119 void Serializer::EncodeReservations(
120 List<SerializedData::Reservation>* out) const {
121 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
122 for (int j = 0; j < completed_chunks_[i].length(); j++) {
123 out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
124 }
125
126 if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
127 out->Add(SerializedData::Reservation(pending_chunk_[i]));
128 }
129 out->last().mark_as_last();
130 }
131
132 out->Add(SerializedData::Reservation(large_objects_total_size_));
133 out->last().mark_as_last();
134 }
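// Worked example (hypothetical sizes): a space with completed chunks of 8192
// and 4096 bytes and a pending chunk of 512 bytes emits the reservations
// 8192, 4096, 512, the last one marked. A space with no completed chunks
// still emits its (possibly zero) pending chunk, so the deserializer sees at
// least one reservation per space.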
135
136 #ifdef DEBUG
137 bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
138 DCHECK(reference.is_valid());
139 DCHECK(!reference.is_source());
140 DCHECK(!reference.is_global_proxy());
141 AllocationSpace space = reference.space();
142 int chunk_index = reference.chunk_index();
143 if (space == LO_SPACE) {
144 return chunk_index == 0 &&
145 reference.large_object_index() < seen_large_objects_index_;
146 } else if (chunk_index == completed_chunks_[space].length()) {
147 return reference.chunk_offset() < pending_chunk_[space];
148 } else {
149 return chunk_index < completed_chunks_[space].length() &&
150 reference.chunk_offset() < completed_chunks_[space][chunk_index];
151 }
152 }
153 #endif // DEBUG
154
155 bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
156 WhereToPoint where_to_point, int skip) {
157 if (how_to_code == kPlain && where_to_point == kStartOfObject) {
158 // Encode a reference to a hot object by its index in the working set.
159 int index = hot_objects_.Find(obj);
160 if (index != HotObjectsList::kNotFound) {
161 DCHECK(index >= 0 && index < kNumberOfHotObjects);
162 if (FLAG_trace_serializer) {
163 PrintF(" Encoding hot object %d:", index);
164 obj->ShortPrint();
165 PrintF("\n");
166 }
167 if (skip != 0) {
168 sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
169 sink_->PutInt(skip, "HotObjectSkipDistance");
170 } else {
171 sink_->Put(kHotObject + index, "HotObject");
172 }
173 return true;
174 }
175 }
176 BackReference back_reference = back_reference_map_.Lookup(obj);
177 if (back_reference.is_valid()) {
178 // Encode the location of an already deserialized object in order to write
179 // its location into a later object. We can encode the location as an
180 // offset from the start of the deserialized objects or as an offset
181 // backwards from the current allocation pointer.
182 if (back_reference.is_source()) {
183 FlushSkip(skip);
184 if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
185 DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
186 sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
187 sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
188 } else if (back_reference.is_global_proxy()) {
189 FlushSkip(skip);
190 if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
191 DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
192 sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
193 sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
194 } else {
195 if (FLAG_trace_serializer) {
196 PrintF(" Encoding back reference to: ");
197 obj->ShortPrint();
198 PrintF("\n");
199 }
200
201 PutAlignmentPrefix(obj);
202 AllocationSpace space = back_reference.space();
203 if (skip == 0) {
204 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
205 } else {
206 sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
207 "BackRefWithSkip");
208 sink_->PutInt(skip, "BackRefSkipDistance");
209 }
210 PutBackReference(obj, back_reference);
211 }
212 return true;
213 }
214 return false;
215 }
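// Lookup order above, sketched: a kPlain/kStartOfObject reference is first
// tried against the hot-object working set (one opcode byte, kHotObject +
// index, or kHotObjectWithSkip + index plus a skip distance), then against
// the back-reference map. Re-serializing the same object soon after its
// first occurrence would typically hit the hot-object path.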
216
217 void Serializer::PutRoot(int root_index, HeapObject* object,
218 SerializerDeserializer::HowToCode how_to_code,
219 SerializerDeserializer::WhereToPoint where_to_point,
220 int skip) {
221 if (FLAG_trace_serializer) {
222 PrintF(" Encoding root %d:", root_index);
223 object->ShortPrint();
224 PrintF("\n");
225 }
226
227 if (how_to_code == kPlain && where_to_point == kStartOfObject &&
228 root_index < kNumberOfRootArrayConstants &&
229 !isolate()->heap()->InNewSpace(object)) {
230 if (skip == 0) {
231 sink_->Put(kRootArrayConstants + root_index, "RootConstant");
232 } else {
233 sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
234 sink_->PutInt(skip, "SkipInPutRoot");
235 }
236 } else {
237 FlushSkip(skip);
238 sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
239 sink_->PutInt(root_index, "root_index");
240 }
241 }
242
243 void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
244 DCHECK(BackReferenceIsAlreadyAllocated(reference));
245 sink_->PutInt(reference.reference(), "BackRefValue");
246 hot_objects_.Add(object);
247 }
248
249 int Serializer::PutAlignmentPrefix(HeapObject* object) {
250 AllocationAlignment alignment = object->RequiredAlignment();
251 if (alignment != kWordAligned) {
252 DCHECK(1 <= alignment && alignment <= 3);
253 byte prefix = (kAlignmentPrefix - 1) + alignment;
254 sink_->Put(prefix, "Alignment");
255 return Heap::GetMaximumFillToAlign(alignment);
256 }
257 return 0;
258 }
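// Example: for an object whose RequiredAlignment() is, say, kDoubleAligned
// (enum value 1, within the DCHECKed range above), the prefix byte is
// (kAlignmentPrefix - 1) + 1 == kAlignmentPrefix, and the return value is the
// worst-case filler the deserializer may have to insert before the object.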
259
260 BackReference Serializer::AllocateLargeObject(int size) {
261 // Large objects are allocated one-by-one when deserializing. We do not
262 // have to keep track of multiple chunks.
263 large_objects_total_size_ += size;
264 return BackReference::LargeObjectReference(seen_large_objects_index_++);
265 }
266
267 BackReference Serializer::Allocate(AllocationSpace space, int size) {
268 DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
269 DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
270 uint32_t new_chunk_size = pending_chunk_[space] + size;
271 if (new_chunk_size > max_chunk_size(space)) {
272 // The new chunk size would not fit onto a single page. Complete the
273 // current chunk and start a new one.
274 sink_->Put(kNextChunk, "NextChunk");
275 sink_->Put(space, "NextChunkSpace");
276 completed_chunks_[space].Add(pending_chunk_[space]);
277 DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
278 pending_chunk_[space] = 0;
279 new_chunk_size = size;
280 }
281 uint32_t offset = pending_chunk_[space];
282 pending_chunk_[space] = new_chunk_size;
283 return BackReference::Reference(space, completed_chunks_[space].length(),
284 offset);
285 }
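// Worked example (hypothetical sizes): with max_chunk_size(space) == 8192,
// pending_chunk_[space] == 7000 and size == 2000, the open chunk is completed
// (kNextChunk plus the space byte are emitted, 7000 is recorded), and the
// returned back reference points at offset 0 of a fresh chunk whose index is
// the new completed_chunks_[space].length().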
286
287 void Serializer::Pad() {
288 // The non-branching GetInt will read up to 3 bytes too far, so we need
289 // to pad the snapshot to make sure we don't read over the end.
290 for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
291 sink_->Put(kNop, "Padding");
292 }
293 // Pad up to pointer size for checksum.
294 while (!IsAligned(sink_->Position(), kPointerAlignment)) {
295 sink_->Put(kNop, "Padding");
296 }
297 }
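// Example: if the last data byte ends at stream position 13, three kNop bytes
// bring the position to 16, which on a hypothetical 8-byte-pointer build is
// already pointer-aligned, so the second loop emits nothing further.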
298
299 void Serializer::InitializeCodeAddressMap() {
300 isolate_->InitializeLoggingAndCounters();
301 code_address_map_ = new CodeAddressMap(isolate_);
302 }
303
304 Code* Serializer::CopyCode(Code* code) {
305 code_buffer_.Rewind(0); // Clear buffer without deleting backing store.
306 int size = code->CodeSize();
307 code_buffer_.AddAll(Vector<byte>(code->address(), size));
308 return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
309 }
310
311 void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
312 int size, Map* map) {
313 if (serializer_->code_address_map_) {
314 const char* code_name =
315 serializer_->code_address_map_->Lookup(object_->address());
316 LOG(serializer_->isolate_,
317 CodeNameEvent(object_->address(), sink_->Position(), code_name));
318 LOG(serializer_->isolate_,
319 SnapshotPositionEvent(object_->address(), sink_->Position()));
320 }
321
322 BackReference back_reference;
323 if (space == LO_SPACE) {
324 sink_->Put(kNewObject + reference_representation_ + space,
325 "NewLargeObject");
326 sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
327 if (object_->IsCode()) {
328 sink_->Put(EXECUTABLE, "executable large object");
329 } else {
330 sink_->Put(NOT_EXECUTABLE, "not executable large object");
331 }
332 back_reference = serializer_->AllocateLargeObject(size);
333 } else {
334 int fill = serializer_->PutAlignmentPrefix(object_);
335 back_reference = serializer_->Allocate(space, size + fill);
336 sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
337 sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
338 }
339
340 #ifdef OBJECT_PRINT
341 if (FLAG_serialization_statistics) {
342 serializer_->CountInstanceType(map, size);
343 }
344 #endif // OBJECT_PRINT
345
346 // Mark this object as already serialized.
347 serializer_->back_reference_map()->Add(object_, back_reference);
348
349 // Serialize the map (first word of the object).
350 serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
351 }
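// Stream shape produced above, roughly: [kNewObject + representation + space]
// [size in words] [executable flag, LO_SPACE only], followed by the
// serialized map pointer. The back reference is registered *before* the map
// is serialized, so an object reachable from its own map still resolves.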
352
353 void Serializer::ObjectSerializer::SerializeExternalString() {
354 // Instead of serializing this as an external string, we serialize
355 // an imaginary sequential string with the same content.
356 Isolate* isolate = serializer_->isolate();
357 DCHECK(object_->IsExternalString());
358 DCHECK(object_->map() != isolate->heap()->native_source_string_map());
359 ExternalString* string = ExternalString::cast(object_);
360 int length = string->length();
361 Map* map;
362 int content_size;
363 int allocation_size;
364 const byte* resource;
365 // Find the map and size for the imaginary sequential string.
366 bool internalized = object_->IsInternalizedString();
367 if (object_->IsExternalOneByteString()) {
368 map = internalized ? isolate->heap()->one_byte_internalized_string_map()
369 : isolate->heap()->one_byte_string_map();
370 allocation_size = SeqOneByteString::SizeFor(length);
371 content_size = length * kCharSize;
372 resource = reinterpret_cast<const byte*>(
373 ExternalOneByteString::cast(string)->resource()->data());
374 } else {
375 map = internalized ? isolate->heap()->internalized_string_map()
376 : isolate->heap()->string_map();
377 allocation_size = SeqTwoByteString::SizeFor(length);
378 content_size = length * kShortSize;
379 resource = reinterpret_cast<const byte*>(
380 ExternalTwoByteString::cast(string)->resource()->data());
381 }
382
383 AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
384 ? LO_SPACE
385 : OLD_SPACE;
386 SerializePrologue(space, allocation_size, map);
387
388 // Output the rest of the imaginary string.
389 int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
390
391 // Output raw data header. Do not bother with common raw length cases here.
392 sink_->Put(kVariableRawData, "RawDataForString");
393 sink_->PutInt(bytes_to_output, "length");
394
395 // Serialize string header (except for map).
396 Address string_start = string->address();
397 for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
398 sink_->PutSection(string_start[i], "StringHeader");
399 }
400
401 // Serialize string content.
402 sink_->PutRaw(resource, content_size, "StringContent");
403
404 // Since the allocation size is rounded up to object alignment, there
405 // may be left-over bytes that need to be padded.
406 int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
407 DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
408 for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
409
410 sink_->Put(kSkip, "SkipAfterString");
411 sink_->PutInt(bytes_to_output, "SkipDistance");
412 }
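// Size arithmetic above, with hypothetical numbers: a one-byte external
// string of length 10 gets allocation_size = SeqOneByteString::SizeFor(10)
// (header plus 10 content bytes, rounded up to object alignment),
// content_size = 10, and padding_size covers the rounding difference, so the
// emitted bytes add up to exactly allocation_size - HeapObject::kHeaderSize.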
413
414 // Clear and later restore the next link in the weak cell or allocation site.
415 // TODO(all): replace this with proper iteration of weak slots in serializer.
416 class UnlinkWeakNextScope {
417 public:
418 explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
419 if (object->IsWeakCell()) {
420 object_ = object;
421 next_ = WeakCell::cast(object)->next();
422 WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
423 } else if (object->IsAllocationSite()) {
424 object_ = object;
425 next_ = AllocationSite::cast(object)->weak_next();
426 AllocationSite::cast(object)->set_weak_next(
427 object->GetHeap()->undefined_value());
428 }
429 }
430
431 ~UnlinkWeakNextScope() {
432 if (object_ != nullptr) {
433 if (object_->IsWeakCell()) {
434 WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
435 } else {
436 AllocationSite::cast(object_)->set_weak_next(next_,
437 UPDATE_WEAK_WRITE_BARRIER);
438 }
439 }
440 }
441
442 private:
443 HeapObject* object_;
444 Object* next_;
445 DisallowHeapAllocation no_gc_;
446 };
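// Usage: instantiate on the stack around body iteration, as Serialize() and
// SerializeDeferred() do below. The constructor unlinks the weak next field
// and the destructor restores it with UPDATE_WEAK_WRITE_BARRIER, so the
// object is serialized without its transient weak-list linkage.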
447
448 void Serializer::ObjectSerializer::Serialize() {
449 if (FLAG_trace_serializer) {
450 PrintF(" Encoding heap object: ");
451 object_->ShortPrint();
452 PrintF("\n");
453 }
454
455 // We cannot serialize typed array objects correctly.
456 DCHECK(!object_->IsJSTypedArray());
457
458 // We don't expect fillers.
459 DCHECK(!object_->IsFiller());
460
461 if (object_->IsScript()) {
462 // Clear cached line ends.
463 Object* undefined = serializer_->isolate()->heap()->undefined_value();
464 Script::cast(object_)->set_line_ends(undefined);
465 }
466
467 if (object_->IsExternalString()) {
468 Heap* heap = serializer_->isolate()->heap();
469 if (object_->map() != heap->native_source_string_map()) {
470 // Usually we cannot recreate resources for external strings. To work
471 // around this, external strings are serialized to look like ordinary
472 // sequential strings.
473 // The exceptions are native source code strings, since we can recreate
474 // their resources. In that case we fall through and leave it to
475 // VisitExternalOneByteString further down.
476 SerializeExternalString();
477 return;
478 }
479 }
480
481 int size = object_->Size();
482 Map* map = object_->map();
483 AllocationSpace space =
484 MemoryChunk::FromAddress(object_->address())->owner()->identity();
485 SerializePrologue(space, size, map);
486
487 // Serialize the rest of the object.
488 CHECK_EQ(0, bytes_processed_so_far_);
489 bytes_processed_so_far_ = kPointerSize;
490
491 RecursionScope recursion(serializer_);
492 // Objects that are immediately post-processed during deserialization
493 // cannot be deferred, since post-processing requires the object content.
494 if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
495 serializer_->QueueDeferredObject(object_);
496 sink_->Put(kDeferred, "Deferring object content");
497 return;
498 }
499
500 UnlinkWeakNextScope unlink_weak_next(object_);
501
502 object_->IterateBody(map->instance_type(), size, this);
503 OutputRawData(object_->address() + size);
504 }
505
506 void Serializer::ObjectSerializer::SerializeDeferred() {
507 if (FLAG_trace_serializer) {
508 PrintF(" Encoding deferred heap object: ");
509 object_->ShortPrint();
510 PrintF("\n");
511 }
512
513 int size = object_->Size();
514 Map* map = object_->map();
515 BackReference reference = serializer_->back_reference_map()->Lookup(object_);
516
517 // Serialize the rest of the object.
518 CHECK_EQ(0, bytes_processed_so_far_);
519 bytes_processed_so_far_ = kPointerSize;
520
521 serializer_->PutAlignmentPrefix(object_);
522 sink_->Put(kNewObject + reference.space(), "deferred object");
523 serializer_->PutBackReference(object_, reference);
524 sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
525
526 UnlinkWeakNextScope unlink_weak_next(object_);
527
528 object_->IterateBody(map->instance_type(), size, this);
529 OutputRawData(object_->address() + size);
530 }
531
532 void Serializer::ObjectSerializer::VisitPointers(Object** start, Object** end) {
533 Object** current = start;
534 while (current < end) {
535 while (current < end && (*current)->IsSmi()) current++;
536 if (current < end) OutputRawData(reinterpret_cast<Address>(current));
537
538 while (current < end && !(*current)->IsSmi()) {
539 HeapObject* current_contents = HeapObject::cast(*current);
540 int root_index = serializer_->root_index_map()->Lookup(current_contents);
541 // Repeats are not subject to the write barrier so we can only use
542 // immortal immovable root members. They are never in new space.
543 if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
544 Heap::RootIsImmortalImmovable(root_index) &&
545 current_contents == current[-1]) {
546 DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
547 int repeat_count = 1;
548 while (&current[repeat_count] < end - 1 &&
549 current[repeat_count] == current_contents) {
550 repeat_count++;
551 }
552 current += repeat_count;
553 bytes_processed_so_far_ += repeat_count * kPointerSize;
554 if (repeat_count > kNumberOfFixedRepeat) {
555 sink_->Put(kVariableRepeat, "VariableRepeat");
556 sink_->PutInt(repeat_count, "repeat count");
557 } else {
558 sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
559 }
560 } else {
561 serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
562 0);
563 bytes_processed_so_far_ += kPointerSize;
564 current++;
565 }
566 }
567 }
568 }
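// Repeat encoding above, sketched: a run of identical immortal immovable root
// pointers (e.g. a stretch of slots all holding the undefined value) is
// emitted as one serialized slot followed by kFixedRepeatStart + count for
// short runs, or kVariableRepeat plus an explicit count for longer ones.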
569
570 void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
571 int skip = OutputRawData(rinfo->target_address_address(),
572 kCanReturnSkipInsteadOfSkipping);
573 HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
574 Object* object = rinfo->target_object();
575 serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
576 kStartOfObject, skip);
577 bytes_processed_so_far_ += rinfo->target_address_size();
578 }
579
580 void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
581 int skip = OutputRawData(reinterpret_cast<Address>(p),
582 kCanReturnSkipInsteadOfSkipping);
583 sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
584 sink_->PutInt(skip, "SkipB4ExternalRef");
585 Address target = *p;
586 sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
587 bytes_processed_so_far_ += kPointerSize;
588 }
589
590 void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
591 int skip = OutputRawData(rinfo->target_address_address(),
592 kCanReturnSkipInsteadOfSkipping);
593 HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
594 sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
595 sink_->PutInt(skip, "SkipB4ExternalRef");
596 Address target = rinfo->target_external_reference();
597 sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
598 bytes_processed_so_far_ += rinfo->target_address_size();
599 }
600
601 void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
602 // We can only refer to internal references of code that has been output.
603 DCHECK(object_->IsCode() && code_has_been_output_);
604 // We do not use skip from last patched pc to find the pc to patch, since
605 // target_address_address may not return addresses in ascending order when
606 // used for internal references. External references may be stored at the
607 // end of the code in the constant pool, whereas internal references are
608 // inline. That would cause the skip to be negative. Instead, we store the
609 // offset from code entry.
610 Address entry = Code::cast(object_)->entry();
611 intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
612 intptr_t target_offset = rinfo->target_internal_reference() - entry;
613 DCHECK(0 <= pc_offset &&
614 pc_offset <= Code::cast(object_)->instruction_size());
615 DCHECK(0 <= target_offset &&
616 target_offset <= Code::cast(object_)->instruction_size());
617 sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
618 ? kInternalReference
619 : kInternalReferenceEncoded,
620 "InternalRef");
621 sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
622 sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
623 }
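// Worked example (hypothetical offsets): for a code object whose entry is E,
// an internal reference located at E + 0x40 and pointing at E + 0x90 is
// encoded as the opcode byte followed by the ints 0x40 (pc offset) and 0x90
// (target offset), both relative to the code entry rather than the last
// patched pc.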
624
625 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
626 int skip = OutputRawData(rinfo->target_address_address(),
627 kCanReturnSkipInsteadOfSkipping);
628 HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
629 sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
630 sink_->PutInt(skip, "SkipB4ExternalRef");
631 Address target = rinfo->target_address();
632 sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
633 bytes_processed_so_far_ += rinfo->target_address_size();
634 }
635
636 void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
637 int skip = OutputRawData(rinfo->target_address_address(),
638 kCanReturnSkipInsteadOfSkipping);
639 Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
640 serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
641 bytes_processed_so_far_ += rinfo->target_address_size();
642 }
643
644 void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
645 int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
646 Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
647 serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
648 bytes_processed_so_far_ += kPointerSize;
649 }
650
651 void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
652 int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
653 Cell* object = Cell::cast(rinfo->target_cell());
654 serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
655 bytes_processed_so_far_ += kPointerSize;
656 }
657
658 bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
659 int builtin_count,
660 v8::String::ExternalOneByteStringResource** resource_pointer,
661 FixedArray* source_cache, int resource_index) {
662 for (int i = 0; i < builtin_count; i++) {
663 Object* source = source_cache->get(i);
664 if (!source->IsUndefined()) {
665 ExternalOneByteString* string = ExternalOneByteString::cast(source);
666 typedef v8::String::ExternalOneByteStringResource Resource;
667 const Resource* resource = string->resource();
668 if (resource == *resource_pointer) {
669 sink_->Put(resource_index, "NativesStringResource");
670 sink_->PutSection(i, "NativesStringResourceEnd");
671 bytes_processed_so_far_ += sizeof(resource);
672 return true;
673 }
674 }
675 }
676 return false;
677 }
678
679 void Serializer::ObjectSerializer::VisitExternalOneByteString(
680 v8::String::ExternalOneByteStringResource** resource_pointer) {
681 Address references_start = reinterpret_cast<Address>(resource_pointer);
682 OutputRawData(references_start);
683 if (SerializeExternalNativeSourceString(
684 Natives::GetBuiltinsCount(), resource_pointer,
685 Natives::GetSourceCache(serializer_->isolate()->heap()),
686 kNativesStringResource)) {
687 return;
688 }
689 if (SerializeExternalNativeSourceString(
690 ExtraNatives::GetBuiltinsCount(), resource_pointer,
691 ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
692 kExtraNativesStringResource)) {
693 return;
694 }
695 // One of the strings in the natives cache should match the resource. We
696 // don't expect any other kinds of external strings here.
697 UNREACHABLE();
698 }
699
700 Address Serializer::ObjectSerializer::PrepareCode() {
701 // To make snapshots reproducible, we make a copy of the code object
702 // and wipe all pointers in the copy, which we then serialize.
703 Code* original = Code::cast(object_);
704 Code* code = serializer_->CopyCode(original);
705 // Code age headers are not serializable.
706 code->MakeYoung(serializer_->isolate());
707 int mode_mask = RelocInfo::kCodeTargetMask |
708 RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
709 RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
710 RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
711 RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
712 RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
713 for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
714 RelocInfo* rinfo = it.rinfo();
715 rinfo->WipeOut();
716 }
717 // We need to wipe out the header fields *after* wiping out the
718 // relocations, because wiping the relocations still reads some of them.
719 code->WipeOutHeader();
720 return code->address();
721 }
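// Determinism note: the modes wiped above are the ones that hold absolute
// addresses, so two builds of the same sources should produce byte-identical
// code copies here; the live code object itself is left untouched.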
722
723 int Serializer::ObjectSerializer::OutputRawData(
724 Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
725 Address object_start = object_->address();
726 int base = bytes_processed_so_far_;
727 int up_to_offset = static_cast<int>(up_to - object_start);
728 int to_skip = up_to_offset - bytes_processed_so_far_;
729 int bytes_to_output = to_skip;
730 bytes_processed_so_far_ += to_skip;
731 // This assert will fail if the reloc info gives us the target_address_address
732 // locations in non-ascending order. Luckily that doesn't happen.
733 DCHECK(to_skip >= 0);
734 bool outputting_code = false;
735 bool is_code_object = object_->IsCode();
736 if (to_skip != 0 && is_code_object && !code_has_been_output_) {
737 // Output the code all at once and fix later.
738 bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
739 outputting_code = true;
740 code_has_been_output_ = true;
741 }
742 if (bytes_to_output != 0 && (!is_code_object || outputting_code)) {
743 if (!outputting_code && bytes_to_output == to_skip &&
744 IsAligned(bytes_to_output, kPointerAlignment) &&
745 bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
746 int size_in_words = bytes_to_output >> kPointerSizeLog2;
747 sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
748 to_skip = 0; // This instruction includes skip.
749 } else {
750 // We always end up here if we are outputting the code of a code object.
751 sink_->Put(kVariableRawData, "VariableRawData");
752 sink_->PutInt(bytes_to_output, "length");
753 }
754
755 if (is_code_object) object_start = PrepareCode();
756
757 const char* description = is_code_object ? "Code" : "Byte";
758 sink_->PutRaw(object_start + base, bytes_to_output, description);
759 }
760 if (to_skip != 0 && return_skip == kIgnoringReturn) {
761 sink_->Put(kSkip, "Skip");
762 sink_->PutInt(to_skip, "SkipDistance");
763 to_skip = 0;
764 }
765 return to_skip;
766 }
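// Opcode choice above, sketched: small, pointer-aligned spans that carry no
// separate skip are emitted as a single kFixedRawDataStart + size_in_words
// section; everything else, including whole code objects, uses
// kVariableRawData with an explicit length. With
// kCanReturnSkipInsteadOfSkipping, any remaining skip distance is returned to
// be folded into the caller's next opcode instead of emitting kSkip here.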
767
768 } // namespace internal
769 } // namespace v8