OLD | NEW |
1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/snapshot/serializer.h" | 5 #include "src/snapshot/serializer.h" |
6 | 6 |
7 #include "src/macro-assembler.h" | 7 #include "src/macro-assembler.h" |
8 #include "src/snapshot/natives.h" | 8 #include "src/snapshot/natives.h" |
9 | 9 |
10 namespace v8 { | 10 namespace v8 { |
11 namespace internal { | 11 namespace internal { |
12 | 12 |
13 Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink) | 13 Serializer::Serializer(Isolate* isolate) |
14 : isolate_(isolate), | 14 : isolate_(isolate), |
15 sink_(sink), | |
16 external_reference_encoder_(isolate), | 15 external_reference_encoder_(isolate), |
17 root_index_map_(isolate), | 16 root_index_map_(isolate), |
18 recursion_depth_(0), | 17 recursion_depth_(0), |
19 code_address_map_(NULL), | 18 code_address_map_(NULL), |
20 large_objects_total_size_(0), | 19 large_objects_total_size_(0), |
21 seen_large_objects_index_(0) { | 20 seen_large_objects_index_(0) { |
22 // The serializer is meant to be used only to generate initial heap images | 21 // The serializer is meant to be used only to generate initial heap images |
23 // from a context in which there is only one isolate. | 22 // from a context in which there is only one isolate. |
24 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) { | 23 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) { |
25 pending_chunk_[i] = 0; | 24 pending_chunk_[i] = 0; |
(...skipping 57 matching lines...) |
83 } | 82 } |
84 INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE) | 83 INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE) |
85 #undef PRINT_INSTANCE_TYPE | 84 #undef PRINT_INSTANCE_TYPE |
86 PrintF("\n"); | 85 PrintF("\n"); |
87 #endif // OBJECT_PRINT | 86 #endif // OBJECT_PRINT |
88 } | 87 } |
89 | 88 |
90 void Serializer::SerializeDeferredObjects() { | 89 void Serializer::SerializeDeferredObjects() { |
91 while (deferred_objects_.length() > 0) { | 90 while (deferred_objects_.length() > 0) { |
92 HeapObject* obj = deferred_objects_.RemoveLast(); | 91 HeapObject* obj = deferred_objects_.RemoveLast(); |
93 ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject); | 92 ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject); |
94 obj_serializer.SerializeDeferred(); | 93 obj_serializer.SerializeDeferred(); |
95 } | 94 } |
96 sink_->Put(kSynchronize, "Finished with deferred objects"); | 95 sink_.Put(kSynchronize, "Finished with deferred objects"); |
97 } | 96 } |
98 | 97 |
99 void Serializer::VisitPointers(Object** start, Object** end) { | 98 void Serializer::VisitPointers(Object** start, Object** end) { |
100 for (Object** current = start; current < end; current++) { | 99 for (Object** current = start; current < end; current++) { |
101 if ((*current)->IsSmi()) { | 100 if ((*current)->IsSmi()) { |
102 PutSmi(Smi::cast(*current)); | 101 PutSmi(Smi::cast(*current)); |
103 } else { | 102 } else { |
104 SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0); | 103 SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0); |
105 } | 104 } |
106 } | 105 } |
(...skipping 40 matching lines...) |
147 // Encode a reference to a hot object by its index in the working set. | 146 // Encode a reference to a hot object by its index in the working set. |
148 int index = hot_objects_.Find(obj); | 147 int index = hot_objects_.Find(obj); |
149 if (index != HotObjectsList::kNotFound) { | 148 if (index != HotObjectsList::kNotFound) { |
150 DCHECK(index >= 0 && index < kNumberOfHotObjects); | 149 DCHECK(index >= 0 && index < kNumberOfHotObjects); |
151 if (FLAG_trace_serializer) { | 150 if (FLAG_trace_serializer) { |
152 PrintF(" Encoding hot object %d:", index); | 151 PrintF(" Encoding hot object %d:", index); |
153 obj->ShortPrint(); | 152 obj->ShortPrint(); |
154 PrintF("\n"); | 153 PrintF("\n"); |
155 } | 154 } |
156 if (skip != 0) { | 155 if (skip != 0) { |
157 sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip"); | 156 sink_.Put(kHotObjectWithSkip + index, "HotObjectWithSkip"); |
158 sink_->PutInt(skip, "HotObjectSkipDistance"); | 157 sink_.PutInt(skip, "HotObjectSkipDistance"); |
159 } else { | 158 } else { |
160 sink_->Put(kHotObject + index, "HotObject"); | 159 sink_.Put(kHotObject + index, "HotObject"); |
161 } | 160 } |
162 return true; | 161 return true; |
163 } | 162 } |
164 } | 163 } |
165 SerializerReference reference = reference_map_.Lookup(obj); | 164 SerializerReference reference = reference_map_.Lookup(obj); |
166 if (reference.is_valid()) { | 165 if (reference.is_valid()) { |
167 // Encode the location of an already deserialized object in order to write | 166 // Encode the location of an already deserialized object in order to write |
168 // its location into a later object. We can encode the location as an | 167 // its location into a later object. We can encode the location as an |
169 // offset from the start of the deserialized objects or as an offset | 168 // offset from the start of the deserialized objects or as an offset |
170 // backwards from the current allocation pointer. | 169 // backwards from the current allocation pointer. |
171 if (reference.is_attached_reference()) { | 170 if (reference.is_attached_reference()) { |
172 FlushSkip(skip); | 171 FlushSkip(skip); |
173 if (FLAG_trace_serializer) { | 172 if (FLAG_trace_serializer) { |
174 PrintF(" Encoding attached reference %d\n", | 173 PrintF(" Encoding attached reference %d\n", |
175 reference.attached_reference_index()); | 174 reference.attached_reference_index()); |
176 } | 175 } |
177 PutAttachedReference(reference, how_to_code, where_to_point); | 176 PutAttachedReference(reference, how_to_code, where_to_point); |
178 } else { | 177 } else { |
179 DCHECK(reference.is_back_reference()); | 178 DCHECK(reference.is_back_reference()); |
180 if (FLAG_trace_serializer) { | 179 if (FLAG_trace_serializer) { |
181 PrintF(" Encoding back reference to: "); | 180 PrintF(" Encoding back reference to: "); |
182 obj->ShortPrint(); | 181 obj->ShortPrint(); |
183 PrintF("\n"); | 182 PrintF("\n"); |
184 } | 183 } |
185 | 184 |
186 PutAlignmentPrefix(obj); | 185 PutAlignmentPrefix(obj); |
187 AllocationSpace space = reference.space(); | 186 AllocationSpace space = reference.space(); |
188 if (skip == 0) { | 187 if (skip == 0) { |
189 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef"); | 188 sink_.Put(kBackref + how_to_code + where_to_point + space, "BackRef"); |
190 } else { | 189 } else { |
191 sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space, | 190 sink_.Put(kBackrefWithSkip + how_to_code + where_to_point + space, |
192 "BackRefWithSkip"); | 191 "BackRefWithSkip"); |
193 sink_->PutInt(skip, "BackRefSkipDistance"); | 192 sink_.PutInt(skip, "BackRefSkipDistance"); |
194 } | 193 } |
195 PutBackReference(obj, reference); | 194 PutBackReference(obj, reference); |
196 } | 195 } |
197 return true; | 196 return true; |
198 } | 197 } |
199 return false; | 198 return false; |
200 } | 199 } |
201 | 200 |
202 void Serializer::PutRoot(int root_index, HeapObject* object, | 201 void Serializer::PutRoot(int root_index, HeapObject* object, |
203 SerializerDeserializer::HowToCode how_to_code, | 202 SerializerDeserializer::HowToCode how_to_code, |
204 SerializerDeserializer::WhereToPoint where_to_point, | 203 SerializerDeserializer::WhereToPoint where_to_point, |
205 int skip) { | 204 int skip) { |
206 if (FLAG_trace_serializer) { | 205 if (FLAG_trace_serializer) { |
207 PrintF(" Encoding root %d:", root_index); | 206 PrintF(" Encoding root %d:", root_index); |
208 object->ShortPrint(); | 207 object->ShortPrint(); |
209 PrintF("\n"); | 208 PrintF("\n"); |
210 } | 209 } |
211 | 210 |
212 if (how_to_code == kPlain && where_to_point == kStartOfObject && | 211 if (how_to_code == kPlain && where_to_point == kStartOfObject && |
213 root_index < kNumberOfRootArrayConstants && | 212 root_index < kNumberOfRootArrayConstants && |
214 !isolate()->heap()->InNewSpace(object)) { | 213 !isolate()->heap()->InNewSpace(object)) { |
215 if (skip == 0) { | 214 if (skip == 0) { |
216 sink_->Put(kRootArrayConstants + root_index, "RootConstant"); | 215 sink_.Put(kRootArrayConstants + root_index, "RootConstant"); |
217 } else { | 216 } else { |
218 sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant"); | 217 sink_.Put(kRootArrayConstantsWithSkip + root_index, "RootConstant"); |
219 sink_->PutInt(skip, "SkipInPutRoot"); | 218 sink_.PutInt(skip, "SkipInPutRoot"); |
220 } | 219 } |
221 } else { | 220 } else { |
222 FlushSkip(skip); | 221 FlushSkip(skip); |
223 sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization"); | 222 sink_.Put(kRootArray + how_to_code + where_to_point, "RootSerialization"); |
224 sink_->PutInt(root_index, "root_index"); | 223 sink_.PutInt(root_index, "root_index"); |
225 } | 224 } |
226 } | 225 } |
227 | 226 |
228 void Serializer::PutSmi(Smi* smi) { | 227 void Serializer::PutSmi(Smi* smi) { |
229 sink_->Put(kOnePointerRawData, "Smi"); | 228 sink_.Put(kOnePointerRawData, "Smi"); |
230 byte* bytes = reinterpret_cast<byte*>(&smi); | 229 byte* bytes = reinterpret_cast<byte*>(&smi); |
231 for (int i = 0; i < kPointerSize; i++) sink_->Put(bytes[i], "Byte"); | 230 for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte"); |
232 } | 231 } |
233 | 232 |
234 void Serializer::PutBackReference(HeapObject* object, | 233 void Serializer::PutBackReference(HeapObject* object, |
235 SerializerReference reference) { | 234 SerializerReference reference) { |
236 DCHECK(BackReferenceIsAlreadyAllocated(reference)); | 235 DCHECK(BackReferenceIsAlreadyAllocated(reference)); |
237 sink_->PutInt(reference.back_reference(), "BackRefValue"); | 236 sink_.PutInt(reference.back_reference(), "BackRefValue"); |
238 hot_objects_.Add(object); | 237 hot_objects_.Add(object); |
239 } | 238 } |
240 | 239 |
241 void Serializer::PutAttachedReference(SerializerReference reference, | 240 void Serializer::PutAttachedReference(SerializerReference reference, |
242 HowToCode how_to_code, | 241 HowToCode how_to_code, |
243 WhereToPoint where_to_point) { | 242 WhereToPoint where_to_point) { |
244 DCHECK(reference.is_attached_reference()); | 243 DCHECK(reference.is_attached_reference()); |
245 DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) || | 244 DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) || |
246 (how_to_code == kPlain && where_to_point == kInnerPointer) || | 245 (how_to_code == kPlain && where_to_point == kInnerPointer) || |
247 (how_to_code == kFromCode && where_to_point == kInnerPointer)); | 246 (how_to_code == kFromCode && where_to_point == kInnerPointer)); |
248 sink_->Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef"); | 247 sink_.Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef"); |
249 sink_->PutInt(reference.attached_reference_index(), "AttachedRefIndex"); | 248 sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex"); |
250 } | 249 } |
251 | 250 |
252 int Serializer::PutAlignmentPrefix(HeapObject* object) { | 251 int Serializer::PutAlignmentPrefix(HeapObject* object) { |
253 AllocationAlignment alignment = object->RequiredAlignment(); | 252 AllocationAlignment alignment = object->RequiredAlignment(); |
254 if (alignment != kWordAligned) { | 253 if (alignment != kWordAligned) { |
255 DCHECK(1 <= alignment && alignment <= 3); | 254 DCHECK(1 <= alignment && alignment <= 3); |
256 byte prefix = (kAlignmentPrefix - 1) + alignment; | 255 byte prefix = (kAlignmentPrefix - 1) + alignment; |
257 sink_->Put(prefix, "Alignment"); | 256 sink_.Put(prefix, "Alignment"); |
258 return Heap::GetMaximumFillToAlign(alignment); | 257 return Heap::GetMaximumFillToAlign(alignment); |
259 } | 258 } |
260 return 0; | 259 return 0; |
261 } | 260 } |
262 | 261 |
263 SerializerReference Serializer::AllocateLargeObject(int size) { | 262 SerializerReference Serializer::AllocateLargeObject(int size) { |
264 // Large objects are allocated one-by-one when deserializing. We do not | 263 // Large objects are allocated one-by-one when deserializing. We do not |
265 // have to keep track of multiple chunks. | 264 // have to keep track of multiple chunks. |
266 large_objects_total_size_ += size; | 265 large_objects_total_size_ += size; |
267 return SerializerReference::LargeObjectReference(seen_large_objects_index_++); | 266 return SerializerReference::LargeObjectReference(seen_large_objects_index_++); |
268 } | 267 } |
269 | 268 |
270 SerializerReference Serializer::Allocate(AllocationSpace space, int size) { | 269 SerializerReference Serializer::Allocate(AllocationSpace space, int size) { |
271 DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces); | 270 DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces); |
272 DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space))); | 271 DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space))); |
273 uint32_t new_chunk_size = pending_chunk_[space] + size; | 272 uint32_t new_chunk_size = pending_chunk_[space] + size; |
274 if (new_chunk_size > max_chunk_size(space)) { | 273 if (new_chunk_size > max_chunk_size(space)) { |
275 // The new chunk size would not fit onto a single page. Complete the | 274 // The new chunk size would not fit onto a single page. Complete the |
276 // current chunk and start a new one. | 275 // current chunk and start a new one. |
277 sink_->Put(kNextChunk, "NextChunk"); | 276 sink_.Put(kNextChunk, "NextChunk"); |
278 sink_->Put(space, "NextChunkSpace"); | 277 sink_.Put(space, "NextChunkSpace"); |
279 completed_chunks_[space].Add(pending_chunk_[space]); | 278 completed_chunks_[space].Add(pending_chunk_[space]); |
280 pending_chunk_[space] = 0; | 279 pending_chunk_[space] = 0; |
281 new_chunk_size = size; | 280 new_chunk_size = size; |
282 } | 281 } |
283 uint32_t offset = pending_chunk_[space]; | 282 uint32_t offset = pending_chunk_[space]; |
284 pending_chunk_[space] = new_chunk_size; | 283 pending_chunk_[space] = new_chunk_size; |
285 return SerializerReference::BackReference( | 284 return SerializerReference::BackReference( |
286 space, completed_chunks_[space].length(), offset); | 285 space, completed_chunks_[space].length(), offset); |
287 } | 286 } |
288 | 287 |
289 void Serializer::Pad() { | 288 void Serializer::Pad() { |
290 // The non-branching GetInt will read up to 3 bytes too far, so we need | 289 // The non-branching GetInt will read up to 3 bytes too far, so we need |
291 // to pad the snapshot to make sure we don't read over the end. | 290 // to pad the snapshot to make sure we don't read over the end. |
292 for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) { | 291 for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) { |
293 sink_->Put(kNop, "Padding"); | 292 sink_.Put(kNop, "Padding"); |
294 } | 293 } |
295 // Pad up to pointer size for checksum. | 294 // Pad up to pointer size for checksum. |
296 while (!IsAligned(sink_->Position(), kPointerAlignment)) { | 295 while (!IsAligned(sink_.Position(), kPointerAlignment)) { |
297 sink_->Put(kNop, "Padding"); | 296 sink_.Put(kNop, "Padding"); |
298 } | 297 } |
299 } | 298 } |
300 | 299 |
301 void Serializer::InitializeCodeAddressMap() { | 300 void Serializer::InitializeCodeAddressMap() { |
302 isolate_->InitializeLoggingAndCounters(); | 301 isolate_->InitializeLoggingAndCounters(); |
303 code_address_map_ = new CodeAddressMap(isolate_); | 302 code_address_map_ = new CodeAddressMap(isolate_); |
304 } | 303 } |
305 | 304 |
306 Code* Serializer::CopyCode(Code* code) { | 305 Code* Serializer::CopyCode(Code* code) { |
307 code_buffer_.Rewind(0); // Clear buffer without deleting backing store. | 306 code_buffer_.Rewind(0); // Clear buffer without deleting backing store. |
(...skipping 467 matching lines...) |
775 if (to_skip != 0 && return_skip == kIgnoringReturn) { | 774 if (to_skip != 0 && return_skip == kIgnoringReturn) { |
776 sink_->Put(kSkip, "Skip"); | 775 sink_->Put(kSkip, "Skip"); |
777 sink_->PutInt(to_skip, "SkipDistance"); | 776 sink_->PutInt(to_skip, "SkipDistance"); |
778 to_skip = 0; | 777 to_skip = 0; |
779 } | 778 } |
780 return to_skip; | 779 return to_skip; |
781 } | 780 } |
782 | 781 |
783 } // namespace internal | 782 } // namespace internal |
784 } // namespace v8 | 783 } // namespace v8 |