Chromium Code Reviews

Unified Diff: src/snapshot/serializer.h

Issue 1811913002: [serializer] ensure that immortal immovable roots are correctly deserialized. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 9 months ago
 // Copyright 2016 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_SNAPSHOT_SERIALIZER_H_
 #define V8_SNAPSHOT_SERIALIZER_H_

 #include "src/isolate.h"
 #include "src/log.h"
 #include "src/objects.h"
(...skipping 137 matching lines...)
     }

    private:
     static const int kMaxRecursionDepth = 32;
     Serializer* serializer_;
   };

   virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                                WhereToPoint where_to_point, int skip) = 0;

+  void VisitPointers(Object** start, Object** end) override;
+
   void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
                int skip);

+  void PutSmi(Smi* smi);
+
   void PutBackReference(HeapObject* object, BackReference reference);

   // Emit alignment prefix if necessary, return required padding space in bytes.
   int PutAlignmentPrefix(HeapObject* object);

   // Returns true if the object was successfully serialized.
   bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                             WhereToPoint where_to_point, int skip);

   inline void FlushSkip(int skip) {
     if (skip != 0) {
       sink_->Put(kSkip, "SkipFromSerializeObject");
       sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
     }
   }

   bool BackReferenceIsAlreadyAllocated(BackReference back_reference);

   // This will return the space for an object.
   BackReference AllocateLargeObject(int size);
   BackReference Allocate(AllocationSpace space, int size);
   int EncodeExternalReference(Address addr) {
     return external_reference_encoder_.Encode(addr);
   }

+  bool HasNotExceededFirstPageOfEachSpace();
+
   // GetInt reads 4 bytes at once, requiring padding at the end.
   void Pad();

-  // Some roots should not be serialized, because their actual value depends on
-  // absolute addresses and they are reset after deserialization, anyway.
-  bool ShouldBeSkipped(Object** current);
-
   // We may not need the code address map for logging for every instance
   // of the serializer. Initialize it on demand.
   void InitializeCodeAddressMap();

   Code* CopyCode(Code* code);

   inline uint32_t max_chunk_size(int space) const {
     DCHECK_LE(0, space);
     DCHECK_LT(space, kNumberOfSpaces);
     return max_chunk_size_[space];
(...skipping 17 matching lines...)
   RootIndexMap root_index_map_;

   int recursion_depth_;

   friend class Deserializer;
   friend class ObjectSerializer;
   friend class RecursionScope;
   friend class SnapshotData;

  private:
-  void VisitPointers(Object** start, Object** end) override;
-
   CodeAddressMap* code_address_map_;
   // Objects from the same space are put into chunks for bulk-allocation
   // when deserializing. We have to make sure that each chunk fits into a
   // page. So we track the chunk size in pending_chunk_ of a space, but
   // when it exceeds a page, we complete the current chunk and start a new one.
   uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
   List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
   uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];

   // We map serialized large objects to indexes for back-referencing.
(...skipping 70 matching lines...)
   SnapshotByteSink* sink_;
   int reference_representation_;
   int bytes_processed_so_far_;
   bool code_has_been_output_;
 };

 }  // namespace internal
 }  // namespace v8

 #endif  // V8_SNAPSHOT_SERIALIZER_H_
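
The comment above pending_chunk_ describes the chunk bookkeeping: objects from one space are grouped into chunks for bulk allocation during deserialization, and a chunk is completed as soon as the next allocation would push it past the page-sized limit. The sketch below only illustrates that bookkeeping under stated assumptions; ChunkAccountant and its Allocate method are hypothetical names, not part of the V8 code in this change.

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

class ChunkAccountant {
 public:
  explicit ChunkAccountant(uint32_t max_chunk_size)
      : max_chunk_size_(max_chunk_size), pending_chunk_(0) {}

  // Place |size| bytes and return {index of the chunk, offset within it}.
  std::pair<size_t, uint32_t> Allocate(uint32_t size) {
    if (pending_chunk_ + size > max_chunk_size_) {
      // The current chunk would no longer fit into a page: complete it and
      // start a new, empty one.
      completed_chunks_.push_back(pending_chunk_);
      pending_chunk_ = 0;
    }
    uint32_t offset = pending_chunk_;
    pending_chunk_ += size;
    return {completed_chunks_.size(), offset};
  }

 private:
  const uint32_t max_chunk_size_;
  uint32_t pending_chunk_;
  std::vector<uint32_t> completed_chunks_;  // sizes of finished chunks
};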
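
FlushSkip in the header shows the count-then-flush pattern for skips: callers accumulate a byte distance that needs no explicit serialization, and just before the next real output a kSkip entry followed by the distance is emitted, but only when that distance is non-zero. The stand-alone sketch below restates that pattern with stand-in types; ToySink and the opcode value are assumptions for illustration, not the real SnapshotByteSink API.

#include <cstdint>
#include <vector>

struct ToySink {
  void Put(uint8_t b) { data.push_back(b); }
  void PutInt(int v) {
    // Toy fixed-width little-endian encoding, for illustration only.
    for (int i = 0; i < 4; i++) data.push_back(static_cast<uint8_t>(v >> (8 * i)));
  }
  std::vector<uint8_t> data;
};

constexpr uint8_t kToySkip = 0x0f;  // assumed opcode value, not V8's kSkip

// Mirrors FlushSkip in the header above: emit a skip record only when a
// non-zero distance has accumulated.
void FlushSkip(ToySink* sink, int skip) {
  if (skip != 0) {
    sink->Put(kToySkip);
    sink->PutInt(skip);
  }
}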
