OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 152 matching lines...) | |
163 SnapshotByteSource(const byte* array, int length) | 163 SnapshotByteSource(const byte* array, int length) |
164 : data_(array), length_(length), position_(0) { } | 164 : data_(array), length_(length), position_(0) { } |
165 | 165 |
166 bool HasMore() { return position_ < length_; } | 166 bool HasMore() { return position_ < length_; } |
167 | 167 |
168 int Get() { | 168 int Get() { |
169 ASSERT(position_ < length_); | 169 ASSERT(position_ < length_); |
170 return data_[position_++]; | 170 return data_[position_++]; |
171 } | 171 } |
172 | 172 |
173 int32_t GetUnalignedInt() { | |
174 #ifdef V8_HOST_CAN_READ_UNALIGNED | |
175 int32_t answer; | |
176 ASSERT(position_ + sizeof(answer) <= length_ + 0u); | |
177 answer = *reinterpret_cast<const int32_t*>(data_ + position_); | |
Yang (2012/09/13 08:47:59): Since GetInt() uses this: we assume the least significant byte comes first?
Erik Corry (2012/09/13 12:13:35): Fixed to go into the slow case for big endian.
| |
178 #else | |
179 int32_t answer = data_[position_]; | |
180 answer |= data_[position_ + 1] << 8; | |
181 answer |= data_[position_ + 2] << 16; | |
182 answer |= data_[position_ + 3] << 24; | |
Yang (2012/09/13 08:47:59): In PutInt we only take integers that can be encoded...
Erik Corry (2012/09/13 12:13:35): Otherwise it will do something different depending...
| |
183 #endif | |
184 return answer; | |
185 } | |
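
A note on the endianness point raised in the review comments above: the `#ifdef` fast path reinterprets the buffer in host byte order, while the `#else` path assembles the value byte by byte as little-endian, so the two only agree on little-endian hosts (hence the fix to take the slow path on big-endian targets). A minimal, hypothetical sketch of a byte-order-independent read; the helper name is ours, not V8's:

```cpp
#include <stdint.h>

// Hedged sketch: assembles a 32-bit value from individual bytes, so the
// result is the same regardless of host byte order and of whether the
// platform supports unaligned loads.
static inline uint32_t ReadLittleEndian32(const uint8_t* p) {
  return static_cast<uint32_t>(p[0]) |
         (static_cast<uint32_t>(p[1]) << 8) |
         (static_cast<uint32_t>(p[2]) << 16) |
         (static_cast<uint32_t>(p[3]) << 24);
}
```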
186 | |
187 void Advance(int by) { position_ += by; } | |
188 | |
173 inline void CopyRaw(byte* to, int number_of_bytes); | 189 inline void CopyRaw(byte* to, int number_of_bytes); |
174 | 190 |
175 inline int GetInt(); | 191 inline int GetInt(); |
176 | 192 |
177 bool AtEOF() { | 193 bool AtEOF(); |
178 return position_ == length_; | |
179 } | |
180 | 194 |
181 int position() { return position_; } | 195 int position() { return position_; } |
182 | 196 |
183 private: | 197 private: |
184 const byte* data_; | 198 const byte* data_; |
185 int length_; | 199 int length_; |
186 int position_; | 200 int position_; |
187 }; | 201 }; |
188 | 202 |
189 | 203 |
190 #define COMMON_RAW_LENGTHS(f) \ | 204 #define COMMON_RAW_LENGTHS(f) \ |
191 f(1, 1) \ | 205 f(1, 4) \ |
192 f(2, 2) \ | 206 f(2, 8) \ |
193 f(3, 3) \ | 207 f(3, 12) \ |
194 f(4, 4) \ | 208 f(4, 16) \ |
195 f(5, 5) \ | 209 f(5, 20) \ |
196 f(6, 6) \ | 210 f(6, 24) \ |
197 f(7, 7) \ | 211 f(7, 28) \ |
198 f(8, 8) \ | 212 f(8, 32) \ |
199 f(9, 12) \ | 213 f(9, 36) \ |
200 f(10, 16) \ | 214 f(10, 40) \ |
201 f(11, 20) \ | 215 f(11, 44) \ |
202 f(12, 24) \ | 216 f(12, 48) \ |
203 f(13, 28) \ | 217 f(13, 52) \ |
204 f(14, 32) \ | 218 f(14, 56) \ |
205 f(15, 36) | 219 f(15, 60) \ |
220 f(16, 64) \ | |
221 f(17, 68) \ | |
222 f(18, 72) \ | |
223 f(19, 76) \ | |
224 f(20, 80) \ | |
225 f(21, 84) \ | |
226 f(22, 88) \ | |
227 f(23, 92) \ | |
228 f(24, 96) \ | |
229 f(25, 100) \ | |
230 f(26, 104) \ | |
231 f(27, 108) \ | |
232 f(28, 112) \ | |
233 f(29, 116) \ | |
234 f(30, 120) \ | |
235 f(31, 124) | |
Yang (2012/09/13 08:47:59): if the second arg is always 4x the first arg, why not compute it?
Erik Corry (2012/09/13 12:13:35): Made it part of f, and moved this to the .cc file.
| |
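
As an aside, the change Erik describes (letting f derive the byte length) can be illustrated with a small, hypothetical X-macro sketch; the names below are ours, not the actual V8 code:

```cpp
// Hedged sketch: when the second argument is always 4x the first, the list
// only needs one argument per entry and each expansion computes the bytes.
#define COMMON_RAW_LENGTHS(f) f(1) f(2) f(3) f(4)  /* ... up to the maximum */

#define DEFINE_RAW_LENGTH(words) \
  static const int kRawBytes##words = (words) * 4;  // length derived inside f

COMMON_RAW_LENGTHS(DEFINE_RAW_LENGTH)
#undef DEFINE_RAW_LENGTH
```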
206 | 236 |
207 // The Serializer/Deserializer class is a common superclass for Serializer and | 237 // The Serializer/Deserializer class is a common superclass for Serializer and |
208 // Deserializer which is used to store common constants and methods used by | 238 // Deserializer which is used to store common constants and methods used by |
209 // both. | 239 // both. |
210 class SerializerDeserializer: public ObjectVisitor { | 240 class SerializerDeserializer: public ObjectVisitor { |
211 public: | 241 public: |
212 static void Iterate(ObjectVisitor* visitor); | 242 static void Iterate(ObjectVisitor* visitor); |
213 | 243 |
244 static int nop() { return kNop; } | |
245 | |
214 protected: | 246 protected: |
215 // Where the pointed-to object can be found: | 247 // Where the pointed-to object can be found: |
216 enum Where { | 248 enum Where { |
217 kNewObject = 0, // Object is next in snapshot. | 249 kNewObject = 0, // Object is next in snapshot. |
218 // 1-8 One per space. | 250 // 1-6 One per space. |
219 kRootArray = 0x9, // Object is found in root array. | 251 kRootArray = 0x9, // Object is found in root array. |
220 kPartialSnapshotCache = 0xa, // Object is in the cache. | 252 kPartialSnapshotCache = 0xa, // Object is in the cache. |
221 kExternalReference = 0xb, // Pointer to an external reference. | 253 kExternalReference = 0xb, // Pointer to an external reference. |
222 kSkip = 0xc, // Skip a pointer sized cell. | 254 kSkip = 0xc, // Skip n bytes. |
223 // 0xd-0xf Free. | 255 kNop = 0xd, // Does nothing, used to pad. |
224 kBackref = 0x10, // Object is described relative to end. | 256 // 0xe-0xf Free. |
225 // 0x11-0x18 One per space. | 257 kBackref = 0x10, // Object is described relative to end. |
226 // 0x19-0x1f Free. | 258 // 0x11-0x16 One per space. |
227 kFromStart = 0x20, // Object is described relative to start. | 259 kBackrefWithSkip = 0x18, // Object is described relative to end. |
228 // 0x21-0x28 One per space. | 260 // 0x19-0x1e One per space. |
229 // 0x29-0x2f Free. | 261 // 0x20-0x3f Used by misc. tags below. |
230 // 0x30-0x3f Used by misc. tags below. | |
231 kPointedToMask = 0x3f | 262 kPointedToMask = 0x3f |
232 }; | 263 }; |
233 | 264 |
234 // How to code the pointer to the object. | 265 // How to code the pointer to the object. |
235 enum HowToCode { | 266 enum HowToCode { |
236 kPlain = 0, // Straight pointer. | 267 kPlain = 0, // Straight pointer. |
237 // What this means depends on the architecture: | 268 // What this means depends on the architecture: |
238 kFromCode = 0x40, // A pointer inlined in code. | 269 kFromCode = 0x40, // A pointer inlined in code. |
239 kHowToCodeMask = 0x40 | 270 kHowToCodeMask = 0x40 |
240 }; | 271 }; |
241 | 272 |
273 // For kRootArrayConstants | |
274 enum WithSkip { | |
275 kNoSkipDistance = 0, | |
276 kHasSkipDistance = 0x40, | |
277 kWithSkipMask = 0x40 | |
278 }; | |
279 | |
242 // Where to point within the object. | 280 // Where to point within the object. |
243 enum WhereToPoint { | 281 enum WhereToPoint { |
244 kStartOfObject = 0, | 282 kStartOfObject = 0, |
245 kInnerPointer = 0x80, // First insn in code object or payload of cell. | 283 kInnerPointer = 0x80, // First insn in code object or payload of cell. |
246 kWhereToPointMask = 0x80 | 284 kWhereToPointMask = 0x80 |
247 }; | 285 }; |
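
Taken together, the enums above pack three orthogonal fields into a single byte code. A hedged sketch of the composition, using only the masks declared above (the helper names are ours):

```cpp
// Hedged sketch based only on the masks declared above.
static inline int EncodeTag(int where, int how_to_code, int where_to_point) {
  // Where occupies the low six bits (kPointedToMask = 0x3f); HowToCode and
  // WhereToPoint each occupy a single high bit (0x40 and 0x80).
  return (where & 0x3f) | (how_to_code & 0x40) | (where_to_point & 0x80);
}

static inline void DecodeTag(int tag, int* where, int* how, int* point) {
  *where = tag & 0x3f;  // kPointedToMask
  *how   = tag & 0x40;  // kHowToCodeMask
  *point = tag & 0x80;  // kWhereToPointMask
}
```

The separate kWithSkipMask reuses the same 0x40 bit, but per the comment above it applies only to kRootArrayConstants byte codes.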
248 | 286 |
249 // Misc. | 287 // Misc. |
250 // Raw data to be copied from the snapshot. | 288 // Raw data to be copied from the snapshot. This byte code does not advance |
251 static const int kRawData = 0x30; | 289 // the current pointer, which is used for code objects, where we write the |
252 // Some common raw lengths: 0x31-0x3f | 290 // entire code in one memcpy, then fix up stuff with kSkip and other byte |
291 // codes that overwrite data. | |
292 static const int kRawData = 0x20; | |
293 // Some common raw lengths: 0x21-0x3f. These autoadvance the current pointer. | |
253 // A tag emitted at strategic points in the snapshot to delineate sections. | 294 // A tag emitted at strategic points in the snapshot to delineate sections. |
254 // If the deserializer does not find these at the expected moments then it | 295 // If the deserializer does not find these at the expected moments then it |
255 // is an indication that the snapshot and the VM do not fit together. | 296 // is an indication that the snapshot and the VM do not fit together. |
256 // Examine the build process for architecture, version or configuration | 297 // Examine the build process for architecture, version or configuration |
257 // mismatches. | 298 // mismatches. |
258 static const int kSynchronize = 0x70; | 299 static const int kSynchronize = 0x70; |
259 // Used for the source code of the natives, which is in the executable, but | 300 // Used for the source code of the natives, which is in the executable, but |
260 // is referred to from external strings in the snapshot. | 301 // is referred to from external strings in the snapshot. |
261 static const int kNativesStringResource = 0x71; | 302 static const int kNativesStringResource = 0x71; |
262 static const int kNewPage = 0x72; | 303 static const int kRepeat = 0x72; |
263 static const int kRepeat = 0x73; | 304 static const int kConstantRepeat = 0x73; |
264 static const int kConstantRepeat = 0x74; | 305 // 0x73-0x7f Repeat last word (subtract 0x72 to get the count). |
265 // 0x74-0x7f Repeat last word (subtract 0x73 to get the count). | 306 static const int kMaxRepeats = 0x7f - 0x72; |
266 static const int kMaxRepeats = 0x7f - 0x73; | |
267 static int CodeForRepeats(int repeats) { | 307 static int CodeForRepeats(int repeats) { |
268 ASSERT(repeats >= 1 && repeats <= kMaxRepeats); | 308 ASSERT(repeats >= 1 && repeats <= kMaxRepeats); |
269 return 0x73 + repeats; | 309 return 0x72 + repeats; |
270 } | 310 } |
271 static int RepeatsForCode(int byte_code) { | 311 static int RepeatsForCode(int byte_code) { |
272 ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f); | 312 ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f); |
273 return byte_code - 0x73; | 313 return byte_code - 0x72; |
274 } | 314 } |
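
A small worked example of the updated repeat encoding (constants taken from the new column; the assertions just restate the arithmetic above):

```cpp
#include <cassert>

int main() {
  const int kBase = 0x72;                // kRepeat; repeat counts are encoded above it
  const int kMaxRepeats = 0x7f - kBase;  // 13
  int byte_code = kBase + 5;             // CodeForRepeats(5) == 0x77
  assert(byte_code - kBase == 5);        // RepeatsForCode(0x77) == 5
  assert(kMaxRepeats == 13);             // longer runs need more than one code
  return 0;
}
```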
275 static const int kRootArrayLowConstants = 0xb0; | 315 static const int kRootArrayConstants = 0xa0; |
276 // 0xb0-0xbf Things from the first 16 elements of the root array. | 316 // 0xa0-0xbf Things from the first 32 elements of the root array. |
277 static const int kRootArrayHighConstants = 0xf0; | |
278 // 0xf0-0xff Things from the next 16 elements of the root array. | |
279 static const int kRootArrayNumberOfConstantEncodings = 0x20; | 317 static const int kRootArrayNumberOfConstantEncodings = 0x20; |
280 static const int kRootArrayNumberOfLowConstantEncodings = 0x10; | |
281 static int RootArrayConstantFromByteCode(int byte_code) { | 318 static int RootArrayConstantFromByteCode(int byte_code) { |
282 int constant = (byte_code & 0xf) | ((byte_code & 0x40) >> 2); | 319 return byte_code & 0x1f; |
283 ASSERT(constant >= 0 && constant < kRootArrayNumberOfConstantEncodings); | |
284 return constant; | |
285 } | 320 } |
286 | 321 |
287 | 322 static const int kNumberOfSpaces = LO_SPACE; |
288 static const int kLargeData = LAST_SPACE; | |
289 static const int kLargeCode = kLargeData + 1; | |
290 static const int kLargeFixedArray = kLargeCode + 1; | |
291 static const int kNumberOfSpaces = kLargeFixedArray + 1; | |
292 static const int kAnyOldSpace = -1; | 323 static const int kAnyOldSpace = -1; |
293 | 324 |
294 // A bitmask for getting the space out of an instruction. | 325 // A bitmask for getting the space out of an instruction. |
295 static const int kSpaceMask = 15; | 326 static const int kSpaceMask = 7; |
296 | |
297 static inline bool SpaceIsLarge(int space) { return space >= kLargeData; } | |
298 static inline bool SpaceIsPaged(int space) { | |
299 return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE; | |
300 } | |
301 }; | 327 }; |
302 | 328 |
303 | 329 |
304 int SnapshotByteSource::GetInt() { | 330 int SnapshotByteSource::GetInt() { |
305 // A little unwind to catch the really small ints. | 331 // This way of variable-length encoding integers does not suffer from branch |
306 int snapshot_byte = Get(); | 332 // mispredictions. |
307 if ((snapshot_byte & 0x80) == 0) { | 333 uint32_t answer = GetUnalignedInt(); |
308 return snapshot_byte; | 334 int bytes = answer & 3; |
309 } | 335 Advance(bytes); |
310 int accumulator = (snapshot_byte & 0x7f) << 7; | 336 uint32_t mask = 0xffffffffu; |
311 while (true) { | 337 mask >>= 32 - (bytes << 3); |
312 snapshot_byte = Get(); | 338 answer &= mask; |
313 if ((snapshot_byte & 0x80) == 0) { | 339 answer >>= 2; |
314 return accumulator | snapshot_byte; | 340 return answer; |
315 } | |
316 accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7; | |
317 } | |
318 UNREACHABLE(); | |
319 return accumulator; | |
320 } | 341 } |
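
For readers of the new GetInt(): a hedged sketch of the encode/decode pair it implies. PutInt is not shown in this file, so the encoder below is a reconstruction from the decoder's behaviour, not V8's actual code. The value is stored shifted left by two with the byte count in the low two bits, so the decoder can read one (possibly overlong) 32-bit word, mask it down to the encoded length, and shift:

```cpp
#include <cassert>
#include <stdint.h>

// Hedged sketch of an encoder matching the new GetInt() above.
static int EncodeVarInt(uint32_t value, uint8_t out[3]) {
  assert(value < (1u << 22));          // must still fit once shifted left by 2
  uint32_t v = value << 2;
  int bytes = 1;
  if (v > 0xff) bytes = 2;
  if (v > 0xffff) bytes = 3;
  v |= static_cast<uint32_t>(bytes);   // byte count lives in the low 2 bits
  for (int i = 0; i < bytes; i++) out[i] = static_cast<uint8_t>(v >> (8 * i));
  return bytes;
}

// Mirror of GetInt(): assumes at least four readable bytes, as
// GetUnalignedInt() does; the extra bytes are masked away.
static uint32_t DecodeVarInt(const uint8_t* data) {
  uint32_t word = static_cast<uint32_t>(data[0]) |
                  (static_cast<uint32_t>(data[1]) << 8) |
                  (static_cast<uint32_t>(data[2]) << 16) |
                  (static_cast<uint32_t>(data[3]) << 24);
  int bytes = static_cast<int>(word & 3);          // always 1..3 from the encoder
  uint32_t mask = 0xffffffffu >> (32 - (bytes << 3));
  return (word & mask) >> 2;
}

int main() {
  uint8_t buf[4] = {0, 0, 0, 0};
  EncodeVarInt(1000, buf);
  assert(DecodeVarInt(buf) == 1000);
  return 0;
}
```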
321 | 342 |
322 | 343 |
323 void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) { | 344 void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) { |
324 memcpy(to, data_ + position_, number_of_bytes); | 345 memcpy(to, data_ + position_, number_of_bytes); |
325 position_ += number_of_bytes; | 346 position_ += number_of_bytes; |
326 } | 347 } |
327 | 348 |
328 | 349 |
329 // A Deserializer reads a snapshot and reconstructs the Object graph it defines. | 350 // A Deserializer reads a snapshot and reconstructs the Object graph it defines. |
330 class Deserializer: public SerializerDeserializer { | 351 class Deserializer: public SerializerDeserializer { |
331 public: | 352 public: |
332 // Create a deserializer from a snapshot byte source. | 353 // Create a deserializer from a snapshot byte source. |
333 explicit Deserializer(SnapshotByteSource* source); | 354 explicit Deserializer(SnapshotByteSource* source); |
334 | 355 |
335 virtual ~Deserializer(); | 356 virtual ~Deserializer(); |
336 | 357 |
337 // Deserialize the snapshot into an empty heap. | 358 // Deserialize the snapshot into an empty heap. |
338 void Deserialize(); | 359 void Deserialize(); |
339 | 360 |
340 // Deserialize a single object and the objects reachable from it. | 361 // Deserialize a single object and the objects reachable from it. |
341 void DeserializePartial(Object** root); | 362 void DeserializePartial(Object** root); |
342 | 363 |
364 void set_reservation(int space_number, uintptr_t reservation) { | |
365 ASSERT(space_number >= 0); | |
366 ASSERT(space_number <= LAST_SPACE); | |
367 reservations_[space_number] = reservation; | |
368 } | |
369 | |
343 private: | 370 private: |
344 virtual void VisitPointers(Object** start, Object** end); | 371 virtual void VisitPointers(Object** start, Object** end); |
345 | 372 |
346 virtual void VisitExternalReferences(Address* start, Address* end) { | 373 virtual void VisitExternalReferences(Address* start, Address* end) { |
347 UNREACHABLE(); | 374 UNREACHABLE(); |
348 } | 375 } |
349 | 376 |
350 virtual void VisitRuntimeEntry(RelocInfo* rinfo) { | 377 virtual void VisitRuntimeEntry(RelocInfo* rinfo) { |
351 UNREACHABLE(); | 378 UNREACHABLE(); |
352 } | 379 } |
353 | 380 |
354 // Fills in some heap data in an area from start to end (non-inclusive). The | 381 // Fills in some heap data in an area from start to end (non-inclusive). The |
355 // space id is used for the write barrier. The object_address is the address | 382 // space id is used for the write barrier. The object_address is the address |
356 // of the object we are writing into, or NULL if we are not writing into an | 383 // of the object we are writing into, or NULL if we are not writing into an |
357 // object, i.e. if we are writing a series of tagged values that are not on | 384 // object, i.e. if we are writing a series of tagged values that are not on |
358 // the heap. | 385 // the heap. |
359 void ReadChunk( | 386 void ReadChunk( |
360 Object** start, Object** end, int space, Address object_address); | 387 Object** start, Object** end, int space, Address object_address); |
361 HeapObject* GetAddressFromStart(int space); | 388 void ReadObject(int space_number, Object** write_back); |
362 inline HeapObject* GetAddressFromEnd(int space); | 389 |
363 Address Allocate(int space_number, Space* space, int size); | 390 // This routine both allocates a new object, and also keeps |
364 void ReadObject(int space_number, Space* space, Object** write_back); | 391 // track of where objects have been allocated so that we can |
392 // fix back references when deserializing. | |
393 Address Allocate(int space_index, int size) { | |
394 Address address = high_water_[space_index]; | |
395 high_water_[space_index] = address + size; | |
396 return address; | |
397 } | |
398 | |
399 // This returns the address of an object that has been described in the | |
400 // snapshot as being offset bytes back in a particular space. | |
401 HeapObject* GetAddressFromEnd(int space) { | |
402 int offset = source_->GetInt(); | |
403 offset <<= kObjectAlignmentBits; | |
404 return HeapObject::FromAddress(high_water_[space] - offset); | |
405 } | |
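
To make the back-reference arithmetic concrete, a worked example with made-up numbers; kObjectAlignmentBits is assumed to be 2 here (4-byte alignment on a 32-bit target), and this is an illustration rather than V8 code:

```cpp
#include <cassert>
#include <stdint.h>

int main() {
  const int kObjectAlignmentBits = 2;  // assumed: 4-byte alignment (32-bit)
  uintptr_t high_water = 0x1000;       // next free address in some space
  uintptr_t a = high_water; high_water += 16;  // Allocate() object A (16 bytes)
  high_water += 8;                             // Allocate() object B (8 bytes)
  // The snapshot stores A's distance from the current end, in words:
  int encoded = static_cast<int>((high_water - a) >> kObjectAlignmentBits);
  // GetAddressFromEnd() reads that integer, shifts it back up, and subtracts:
  uintptr_t decoded =
      high_water - (static_cast<uintptr_t>(encoded) << kObjectAlignmentBits);
  assert(encoded == 6);   // (16 + 8) / 4
  assert(decoded == a);
  return 0;
}
```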
406 | |
365 | 407 |
366 // Cached current isolate. | 408 // Cached current isolate. |
367 Isolate* isolate_; | 409 Isolate* isolate_; |
368 | 410 |
369 // Keep track of the pages in the paged spaces. | |
370 // (In large object space we are keeping track of individual objects | |
371 // rather than pages.) In new space we just need the address of the | |
372 // first object and the others will flow from that. | |
373 List<Address> pages_[SerializerDeserializer::kNumberOfSpaces]; | |
374 | |
375 SnapshotByteSource* source_; | 411 SnapshotByteSource* source_; |
376 // This is the address of the next object that will be allocated in each | 412 // This is the address of the next object that will be allocated in each |
377 // space. It is used to calculate the addresses of back-references. | 413 // space. It is used to calculate the addresses of back-references. |
378 Address high_water_[LAST_SPACE + 1]; | 414 Address high_water_[LAST_SPACE + 1]; |
379 // This is the address of the most recent object that was allocated. It | 415 |
380 // is used to set the location of the new page when we encounter a | 416 intptr_t reservations_[LAST_SPACE + 1]; |
381 // START_NEW_PAGE_SERIALIZATION tag. | 417 static const intptr_t kUninitializedReservation = -1; |
382 Address last_object_address_; | |
383 | 418 |
384 ExternalReferenceDecoder* external_reference_decoder_; | 419 ExternalReferenceDecoder* external_reference_decoder_; |
385 | 420 |
386 DISALLOW_COPY_AND_ASSIGN(Deserializer); | 421 DISALLOW_COPY_AND_ASSIGN(Deserializer); |
387 }; | 422 }; |
388 | 423 |
389 | 424 |
390 class SnapshotByteSink { | 425 class SnapshotByteSink { |
391 public: | 426 public: |
392 virtual ~SnapshotByteSink() { } | 427 virtual ~SnapshotByteSink() { } |
(...skipping 61 matching lines...) | |
454 | 489 |
455 // There can be only one serializer per V8 process. | 490 // There can be only one serializer per V8 process. |
456 class Serializer : public SerializerDeserializer { | 491 class Serializer : public SerializerDeserializer { |
457 public: | 492 public: |
458 explicit Serializer(SnapshotByteSink* sink); | 493 explicit Serializer(SnapshotByteSink* sink); |
459 ~Serializer(); | 494 ~Serializer(); |
460 void VisitPointers(Object** start, Object** end); | 495 void VisitPointers(Object** start, Object** end); |
461 // You can call this after serialization to find out how much space was used | 496 // You can call this after serialization to find out how much space was used |
462 // in each space. | 497 // in each space. |
463 int CurrentAllocationAddress(int space) { | 498 int CurrentAllocationAddress(int space) { |
464 if (SpaceIsLarge(space)) return large_object_total_; | 499 ASSERT(space < kNumberOfSpaces); |
465 return fullness_[space]; | 500 return fullness_[space]; |
466 } | 501 } |
467 | 502 |
468 static void Enable() { | 503 static void Enable() { |
469 if (!serialization_enabled_) { | 504 if (!serialization_enabled_) { |
470 ASSERT(!too_late_to_enable_now_); | 505 ASSERT(!too_late_to_enable_now_); |
471 } | 506 } |
472 serialization_enabled_ = true; | 507 serialization_enabled_ = true; |
473 } | 508 } |
474 | 509 |
475 static void Disable() { serialization_enabled_ = false; } | 510 static void Disable() { serialization_enabled_ = false; } |
476 // Call this when you have made use of the fact that there is no serialization | 511 // Call this when you have made use of the fact that there is no serialization |
477 // going on. | 512 // going on. |
478 static void TooLateToEnableNow() { too_late_to_enable_now_ = true; } | 513 static void TooLateToEnableNow() { too_late_to_enable_now_ = true; } |
479 static bool enabled() { return serialization_enabled_; } | 514 static bool enabled() { return serialization_enabled_; } |
480 SerializationAddressMapper* address_mapper() { return &address_mapper_; } | 515 SerializationAddressMapper* address_mapper() { return &address_mapper_; } |
481 void PutRoot( | 516 void PutRoot(int index, |
482 int index, HeapObject* object, HowToCode how, WhereToPoint where); | 517 HeapObject* object, |
518 HowToCode how, | |
519 WhereToPoint where, | |
520 int skip); | |
483 | 521 |
484 protected: | 522 protected: |
485 static const int kInvalidRootIndex = -1; | 523 static const int kInvalidRootIndex = -1; |
486 | 524 |
487 int RootIndex(HeapObject* heap_object, HowToCode from); | 525 int RootIndex(HeapObject* heap_object, HowToCode from); |
488 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0; | 526 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0; |
489 intptr_t root_index_wave_front() { return root_index_wave_front_; } | 527 intptr_t root_index_wave_front() { return root_index_wave_front_; } |
490 void set_root_index_wave_front(intptr_t value) { | 528 void set_root_index_wave_front(intptr_t value) { |
491 ASSERT(value >= root_index_wave_front_); | 529 ASSERT(value >= root_index_wave_front_); |
492 root_index_wave_front_ = value; | 530 root_index_wave_front_ = value; |
493 } | 531 } |
494 | 532 |
495 class ObjectSerializer : public ObjectVisitor { | 533 class ObjectSerializer : public ObjectVisitor { |
496 public: | 534 public: |
497 ObjectSerializer(Serializer* serializer, | 535 ObjectSerializer(Serializer* serializer, |
498 Object* o, | 536 Object* o, |
499 SnapshotByteSink* sink, | 537 SnapshotByteSink* sink, |
500 HowToCode how_to_code, | 538 HowToCode how_to_code, |
501 WhereToPoint where_to_point) | 539 WhereToPoint where_to_point) |
502 : serializer_(serializer), | 540 : serializer_(serializer), |
503 object_(HeapObject::cast(o)), | 541 object_(HeapObject::cast(o)), |
504 sink_(sink), | 542 sink_(sink), |
505 reference_representation_(how_to_code + where_to_point), | 543 reference_representation_(how_to_code + where_to_point), |
506 bytes_processed_so_far_(0) { } | 544 bytes_processed_so_far_(0), |
545 code_has_been_output_(false) { | |
546 code_object_ = o->IsCode(); | |
Yang (2012/09/13 08:47:59): I guess this could also be put into the initializer list.
Erik Corry (2012/09/13 12:13:35): Done.
| |
547 } | |
507 void Serialize(); | 548 void Serialize(); |
508 void VisitPointers(Object** start, Object** end); | 549 void VisitPointers(Object** start, Object** end); |
509 void VisitEmbeddedPointer(RelocInfo* target); | 550 void VisitEmbeddedPointer(RelocInfo* target); |
510 void VisitExternalReferences(Address* start, Address* end); | 551 void VisitExternalReferences(Address* start, Address* end); |
511 void VisitExternalReference(RelocInfo* rinfo); | 552 void VisitExternalReference(RelocInfo* rinfo); |
512 void VisitCodeTarget(RelocInfo* target); | 553 void VisitCodeTarget(RelocInfo* target); |
513 void VisitCodeEntry(Address entry_address); | 554 void VisitCodeEntry(Address entry_address); |
514 void VisitGlobalPropertyCell(RelocInfo* rinfo); | 555 void VisitGlobalPropertyCell(RelocInfo* rinfo); |
515 void VisitRuntimeEntry(RelocInfo* reloc); | 556 void VisitRuntimeEntry(RelocInfo* reloc); |
516 // Used for serializing the external strings that hold the natives source. | 557 // Used for serializing the external strings that hold the natives source. |
517 void VisitExternalAsciiString( | 558 void VisitExternalAsciiString( |
518 v8::String::ExternalAsciiStringResource** resource); | 559 v8::String::ExternalAsciiStringResource** resource); |
519 // We can't serialize a heap with external two byte strings. | 560 // We can't serialize a heap with external two byte strings. |
520 void VisitExternalTwoByteString( | 561 void VisitExternalTwoByteString( |
521 v8::String::ExternalStringResource** resource) { | 562 v8::String::ExternalStringResource** resource) { |
522 UNREACHABLE(); | 563 UNREACHABLE(); |
523 } | 564 } |
524 | 565 |
525 private: | 566 private: |
526 void OutputRawData(Address up_to); | 567 enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn }; |
568 // This function outputs or skips the raw data between the last pointer and | |
569 // up to the current position. It optionally can just return the number of | |
570 // bytes to skip instead of performing a skip instruction, in case the skip | |
571 // can be merged into the next instruction. | |
572 int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn); | |
527 | 573 |
528 Serializer* serializer_; | 574 Serializer* serializer_; |
529 HeapObject* object_; | 575 HeapObject* object_; |
530 SnapshotByteSink* sink_; | 576 SnapshotByteSink* sink_; |
531 int reference_representation_; | 577 int reference_representation_; |
532 int bytes_processed_so_far_; | 578 int bytes_processed_so_far_; |
579 bool code_object_; | |
580 bool code_has_been_output_; | |
533 }; | 581 }; |
534 | 582 |
535 virtual void SerializeObject(Object* o, | 583 virtual void SerializeObject(Object* o, |
536 HowToCode how_to_code, | 584 HowToCode how_to_code, |
537 WhereToPoint where_to_point) = 0; | 585 WhereToPoint where_to_point, |
586 int skip) = 0; | |
538 void SerializeReferenceToPreviousObject( | 587 void SerializeReferenceToPreviousObject( |
539 int space, | 588 int space, |
540 int address, | 589 int address, |
541 HowToCode how_to_code, | 590 HowToCode how_to_code, |
542 WhereToPoint where_to_point); | 591 WhereToPoint where_to_point, |
592 int skip); | |
543 void InitializeAllocators(); | 593 void InitializeAllocators(); |
544 // This will return the space for an object. If the object is in large | 594 // This will return the space for an object. |
545 // object space it may return kLargeCode or kLargeFixedArray in order | |
546 // to indicate to the deserializer what kind of large object allocation | |
547 // to make. | |
548 static int SpaceOfObject(HeapObject* object); | 595 static int SpaceOfObject(HeapObject* object); |
549 // This just returns the space of the object. It will return LO_SPACE | 596 int Allocate(int space, int size); |
550 // for all large objects since you can't check the type of the object | |
551 // once the map has been used for the serialization address. | |
552 static int SpaceOfAlreadySerializedObject(HeapObject* object); | |
553 int Allocate(int space, int size, bool* new_page_started); | |
554 int EncodeExternalReference(Address addr) { | 597 int EncodeExternalReference(Address addr) { |
555 return external_reference_encoder_->Encode(addr); | 598 return external_reference_encoder_->Encode(addr); |
556 } | 599 } |
557 | 600 |
558 int SpaceAreaSize(int space); | 601 int SpaceAreaSize(int space); |
559 | 602 |
560 Isolate* isolate_; | 603 Isolate* isolate_; |
561 // Keep track of the fullness of each space in order to generate | 604 // Keep track of the fullness of each space in order to generate |
562 // relative addresses for back references. Large objects are | 605 // relative addresses for back references. |
563 // just numbered sequentially since relative addresses make no | |
564 // sense in large object space. | |
565 int fullness_[LAST_SPACE + 1]; | 606 int fullness_[LAST_SPACE + 1]; |
566 SnapshotByteSink* sink_; | 607 SnapshotByteSink* sink_; |
567 int current_root_index_; | 608 int current_root_index_; |
568 ExternalReferenceEncoder* external_reference_encoder_; | 609 ExternalReferenceEncoder* external_reference_encoder_; |
569 static bool serialization_enabled_; | 610 static bool serialization_enabled_; |
570 // Did we already make use of the fact that serialization was not enabled? | 611 // Did we already make use of the fact that serialization was not enabled? |
571 static bool too_late_to_enable_now_; | 612 static bool too_late_to_enable_now_; |
572 int large_object_total_; | |
573 SerializationAddressMapper address_mapper_; | 613 SerializationAddressMapper address_mapper_; |
574 intptr_t root_index_wave_front_; | 614 intptr_t root_index_wave_front_; |
615 void Pad(); | |
575 | 616 |
576 friend class ObjectSerializer; | 617 friend class ObjectSerializer; |
577 friend class Deserializer; | 618 friend class Deserializer; |
578 | 619 |
579 private: | 620 private: |
580 DISALLOW_COPY_AND_ASSIGN(Serializer); | 621 DISALLOW_COPY_AND_ASSIGN(Serializer); |
581 }; | 622 }; |
582 | 623 |
583 | 624 |
584 class PartialSerializer : public Serializer { | 625 class PartialSerializer : public Serializer { |
585 public: | 626 public: |
586 PartialSerializer(Serializer* startup_snapshot_serializer, | 627 PartialSerializer(Serializer* startup_snapshot_serializer, |
587 SnapshotByteSink* sink) | 628 SnapshotByteSink* sink) |
588 : Serializer(sink), | 629 : Serializer(sink), |
589 startup_serializer_(startup_snapshot_serializer) { | 630 startup_serializer_(startup_snapshot_serializer) { |
590 set_root_index_wave_front(Heap::kStrongRootListLength); | 631 set_root_index_wave_front(Heap::kStrongRootListLength); |
591 } | 632 } |
592 | 633 |
593 // Serialize the objects reachable from a single object pointer. | 634 // Serialize the objects reachable from a single object pointer. |
594 virtual void Serialize(Object** o); | 635 virtual void Serialize(Object** o); |
595 virtual void SerializeObject(Object* o, | 636 virtual void SerializeObject(Object* o, |
596 HowToCode how_to_code, | 637 HowToCode how_to_code, |
597 WhereToPoint where_to_point); | 638 WhereToPoint where_to_point, |
639 int skip); | |
598 | 640 |
599 protected: | 641 protected: |
600 virtual int PartialSnapshotCacheIndex(HeapObject* o); | 642 virtual int PartialSnapshotCacheIndex(HeapObject* o); |
601 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { | 643 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { |
602 // Scripts should be referred only through shared function infos. We can't | 644 // Scripts should be referred only through shared function infos. We can't |
603 // allow them to be part of the partial snapshot because they contain a | 645 // allow them to be part of the partial snapshot because they contain a |
604 // unique ID, and deserializing several partial snapshots containing script | 646 // unique ID, and deserializing several partial snapshots containing script |
605 // would cause dupes. | 647 // would cause dupes. |
606 ASSERT(!o->IsScript()); | 648 ASSERT(!o->IsScript()); |
607 return o->IsString() || o->IsSharedFunctionInfo() || | 649 return o->IsString() || o->IsSharedFunctionInfo() || |
(...skipping 17 matching lines...) | |
625 // snapshot. | 667 // snapshot. |
626 Isolate::Current()->set_serialize_partial_snapshot_cache_length(0); | 668 Isolate::Current()->set_serialize_partial_snapshot_cache_length(0); |
627 } | 669 } |
628 // Serialize the current state of the heap. The order is: | 670 // Serialize the current state of the heap. The order is: |
629 // 1) Strong references. | 671 // 1) Strong references. |
630 // 2) Partial snapshot cache. | 672 // 2) Partial snapshot cache. |
631 // 3) Weak references (e.g. the symbol table). | 673 // 3) Weak references (e.g. the symbol table). |
632 virtual void SerializeStrongReferences(); | 674 virtual void SerializeStrongReferences(); |
633 virtual void SerializeObject(Object* o, | 675 virtual void SerializeObject(Object* o, |
634 HowToCode how_to_code, | 676 HowToCode how_to_code, |
635 WhereToPoint where_to_point); | 677 WhereToPoint where_to_point, |
678 int skip); | |
636 void SerializeWeakReferences(); | 679 void SerializeWeakReferences(); |
637 void Serialize() { | 680 void Serialize() { |
638 SerializeStrongReferences(); | 681 SerializeStrongReferences(); |
639 SerializeWeakReferences(); | 682 SerializeWeakReferences(); |
683 Pad(); | |
640 } | 684 } |
641 | 685 |
642 private: | 686 private: |
643 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { | 687 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { |
644 return false; | 688 return false; |
645 } | 689 } |
646 }; | 690 }; |
647 | 691 |
648 | 692 |
649 } } // namespace v8::internal | 693 } } // namespace v8::internal |
650 | 694 |
651 #endif // V8_SERIALIZE_H_ | 695 #endif // V8_SERIALIZE_H_ |