Chromium Code Reviews

Side by Side Diff: src/serialize.h

Issue 6880010: Merge (7265, 7271] from bleeding_edge to experimental/gc branch.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: '' Created 9 years, 8 months ago
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. 1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 61 matching lines...)
72 HashMap encodings_; 72 HashMap encodings_;
73 static uint32_t Hash(Address key) { 73 static uint32_t Hash(Address key) {
74 return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2); 74 return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
75 } 75 }
76 76
77 int IndexOf(Address key) const; 77 int IndexOf(Address key) const;
78 78
79 static bool Match(void* key1, void* key2) { return key1 == key2; } 79 static bool Match(void* key1, void* key2) { return key1 == key2; }
80 80
81 void Put(Address key, int index); 81 void Put(Address key, int index);
82
83 Isolate* isolate_;
82 }; 84 };
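
The encoder fragment above maps external-reference addresses to small integer indices by hashing the pointer with its alignment bits shifted out. A minimal standalone sketch of the same pointer-to-index pattern, using std::unordered_map instead of V8's HashMap (the names here are illustrative, not V8's):

#include <cstdint>
#include <unordered_map>

// Drop the low alignment bits, as in Hash() above, so that word-aligned
// addresses spread across buckets instead of sharing the same low bits.
struct AddressHash {
  size_t operator()(const void* key) const {
    return static_cast<size_t>(reinterpret_cast<uintptr_t>(key) >> 2);
  }
};

class AddressIndexMap {
 public:
  void Put(const void* key, int index) { encodings_[key] = index; }
  int IndexOf(const void* key) const {  // -1 when the key is unknown
    auto it = encodings_.find(key);
    return it == encodings_.end() ? -1 : it->second;
  }
 private:
  std::unordered_map<const void*, int, AddressHash> encodings_;
};
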
83 85
84 86
85 class ExternalReferenceDecoder { 87 class ExternalReferenceDecoder {
86 public: 88 public:
87 ExternalReferenceDecoder(); 89 ExternalReferenceDecoder();
88 ~ExternalReferenceDecoder(); 90 ~ExternalReferenceDecoder();
89 91
90 Address Decode(uint32_t key) const { 92 Address Decode(uint32_t key) const {
91 if (key == 0) return NULL; 93 if (key == 0) return NULL;
92 return *Lookup(key); 94 return *Lookup(key);
93 } 95 }
94 96
95 private: 97 private:
96 Address** encodings_; 98 Address** encodings_;
97 99
98 Address* Lookup(uint32_t key) const { 100 Address* Lookup(uint32_t key) const {
99 int type = key >> kReferenceTypeShift; 101 int type = key >> kReferenceTypeShift;
100 ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount); 102 ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount);
101 int id = key & kReferenceIdMask; 103 int id = key & kReferenceIdMask;
102 return &encodings_[type][id]; 104 return &encodings_[type][id];
103 } 105 }
104 106
105 void Put(uint32_t key, Address value) { 107 void Put(uint32_t key, Address value) {
106 *Lookup(key) = value; 108 *Lookup(key) = value;
107 } 109 }
110
111 Isolate* isolate_;
108 }; 112 };
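
Decode() and Lookup() above split a 32-bit key into a type (high bits) and an id (low bits). A minimal sketch of that key arithmetic, assuming example values for kReferenceTypeShift and kReferenceIdMask; the real constants are defined elsewhere in serialize.h and are not shown in this diff:

#include <cassert>
#include <cstdint>

static const int kReferenceTypeShift = 16;                // assumed value
static const uint32_t kReferenceIdMask = (1u << 16) - 1;  // assumed value

static uint32_t MakeKey(int type, int id) {
  return (static_cast<uint32_t>(type) << kReferenceTypeShift) |
         (static_cast<uint32_t>(id) & kReferenceIdMask);
}

int main() {
  uint32_t key = MakeKey(3, 42);
  int type = key >> kReferenceTypeShift;  // same arithmetic as Lookup()
  int id = key & kReferenceIdMask;
  assert(type == 3 && id == 42);
}
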
109 113
110 114
111 class SnapshotByteSource { 115 class SnapshotByteSource {
112 public: 116 public:
113 SnapshotByteSource(const byte* array, int length) 117 SnapshotByteSource(const byte* array, int length)
114 : data_(array), length_(length), position_(0) { } 118 : data_(array), length_(length), position_(0) { }
115 119
116 bool HasMore() { return position_ < length_; } 120 bool HasMore() { return position_ < length_; }
117 121
(...skipping 19 matching lines...)
137 }; 141 };
138 142
139 143
140 // It is very common to have a reference to objects at certain offsets in the 144 // It is very common to have a reference to objects at certain offsets in the
141 // heap. These offsets have been determined experimentally. We code 145 // heap. These offsets have been determined experimentally. We code
142 // references to such objects in a single byte that encodes the way the pointer 146 // references to such objects in a single byte that encodes the way the pointer
143 // is written (only plain pointers allowed), the space number and the offset. 147 // is written (only plain pointers allowed), the space number and the offset.
144 // This only works for objects in the first page of a space. Don't use this for 148 // This only works for objects in the first page of a space. Don't use this for
145 // things in new space since it bypasses the write barrier. 149 // things in new space since it bypasses the write barrier.
146 150
147 static const int k64 = (sizeof(uintptr_t) - 4) / 4; 151 RLYSTC const int k64 = (sizeof(uintptr_t) - 4) / 4;
148 152
149 #define COMMON_REFERENCE_PATTERNS(f) \ 153 #define COMMON_REFERENCE_PATTERNS(f) \
150 f(kNumberOfSpaces, 2, (11 - k64)) \ 154 f(kNumberOfSpaces, 2, (11 - k64)) \
151 f((kNumberOfSpaces + 1), 2, 0) \ 155 f((kNumberOfSpaces + 1), 2, 0) \
152 f((kNumberOfSpaces + 2), 2, (142 - 16 * k64)) \ 156 f((kNumberOfSpaces + 2), 2, (142 - 16 * k64)) \
153 f((kNumberOfSpaces + 3), 2, (74 - 15 * k64)) \ 157 f((kNumberOfSpaces + 3), 2, (74 - 15 * k64)) \
154 f((kNumberOfSpaces + 4), 2, 5) \ 158 f((kNumberOfSpaces + 4), 2, 5) \
155 f((kNumberOfSpaces + 5), 1, 135) \ 159 f((kNumberOfSpaces + 5), 1, 135) \
156 f((kNumberOfSpaces + 6), 2, (228 - 39 * k64)) 160 f((kNumberOfSpaces + 6), 2, (228 - 39 * k64))
157 161
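The comment and macro above encode a reference to a well-known object as a single byte standing for a (space, offset-in-first-page) pair. A rough standalone sketch of that lookup idea; the table entries and the word-offset unit are assumptions for illustration, not the real values:

#include <cstddef>
#include <cstdint>

struct CommonReference {
  int opcode;       // the single byte written to the snapshot
  int space;        // which space's first page holds the object
  int word_offset;  // offset from the start of that first page, in words
};

// A hypothetical table in the spirit of COMMON_REFERENCE_PATTERNS.
static const CommonReference kCommonRefs[] = {
  {0x11, 2, 11},
  {0x12, 2, 0},
};

// Resolving an opcode: look up (space, offset) and add it to the page start.
static uintptr_t Resolve(int opcode, const uintptr_t* first_page_starts) {
  for (const CommonReference& ref : kCommonRefs) {
    if (ref.opcode == opcode) {
      return first_page_starts[ref.space] + ref.word_offset * sizeof(void*);
    }
  }
  return 0;  // not a common reference
}
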
(...skipping 12 matching lines...)
170 f(12, 24) \ 174 f(12, 24) \
171 f(13, 28) \ 175 f(13, 28) \
172 f(14, 32) \ 176 f(14, 32) \
173 f(15, 36) 177 f(15, 36)
174 178
175 // The SerializerDeserializer class is a common superclass for Serializer and 179 // The SerializerDeserializer class is a common superclass for Serializer and
176 // Deserializer; it holds the common constants and methods used by 180 // Deserializer; it holds the common constants and methods used by
177 // both. 181 // both.
178 class SerializerDeserializer: public ObjectVisitor { 182 class SerializerDeserializer: public ObjectVisitor {
179 public: 183 public:
180 static void Iterate(ObjectVisitor* visitor); 184 RLYSTC void Iterate(ObjectVisitor* visitor);
181 static void SetSnapshotCacheSize(int size); 185 RLYSTC void SetSnapshotCacheSize(int size);
182 186
183 protected: 187 protected:
184 // Where the pointed-to object can be found: 188 // Where the pointed-to object can be found:
185 enum Where { 189 enum Where {
186 kNewObject = 0, // Object is next in snapshot. 190 kNewObject = 0, // Object is next in snapshot.
187 // 1-8 One per space. 191 // 1-8 One per space.
188 kRootArray = 0x9, // Object is found in root array. 192 kRootArray = 0x9, // Object is found in root array.
189 kPartialSnapshotCache = 0xa, // Object is in the cache. 193 kPartialSnapshotCache = 0xa, // Object is in the cache.
190 kExternalReference = 0xb, // Pointer to an external reference. 194 kExternalReference = 0xb, // Pointer to an external reference.
191 kSkip = 0xc, // Skip a pointer sized cell. 195 kSkip = 0xc, // Skip a pointer sized cell.
(...skipping 18 matching lines...)
210 214
211 // Where to point within the object. 215 // Where to point within the object.
212 enum WhereToPoint { 216 enum WhereToPoint {
213 kStartOfObject = 0, 217 kStartOfObject = 0,
214 kFirstInstruction = 0x80, 218 kFirstInstruction = 0x80,
215 kWhereToPointMask = 0x80 219 kWhereToPointMask = 0x80
216 }; 220 };
217 221
218 // Misc. 222 // Misc.
219 // Raw data to be copied from the snapshot. 223 // Raw data to be copied from the snapshot.
220 static const int kRawData = 0x30; 224 RLYSTC const int kRawData = 0x30;
221 // Some common raw lengths: 0x31-0x3f 225 // Some common raw lengths: 0x31-0x3f
222 // A tag emitted at strategic points in the snapshot to delineate sections. 226 // A tag emitted at strategic points in the snapshot to delineate sections.
223 // If the deserializer does not find these at the expected moments then it 227 // If the deserializer does not find these at the expected moments then it
224 // is an indication that the snapshot and the VM do not fit together. 228 // is an indication that the snapshot and the VM do not fit together.
225 // Examine the build process for architecture, version or configuration 229 // Examine the build process for architecture, version or configuration
226 // mismatches. 230 // mismatches.
227 static const int kSynchronize = 0x70; 231 RLYSTC const int kSynchronize = 0x70;
228 // Used for the source code of the natives, which is in the executable, but 232 // Used for the source code of the natives, which is in the executable, but
229 // is referred to from external strings in the snapshot. 233 // is referred to from external strings in the snapshot.
230 static const int kNativesStringResource = 0x71; 234 RLYSTC const int kNativesStringResource = 0x71;
231 static const int kNewPage = 0x72; 235 RLYSTC const int kNewPage = 0x72;
232 // 0x73-0x7f Free. 236 // 0x73-0x7f Free.
233 // 0xb0-0xbf Free. 237 // 0xb0-0xbf Free.
234 // 0xf0-0xff Free. 238 // 0xf0-0xff Free.
235 239
236 240
237 static const int kLargeData = LAST_SPACE; 241 RLYSTC const int kLargeData = LAST_SPACE;
238 static const int kLargeCode = kLargeData + 1; 242 RLYSTC const int kLargeCode = kLargeData + 1;
239 static const int kLargeFixedArray = kLargeCode + 1; 243 RLYSTC const int kLargeFixedArray = kLargeCode + 1;
240 static const int kNumberOfSpaces = kLargeFixedArray + 1; 244 RLYSTC const int kNumberOfSpaces = kLargeFixedArray + 1;
241 static const int kAnyOldSpace = -1; 245 RLYSTC const int kAnyOldSpace = -1;
242 246
243 // A bitmask for getting the space out of an instruction. 247 // A bitmask for getting the space out of an instruction.
244 static const int kSpaceMask = 15; 248 RLYSTC const int kSpaceMask = 15;
245 249
246 static inline bool SpaceIsLarge(int space) { return space >= kLargeData; } 250 RLYSTC inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
247 static inline bool SpaceIsPaged(int space) { 251 RLYSTC inline bool SpaceIsPaged(int space) {
248 return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE; 252 return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
249 } 253 }
250
251 static int partial_snapshot_cache_length_;
252 static const int kPartialSnapshotCacheCapacity = 1400;
253 static Object* partial_snapshot_cache_[];
254 }; 254 };
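
The enums and masks above pack several fields into one tag byte: the low bits select the space or Where code, and the 0x80 bit selects where inside the object to point. A small sketch of unpacking such a byte using only the masks visible here; the HowToCode bits are elided in this diff and are not modeled:

#include <cstdio>

static const int kSpaceMask = 15;           // from the class above
static const int kWhereToPointMask = 0x80;  // from the WhereToPoint enum

static void DescribeTag(int tag) {
  int space = tag & kSpaceMask;  // which space the object lives in
  bool first_instruction = (tag & kWhereToPointMask) != 0;
  std::printf("space=%d, points at %s\n", space,
              first_instruction ? "first instruction" : "start of object");
}
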
255 255
256 256
257 int SnapshotByteSource::GetInt() { 257 int SnapshotByteSource::GetInt() {
258 // A little unwind to catch the really small ints. 258 // A little unwind to catch the really small ints.
259 int snapshot_byte = Get(); 259 int snapshot_byte = Get();
260 if ((snapshot_byte & 0x80) == 0) { 260 if ((snapshot_byte & 0x80) == 0) {
261 return snapshot_byte; 261 return snapshot_byte;
262 } 262 }
263 int accumulator = (snapshot_byte & 0x7f) << 7; 263 int accumulator = (snapshot_byte & 0x7f) << 7;
(...skipping 43 matching lines...)
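The visible prefix of GetInt() suggests a variable-length integer encoding in which values below 0x80 occupy a single byte and larger values continue in 7-bit groups. The rest of the function is elided above; the following standalone decoder is only one scheme consistent with that prefix, not the elided body:

#include <cassert>
#include <cstdint>

static int DecodeVarInt(const uint8_t* data, int* pos) {
  int b = data[(*pos)++];
  if ((b & 0x80) == 0) return b;  // really small ints fit in one byte
  int accumulator = (b & 0x7f) << 7;
  while (true) {
    b = data[(*pos)++];
    if ((b & 0x80) == 0) return accumulator | b;  // clear top bit ends value
    accumulator = (accumulator | (b & 0x7f)) << 7;
  }
}

int main() {
  const uint8_t bytes[] = {0x05, 0x81, 0x02};  // 5, then (1 << 7) | 2 = 130
  int pos = 0;
  assert(DecodeVarInt(bytes, &pos) == 5);
  assert(DecodeVarInt(bytes, &pos) == 130);
}
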
307 virtual void VisitRuntimeEntry(RelocInfo* rinfo) { 307 virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
308 UNREACHABLE(); 308 UNREACHABLE();
309 } 309 }
310 310
311 void ReadChunk(Object** start, Object** end, int space, Address address); 311 void ReadChunk(Object** start, Object** end, int space, Address address);
312 HeapObject* GetAddressFromStart(int space); 312 HeapObject* GetAddressFromStart(int space);
313 inline HeapObject* GetAddressFromEnd(int space); 313 inline HeapObject* GetAddressFromEnd(int space);
314 Address Allocate(int space_number, Space* space, int size); 314 Address Allocate(int space_number, Space* space, int size);
315 void ReadObject(int space_number, Space* space, Object** write_back); 315 void ReadObject(int space_number, Space* space, Object** write_back);
316 316
317 // Cached current isolate.
318 Isolate* isolate_;
319
317 // Keep track of the pages in the paged spaces. 320 // Keep track of the pages in the paged spaces.
318 // (In large object space we are keeping track of individual objects 321 // (In large object space we are keeping track of individual objects
319 // rather than pages.) In new space we just need the address of the 322 // rather than pages.) In new space we just need the address of the
320 // first object and the others will flow from that. 323 // first object and the others will flow from that.
321 List<Address> pages_[SerializerDeserializer::kNumberOfSpaces]; 324 List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
322 325
323 SnapshotByteSource* source_; 326 SnapshotByteSource* source_;
324 static ExternalReferenceDecoder* external_reference_decoder_;
325 // This is the address of the next object that will be allocated in each 327 // This is the address of the next object that will be allocated in each
326 // space. It is used to calculate the addresses of back-references. 328 // space. It is used to calculate the addresses of back-references.
327 Address high_water_[LAST_SPACE + 1]; 329 Address high_water_[LAST_SPACE + 1];
328 // This is the address of the most recent object that was allocated. It 330 // This is the address of the most recent object that was allocated. It
329 // is used to set the location of the new page when we encounter a 331 // is used to set the location of the new page when we encounter a
330 // START_NEW_PAGE_SERIALIZATION tag. 332 // START_NEW_PAGE_SERIALIZATION tag.
331 Address last_object_address_; 333 Address last_object_address_;
332 334
335 ExternalReferenceDecoder* external_reference_decoder_;
336
333 DISALLOW_COPY_AND_ASSIGN(Deserializer); 337 DISALLOW_COPY_AND_ASSIGN(Deserializer);
334 }; 338 };
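
The comments above describe high_water_ as the next allocation address in each space, used to resolve back references, and last_object_address_ as the most recently allocated object. A simplified sketch of that bookkeeping under those assumptions; this is not the real GetAddressFromStart/GetAddressFromEnd logic:

#include <cstdint>

static const int kNumberOfSpacesForSketch = 12;  // illustrative size only

struct BackRefState {
  uintptr_t high_water[kNumberOfSpacesForSketch] = {};  // next allocation address

  // Record an allocation so later back references can find the object.
  void Allocated(int space, int size) { high_water[space] += size; }

  // Resolve an object serialized as "offset bytes before the high-water mark".
  uintptr_t FromEnd(int space, int offset) const {
    return high_water[space] - offset;
  }
};
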
335 339
336 340
337 class SnapshotByteSink { 341 class SnapshotByteSink {
338 public: 342 public:
339 virtual ~SnapshotByteSink() { } 343 virtual ~SnapshotByteSink() { }
340 virtual void Put(int byte, const char* description) = 0; 344 virtual void Put(int byte, const char* description) = 0;
341 virtual void PutSection(int byte, const char* description) { 345 virtual void PutSection(int byte, const char* description) {
342 Put(byte, description); 346 Put(byte, description);
(...skipping 27 matching lines...)
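SnapshotByteSink above is an abstract sink with Put() as the only pure virtual method and PutSection() defaulting to Put(). A hypothetical in-memory sink in the same spirit, with the interface re-sketched minimally so the example stands alone:

#include <vector>

class ByteSinkSketch {
 public:
  virtual ~ByteSinkSketch() {}
  virtual void Put(int byte, const char* description) = 0;
  virtual void PutSection(int byte, const char* description) {
    Put(byte, description);
  }
};

// Collects every emitted byte in memory, ignoring the descriptions.
class ListByteSink : public ByteSinkSketch {
 public:
  virtual void Put(int byte, const char* /*description*/) {
    data_.push_back(static_cast<unsigned char>(byte));
  }
  const std::vector<unsigned char>& data() const { return data_; }
 private:
  std::vector<unsigned char> data_;
};
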
370 } 374 }
371 375
372 void AddMapping(HeapObject* obj, int to) { 376 void AddMapping(HeapObject* obj, int to) {
373 ASSERT(!IsMapped(obj)); 377 ASSERT(!IsMapped(obj));
374 HashMap::Entry* entry = 378 HashMap::Entry* entry =
375 serialization_map_->Lookup(Key(obj), Hash(obj), true); 379 serialization_map_->Lookup(Key(obj), Hash(obj), true);
376 entry->value = Value(to); 380 entry->value = Value(to);
377 } 381 }
378 382
379 private: 383 private:
380 static bool SerializationMatchFun(void* key1, void* key2) { 384 RLYSTC bool SerializationMatchFun(void* key1, void* key2) {
381 return key1 == key2; 385 return key1 == key2;
382 } 386 }
383 387
384 static uint32_t Hash(HeapObject* obj) { 388 RLYSTC uint32_t Hash(HeapObject* obj) {
385 return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address())); 389 return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
386 } 390 }
387 391
388 static void* Key(HeapObject* obj) { 392 RLYSTC void* Key(HeapObject* obj) {
389 return reinterpret_cast<void*>(obj->address()); 393 return reinterpret_cast<void*>(obj->address());
390 } 394 }
391 395
392 static void* Value(int v) { 396 RLYSTC void* Value(int v) {
393 return reinterpret_cast<void*>(v); 397 return reinterpret_cast<void*>(v);
394 } 398 }
395 399
396 HashMap* serialization_map_; 400 HashMap* serialization_map_;
397 AssertNoAllocation* no_allocation_; 401 AssertNoAllocation* no_allocation_;
398 DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper); 402 DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
399 }; 403 };
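
SerializationAddressMapper stores a small integer per heap object by keying a hash map on the object's address and smuggling the integer through a void* slot (the Key/Value helpers above). A standalone sketch of that trick with std::unordered_map, purely illustrative:

#include <cassert>
#include <cstdint>
#include <unordered_map>

static void* PackValue(int v) {
  return reinterpret_cast<void*>(static_cast<intptr_t>(v));
}
static int UnpackValue(void* v) {
  return static_cast<int>(reinterpret_cast<intptr_t>(v));
}

int main() {
  std::unordered_map<const void*, void*> map;
  int dummy = 0;                    // stands in for a HeapObject address
  map[&dummy] = PackValue(42);      // like AddMapping(obj, 42)
  assert(UnpackValue(map[&dummy]) == 42);  // later lookup recovers the index
}
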
400 404
401 405
402 class Serializer : public SerializerDeserializer { 406 // There can be only one serializer per V8 process.
407 STATIC_CLASS Serializer : public SerializerDeserializer {
403 public: 408 public:
404 explicit Serializer(SnapshotByteSink* sink); 409 explicit Serializer(SnapshotByteSink* sink);
405 ~Serializer(); 410 ~Serializer();
406 void VisitPointers(Object** start, Object** end); 411 void VisitPointers(Object** start, Object** end);
407 // You can call this after serialization to find out how much space was used 412 // You can call this after serialization to find out how much space was used
408 // in each space. 413 // in each space.
409 int CurrentAllocationAddress(int space) { 414 int CurrentAllocationAddress(int space) {
410 if (SpaceIsLarge(space)) return large_object_total_; 415 if (SpaceIsLarge(space)) return large_object_total_;
411 return fullness_[space]; 416 return fullness_[space];
412 } 417 }
413 418
414 static void Enable() { 419 RLYSTC void Enable() {
415 if (!serialization_enabled_) { 420 if (!serialization_enabled_) {
416 ASSERT(!too_late_to_enable_now_); 421 ASSERT(!too_late_to_enable_now_);
417 } 422 }
418 serialization_enabled_ = true; 423 serialization_enabled_ = true;
419 } 424 }
420 425
421 static void Disable() { serialization_enabled_ = false; } 426 RLYSTC void Disable() { serialization_enabled_ = false; }
422 // Call this when you have made use of the fact that there is no serialization 427 // Call this when you have made use of the fact that there is no serialization
423 // going on. 428 // going on.
424 static void TooLateToEnableNow() { too_late_to_enable_now_ = true; } 429 RLYSTC void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
425 static bool enabled() { return serialization_enabled_; } 430 RLYSTC bool enabled() { return serialization_enabled_; }
426 SerializationAddressMapper* address_mapper() { return &address_mapper_; } 431 SerializationAddressMapper* address_mapper() { return &address_mapper_; }
427 #ifdef DEBUG 432 #ifdef DEBUG
428 virtual void Synchronize(const char* tag); 433 virtual void Synchronize(const char* tag);
429 #endif 434 #endif
430 435
431 protected: 436 protected:
432 static const int kInvalidRootIndex = -1; 437 RLYSTC const int kInvalidRootIndex = -1;
433 virtual int RootIndex(HeapObject* heap_object) = 0; 438 virtual int RootIndex(HeapObject* heap_object) = 0;
434 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0; 439 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
435 440
436 class ObjectSerializer : public ObjectVisitor { 441 class ObjectSerializer : public ObjectVisitor {
437 public: 442 public:
438 ObjectSerializer(Serializer* serializer, 443 ObjectSerializer(Serializer* serializer,
439 Object* o, 444 Object* o,
440 SnapshotByteSink* sink, 445 SnapshotByteSink* sink,
441 HowToCode how_to_code, 446 HowToCode how_to_code,
442 WhereToPoint where_to_point) 447 WhereToPoint where_to_point)
(...skipping 34 matching lines...)
477 void SerializeReferenceToPreviousObject( 482 void SerializeReferenceToPreviousObject(
478 int space, 483 int space,
479 int address, 484 int address,
480 HowToCode how_to_code, 485 HowToCode how_to_code,
481 WhereToPoint where_to_point); 486 WhereToPoint where_to_point);
482 void InitializeAllocators(); 487 void InitializeAllocators();
483 // This will return the space for an object. If the object is in large 488 // This will return the space for an object. If the object is in large
484 // object space it may return kLargeCode or kLargeFixedArray in order 489 // object space it may return kLargeCode or kLargeFixedArray in order
485 // to indicate to the deserializer what kind of large object allocation 490 // to indicate to the deserializer what kind of large object allocation
486 // to make. 491 // to make.
487 static int SpaceOfObject(HeapObject* object); 492 RLYSTC int SpaceOfObject(HeapObject* object);
488 // This just returns the space of the object. It will return LO_SPACE 493 // This just returns the space of the object. It will return LO_SPACE
489 // for all large objects since you can't check the type of the object 494 // for all large objects since you can't check the type of the object
490 // once the map has been used for the serialization address. 495 // once the map has been used for the serialization address.
491 static int SpaceOfAlreadySerializedObject(HeapObject* object); 496 RLYSTC int SpaceOfAlreadySerializedObject(HeapObject* object);
492 int Allocate(int space, int size, bool* new_page_started); 497 int Allocate(int space, int size, bool* new_page_started);
493 int EncodeExternalReference(Address addr) { 498 int EncodeExternalReference(Address addr) {
494 return external_reference_encoder_->Encode(addr); 499 return external_reference_encoder_->Encode(addr);
495 } 500 }
496 501
497 // Keep track of the fullness of each space in order to generate 502 // Keep track of the fullness of each space in order to generate
498 // relative addresses for back references. Large objects are 503 // relative addresses for back references. Large objects are
499 // just numbered sequentially since relative addresses make no 504 // just numbered sequentially since relative addresses make no
500 // sense in large object space. 505 // sense in large object space.
501 int fullness_[LAST_SPACE + 1]; 506 int fullness_[LAST_SPACE + 1];
502 SnapshotByteSink* sink_; 507 SnapshotByteSink* sink_;
503 int current_root_index_; 508 int current_root_index_;
504 ExternalReferenceEncoder* external_reference_encoder_; 509 ExternalReferenceEncoder* external_reference_encoder_;
505 static bool serialization_enabled_; 510 RLYSTC bool serialization_enabled_;
506 // Did we already make use of the fact that serialization was not enabled? 511 // Did we already make use of the fact that serialization was not enabled?
507 static bool too_late_to_enable_now_; 512 RLYSTC bool too_late_to_enable_now_;
508 int large_object_total_; 513 int large_object_total_;
509 SerializationAddressMapper address_mapper_; 514 SerializationAddressMapper address_mapper_;
510 515
511 friend class ObjectSerializer; 516 friend class ObjectSerializer;
512 friend class Deserializer; 517 friend class Deserializer;
513 518
514 DISALLOW_COPY_AND_ASSIGN(Serializer); 519 DISALLOW_COPY_AND_ASSIGN(Serializer);
515 }; 520 };
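
Enable(), Disable() and TooLateToEnableNow() above implement a guard: once some code path has relied on serialization being off, enabling it later must fail an assert. A standalone sketch of that protocol, with flags local to the sketch rather than V8's statics:

#include <cassert>

static bool serialization_enabled = false;
static bool too_late_to_enable_now = false;

// Code that takes a shortcut valid only while no serialization is happening
// records that fact first.
static void TookNoSerializationShortcut() { too_late_to_enable_now = true; }

// Enabling afterwards would make the earlier shortcut unsound, so assert.
static void EnableSerialization() {
  if (!serialization_enabled) assert(!too_late_to_enable_now);
  serialization_enabled = true;
}
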
516 521
517 522
(...skipping 15 matching lines...)
533 virtual int RootIndex(HeapObject* o); 538 virtual int RootIndex(HeapObject* o);
534 virtual int PartialSnapshotCacheIndex(HeapObject* o); 539 virtual int PartialSnapshotCacheIndex(HeapObject* o);
535 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { 540 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
536 // Scripts should be referred to only through shared function infos. We can't 541 // Scripts should be referred to only through shared function infos. We can't
537 // allow them to be part of the partial snapshot because they contain a 542 // allow them to be part of the partial snapshot because they contain a
538 // unique ID, and deserializing several partial snapshots containing scripts 543 // unique ID, and deserializing several partial snapshots containing scripts
539 // would cause dupes. 544 // would cause dupes.
540 ASSERT(!o->IsScript()); 545 ASSERT(!o->IsScript());
541 return o->IsString() || o->IsSharedFunctionInfo() || 546 return o->IsString() || o->IsSharedFunctionInfo() ||
542 o->IsHeapNumber() || o->IsCode() || 547 o->IsHeapNumber() || o->IsCode() ||
543 o->map() == Heap::fixed_cow_array_map(); 548 o->map() == HEAP->fixed_cow_array_map();
544 } 549 }
545 550
546 private: 551 private:
547 Serializer* startup_serializer_; 552 Serializer* startup_serializer_;
548 DISALLOW_COPY_AND_ASSIGN(PartialSerializer); 553 DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
549 }; 554 };
550 555
551 556
552 class StartupSerializer : public Serializer { 557 class StartupSerializer : public Serializer {
553 public: 558 public:
554 explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) { 559 explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
555 // Clear the cache of objects used by the partial snapshot. After the 560 // Clear the cache of objects used by the partial snapshot. After the
556 // strong roots have been serialized we can create a partial snapshot 561 // strong roots have been serialized we can create a partial snapshot
557 // which will repopulate the cache with objects needed by that partial 562 // which will repopulate the cache with objects needed by that partial
558 // snapshot. 563 // snapshot.
559 partial_snapshot_cache_length_ = 0; 564 Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
560 } 565 }
561 // Serialize the current state of the heap. The order is: 566 // Serialize the current state of the heap. The order is:
562 // 1) Strong references. 567 // 1) Strong references.
563 // 2) Partial snapshot cache. 568 // 2) Partial snapshot cache.
564 // 3) Weak references (eg the symbol table). 569 // 3) Weak references (eg the symbol table).
565 virtual void SerializeStrongReferences(); 570 virtual void SerializeStrongReferences();
566 virtual void SerializeObject(Object* o, 571 virtual void SerializeObject(Object* o,
567 HowToCode how_to_code, 572 HowToCode how_to_code,
568 WhereToPoint where_to_point); 573 WhereToPoint where_to_point);
569 void SerializeWeakReferences(); 574 void SerializeWeakReferences();
570 void Serialize() { 575 void Serialize() {
571 SerializeStrongReferences(); 576 SerializeStrongReferences();
572 SerializeWeakReferences(); 577 SerializeWeakReferences();
573 } 578 }
574 579
575 private: 580 private:
576 virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; } 581 virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
577 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { 582 virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
578 return false; 583 return false;
579 } 584 }
580 }; 585 };
581 586
582 587
583 } } // namespace v8::internal 588 } } // namespace v8::internal
584 589
585 #endif // V8_SERIALIZE_H_ 590 #endif // V8_SERIALIZE_H_