// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SERIALIZE_H_
#define V8_SERIALIZE_H_

#include "src/hashmap.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/snapshot-source-sink.h"

namespace v8 {
namespace internal {

class ScriptData;

static const int kDeoptTableSerializeEntryCount = 64;

// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
 public:
  static ExternalReferenceTable* instance(Isolate* isolate);

  int size() const { return refs_.length(); }
  Address address(int i) { return refs_[i].address; }
  const char* name(int i) { return refs_[i].name; }

  inline static Address NotAvailable() { return NULL; }

 private:
  struct ExternalReferenceEntry {
    Address address;
    const char* name;
  };

  explicit ExternalReferenceTable(Isolate* isolate);

  void Add(Address address, const char* name) {
    ExternalReferenceEntry entry = {address, name};
    refs_.Add(entry);
  }

  List<ExternalReferenceEntry> refs_;

  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
};


class ExternalReferenceEncoder {
 public:
  explicit ExternalReferenceEncoder(Isolate* isolate);

  uint32_t Encode(Address key) const;

  const char* NameOfAddress(Isolate* isolate, Address address) const;

 private:
  static uint32_t Hash(Address key) {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
                                 kPointerSizeLog2);
  }

  HashMap* map_;

  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
};
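
// A minimal usage sketch (not part of the API; `isolate` and `addr` are
// placeholders for a live Isolate* and an address from the table above):
//
//   ExternalReferenceEncoder encoder(isolate);
//   uint32_t id = encoder.Encode(addr);                       // stable id
//   const char* name = encoder.NameOfAddress(isolate, addr);  // for logging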


class AddressMapBase {
 protected:
  static void SetValue(HashMap::Entry* entry, uint32_t v) {
    entry->value = reinterpret_cast<void*>(v);
  }

  static uint32_t GetValue(HashMap::Entry* entry) {
    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
  }

  inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
                                            bool insert) {
    return map->Lookup(Key(obj), Hash(obj), insert);
  }

 private:
  static uint32_t Hash(HeapObject* obj) {
    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(obj->address()));
  }

  static void* Key(HeapObject* obj) {
    return reinterpret_cast<void*>(obj->address());
  }
};


class RootIndexMap : public AddressMapBase {
 public:
  explicit RootIndexMap(Isolate* isolate);

  static const int kInvalidRootIndex = -1;

  int Lookup(HeapObject* obj) {
    HashMap::Entry* entry = LookupEntry(map_, obj, false);
    if (entry) return GetValue(entry);
    return kInvalidRootIndex;
  }

 private:
  HashMap* map_;

  DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
};


class PartialCacheIndexMap : public AddressMapBase {
 public:
  PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}

  static const int kInvalidIndex = -1;

  // Lookup object in the map. Return its index if found, or create
  // a new entry with new_index as value, and return kInvalidIndex.
  int LookupOrInsert(HeapObject* obj, int new_index) {
    HashMap::Entry* entry = LookupEntry(&map_, obj, false);
    if (entry != NULL) return GetValue(entry);
    SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
    return kInvalidIndex;
  }

 private:
  HashMap map_;

  DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
};


class BackReference {
 public:
  explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}

  BackReference() : bitfield_(kInvalidValue) {}

  static BackReference SourceReference() { return BackReference(kSourceValue); }

  static BackReference GlobalProxyReference() {
    return BackReference(kGlobalProxyValue);
  }

  static BackReference LargeObjectReference(uint32_t index) {
    return BackReference(SpaceBits::encode(LO_SPACE) |
                         ChunkOffsetBits::encode(index));
  }

  static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
                                 uint32_t chunk_offset) {
    DCHECK(IsAligned(chunk_offset, kObjectAlignment));
    DCHECK_NE(LO_SPACE, space);
    return BackReference(
        SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
        ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
  }

  bool is_valid() const { return bitfield_ != kInvalidValue; }
  bool is_source() const { return bitfield_ == kSourceValue; }
  bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }

  AllocationSpace space() const {
    DCHECK(is_valid());
    return SpaceBits::decode(bitfield_);
  }

  uint32_t chunk_offset() const {
    DCHECK(is_valid());
    return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
  }

  uint32_t large_object_index() const {
    DCHECK(is_valid());
    DCHECK(chunk_index() == 0);
    return ChunkOffsetBits::decode(bitfield_);
  }

  uint32_t chunk_index() const {
    DCHECK(is_valid());
    return ChunkIndexBits::decode(bitfield_);
  }

  uint32_t reference() const {
    DCHECK(is_valid());
    return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
  }

  uint32_t bitfield() const { return bitfield_; }

 private:
  static const uint32_t kInvalidValue = 0xFFFFFFFF;
  static const uint32_t kSourceValue = 0xFFFFFFFE;
  static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
  static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
  static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
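
  // Layout of bitfield_, from least to most significant bits (a sketch
  // derived from the BitField definitions below; the concrete widths depend
  // on the target's kPageSizeBits, kObjectAlignmentBits and kSpaceTagSize):
  //
  //   [ chunk offset >> kObjectAlignmentBits | chunk index | space tag ]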

 public:
  static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;

 private:
  class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
  class ChunkIndexBits
      : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
  class SpaceBits
      : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
  };

  uint32_t bitfield_;
};


// Mapping objects to their location after deserialization.
// This is used during building, but not at runtime by V8.
class BackReferenceMap : public AddressMapBase {
 public:
  BackReferenceMap()
      : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}

  ~BackReferenceMap() { delete map_; }

  BackReference Lookup(HeapObject* obj) {
    HashMap::Entry* entry = LookupEntry(map_, obj, false);
    return entry ? BackReference(GetValue(entry)) : BackReference();
  }

  void Add(HeapObject* obj, BackReference b) {
    DCHECK(b.is_valid());
    DCHECK_NULL(LookupEntry(map_, obj, false));
    HashMap::Entry* entry = LookupEntry(map_, obj, true);
    SetValue(entry, b.bitfield());
  }

  void AddSourceString(String* string) {
    Add(string, BackReference::SourceReference());
  }

  void AddGlobalProxy(HeapObject* global_proxy) {
    Add(global_proxy, BackReference::GlobalProxyReference());
  }

 private:
  DisallowHeapAllocation no_allocation_;
  HashMap* map_;
  DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
};


class HotObjectsList {
 public:
  HotObjectsList() : index_(0) {
    for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
  }

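  // Adds an object, overwriting the entry made kSize insertions earlier;
  // the power-of-two mask below makes the write index wrap around.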
  void Add(HeapObject* object) {
    circular_queue_[index_] = object;
    index_ = (index_ + 1) & kSizeMask;
  }

  HeapObject* Get(int index) {
    DCHECK_NOT_NULL(circular_queue_[index]);
    return circular_queue_[index];
  }

  static const int kNotFound = -1;

  int Find(HeapObject* object) {
    for (int i = 0; i < kSize; i++) {
      if (circular_queue_[i] == object) return i;
    }
    return kNotFound;
  }

  static const int kSize = 8;

 private:
  STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
  static const int kSizeMask = kSize - 1;
  HeapObject* circular_queue_[kSize];
  int index_;

  DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
};


// SerializerDeserializer is the common superclass of Serializer and
// Deserializer; it holds the constants and methods shared by both.
class SerializerDeserializer : public ObjectVisitor {
 public:
  static void Iterate(Isolate* isolate, ObjectVisitor* visitor);

  static int nop() { return kNop; }

  // No reservation for large object space necessary.
  static const int kNumberOfPreallocatedSpaces = LO_SPACE;
  static const int kNumberOfSpaces = LAST_SPACE + 1;

 protected:
  // ---------- byte code range 0x00..0x7f ----------
  // Byte codes in this range represent Where, HowToCode and WhereToPoint.
  // Where the pointed-to object can be found:
  enum Where {
    // 0x00..0x05 Allocate new object, in specified space.
    kNewObject = 0,
    // 0x06 Unused (including 0x26, 0x46, 0x66).
    // 0x07 Unused (including 0x27, 0x47, 0x67).
    // 0x08..0x0d Reference to previous object from space.
    kBackref = 0x08,
    // 0x0e Unused (including 0x2e, 0x4e, 0x6e).
    // 0x0f Unused (including 0x2f, 0x4f, 0x6f).
    // 0x10..0x15 Reference to previous object from space after skip.
    kBackrefWithSkip = 0x10,
    // 0x16 Unused (including 0x36, 0x56, 0x76).
    // 0x17 Unused (including 0x37, 0x57, 0x77).
    // 0x18 Root array item.
    kRootArray = 0x18,
    // 0x19 Object in the partial snapshot cache.
    kPartialSnapshotCache = 0x19,
    // 0x1a External reference referenced by id.
    kExternalReference = 0x1a,
    // 0x1b Object provided in the attached list.
    kAttachedReference = 0x1b,
    // 0x1c Builtin code referenced by index.
    kBuiltin = 0x1c
    // 0x1d..0x1f Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
  };

  static const int kWhereMask = 0x1f;
  static const int kSpaceMask = 7;
  STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);

  // How to code the pointer to the object.
  enum HowToCode {
    // Straight pointer.
    kPlain = 0,
    // A pointer inlined in code. What this means depends on the architecture.
    kFromCode = 0x20
  };

  static const int kHowToCodeMask = 0x20;

  // Where to point within the object.
  enum WhereToPoint {
    // Points to start of object.
    kStartOfObject = 0,
    // Points to instruction in code object or payload of cell.
    kInnerPointer = 0x40
  };

  static const int kWhereToPointMask = 0x40;
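
  // An illustrative composition of the masks above: a byte code in
  // 0x00..0x7f combines a Where value (plus a space number for kNewObject
  // and the kBackref variants) with one HowToCode bit and one WhereToPoint
  // bit, e.g.
  //
  //   (kBackref + OLD_POINTER_SPACE) | kPlain | kStartOfObject
  //
  // for a plain pointer to the start of an already-deserialized object in
  // old pointer space.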

  // ---------- Misc ----------
  // Skip.
  static const int kSkip = 0x1d;
  // Internal reference encoded as offsets of pc and target from code entry.
  static const int kInternalReference = 0x1e;
  static const int kInternalReferenceEncoded = 0x1f;
  // Do nothing, used for padding.
  static const int kNop = 0x3d;
  // Move to next reserved chunk.
  static const int kNextChunk = 0x3e;
  // A tag emitted at strategic points in the snapshot to delineate sections.
  // If the deserializer does not find these at the expected moments then it
  // is an indication that the snapshot and the VM do not fit together.
  // Examine the build process for architecture, version or configuration
  // mismatches.
  static const int kSynchronize = 0x5d;
  // Used for the source code of the natives, which is in the executable, but
  // is referred to from external strings in the snapshot.
  static const int kNativesStringResource = 0x5e;
  // Raw data of variable length.
  static const int kVariableRawData = 0x7d;
  // Repeats of variable length.
  static const int kVariableRepeat = 0x7e;

  // ---------- byte code range 0x80..0xff ----------
  // First 32 root array items.
  static const int kNumberOfRootArrayConstants = 0x20;
  // 0x80..0x9f
  static const int kRootArrayConstants = 0x80;
  // 0xa0..0xbf
  static const int kRootArrayConstantsWithSkip = 0xa0;
  static const int kRootArrayConstantsMask = 0x1f;

  // 8 hot (recently seen or back-referenced) objects with optional skip.
  static const int kNumberOfHotObjects = 0x08;
  // 0xc0..0xc7
  static const int kHotObject = 0xc0;
  // 0xc8..0xcf
  static const int kHotObjectWithSkip = 0xc8;
  static const int kHotObjectMask = 0x07;

  // 32 common raw data lengths.
  static const int kNumberOfFixedRawData = 0x20;
  // 0xd0..0xef
  static const int kFixedRawData = 0xd0;
  static const int kOnePointerRawData = kFixedRawData;
  static const int kFixedRawDataStart = kFixedRawData - 1;

  // 16 repeat lengths.
  static const int kNumberOfFixedRepeat = 0x10;
  // 0xf0..0xff
  static const int kFixedRepeat = 0xf0;
  static const int kFixedRepeatStart = kFixedRepeat - 1;
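
  // A decoding sketch for the fixed-length codes above: for a byte code b
  // with kFixedRawDataStart < b <= kFixedRawDataStart + kNumberOfFixedRawData,
  // the length is b - kFixedRawDataStart pointer-sized words, so that
  // kOnePointerRawData (== kFixedRawDataStart + 1) carries exactly one word;
  // fixed repeats decode analogously relative to kFixedRepeatStart.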

  // ---------- special values ----------
  static const int kAnyOldSpace = -1;

  // Sentinel after a new object to indicate that double alignment is needed.
  static const int kDoubleAlignmentSentinel = 0;

  // Used as index for the attached reference representing the source object.
  static const int kSourceObjectReference = 0;

  // Used as index for the attached reference representing the global proxy.
  static const int kGlobalProxyReference = 0;

  // ---------- member variable ----------
  HotObjectsList hot_objects_;
};


class SerializedData {
 public:
  class Reservation {
   public:
    explicit Reservation(uint32_t size)
        : reservation_(ChunkSizeBits::encode(size)) {}

    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
    bool is_last() const { return IsLastChunkBits::decode(reservation_); }

    void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }

   private:
    uint32_t reservation_;
  };

  SerializedData(byte* data, int size)
      : data_(data), size_(size), owns_data_(false) {}
  SerializedData() : data_(NULL), size_(0), owns_data_(false) {}

  ~SerializedData() {
    if (owns_data_) DeleteArray<byte>(data_);
  }

  uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }

  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
  class IsLastChunkBits : public BitField<bool, 31, 1> {};
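  // A Reservation thus packs the chunk size into the low 31 bits and uses
  // the top bit to mark the last chunk reserved for a space.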

  static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
    uint32_t external_refs = table->size();
    return 0xC0DE0000 ^ external_refs;
  }
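  // For example, a table with 42 (0x2A) external references yields the
  // magic number 0xC0DE002A.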

 protected:
  void SetHeaderValue(int offset, uint32_t value) {
    memcpy(data_ + offset, &value, sizeof(value));
  }

  uint32_t GetHeaderValue(int offset) const {
    uint32_t value;
    memcpy(&value, data_ + offset, sizeof(value));
    return value;
  }

  void AllocateData(int size);

  static uint32_t ComputeMagicNumber(Isolate* isolate) {
    return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
  }

  void SetMagicNumber(Isolate* isolate) {
    SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
  }

  static const int kMagicNumberOffset = 0;

  byte* data_;
  int size_;
  bool owns_data_;
};


// A Deserializer reads a snapshot and reconstructs the Object graph it
// defines.
class Deserializer : public SerializerDeserializer {
 public:
  // Create a deserializer from a snapshot byte source.
  template <class Data>
  explicit Deserializer(Data* data)
      : isolate_(NULL),
        source_(data->Payload()),
        magic_number_(data->GetMagicNumber()),
        external_reference_table_(NULL),
        deserialized_large_objects_(0),
        deserializing_user_code_(false) {
    DecodeReservation(data->Reservations());
  }

  virtual ~Deserializer();

  // Deserialize the snapshot into an empty heap.
  void Deserialize(Isolate* isolate);

  // Deserialize a single object and the objects reachable from it.
  MaybeHandle<Object> DeserializePartial(
      Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
      Handle<FixedArray>* outdated_contexts_out);

  // Deserialize a shared function info. Fail gracefully.
  MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);

  void FlushICacheForNewCodeObjects();

  // Pass a vector of externally-provided objects referenced by the snapshot.
  // Ownership of the vector's backing store is handed over as well.
  void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
    attached_objects_ = attached_objects;
  }

 private:
  virtual void VisitPointers(Object** start, Object** end);

  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
    UNREACHABLE();
  }

  void Initialize(Isolate* isolate);

  bool deserializing_user_code() { return deserializing_user_code_; }

  void DecodeReservation(Vector<const SerializedData::Reservation> res);

  bool ReserveSpace();

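  // Copies one pointer-sized word with memcpy, since the source or
  // destination may not be naturally aligned for a direct load or store.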
  void UnalignedCopy(Object** dest, Object** src) {
    memcpy(dest, src, sizeof(*src));
  }

  // Allocation sites are present in the snapshot, and must be linked into
  // a list at deserialization time.
  void RelinkAllocationSite(AllocationSite* site);

  // Fills in some heap data in an area from start to end (non-inclusive). The
  // space id is used for the write barrier. The object_address is the address
  // of the object we are writing into, or NULL if we are not writing into an
  // object, i.e. if we are writing a series of tagged values that are not on
  // the heap.
  void ReadData(Object** start, Object** end, int space,
                Address object_address);
  void ReadObject(int space_number, Object** write_back);
  Address Allocate(int space_index, int size);

  // Special handling for serialized code like hooking up internalized strings.
  HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);

  // This returns the address of an object that has been described in the
  // snapshot by chunk index and offset.
  HeapObject* GetBackReferencedObject(int space);

  // Cached current isolate.
  Isolate* isolate_;

  // Objects from the attached object descriptions in the serialized user code.
  Vector<Handle<Object> > attached_objects_;

  SnapshotByteSource source_;
  uint32_t magic_number_;

  // The address of the next object that will be allocated in each space.
  // Each space has a number of chunks reserved by the GC, with each chunk
  // fitting into a page. Deserialized objects are allocated into the
  // current chunk of the target space by bumping up the high water mark.
  Heap::Reservation reservations_[kNumberOfSpaces];
  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
  Address high_water_[kNumberOfPreallocatedSpaces];

  ExternalReferenceTable* external_reference_table_;

  List<HeapObject*> deserialized_large_objects_;

  bool deserializing_user_code_;

  DISALLOW_COPY_AND_ASSIGN(Deserializer);
};


class CodeAddressMap;

// There can be only one serializer per V8 process.
class Serializer : public SerializerDeserializer {
 public:
  Serializer(Isolate* isolate, SnapshotByteSink* sink);
  ~Serializer();
  void VisitPointers(Object** start, Object** end) OVERRIDE;

  void EncodeReservations(List<SerializedData::Reservation>* out) const;

  Isolate* isolate() const { return isolate_; }

  BackReferenceMap* back_reference_map() { return &back_reference_map_; }
  RootIndexMap* root_index_map() { return &root_index_map_; }

 protected:
  class ObjectSerializer : public ObjectVisitor {
   public:
    ObjectSerializer(Serializer* serializer, Object* o, SnapshotByteSink* sink,
                     HowToCode how_to_code, WhereToPoint where_to_point)
        : serializer_(serializer),
          object_(HeapObject::cast(o)),
          sink_(sink),
          reference_representation_(how_to_code + where_to_point),
          bytes_processed_so_far_(0),
          is_code_object_(o->IsCode()),
          code_has_been_output_(false) {}
    void Serialize();
    void VisitPointers(Object** start, Object** end);
    void VisitEmbeddedPointer(RelocInfo* target);
    void VisitExternalReference(Address* p);
    void VisitExternalReference(RelocInfo* rinfo);
    void VisitInternalReference(RelocInfo* rinfo);
    void VisitCodeTarget(RelocInfo* target);
    void VisitCodeEntry(Address entry_address);
    void VisitCell(RelocInfo* rinfo);
    void VisitRuntimeEntry(RelocInfo* reloc);
    // Used for serializing the external strings that hold the natives source.
    void VisitExternalOneByteString(
        v8::String::ExternalOneByteStringResource** resource);
    // We can't serialize a heap with external two-byte strings.
    void VisitExternalTwoByteString(
        v8::String::ExternalStringResource** resource) {
      UNREACHABLE();
    }

   private:
    void SerializePrologue(AllocationSpace space, int size, Map* map);

    enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
    // This function outputs or skips the raw data between the last pointer
    // and the current position. It can optionally just return the number of
    // bytes to skip instead of emitting a skip instruction itself, in case
    // the skip can be merged into the next instruction.
    int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
    // External strings are serialized in a way that resembles sequential
    // strings.
    void SerializeExternalString();

    Address PrepareCode();

    Serializer* serializer_;
    HeapObject* object_;
    SnapshotByteSink* sink_;
    int reference_representation_;
    int bytes_processed_so_far_;
    bool is_code_object_;
    bool code_has_been_output_;
  };

  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) = 0;

  void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
               int skip);

  // Returns true if the object was successfully serialized.
  bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                            WhereToPoint where_to_point, int skip);

  inline void FlushSkip(int skip) {
    if (skip != 0) {
      sink_->Put(kSkip, "SkipFromSerializeObject");
      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
    }
  }

  bool BackReferenceIsAlreadyAllocated(BackReference back_reference);

  // Allocates a slot for an object and returns its location (space, chunk
  // index and offset) encoded as a BackReference.
  BackReference AllocateLargeObject(int size);
  BackReference Allocate(AllocationSpace space, int size);
  int EncodeExternalReference(Address addr) {
    return external_reference_encoder_.Encode(addr);
  }

  // GetInt reads 4 bytes at once, requiring padding at the end.
  void Pad();

  // Some roots should not be serialized, because their actual value depends
  // on absolute addresses; they are reset after deserialization anyway.
  bool ShouldBeSkipped(Object** current);

  // We may not need the code address map for logging for every instance
  // of the serializer. Initialize it on demand.
  void InitializeCodeAddressMap();

  Code* CopyCode(Code* code);

  inline uint32_t max_chunk_size(int space) const {
    DCHECK_LE(0, space);
    DCHECK_LT(space, kNumberOfSpaces);
    return max_chunk_size_[space];
  }

  SnapshotByteSink* sink() const { return sink_; }

  Isolate* isolate_;

  SnapshotByteSink* sink_;
  ExternalReferenceEncoder external_reference_encoder_;

  BackReferenceMap back_reference_map_;
  RootIndexMap root_index_map_;

  friend class Deserializer;
  friend class ObjectSerializer;
  friend class SnapshotData;

 private:
  CodeAddressMap* code_address_map_;
  // Objects from the same space are put into chunks for bulk allocation
  // when deserializing. Each chunk must fit into a page, so we track the
  // size of the current chunk of each space in pending_chunk_; when adding
  // an object would exceed a page, we complete the current chunk and start
  // a new one.
  uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
  List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
  uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];

  // We map serialized large objects to indexes for back-referencing.
  uint32_t large_objects_total_size_;
  uint32_t seen_large_objects_index_;

  List<byte> code_buffer_;

  DISALLOW_COPY_AND_ASSIGN(Serializer);
};


class PartialSerializer : public Serializer {
 public:
  PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
                    SnapshotByteSink* sink)
      : Serializer(isolate, sink),
        startup_serializer_(startup_snapshot_serializer),
        outdated_contexts_(0),
        global_object_(NULL) {
    InitializeCodeAddressMap();
  }

  // Serialize the objects reachable from a single object pointer.
  void Serialize(Object** o);
  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) OVERRIDE;

 private:
  int PartialSnapshotCacheIndex(HeapObject* o);
  bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
    // Scripts should be referred to only through shared function infos. We
    // can't allow them to be part of the partial snapshot because they
    // contain a unique ID, and deserializing several partial snapshots
    // containing scripts would cause duplicates.
    DCHECK(!o->IsScript());
    return o->IsName() || o->IsSharedFunctionInfo() ||
           o->IsHeapNumber() || o->IsCode() ||
           o->IsScopeInfo() ||
           o->map() ==
               startup_serializer_->isolate()->heap()->fixed_cow_array_map();
  }

  void SerializeOutdatedContextsAsFixedArray();

  Serializer* startup_serializer_;
  List<BackReference> outdated_contexts_;
  Object* global_object_;
  PartialCacheIndexMap partial_cache_index_map_;
  DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};


class StartupSerializer : public Serializer {
 public:
  StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
      : Serializer(isolate, sink), root_index_wave_front_(0) {
    // Clear the cache of objects used by the partial snapshot. After the
    // strong roots have been serialized we can create a partial snapshot
    // which will repopulate the cache with objects needed by that partial
    // snapshot.
    isolate->partial_snapshot_cache()->Clear();
    InitializeCodeAddressMap();
  }

  // The StartupSerializer has to serialize the root array, which is slightly
  // different.
  void VisitPointers(Object** start, Object** end) OVERRIDE;

  // Serialize the current state of the heap. The order is:
  // 1) Strong references.
  // 2) Partial snapshot cache.
  // 3) Weak references (e.g. the string table).
  virtual void SerializeStrongReferences();
  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) OVERRIDE;
  void SerializeWeakReferences();
  void Serialize() {
    SerializeStrongReferences();
    SerializeWeakReferences();
    Pad();
  }

 private:
  intptr_t root_index_wave_front_;
  DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};


class CodeSerializer : public Serializer {
 public:
  static ScriptData* Serialize(Isolate* isolate,
                               Handle<SharedFunctionInfo> info,
                               Handle<String> source);

  MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
      Isolate* isolate, ScriptData* cached_data, Handle<String> source);

  static const int kSourceObjectIndex = 0;
  STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);

  static const int kCodeStubsBaseIndex = 1;

  String* source() const {
    DCHECK(!AllowHeapAllocation::IsAllowed());
    return source_;
  }

  const List<uint32_t>* stub_keys() const { return &stub_keys_; }
  int num_internalized_strings() const { return num_internalized_strings_; }

 private:
  CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
                 Code* main_code)
      : Serializer(isolate, sink),
        source_(source),
        main_code_(main_code),
        num_internalized_strings_(0) {
    back_reference_map_.AddSourceString(source);
  }

  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) OVERRIDE;

  void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
                        WhereToPoint where_to_point);
  void SerializeIC(Code* ic, HowToCode how_to_code,
                   WhereToPoint where_to_point);
  void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
                         WhereToPoint where_to_point);
  void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
                        WhereToPoint where_to_point);
  int AddCodeStubKey(uint32_t stub_key);

  DisallowHeapAllocation no_gc_;
  String* source_;
  Code* main_code_;
  int num_internalized_strings_;
  List<uint32_t> stub_keys_;
  DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};


// Wrapper around reservation sizes and the serialization payload.
class SnapshotData : public SerializedData {
 public:
  // Used when producing.
  explicit SnapshotData(const Serializer& ser);

  // Used when consuming.
  explicit SnapshotData(const Vector<const byte> snapshot)
      : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
    CHECK(IsSane());
  }

  Vector<const Reservation> Reservations() const;
  Vector<const byte> Payload() const;

  Vector<const byte> RawData() const {
    return Vector<const byte>(data_, size_);
  }

 private:
  bool IsSane();

  // The data header consists of uint32_t-sized entries:
  // [0] magic number and external reference count
  // [1] version hash
  // [2] number of reservation size entries
  // [3] payload length
  // ... reservations
  // ... serialized payload
  static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
  static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
  static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
  static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
};


// Wrapper around ScriptData to provide code-serializer-specific functionality.
class SerializedCodeData : public SerializedData {
 public:
  // Used when consuming.
  static SerializedCodeData* FromCachedData(Isolate* isolate,
                                            ScriptData* cached_data,
                                            String* source);

  // Used when producing.
  SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);

  // Returns the ScriptData object, relinquishing ownership of it to the
  // caller.
  ScriptData* GetScriptData();

  Vector<const Reservation> Reservations() const;
  Vector<const byte> Payload() const;

  int NumInternalizedStrings() const;
  Vector<const uint32_t> CodeStubKeys() const;

 private:
  explicit SerializedCodeData(ScriptData* data);

  enum SanityCheckResult {
    CHECK_SUCCESS = 0,
    MAGIC_NUMBER_MISMATCH = 1,
    VERSION_MISMATCH = 2,
    SOURCE_MISMATCH = 3,
    CPU_FEATURES_MISMATCH = 4,
    FLAGS_MISMATCH = 5,
    CHECKSUM_MISMATCH = 6
  };

  SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;

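  // Note that this "hash" is just the source length, so two different
  // sources of equal length compare as matching here.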
  uint32_t SourceHash(String* source) const { return source->length(); }

  // The data header consists of uint32_t-sized entries:
  // [ 0] magic number and external reference count
  // [ 1] version hash
  // [ 2] source hash
  // [ 3] cpu features
  // [ 4] flag hash
  // [ 5] number of internalized strings
  // [ 6] number of code stub keys
  // [ 7] number of reservation size entries
  // [ 8] payload length
  // [ 9] payload checksum part 1
  // [10] payload checksum part 2
  // ... reservations
  // ... code stub keys
  // ... serialized payload
  static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
  static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
  static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
  static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
  static const int kNumInternalizedStringsOffset = kFlagHashOffset + kInt32Size;
  static const int kNumReservationsOffset =
      kNumInternalizedStringsOffset + kInt32Size;
  static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
  static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
  static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
  static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
  static const int kHeaderSize = kChecksum2Offset + kInt32Size;
};
} }  // namespace v8::internal

#endif  // V8_SERIALIZE_H_