Index: src/snapshot/serialize.cc
diff --git a/src/snapshot/serialize.cc b/src/snapshot/serialize.cc
index b19e4388af07e3076f4978c083849c5401bc78dd..dbe92a6accb129520b22838e09ea8a7f3ee485a1 100644
--- a/src/snapshot/serialize.cc
+++ b/src/snapshot/serialize.cc
@@ -560,7 +560,6 @@
   isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
   isolate_->heap()->RepairFreeListsAfterDeserialization();
   isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
-  DeserializeDeferredObjects();
 
   isolate_->heap()->set_native_contexts_list(
       isolate_->heap()->undefined_value());
@@ -610,7 +609,6 @@
   Object* outdated_contexts;
   VisitPointer(&root);
   VisitPointer(&outdated_contexts);
-  DeserializeDeferredObjects();
 
   // There's no code deserialized here. If this assert fires
   // then that's changed and logging should be added to notify
@@ -633,7 +631,6 @@
     DisallowHeapAllocation no_gc;
     Object* root;
     VisitPointer(&root);
-    DeserializeDeferredObjects();
     return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
   }
 }
@@ -655,22 +652,13 @@
 }
 
 
-void Deserializer::DeserializeDeferredObjects() {
-  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
-    int space = code & kSpaceMask;
-    DCHECK(space <= kNumberOfSpaces);
-    DCHECK(code - space == kNewObject);
-    HeapObject* object = GetBackReferencedObject(space);
-    int size = source_.GetInt() << kPointerSizeLog2;
-    Address obj_address = object->address();
-    Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
-    Object** end = reinterpret_cast<Object**>(obj_address + size);
-    bool filled = ReadData(start, end, space, obj_address);
-    CHECK(filled);
-    if (object->IsAllocationSite()) {
-      RelinkAllocationSite(AllocationSite::cast(object));
-    }
-  }
+void Deserializer::RelinkAllocationSite(AllocationSite* site) {
+  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+    site->set_weak_next(isolate_->heap()->undefined_value());
+  } else {
+    site->set_weak_next(isolate_->heap()->allocation_sites_list());
+  }
+  isolate_->heap()->set_allocation_sites_list(site);
 }
 
 
@@ -705,8 +693,7 @@
 };
 
 
-HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj) {
-  DCHECK(deserializing_user_code());
+HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
   if (obj->IsString()) {
     String* string = String::cast(obj);
     // Uninitialize hash field as the hash seed may have changed.
@@ -721,27 +708,8 @@
     }
   } else if (obj->IsScript()) {
     Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
-  } else {
-    DCHECK(CanBeDeferred(obj));
   }
   return obj;
-}
-
-
-void Deserializer::RelinkAllocationSite(AllocationSite* obj) {
-  DCHECK(obj->IsAllocationSite());
-  // Allocation sites are present in the snapshot, and must be linked into
-  // a list at deserialization time.
-  AllocationSite* site = AllocationSite::cast(obj);
-  // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
-  // as a (weak) root. If this root is relocated correctly,
-  // RelinkAllocationSite() isn't necessary.
-  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
-    site->set_weak_next(isolate_->heap()->undefined_value());
-  } else {
-    site->set_weak_next(isolate_->heap()->allocation_sites_list());
-  }
-  isolate_->heap()->set_allocation_sites_list(site);
 }
 
 
@@ -800,21 +768,24 @@
   if (FLAG_log_snapshot_positions) {
     LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
   }
-
-  if (ReadData(current, limit, space_number, address)) {
-    // Only post process if object content has not been deferred.
-    if (obj->IsAllocationSite()) {
-      RelinkAllocationSite(AllocationSite::cast(obj));
-    }
-  }
-
-  if (deserializing_user_code()) obj = PostProcessNewObject(obj);
+  ReadData(current, limit, space_number, address);
+
+  // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
+  // as a (weak) root. If this root is relocated correctly,
+  // RelinkAllocationSite() isn't necessary.
+  if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj));
+
+  // Fix up strings from serialized user code.
+  if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj);
 
   Object* write_back_obj = obj;
   UnalignedCopy(write_back, &write_back_obj);
 #ifdef DEBUG
   if (obj->IsCode()) {
     DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
+#ifdef VERIFY_HEAP
+    obj->ObjectVerify();
+#endif  // VERIFY_HEAP
   } else {
     DCHECK(space_number != CODE_SPACE);
   }
@@ -858,7 +829,7 @@
 }
 
 
-bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
+void Deserializer::ReadData(Object** current, Object** limit, int source_space,
                             Address current_object_address) {
   Isolate* const isolate = isolate_;
   // Write barrier support costs around 1% in startup time. In fact there
@@ -1115,14 +1086,6 @@
         break;
       }
 
-      case kDeferred: {
-        // Deferred can only occur right after the heap object header.
-        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
-                                                     kPointerSize));
-        current = limit;
-        return false;
-      }
-
       case kSynchronize:
         // If we get here then that indicates that you have a mismatch between
         // the number of GC roots when serializing and deserializing.
@@ -1229,7 +1192,6 @@
     }
   }
   CHECK_EQ(limit, current);
-  return true;
 }
 
 
@@ -1238,7 +1200,6 @@
       sink_(sink),
       external_reference_encoder_(isolate),
       root_index_map_(isolate),
-      recursion_depth_(0),
       code_address_map_(NULL),
       large_objects_total_size_(0),
       seen_large_objects_index_(0) {
@@ -1311,16 +1272,6 @@
 #undef PRINT_INSTANCE_TYPE
   PrintF("\n");
 #endif  // OBJECT_PRINT
-}
-
-
-void Serializer::SerializeDeferredObjects() {
-  while (deferred_objects_.length() > 0) {
-    HeapObject* obj = deferred_objects_.RemoveLast();
-    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
-    obj_serializer.SerializeDeferred();
-  }
-  sink_->Put(kSynchronize, "Finished with deferred objects");
 }
 
 
@@ -1368,7 +1319,6 @@
   }
   VisitPointer(o);
   SerializeOutdatedContextsAsFixedArray();
-  SerializeDeferredObjects();
   Pad();
 }
 
@@ -1392,10 +1342,10 @@
       sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
     }
     for (int i = 0; i < length; i++) {
-      Context* context = outdated_contexts_[i];
-      BackReference back_reference = back_reference_map_.Lookup(context);
-      sink_->Put(kBackref + back_reference.space(), "BackRef");
-      PutBackReference(context, back_reference);
+      BackReference back_ref = outdated_contexts_[i];
+      DCHECK(BackReferenceIsAlreadyAllocated(back_ref));
+      sink_->Put(kBackref + back_ref.space(), "BackRef");
+      sink_->PutInt(back_ref.reference(), "BackRefValue");
     }
   }
 }
@@ -1558,7 +1508,10 @@
                    "BackRefWithSkip");
         sink_->PutInt(skip, "BackRefSkipDistance");
       }
-      PutBackReference(obj, back_reference);
+      DCHECK(BackReferenceIsAlreadyAllocated(back_reference));
+      sink_->PutInt(back_reference.reference(), "BackRefValue");
+
+      hot_objects_.Add(obj);
     }
     return true;
   }
@@ -1594,7 +1547,7 @@
 }
 
 
-void StartupSerializer::SerializeWeakReferencesAndDeferred() {
+void StartupSerializer::SerializeWeakReferences() {
   // This phase comes right after the serialization (of the snapshot).
   // After we have done the partial serialization the partial snapshot cache
   // will contain some references needed to decode the partial snapshot. We
@@ -1603,7 +1556,6 @@
   Object* undefined = isolate()->heap()->undefined_value();
   VisitPointer(&undefined);
   isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
-  SerializeDeferredObjects();
   Pad();
 }
 
@@ -1636,13 +1588,6 @@
 }
 
 
-void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
-  DCHECK(BackReferenceIsAlreadyAllocated(reference));
-  sink_->PutInt(reference.reference(), "BackRefValue");
-  hot_objects_.Add(object);
-}
-
-
 void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
                                         WhereToPoint where_to_point, int skip) {
   if (obj->IsMap()) {
@@ -1696,7 +1641,9 @@
       Context::cast(obj)->global_object() == global_object_) {
     // Context refers to the current global object. This reference will
     // become outdated after deserialization.
-    outdated_contexts_.Add(Context::cast(obj));
+    BackReference back_reference = back_reference_map_.Lookup(obj);
+    DCHECK(back_reference.is_valid());
+    outdated_contexts_.Add(back_reference);
   }
 }
 
@@ -1863,39 +1810,6 @@
   CHECK_EQ(0, bytes_processed_so_far_);
   bytes_processed_so_far_ = kPointerSize;
 
-  RecursionScope recursion(serializer_);
-  // Objects that are immediately post processed during deserialization
-  // cannot be deferred, since post processing requires the object content.
-  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
-    serializer_->QueueDeferredObject(object_);
-    sink_->Put(kDeferred, "Deferring object content");
-    return;
-  }
-
-  object_->IterateBody(map->instance_type(), size, this);
-  OutputRawData(object_->address() + size);
-}
-
-
-void Serializer::ObjectSerializer::SerializeDeferred() {
-  if (FLAG_trace_serializer) {
-    PrintF(" Encoding deferred heap object: ");
-    object_->ShortPrint();
-    PrintF("\n");
-  }
-
-  int size = object_->Size();
-  Map* map = object_->map();
-  BackReference reference = serializer_->back_reference_map()->Lookup(object_);
-
-  // Serialize the rest of the object.
-  CHECK_EQ(0, bytes_processed_so_far_);
-  bytes_processed_so_far_ = kPointerSize;
-
-  sink_->Put(kNewObject + reference.space(), "deferred object");
-  serializer_->PutBackReference(object_, reference);
-  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
-
   object_->IterateBody(map->instance_type(), size, this);
   OutputRawData(object_->address() + size);
 }
@@ -2220,7 +2134,6 @@
   DisallowHeapAllocation no_gc;
   Object** location = Handle<Object>::cast(info).location();
   cs.VisitPointer(location);
-  cs.SerializeDeferredObjects();
   cs.Pad();
 
   SerializedCodeData data(sink.data(), cs);