| Index: src/snapshot/serialize.cc
| diff --git a/src/snapshot/serialize.cc b/src/snapshot/serialize.cc
| index 55685be2785713ef97345842aa3b765e04869363..7f123a3fbed622040a5d7cc5fab7bd85a1560faa 100644
| --- a/src/snapshot/serialize.cc
| +++ b/src/snapshot/serialize.cc
| @@ -516,10 +516,18 @@ void Deserializer::DecodeReservation(
|
|
|  void Deserializer::FlushICacheForNewCodeObjects() {
| -  PageIterator it(isolate_->heap()->code_space());
| -  while (it.has_next()) {
| -    Page* p = it.next();
| -    CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
| +  if (!deserializing_user_code_) {
| +    // The entire isolate is newly deserialized. Simply flush all code pages.
| +    PageIterator it(isolate_->heap()->code_space());
| +    while (it.has_next()) {
| +      Page* p = it.next();
| +      CpuFeatures::FlushICache(p->area_start(),
| +                               p->area_end() - p->area_start());
| +    }
| +  }
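| +  // new_code_objects_ holds what the page flush above does not cover: code
| +  // objects recorded individually while deserializing user code or while
| +  // deserializing into the large object space.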
| +  for (Code* code : new_code_objects_) {
| +    CpuFeatures::FlushICache(code->instruction_start(),
| +                             code->instruction_size());
|    }
|  }
|
| @@ -556,10 +564,15 @@ void Deserializer::Deserialize(Isolate* isolate) {
|    DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
|    // No active handles.
|    DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
| -  isolate_->heap()->IterateSmiRoots(this);
| -  isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
| -  isolate_->heap()->RepairFreeListsAfterDeserialization();
| -  isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
| +
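| +  // Deserializing the roots and the deferred objects must not trigger GC.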
| +  {
| +    DisallowHeapAllocation no_gc;
| +    isolate_->heap()->IterateSmiRoots(this);
| +    isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
| +    isolate_->heap()->RepairFreeListsAfterDeserialization();
| +    isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
| +    DeserializeDeferredObjects();
| +  }
|
|    isolate_->heap()->set_native_contexts_list(
|        isolate_->heap()->undefined_value());
| @@ -608,11 +621,12 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
|    Object* root;
|    Object* outdated_contexts;
|    VisitPointer(&root);
| +  DeserializeDeferredObjects();
|    VisitPointer(&outdated_contexts);
|
| -  // There's no code deserialized here. If this assert fires
| -  // then that's changed and logging should be added to notify
| -  // the profiler et al of the new code.
| +  // There's no code deserialized here. If this assert fires, then that has
| +  // changed and logging should be added to notify the profiler et al. of the
| +  // new code, which also has to be flushed from the instruction cache.
|    CHECK_EQ(start_address, code_space->top());
|    CHECK(outdated_contexts->IsFixedArray());
|    *outdated_contexts_out =
| @@ -628,10 +642,17 @@ MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
|      return Handle<SharedFunctionInfo>();
|    } else {
|      deserializing_user_code_ = true;
| -    DisallowHeapAllocation no_gc;
| -    Object* root;
| -    VisitPointer(&root);
| -    return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
| +    HandleScope scope(isolate);
| +    Handle<SharedFunctionInfo> result;
| +    {
| +      DisallowHeapAllocation no_gc;
| +      Object* root;
| +      VisitPointer(&root);
| +      DeserializeDeferredObjects();
| +      result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
| +    }
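| +    // Allocation is allowed again, so the newly internalized strings can be
| +    // committed to the string table before escaping the result.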
| +    CommitNewInternalizedStrings(isolate);
| +    return scope.CloseAndEscape(result);
|    }
|  }
|
| @@ -652,13 +673,21 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
|  }
|
|
| -void Deserializer::RelinkAllocationSite(AllocationSite* site) {
| -  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
| -    site->set_weak_next(isolate_->heap()->undefined_value());
| -  } else {
| -    site->set_weak_next(isolate_->heap()->allocation_sites_list());
| +void Deserializer::DeserializeDeferredObjects() {
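| +  // Each deferred object was written as a kNewObject tag with its space, a
| +  // back reference to the already-allocated object, its size in words, and
| +  // finally the object's content starting right after the map word.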
| +  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
| +    int space = code & kSpaceMask;
| +    DCHECK(space <= kNumberOfSpaces);
| +    DCHECK(code - space == kNewObject);
| +    HeapObject* object = GetBackReferencedObject(space);
| +    int size = source_.GetInt() << kPointerSizeLog2;
| +    Address obj_address = object->address();
| +    Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
| +    Object** end = reinterpret_cast<Object**>(obj_address + size);
| +    bool filled = ReadData(start, end, space, obj_address);
| +    CHECK(filled);
| +    DCHECK(CanBeDeferred(object));
| +    PostProcessNewObject(object, space);
|    }
| -  isolate_->heap()->set_allocation_sites_list(site);
|  }
|
|
| @@ -688,31 +717,76 @@ class StringTableInsertionKey : public HashTableKey {
|      return handle(string_, isolate);
|    }
|
| + private:
|    String* string_;
|    uint32_t hash_;
| +  DisallowHeapAllocation no_gc;
|  };
|
|
| -HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
| -  if (obj->IsString()) {
| -    String* string = String::cast(obj);
| -    // Uninitialize hash field as the hash seed may have changed.
| -    string->set_hash_field(String::kEmptyHashField);
| -    if (string->IsInternalizedString()) {
| -      DisallowHeapAllocation no_gc;
| -      HandleScope scope(isolate_);
| -      StringTableInsertionKey key(string);
| -      String* canonical = *StringTable::LookupKey(isolate_, &key);
| -      string->SetForwardedInternalizedString(canonical);
| -      return canonical;
| +HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
| +  if (deserializing_user_code()) {
| +    if (obj->IsString()) {
| +      String* string = String::cast(obj);
| +      // Uninitialize hash field as the hash seed may have changed.
| +      string->set_hash_field(String::kEmptyHashField);
| +      if (string->IsInternalizedString()) {
| +        // Canonicalize the internalized string. If it already exists in the
| +        // string table, set it to forward to the existing one.
| +        StringTableInsertionKey key(string);
| +        String* canonical = StringTable::LookupKeyIfExists(isolate_, &key);
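| +        // No canonical version exists yet, and the string table must not be
| +        // grown while allocation is disallowed, so defer the insertion to
| +        // CommitNewInternalizedStrings().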
| +        if (canonical == NULL) {
| +          new_internalized_strings_.Add(handle(string));
| +          return string;
| +        } else {
| +          string->SetForwardedInternalizedString(canonical);
| +          return canonical;
| +        }
| +      }
| +    } else if (obj->IsScript()) {
| +      // Assign a new script id to avoid collision.
| +      Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
| +    } else {
| +      DCHECK(CanBeDeferred(obj));
| +    }
| +  }
| +  if (obj->IsAllocationSite()) {
| +    // Allocation sites are present in the snapshot, and must be linked into
| +    // a list at deserialization time.
| +    AllocationSite* site = AllocationSite::cast(obj);
| +    // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
| +    // as a (weak) root. If this root is relocated correctly, this becomes
| +    // unnecessary.
| +    if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
| +      site->set_weak_next(isolate_->heap()->undefined_value());
| +    } else {
| +      site->set_weak_next(isolate_->heap()->allocation_sites_list());
| +    }
| +    isolate_->heap()->set_allocation_sites_list(site);
| +  } else if (obj->IsCode()) {
| +    // We flush all code pages after deserializing the startup snapshot. In that
| +    // case, we only need to remember code objects in the large object space.
| +    // When deserializing user code, remember each individual code object.
| +    if (deserializing_user_code() || space == LO_SPACE) {
| +      new_code_objects_.Add(Code::cast(obj));
|      }
| -  } else if (obj->IsScript()) {
| -    Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
|    }
|    return obj;
|  }
|
|
| +void Deserializer::CommitNewInternalizedStrings(Isolate* isolate) {
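| +  // Grow the string table to its final size up front, so the individual
| +  // insertions below do not have to grow it again.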
| +  StringTable::EnsureCapacityForDeserialization(
| +      isolate, new_internalized_strings_.length());
| +  for (Handle<String> string : new_internalized_strings_) {
| +    StringTableInsertionKey key(*string);
| +    DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
| +    StringTable::LookupKey(isolate, &key);
| +  }
| +}
| +
| +
|  HeapObject* Deserializer::GetBackReferencedObject(int space) {
|    HeapObject* obj;
|    BackReference back_reference(source_.GetInt());
| @@ -757,24 +831,17 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
|    if (FLAG_log_snapshot_positions) {
|      LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
|    }
| -  ReadData(current, limit, space_number, address);
| -
| -  // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
| -  // as a (weak) root. If this root is relocated correctly,
| -  // RelinkAllocationSite() isn't necessary.
| -  if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj));
|
| -  // Fix up strings from serialized user code.
| -  if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj);
| +  if (ReadData(current, limit, space_number, address)) {
| +    // Only post-process if the object content has not been deferred.
| +    obj = PostProcessNewObject(obj, space_number);
| +  }
|
|    Object* write_back_obj = obj;
|    UnalignedCopy(write_back, &write_back_obj);
|  #ifdef DEBUG
|    if (obj->IsCode()) {
|      DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
| -#ifdef VERIFY_HEAP
| -    obj->ObjectVerify();
| -#endif  // VERIFY_HEAP
|    } else {
|      DCHECK(space_number != CODE_SPACE);
|    }
| @@ -818,7 +885,7 @@ Address Deserializer::Allocate(int space_index, int size) {
|  }
|
|
| -void Deserializer::ReadData(Object** current, Object** limit, int source_space,
| +bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
|                              Address current_object_address) {
|    Isolate* const isolate = isolate_;
|    // Write barrier support costs around 1% in startup time. In fact there
| @@ -1075,6 +1142,18 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
|          break;
|        }
|
| +      case kDeferred: {
| +        // kDeferred can only occur right after the heap object header.
| +        DCHECK(current == reinterpret_cast<Object**>(current_object_address +
| +                                                     kPointerSize));
| +        HeapObject* obj = HeapObject::FromAddress(current_object_address);
| +        // If the deferred object is a map, its instance type may be used
| +        // during deserialization. Initialize it with a temporary value.
| +        if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
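| +        // Leave the object body unread; DeserializeDeferredObjects() will
| +        // fill it in later.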
| +        current = limit;
| +        return false;
| +      }
| +
|        case kSynchronize:
|          // If we get here then that indicates that you have a mismatch between
|          // the number of GC roots when serializing and deserializing.
| @@ -1181,6 +1260,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
|      }
|    }
|    CHECK_EQ(limit, current);
| +  return true;
|  }
|
|
| @@ -1189,6 +1269,7 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
|        sink_(sink),
|        external_reference_encoder_(isolate),
|        root_index_map_(isolate),
| +      recursion_depth_(0),
|        code_address_map_(NULL),
|        large_objects_total_size_(0),
|        seen_large_objects_index_(0) {
| @@ -1264,6 +1345,16 @@ void Serializer::OutputStatistics(const char* name) {
|  }
|
|
| +void Serializer::SerializeDeferredObjects() {
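| +  // Serializing a deferred object may queue up further deferred objects;
| +  // keep draining until the queue is empty.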
| +  while (deferred_objects_.length() > 0) {
| +    HeapObject* obj = deferred_objects_.RemoveLast();
| +    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
| +    obj_serializer.SerializeDeferred();
| +  }
| +  sink_->Put(kSynchronize, "Finished with deferred objects");
| +}
| +
| +
|  void StartupSerializer::SerializeStrongReferences() {
|    Isolate* isolate = this->isolate();
|    // No active threads.
| @@ -1307,6 +1398,7 @@ void PartialSerializer::Serialize(Object** o) {
|      back_reference_map()->AddGlobalProxy(context->global_proxy());
|    }
|    VisitPointer(o);
| +  SerializeDeferredObjects();
|    SerializeOutdatedContextsAsFixedArray();
|    Pad();
|  }
| @@ -1331,10 +1423,10 @@ void PartialSerializer::SerializeOutdatedContextsAsFixedArray() {
|        sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
|      }
|      for (int i = 0; i < length; i++) {
| -      BackReference back_ref = outdated_contexts_[i];
| -      DCHECK(BackReferenceIsAlreadyAllocated(back_ref));
| -      sink_->Put(kBackref + back_ref.space(), "BackRef");
| -      sink_->PutInt(back_ref.reference(), "BackRefValue");
| +      Context* context = outdated_contexts_[i];
| +      BackReference back_reference = back_reference_map_.Lookup(context);
| +      sink_->Put(kBackref + back_reference.space(), "BackRef");
| +      PutBackReference(context, back_reference);
|      }
|    }
|  }
| @@ -1497,10 +1589,7 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
|                     "BackRefWithSkip");
|          sink_->PutInt(skip, "BackRefSkipDistance");
|        }
| -      DCHECK(BackReferenceIsAlreadyAllocated(back_reference));
| -      sink_->PutInt(back_reference.reference(), "BackRefValue");
| -
| -      hot_objects_.Add(obj);
| +      PutBackReference(obj, back_reference);
|      }
|      return true;
|    }
| @@ -1536,7 +1625,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
|  }
|
|
| -void StartupSerializer::SerializeWeakReferences() {
| +void StartupSerializer::SerializeWeakReferencesAndDeferred() {
|    // This phase comes right after the serialization (of the snapshot).
|    // After we have done the partial serialization the partial snapshot cache
|    // will contain some references needed to decode the partial snapshot. We
| @@ -1545,6 +1634,7 @@ void StartupSerializer::SerializeWeakReferences() {
|    Object* undefined = isolate()->heap()->undefined_value();
|    VisitPointer(&undefined);
|    isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
| +  SerializeDeferredObjects();
|    Pad();
|  }
|
| @@ -1577,6 +1667,13 @@ void Serializer::PutRoot(int root_index,
|  }
|
|
| +void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
| +  DCHECK(BackReferenceIsAlreadyAllocated(reference));
| +  sink_->PutInt(reference.reference(), "BackRefValue");
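| +  // Remember the object so that later references to it can use the shorter
| +  // hot-object encoding.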
| +  hot_objects_.Add(object);
| +}
| +
| +
|  void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
|                                          WhereToPoint where_to_point, int skip) {
|    if (obj->IsMap()) {
| @@ -1630,9 +1727,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
|        Context::cast(obj)->global_object() == global_object_) {
|      // Context refers to the current global object. This reference will
|      // become outdated after deserialization.
| -    BackReference back_reference = back_reference_map_.Lookup(obj);
| -    DCHECK(back_reference.is_valid());
| -    outdated_contexts_.Add(back_reference);
| +    outdated_contexts_.Add(Context::cast(obj));
|    }
|  }
|
| @@ -1753,6 +1848,9 @@ void Serializer::ObjectSerializer::Serialize() {
|    // We cannot serialize typed array objects correctly.
|    DCHECK(!object_->IsJSTypedArray());
|
| +  // We don't expect fillers.
| +  DCHECK(!object_->IsFiller());
| +
|    if (object_->IsPrototypeInfo()) {
|      Object* prototype_users = PrototypeInfo::cast(object_)->prototype_users();
|      if (prototype_users->IsWeakFixedArray()) {
| @@ -1790,6 +1888,39 @@ void Serializer::ObjectSerializer::Serialize() {
|    CHECK_EQ(0, bytes_processed_so_far_);
|    bytes_processed_so_far_ = kPointerSize;
|
| +  RecursionScope recursion(serializer_);
| +  // Objects that are immediately post-processed during deserialization
| +  // cannot be deferred, since post-processing requires the object content.
| +  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
| +    serializer_->QueueDeferredObject(object_);
| +    sink_->Put(kDeferred, "Deferring object content");
| +    return;
| +  }
| +
| +  object_->IterateBody(map->instance_type(), size, this);
| +  OutputRawData(object_->address() + size);
| +}
| +
| +
| +void Serializer::ObjectSerializer::SerializeDeferred() {
| +  if (FLAG_trace_serializer) {
| +    PrintF(" Encoding deferred heap object: ");
| +    object_->ShortPrint();
| +    PrintF("\n");
| +  }
| +
| +  int size = object_->Size();
| +  Map* map = object_->map();
| +  BackReference reference = serializer_->back_reference_map()->Lookup(object_);
| +
| +  // Serialize the rest of the object.
| +  CHECK_EQ(0, bytes_processed_so_far_);
| +  bytes_processed_so_far_ = kPointerSize;
| +
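| +  // Emit the layout that DeserializeDeferredObjects() expects: the
| +  // kNewObject tag with the object's space, the back reference, and the
| +  // size in words, followed by the object body.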
| +  sink_->Put(kNewObject + reference.space(), "deferred object");
| +  serializer_->PutBackReference(object_, reference);
| +  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
| +
|    object_->IterateBody(map->instance_type(), size, this);
|    OutputRawData(object_->address() + size);
|  }
| @@ -2114,6 +2245,7 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
|    DisallowHeapAllocation no_gc;
|    Object** location = Handle<Object>::cast(info).location();
|    cs.VisitPointer(location);
| +  cs.SerializeDeferredObjects();
|    cs.Pad();
|
|    SerializedCodeData data(sink.data(), cs);
| @@ -2192,8 +2324,6 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
|  void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
|                                        HowToCode how_to_code,
|                                        WhereToPoint where_to_point) {
| -  if (heap_object->IsInternalizedString()) num_internalized_strings_++;
| -
|    // Object has not yet been serialized. Serialize it here.
|    ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
|                                where_to_point);
| @@ -2305,10 +2435,6 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
|      return MaybeHandle<SharedFunctionInfo>();
|    }
|
| -  // Eagerly expand string table to avoid allocations during deserialization.
| -  StringTable::EnsureCapacityForDeserialization(isolate,
| -                                                scd->NumInternalizedStrings());
| -
|    // Prepare and register list of attached objects.
|    Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
|    Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
| @@ -2472,7 +2598,6 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
|    SetHeaderValue(kCpuFeaturesOffset,
|                   static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
|    SetHeaderValue(kFlagHashOffset, FlagList::Hash());
| -  SetHeaderValue(kNumInternalizedStringsOffset, cs.num_internalized_strings());
|    SetHeaderValue(kNumReservationsOffset, reservations.length());
|    SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
|    SetHeaderValue(kPayloadLengthOffset, payload.length());
| @@ -2550,10 +2675,6 @@ Vector<const byte> SerializedCodeData::Payload() const {
|  }
|
|
| -int SerializedCodeData::NumInternalizedStrings() const {
| -  return GetHeaderValue(kNumInternalizedStringsOffset);
| -}
| -
|  Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
|    int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
|    const byte* start = data_ + kHeaderSize + reservations_size;
|