| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/snapshot/startup-serializer.h" | 5 #include "src/snapshot/startup-serializer.h" |
| 6 | 6 |
| 7 #include "src/objects-inl.h" | 7 #include "src/objects-inl.h" |
| 8 #include "src/v8threads.h" | 8 #include "src/v8threads.h" |
| 9 | 9 |
| 10 namespace v8 { | 10 namespace v8 { |
| (...skipping 77 matching lines...) |
| 88 SerializerReference ref = reference_map_.Lookup(obj); | 88 SerializerReference ref = reference_map_.Lookup(obj); |
| 89 CHECK(ref.is_back_reference() && ref.chunk_index() == 0); | 89 CHECK(ref.is_back_reference() && ref.chunk_index() == 0); |
| 90 } | 90 } |
| 91 } | 91 } |
| 92 | 92 |
| 93 void StartupSerializer::SerializeWeakReferencesAndDeferred() { | 93 void StartupSerializer::SerializeWeakReferencesAndDeferred() { |
| 94 // This comes right after serialization of the partial snapshot, where we | 94 // This comes right after serialization of the partial snapshot, where we |
| 95 // add entries to the partial snapshot cache of the startup snapshot. Add | 95 // add entries to the partial snapshot cache of the startup snapshot. Add |
| 96 // one entry with 'undefined' to terminate the partial snapshot cache. | 96 // one entry with 'undefined' to terminate the partial snapshot cache. |
| 97 Object* undefined = isolate()->heap()->undefined_value(); | 97 Object* undefined = isolate()->heap()->undefined_value(); |
| 98 VisitPointer(&undefined); | 98 VisitRootPointer(Root::kPartialSnapshotCache, &undefined); |
| 99 isolate()->heap()->IterateWeakRoots(this, VISIT_ALL); | 99 isolate()->heap()->IterateWeakRoots(this, VISIT_ALL); |
| 100 SerializeDeferredObjects(); | 100 SerializeDeferredObjects(); |
| 101 Pad(); | 101 Pad(); |
| 102 } | 102 } |
| 103 | 103 |
| 104 int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) { | 104 int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) { |
| 105 int index; | 105 int index; |
| 106 if (!partial_cache_index_map_.LookupOrInsert(heap_object, &index)) { | 106 if (!partial_cache_index_map_.LookupOrInsert(heap_object, &index)) { |
| 107 // This object is not part of the partial snapshot cache yet. Add it to the | 107 // This object is not part of the partial snapshot cache yet. Add it to the |
| 108 // startup snapshot so we can refer to it via partial snapshot index from | 108 // startup snapshot so we can refer to it via partial snapshot index from |
| 109 // the partial snapshot. | 109 // the partial snapshot. |
| 110 VisitPointer(reinterpret_cast<Object**>(&heap_object)); | 110 VisitRootPointer(Root::kPartialSnapshotCache, |
|  | 111 reinterpret_cast<Object**>(&heap_object)); |
| 111 } | 112 } |
| 112 return index; | 113 return index; |
| 113 } | 114 } |
| 114 | 115 |
| 115 void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) { | 116 void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) { |
| 116 // We expect the builtins tag after builtins have been serialized. | 117 // We expect the builtins tag after builtins have been serialized. |
| 117 DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins); | 118 DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins); |
| 118 serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope); | 119 serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope); |
| 119 sink_.Put(kSynchronize, "Synchronize"); | 120 sink_.Put(kSynchronize, "Synchronize"); |
| 120 } | 121 } |
| (...skipping 16 matching lines...) |
| 137 // Clear the stack limits to make the snapshot reproducible. | 138 // Clear the stack limits to make the snapshot reproducible. |
| 138 // Reset it again afterwards. | 139 // Reset it again afterwards. |
| 139 isolate->heap()->ClearStackLimits(); | 140 isolate->heap()->ClearStackLimits(); |
| 140 isolate->heap()->IterateSmiRoots(this); | 141 isolate->heap()->IterateSmiRoots(this); |
| 141 isolate->heap()->SetStackLimits(); | 142 isolate->heap()->SetStackLimits(); |
| 142 | 143 |
| 143 isolate->heap()->IterateStrongRoots(this, | 144 isolate->heap()->IterateStrongRoots(this, |
| 144 VISIT_ONLY_STRONG_FOR_SERIALIZATION); | 145 VISIT_ONLY_STRONG_FOR_SERIALIZATION); |
| 145 } | 146 } |
| 146 | 147 |
| 147 void StartupSerializer::VisitPointers(Object** start, Object** end) { | 148 void StartupSerializer::VisitRootPointers(Root root, Object** start, |
|  | 149 Object** end) { |
| 148 if (start == isolate()->heap()->roots_array_start()) { | 150 if (start == isolate()->heap()->roots_array_start()) { |
| 149 // Serializing the root list needs special handling: | 151 // Serializing the root list needs special handling: |
| 150 // - The first pass over the root list only serializes immortal immovables. | 152 // - The first pass over the root list only serializes immortal immovables. |
| 151 // - The second pass over the root list serializes the rest. | 153 // - The second pass over the root list serializes the rest. |
| 152 // - Only root list elements that have been fully serialized can be | 154 // - Only root list elements that have been fully serialized can be |
| 153 // referenced as roots by using kRootArray bytecodes. | 155 // referenced as roots by using kRootArray bytecodes. |
| 154 int skip = 0; | 156 int skip = 0; |
| 155 for (Object** current = start; current < end; current++) { | 157 for (Object** current = start; current < end; current++) { |
| 156 int root_index = static_cast<int>(current - start); | 158 int root_index = static_cast<int>(current - start); |
| 157 if (RootShouldBeSkipped(root_index)) { | 159 if (RootShouldBeSkipped(root_index)) { |
| 158 skip += kPointerSize; | 160 skip += kPointerSize; |
| 159 continue; | 161 continue; |
| 160 } else { | 162 } else { |
| 161 if ((*current)->IsSmi()) { | 163 if ((*current)->IsSmi()) { |
| 162 FlushSkip(skip); | 164 FlushSkip(skip); |
| 163 PutSmi(Smi::cast(*current)); | 165 PutSmi(Smi::cast(*current)); |
| 164 } else { | 166 } else { |
| 165 SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, | 167 SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, |
| 166 skip); | 168 skip); |
| 167 } | 169 } |
| 168 root_has_been_serialized_.set(root_index); | 170 root_has_been_serialized_.set(root_index); |
| 169 skip = 0; | 171 skip = 0; |
| 170 } | 172 } |
| 171 } | 173 } |
| 172 FlushSkip(skip); | 174 FlushSkip(skip); |
| 173 } else { | 175 } else { |
| 174 Serializer::VisitPointers(start, end); | 176 Serializer::VisitRootPointers(root, start, end); |
| 175 } | 177 } |
| 176 } | 178 } |
| 177 | 179 |
| 178 bool StartupSerializer::RootShouldBeSkipped(int root_index) { | 180 bool StartupSerializer::RootShouldBeSkipped(int root_index) { |
| 179 if (root_index == Heap::kStackLimitRootIndex || | 181 if (root_index == Heap::kStackLimitRootIndex || |
| 180 root_index == Heap::kRealStackLimitRootIndex) { | 182 root_index == Heap::kRealStackLimitRootIndex) { |
| 181 return true; | 183 return true; |
| 182 } | 184 } |
| 183 return Heap::RootIsImmortalImmovable(root_index) != | 185 return Heap::RootIsImmortalImmovable(root_index) != |
| 184 serializing_immortal_immovables_roots_; | 186 serializing_immortal_immovables_roots_; |
| 185 } | 187 } |
| 186 | 188 |
| 187 } // namespace internal | 189 } // namespace internal |
| 188 } // namespace v8 | 190 } // namespace v8 |
| OLD | NEW |
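
For context on the hunks above: the patch replaces the untyped `VisitPointer`/`VisitPointers` hooks with `VisitRootPointer`/`VisitRootPointers`, which take a `Root` tag identifying the root category being walked (here `Root::kPartialSnapshotCache`). Below is a minimal sketch of that visitor shape; apart from the hook signatures and the `Root::kPartialSnapshotCache` name taken from the patch, the types and enum values are illustrative stand-ins, not V8 code.

```cpp
#include <cstddef>

// Stand-in types so the sketch compiles on its own; the real code uses
// v8::internal::Object and V8's own Root enumeration (values assumed here).
class Object {};
enum class Root { kStrongRootList, kPartialSnapshotCache };

// Mirrors the VisitRootPointer/VisitRootPointers shape used in the patch:
// the Root tag tells the visitor which root category a pointer range belongs to.
class ExampleRootVisitor {
 public:
  void VisitRootPointer(Root root, Object** p) {
    // The single-pointer hook is just the one-element range case.
    VisitRootPointers(root, p, p + 1);
  }

  void VisitRootPointers(Root root, Object** start, Object** end) {
    for (Object** current = start; current < end; ++current) {
      // A real serializer would emit snapshot bytecodes for each slot here;
      // this sketch only tallies how many slots each root category contributed.
      counts_[static_cast<std::size_t>(root)]++;
    }
  }

 private:
  std::size_t counts_[2] = {0, 0};
};
```

A call such as `VisitRootPointer(Root::kPartialSnapshotCache, &undefined)` in the new code thus carries the root category along with the slot, presumably so callers of the visitor interface can attribute or special-case slots by origin rather than inferring it from the pointer value.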