| OLD | NEW |
| --- | --- |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "src/v8.h" | |
| 6 | |
| 7 #include "src/accessors.h" | |
| 8 #include "src/api.h" | |
| 9 #include "src/base/platform/platform.h" | |
| 10 #include "src/bootstrapper.h" | |
| 11 #include "src/code-stubs.h" | |
| 12 #include "src/cpu-profiler.h" | |
| 13 #include "src/deoptimizer.h" | |
| 14 #include "src/execution.h" | |
| 15 #include "src/global-handles.h" | |
| 16 #include "src/ic/ic.h" | |
| 17 #include "src/ic/stub-cache.h" | |
| 18 #include "src/objects.h" | |
| 19 #include "src/parser.h" | |
| 20 #include "src/runtime/runtime.h" | |
| 21 #include "src/snapshot/natives.h" | |
| 22 #include "src/snapshot/serialize.h" | |
| 23 #include "src/snapshot/snapshot.h" | |
| 24 #include "src/snapshot/snapshot-source-sink.h" | |
| 25 #include "src/v8threads.h" | |
| 26 #include "src/version.h" | |
| 27 | |
| 28 namespace v8 { | |
| 29 namespace internal { | |
| 30 | |
| 31 | |
| 32 // ----------------------------------------------------------------------------- | |
| 33 // Coding of external references. | |
| 34 | |
| 35 | |
| 36 ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) { | |
| 37 ExternalReferenceTable* external_reference_table = | |
| 38 isolate->external_reference_table(); | |
| 39 if (external_reference_table == NULL) { | |
| 40 external_reference_table = new ExternalReferenceTable(isolate); | |
| 41 isolate->set_external_reference_table(external_reference_table); | |
| 42 } | |
| 43 return external_reference_table; | |
| 44 } | |
| 45 | |
| 46 | |
| 47 ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) { | |
| 48 // Miscellaneous | |
| 49 Add(ExternalReference::roots_array_start(isolate).address(), | |
| 50 "Heap::roots_array_start()"); | |
| 51 Add(ExternalReference::address_of_stack_limit(isolate).address(), | |
| 52 "StackGuard::address_of_jslimit()"); | |
| 53 Add(ExternalReference::address_of_real_stack_limit(isolate).address(), | |
| 54 "StackGuard::address_of_real_jslimit()"); | |
| 55 Add(ExternalReference::new_space_start(isolate).address(), | |
| 56 "Heap::NewSpaceStart()"); | |
| 57 Add(ExternalReference::new_space_mask(isolate).address(), | |
| 58 "Heap::NewSpaceMask()"); | |
| 59 Add(ExternalReference::new_space_allocation_limit_address(isolate).address(), | |
| 60 "Heap::NewSpaceAllocationLimitAddress()"); | |
| 61 Add(ExternalReference::new_space_allocation_top_address(isolate).address(), | |
| 62 "Heap::NewSpaceAllocationTopAddress()"); | |
| 63 Add(ExternalReference::debug_break(isolate).address(), "Debug::Break()"); | |
| 64 Add(ExternalReference::debug_step_in_fp_address(isolate).address(), | |
| 65 "Debug::step_in_fp_addr()"); | |
| 66 Add(ExternalReference::mod_two_doubles_operation(isolate).address(), | |
| 67 "mod_two_doubles"); | |
| 68 // Keyed lookup cache. | |
| 69 Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(), | |
| 70 "KeyedLookupCache::keys()"); | |
| 71 Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(), | |
| 72 "KeyedLookupCache::field_offsets()"); | |
| 73 Add(ExternalReference::handle_scope_next_address(isolate).address(), | |
| 74 "HandleScope::next"); | |
| 75 Add(ExternalReference::handle_scope_limit_address(isolate).address(), | |
| 76 "HandleScope::limit"); | |
| 77 Add(ExternalReference::handle_scope_level_address(isolate).address(), | |
| 78 "HandleScope::level"); | |
| 79 Add(ExternalReference::new_deoptimizer_function(isolate).address(), | |
| 80 "Deoptimizer::New()"); | |
| 81 Add(ExternalReference::compute_output_frames_function(isolate).address(), | |
| 82 "Deoptimizer::ComputeOutputFrames()"); | |
| 83 Add(ExternalReference::address_of_min_int().address(), | |
| 84 "LDoubleConstant::min_int"); | |
| 85 Add(ExternalReference::address_of_one_half().address(), | |
| 86 "LDoubleConstant::one_half"); | |
| 87 Add(ExternalReference::isolate_address(isolate).address(), "isolate"); | |
| 88 Add(ExternalReference::address_of_negative_infinity().address(), | |
| 89 "LDoubleConstant::negative_infinity"); | |
| 90 Add(ExternalReference::power_double_double_function(isolate).address(), | |
| 91 "power_double_double_function"); | |
| 92 Add(ExternalReference::power_double_int_function(isolate).address(), | |
| 93 "power_double_int_function"); | |
| 94 Add(ExternalReference::store_buffer_top(isolate).address(), | |
| 95 "store_buffer_top"); | |
| 96 Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan"); | |
| 97 Add(ExternalReference::get_date_field_function(isolate).address(), | |
| 98 "JSDate::GetField"); | |
| 99 Add(ExternalReference::date_cache_stamp(isolate).address(), | |
| 100 "date_cache_stamp"); | |
| 101 Add(ExternalReference::address_of_pending_message_obj(isolate).address(), | |
| 102 "address_of_pending_message_obj"); | |
| 103 Add(ExternalReference::get_make_code_young_function(isolate).address(), | |
| 104 "Code::MakeCodeYoung"); | |
| 105 Add(ExternalReference::cpu_features().address(), "cpu_features"); | |
| 106 Add(ExternalReference::old_space_allocation_top_address(isolate).address(), | |
| 107 "Heap::OldSpaceAllocationTopAddress"); | |
| 108 Add(ExternalReference::old_space_allocation_limit_address(isolate).address(), | |
| 109 "Heap::OldSpaceAllocationLimitAddress"); | |
| 110 Add(ExternalReference::allocation_sites_list_address(isolate).address(), | |
| 111 "Heap::allocation_sites_list_address()"); | |
| 112 Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias"); | |
| 113 Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(), | |
| 114 "Code::MarkCodeAsExecuted"); | |
| 115 Add(ExternalReference::is_profiling_address(isolate).address(), | |
| 116 "CpuProfiler::is_profiling"); | |
| 117 Add(ExternalReference::scheduled_exception_address(isolate).address(), | |
| 118 "Isolate::scheduled_exception"); | |
| 119 Add(ExternalReference::invoke_function_callback(isolate).address(), | |
| 120 "InvokeFunctionCallback"); | |
| 121 Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(), | |
| 122 "InvokeAccessorGetterCallback"); | |
| 123 Add(ExternalReference::flush_icache_function(isolate).address(), | |
| 124 "CpuFeatures::FlushICache"); | |
| 125 Add(ExternalReference::log_enter_external_function(isolate).address(), | |
| 126 "Logger::EnterExternal"); | |
| 127 Add(ExternalReference::log_leave_external_function(isolate).address(), | |
| 128 "Logger::LeaveExternal"); | |
| 129 Add(ExternalReference::address_of_minus_one_half().address(), | |
| 130 "double_constants.minus_one_half"); | |
| 131 Add(ExternalReference::stress_deopt_count(isolate).address(), | |
| 132 "Isolate::stress_deopt_count_address()"); | |
| 133 | |
| 134 // Debug addresses | |
| 135 Add(ExternalReference::debug_after_break_target_address(isolate).address(), | |
| 136 "Debug::after_break_target_address()"); | |
| 137 Add(ExternalReference::debug_restarter_frame_function_pointer_address(isolate) | |
| 138 .address(), | |
| 139 "Debug::restarter_frame_function_pointer_address()"); | |
| 140 Add(ExternalReference::debug_is_active_address(isolate).address(), | |
| 141 "Debug::is_active_address()"); | |
| 142 | |
| 143 #ifndef V8_INTERPRETED_REGEXP | |
| 144 Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(), | |
| 145 "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); | |
| 146 Add(ExternalReference::re_check_stack_guard_state(isolate).address(), | |
| 147 "RegExpMacroAssembler*::CheckStackGuardState()"); | |
| 148 Add(ExternalReference::re_grow_stack(isolate).address(), | |
| 149 "NativeRegExpMacroAssembler::GrowStack()"); | |
| 150 Add(ExternalReference::re_word_character_map().address(), | |
| 151 "NativeRegExpMacroAssembler::word_character_map"); | |
| 152 Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(), | |
| 153 "RegExpStack::limit_address()"); | |
| 154 Add(ExternalReference::address_of_regexp_stack_memory_address(isolate) | |
| 155 .address(), | |
| 156 "RegExpStack::memory_address()"); | |
| 157 Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(), | |
| 158 "RegExpStack::memory_size()"); | |
| 159 Add(ExternalReference::address_of_static_offsets_vector(isolate).address(), | |
| 160 "OffsetsVector::static_offsets_vector"); | |
| 161 #endif // V8_INTERPRETED_REGEXP | |
| 162 | |
| 163 // The following populates all of the different types of external references | |
| 164 // into the ExternalReferenceTable. | |
| 165 // | |
| 166 // NOTE: This function was originally 100k of code. It has since been | |
| 167 // rewritten to be mostly table driven, as the callback macro style tends to | |
| 168 // cause code bloat very easily. Please be careful in the future when adding | |
| 169 // new references. | |
| 170 | |
| 171 struct RefTableEntry { | |
| 172 uint16_t id; | |
| 173 const char* name; | |
| 174 }; | |
| 175 | |
| 176 static const RefTableEntry c_builtins[] = { | |
| 177 #define DEF_ENTRY_C(name, ignored) \ | |
| 178 { Builtins::c_##name, "Builtins::" #name } \ | |
| 179 , | |
| 180 BUILTIN_LIST_C(DEF_ENTRY_C) | |
| 181 #undef DEF_ENTRY_C | |
| 182 }; | |
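| // Each BUILTIN_LIST_C entry expands via DEF_ENTRY_C to an initializer of | |
| // the form { Builtins::c_<name>, "Builtins::<name>" }, yielding a flat | |
| // id/name table for the loop below. | |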
| 183 | |
| 184 for (unsigned i = 0; i < arraysize(c_builtins); ++i) { | |
| 185 ExternalReference ref(static_cast<Builtins::CFunctionId>(c_builtins[i].id), | |
| 186 isolate); | |
| 187 Add(ref.address(), c_builtins[i].name); | |
| 188 } | |
| 189 | |
| 190 static const RefTableEntry builtins[] = { | |
| 191 #define DEF_ENTRY_C(name, ignored) \ | |
| 192 { Builtins::k##name, "Builtins::" #name } \ | |
| 193 , | |
| 194 #define DEF_ENTRY_A(name, i1, i2, i3) \ | |
| 195 { Builtins::k##name, "Builtins::" #name } \ | |
| 196 , | |
| 197 BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A) | |
| 198 BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A) | |
| 199 #undef DEF_ENTRY_C | |
| 200 #undef DEF_ENTRY_A | |
| 201 }; | |
| 202 | |
| 203 for (unsigned i = 0; i < arraysize(builtins); ++i) { | |
| 204 ExternalReference ref(static_cast<Builtins::Name>(builtins[i].id), isolate); | |
| 205 Add(ref.address(), builtins[i].name); | |
| 206 } | |
| 207 | |
| 208 static const RefTableEntry runtime_functions[] = { | |
| 209 #define RUNTIME_ENTRY(name, i1, i2) \ | |
| 210 { Runtime::k##name, "Runtime::" #name } \ | |
| 211 , | |
| 212 FOR_EACH_INTRINSIC(RUNTIME_ENTRY) | |
| 213 #undef RUNTIME_ENTRY | |
| 214 }; | |
| 215 | |
| 216 for (unsigned i = 0; i < arraysize(runtime_functions); ++i) { | |
| 217 ExternalReference ref( | |
| 218 static_cast<Runtime::FunctionId>(runtime_functions[i].id), isolate); | |
| 219 Add(ref.address(), runtime_functions[i].name); | |
| 220 } | |
| 221 | |
| 222 static const RefTableEntry inline_caches[] = { | |
| 223 #define IC_ENTRY(name) \ | |
| 224 { IC::k##name, "IC::" #name } \ | |
| 225 , | |
| 226 IC_UTIL_LIST(IC_ENTRY) | |
| 227 #undef IC_ENTRY | |
| 228 }; | |
| 229 | |
| 230 for (unsigned i = 0; i < arraysize(inline_caches); ++i) { | |
| 231 ExternalReference ref( | |
| 232 IC_Utility(static_cast<IC::UtilityId>(inline_caches[i].id)), isolate); | |
| 233 Add(ref.address(), inline_caches[i].name); | |
| 234 } | |
| 235 | |
| 236 // Stat counters | |
| 237 struct StatsRefTableEntry { | |
| 238 StatsCounter* (Counters::*counter)(); | |
| 239 const char* name; | |
| 240 }; | |
| 241 | |
| 242 static const StatsRefTableEntry stats_ref_table[] = { | |
| 243 #define COUNTER_ENTRY(name, caption) \ | |
| 244 { &Counters::name, "Counters::" #name } \ | |
| 245 , | |
| 246 STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY) | |
| 247 #undef COUNTER_ENTRY | |
| 248 }; | |
| 249 | |
| 250 Counters* counters = isolate->counters(); | |
| 251 for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) { | |
| 252 // To make sure the indices are not dependent on whether counters are | |
| 253 // enabled, use a dummy address as filler. | |
| 254 Address address = NotAvailable(); | |
| 255 StatsCounter* counter = (counters->*(stats_ref_table[i].counter))(); | |
| 256 if (counter->Enabled()) { | |
| 257 address = reinterpret_cast<Address>(counter->GetInternalPointer()); | |
| 258 } | |
| 259 Add(address, stats_ref_table[i].name); | |
| 260 } | |
| 261 | |
| 262 // Top addresses | |
| 263 static const char* address_names[] = { | |
| 264 #define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address", | |
| 265 FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) NULL | |
| 266 #undef BUILD_NAME_LITERAL | |
| 267 }; | |
| 268 | |
| 269 for (int i = 0; i < Isolate::kIsolateAddressCount; ++i) { | |
| 270 Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)), | |
| 271 address_names[i]); | |
| 272 } | |
| 273 | |
| 274 // Accessors | |
| 275 struct AccessorRefTable { | |
| 276 Address address; | |
| 277 const char* name; | |
| 278 }; | |
| 279 | |
| 280 static const AccessorRefTable accessors[] = { | |
| 281 #define ACCESSOR_INFO_DECLARATION(name) \ | |
| 282 { FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter" } \ | |
| 283 , {FUNCTION_ADDR(&Accessors::name##Setter), "Accessors::" #name "Setter"}, | |
| 284 ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION) | |
| 285 #undef ACCESSOR_INFO_DECLARATION | |
| 286 }; | |
| 287 | |
| 288 for (unsigned i = 0; i < arraysize(accessors); ++i) { | |
| 289 Add(accessors[i].address, accessors[i].name); | |
| 290 } | |
| 291 | |
| 292 StubCache* stub_cache = isolate->stub_cache(); | |
| 293 | |
| 294 // Stub cache tables | |
| 295 Add(stub_cache->key_reference(StubCache::kPrimary).address(), | |
| 296 "StubCache::primary_->key"); | |
| 297 Add(stub_cache->value_reference(StubCache::kPrimary).address(), | |
| 298 "StubCache::primary_->value"); | |
| 299 Add(stub_cache->map_reference(StubCache::kPrimary).address(), | |
| 300 "StubCache::primary_->map"); | |
| 301 Add(stub_cache->key_reference(StubCache::kSecondary).address(), | |
| 302 "StubCache::secondary_->key"); | |
| 303 Add(stub_cache->value_reference(StubCache::kSecondary).address(), | |
| 304 "StubCache::secondary_->value"); | |
| 305 Add(stub_cache->map_reference(StubCache::kSecondary).address(), | |
| 306 "StubCache::secondary_->map"); | |
| 307 | |
| 308 // Runtime entries | |
| 309 Add(ExternalReference::delete_handle_scope_extensions(isolate).address(), | |
| 310 "HandleScope::DeleteExtensions"); | |
| 311 Add(ExternalReference::incremental_marking_record_write_function(isolate) | |
| 312 .address(), | |
| 313 "IncrementalMarking::RecordWrite"); | |
| 314 Add(ExternalReference::store_buffer_overflow_function(isolate).address(), | |
| 315 "StoreBuffer::StoreBufferOverflow"); | |
| 316 | |
| 317 // Add a small set of deopt entry addresses to the encoder without generating | |
| 318 // the deopt table code, which isn't possible at deserialization time. | |
| 319 HandleScope scope(isolate); | |
| 320 for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) { | |
| 321 Address address = Deoptimizer::GetDeoptimizationEntry( | |
| 322 isolate, | |
| 323 entry, | |
| 324 Deoptimizer::LAZY, | |
| 325 Deoptimizer::CALCULATE_ENTRY_ADDRESS); | |
| 326 Add(address, "lazy_deopt"); | |
| 327 } | |
| 328 } | |
| 329 | |
| 330 | |
| 331 ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) { | |
| 332 map_ = isolate->external_reference_map(); | |
| 333 if (map_ != NULL) return; | |
| 334 map_ = new HashMap(HashMap::PointersMatch); | |
| 335 ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate); | |
| 336 for (int i = 0; i < table->size(); ++i) { | |
| 337 Address addr = table->address(i); | |
| 338 if (addr == ExternalReferenceTable::NotAvailable()) continue; | |
| 339 // We expect no duplicate external reference entries in the table. | |
| 340 DCHECK_NULL(map_->Lookup(addr, Hash(addr))); | |
| 341 map_->LookupOrInsert(addr, Hash(addr))->value = reinterpret_cast<void*>(i); | |
| 342 } | |
| 343 isolate->set_external_reference_map(map_); | |
| 344 } | |
| 345 | |
| 346 | |
| 347 uint32_t ExternalReferenceEncoder::Encode(Address address) const { | |
| 348 DCHECK_NOT_NULL(address); | |
| 349 HashMap::Entry* entry = | |
| 350 const_cast<HashMap*>(map_)->Lookup(address, Hash(address)); | |
| 351 DCHECK_NOT_NULL(entry); | |
| 352 return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value)); | |
| 353 } | |
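| // The encoded value is simply the index of the address in the | |
| // ExternalReferenceTable; the deserializer maps it back through | |
| // external_reference_table_->address(index) in ReadData. | |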
| 354 | |
| 355 | |
| 356 const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate, | |
| 357 Address address) const { | |
| 358 HashMap::Entry* entry = | |
| 359 const_cast<HashMap*>(map_)->Lookup(address, Hash(address)); | |
| 360 if (entry == NULL) return "<unknown>"; | |
| 361 uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value)); | |
| 362 return ExternalReferenceTable::instance(isolate)->name(i); | |
| 363 } | |
| 364 | |
| 365 | |
| 366 RootIndexMap::RootIndexMap(Isolate* isolate) { | |
| 367 map_ = isolate->root_index_map(); | |
| 368 if (map_ != NULL) return; | |
| 369 map_ = new HashMap(HashMap::PointersMatch); | |
| 370 Object** root_array = isolate->heap()->roots_array_start(); | |
| 371 for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) { | |
| 372 Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i); | |
| 373 Object* root = root_array[root_index]; | |
| 374 // Omit root entries that can be written after initialization. They must | |
| 375 // not be referenced through the root list in the snapshot. | |
| 376 if (root->IsHeapObject() && | |
| 377 isolate->heap()->RootCanBeTreatedAsConstant(root_index)) { | |
| 378 HeapObject* heap_object = HeapObject::cast(root); | |
| 379 HashMap::Entry* entry = LookupEntry(map_, heap_object, false); | |
| 380 if (entry != NULL) { | |
| 381 // Some are initialized to a previous value in the root list. | |
| 382 DCHECK_LT(GetValue(entry), i); | |
| 383 } else { | |
| 384 SetValue(LookupEntry(map_, heap_object, true), i); | |
| 385 } | |
| 386 } | |
| 387 } | |
| 388 isolate->set_root_index_map(map_); | |
| 389 } | |
| 390 | |
| 391 | |
| 392 class CodeAddressMap: public CodeEventLogger { | |
| 393 public: | |
| 394 explicit CodeAddressMap(Isolate* isolate) | |
| 395 : isolate_(isolate) { | |
| 396 isolate->logger()->addCodeEventListener(this); | |
| 397 } | |
| 398 | |
| 399 virtual ~CodeAddressMap() { | |
| 400 isolate_->logger()->removeCodeEventListener(this); | |
| 401 } | |
| 402 | |
| 403 virtual void CodeMoveEvent(Address from, Address to) { | |
| 404 address_to_name_map_.Move(from, to); | |
| 405 } | |
| 406 | |
| 407 virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { | |
| 408 } | |
| 409 | |
| 410 virtual void CodeDeleteEvent(Address from) { | |
| 411 address_to_name_map_.Remove(from); | |
| 412 } | |
| 413 | |
| 414 const char* Lookup(Address address) { | |
| 415 return address_to_name_map_.Lookup(address); | |
| 416 } | |
| 417 | |
| 418 private: | |
| 419 class NameMap { | |
| 420 public: | |
| 421 NameMap() : impl_(HashMap::PointersMatch) {} | |
| 422 | |
| 423 ~NameMap() { | |
| 424 for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) { | |
| 425 DeleteArray(static_cast<const char*>(p->value)); | |
| 426 } | |
| 427 } | |
| 428 | |
| 429 void Insert(Address code_address, const char* name, int name_size) { | |
| 430 HashMap::Entry* entry = FindOrCreateEntry(code_address); | |
| 431 if (entry->value == NULL) { | |
| 432 entry->value = CopyName(name, name_size); | |
| 433 } | |
| 434 } | |
| 435 | |
| 436 const char* Lookup(Address code_address) { | |
| 437 HashMap::Entry* entry = FindEntry(code_address); | |
| 438 return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL; | |
| 439 } | |
| 440 | |
| 441 void Remove(Address code_address) { | |
| 442 HashMap::Entry* entry = FindEntry(code_address); | |
| 443 if (entry != NULL) { | |
| 444 DeleteArray(static_cast<char*>(entry->value)); | |
| 445 RemoveEntry(entry); | |
| 446 } | |
| 447 } | |
| 448 | |
| 449 void Move(Address from, Address to) { | |
| 450 if (from == to) return; | |
| 451 HashMap::Entry* from_entry = FindEntry(from); | |
| 452 DCHECK(from_entry != NULL); | |
| 453 void* value = from_entry->value; | |
| 454 RemoveEntry(from_entry); | |
| 455 HashMap::Entry* to_entry = FindOrCreateEntry(to); | |
| 456 DCHECK(to_entry->value == NULL); | |
| 457 to_entry->value = value; | |
| 458 } | |
| 459 | |
| 460 private: | |
| 461 static char* CopyName(const char* name, int name_size) { | |
| 462 char* result = NewArray<char>(name_size + 1); | |
| 463 for (int i = 0; i < name_size; ++i) { | |
| 464 char c = name[i]; | |
| 465 if (c == '\0') c = ' '; | |
| 466 result[i] = c; | |
| 467 } | |
| 468 result[name_size] = '\0'; | |
| 469 return result; | |
| 470 } | |
| 471 | |
| 472 HashMap::Entry* FindOrCreateEntry(Address code_address) { | |
| 473 return impl_.LookupOrInsert(code_address, | |
| 474 ComputePointerHash(code_address)); | |
| 475 } | |
| 476 | |
| 477 HashMap::Entry* FindEntry(Address code_address) { | |
| 478 return impl_.Lookup(code_address, ComputePointerHash(code_address)); | |
| 479 } | |
| 480 | |
| 481 void RemoveEntry(HashMap::Entry* entry) { | |
| 482 impl_.Remove(entry->key, entry->hash); | |
| 483 } | |
| 484 | |
| 485 HashMap impl_; | |
| 486 | |
| 487 DISALLOW_COPY_AND_ASSIGN(NameMap); | |
| 488 }; | |
| 489 | |
| 490 virtual void LogRecordedBuffer(Code* code, | |
| 491 SharedFunctionInfo*, | |
| 492 const char* name, | |
| 493 int length) { | |
| 494 address_to_name_map_.Insert(code->address(), name, length); | |
| 495 } | |
| 496 | |
| 497 NameMap address_to_name_map_; | |
| 498 Isolate* isolate_; | |
| 499 }; | |
| 500 | |
| 501 | |
| 502 void Deserializer::DecodeReservation( | |
| 503 Vector<const SerializedData::Reservation> res) { | |
| 504 DCHECK_EQ(0, reservations_[NEW_SPACE].length()); | |
| 505 STATIC_ASSERT(NEW_SPACE == 0); | |
| 506 int current_space = NEW_SPACE; | |
| 507 for (auto& r : res) { | |
| 508 reservations_[current_space].Add({r.chunk_size(), NULL, NULL}); | |
| 509 if (r.is_last()) current_space++; | |
| 510 } | |
| 511 DCHECK_EQ(kNumberOfSpaces, current_space); | |
| 512 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0; | |
| 513 } | |
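| // For example (a sketch, not actual snapshot data): a reservation stream | |
| // with chunk sizes [8K, 4K*, 16K*, ...], where * marks is_last, assigns | |
| // chunks {8K, 4K} to NEW_SPACE and starts the next space with 16K. | |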
| 514 | |
| 515 | |
| 516 void Deserializer::FlushICacheForNewCodeObjects() { | |
| 517 PageIterator it(isolate_->heap()->code_space()); | |
| 518 while (it.has_next()) { | |
| 519 Page* p = it.next(); | |
| 520 CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start()); | |
| 521 } | |
| 522 } | |
| 523 | |
| 524 | |
| 525 bool Deserializer::ReserveSpace() { | |
| 526 #ifdef DEBUG | |
| 527 for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) { | |
| 528 CHECK(reservations_[i].length() > 0); | |
| 529 } | |
| 530 #endif // DEBUG | |
| 531 if (!isolate_->heap()->ReserveSpace(reservations_)) return false; | |
| 532 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) { | |
| 533 high_water_[i] = reservations_[i][0].start; | |
| 534 } | |
| 535 return true; | |
| 536 } | |
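| // high_water_ acts as a bump-allocation pointer within the current chunk | |
| // of each preallocated space; see Deserializer::Allocate below. | |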
| 537 | |
| 538 | |
| 539 void Deserializer::Initialize(Isolate* isolate) { | |
| 540 DCHECK_NULL(isolate_); | |
| 541 DCHECK_NOT_NULL(isolate); | |
| 542 isolate_ = isolate; | |
| 543 DCHECK_NULL(external_reference_table_); | |
| 544 external_reference_table_ = ExternalReferenceTable::instance(isolate); | |
| 545 CHECK_EQ(magic_number_, | |
| 546 SerializedData::ComputeMagicNumber(external_reference_table_)); | |
| 547 } | |
| 548 | |
| 549 | |
| 550 void Deserializer::Deserialize(Isolate* isolate) { | |
| 551 Initialize(isolate); | |
| 552 if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context"); | |
| 553 // No active threads. | |
| 554 DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse()); | |
| 555 // No active handles. | |
| 556 DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty()); | |
| 557 isolate_->heap()->IterateSmiRoots(this); | |
| 558 isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); | |
| 559 isolate_->heap()->RepairFreeListsAfterDeserialization(); | |
| 560 isolate_->heap()->IterateWeakRoots(this, VISIT_ALL); | |
| 561 | |
| 562 isolate_->heap()->set_native_contexts_list( | |
| 563 isolate_->heap()->undefined_value()); | |
| 564 isolate_->heap()->set_array_buffers_list( | |
| 565 isolate_->heap()->undefined_value()); | |
| 566 isolate_->heap()->set_new_array_buffer_views_list( | |
| 567 isolate_->heap()->undefined_value()); | |
| 568 | |
| 569 // The allocation site list is built during root iteration, but if no sites | |
| 570 // were encountered then it needs to be initialized to undefined. | |
| 571 if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) { | |
| 572 isolate_->heap()->set_allocation_sites_list( | |
| 573 isolate_->heap()->undefined_value()); | |
| 574 } | |
| 575 | |
| 576 // Update data pointers to the external strings containing natives sources. | |
| 577 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { | |
| 578 Object* source = isolate_->heap()->natives_source_cache()->get(i); | |
| 579 if (!source->IsUndefined()) { | |
| 580 ExternalOneByteString::cast(source)->update_data_cache(); | |
| 581 } | |
| 582 } | |
| 583 | |
| 584 FlushICacheForNewCodeObjects(); | |
| 585 | |
| 586 // Issue code events for newly deserialized code objects. | |
| 587 LOG_CODE_EVENT(isolate_, LogCodeObjects()); | |
| 588 LOG_CODE_EVENT(isolate_, LogCompiledFunctions()); | |
| 589 } | |
| 590 | |
| 591 | |
| 592 MaybeHandle<Object> Deserializer::DeserializePartial( | |
| 593 Isolate* isolate, Handle<JSGlobalProxy> global_proxy, | |
| 594 Handle<FixedArray>* outdated_contexts_out) { | |
| 595 Initialize(isolate); | |
| 596 if (!ReserveSpace()) { | |
| 597 V8::FatalProcessOutOfMemory("deserialize context"); | |
| 598 return MaybeHandle<Object>(); | |
| 599 } | |
| 600 | |
| 601 Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1); | |
| 602 attached_objects[kGlobalProxyReference] = global_proxy; | |
| 603 SetAttachedObjects(attached_objects); | |
| 604 | |
| 605 DisallowHeapAllocation no_gc; | |
| 606 // Keep track of the code space start and end pointers in case new | |
| 607 // code objects were deserialized. | |
| 608 OldSpace* code_space = isolate_->heap()->code_space(); | |
| 609 Address start_address = code_space->top(); | |
| 610 Object* root; | |
| 611 Object* outdated_contexts; | |
| 612 VisitPointer(&root); | |
| 613 VisitPointer(&outdated_contexts); | |
| 614 | |
| 615 // No code is deserialized here. If this assert fires, that has changed | |
| 616 // and logging should be added to notify the profiler et al. of the new | |
| 617 // code. | |
| 618 CHECK_EQ(start_address, code_space->top()); | |
| 619 CHECK(outdated_contexts->IsFixedArray()); | |
| 620 *outdated_contexts_out = | |
| 621 Handle<FixedArray>(FixedArray::cast(outdated_contexts), isolate); | |
| 622 return Handle<Object>(root, isolate); | |
| 623 } | |
| 624 | |
| 625 | |
| 626 MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode( | |
| 627 Isolate* isolate) { | |
| 628 Initialize(isolate); | |
| 629 if (!ReserveSpace()) { | |
| 630 return Handle<SharedFunctionInfo>(); | |
| 631 } else { | |
| 632 deserializing_user_code_ = true; | |
| 633 DisallowHeapAllocation no_gc; | |
| 634 Object* root; | |
| 635 VisitPointer(&root); | |
| 636 return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root)); | |
| 637 } | |
| 638 } | |
| 639 | |
| 640 | |
| 641 Deserializer::~Deserializer() { | |
| 642 // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed. | |
| 643 // DCHECK(source_.AtEOF()); | |
| 644 attached_objects_.Dispose(); | |
| 645 } | |
| 646 | |
| 647 | |
| 648 // This is called on the roots. It is the driver of the deserialization | |
| 649 // process. It is also called on the body of each function. | |
| 650 void Deserializer::VisitPointers(Object** start, Object** end) { | |
| 651 // The space must be new space. Any other space would cause ReadData to try | |
| 652 // to update the remembered set using NULL as the address. | |
| 653 ReadData(start, end, NEW_SPACE, NULL); | |
| 654 } | |
| 655 | |
| 656 | |
| 657 void Deserializer::RelinkAllocationSite(AllocationSite* site) { | |
| 658 if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) { | |
| 659 site->set_weak_next(isolate_->heap()->undefined_value()); | |
| 660 } else { | |
| 661 site->set_weak_next(isolate_->heap()->allocation_sites_list()); | |
| 662 } | |
| 663 isolate_->heap()->set_allocation_sites_list(site); | |
| 664 } | |
| 665 | |
| 666 | |
| 667 // Used to insert a deserialized internalized string into the string table. | |
| 668 class StringTableInsertionKey : public HashTableKey { | |
| 669 public: | |
| 670 explicit StringTableInsertionKey(String* string) | |
| 671 : string_(string), hash_(HashForObject(string)) { | |
| 672 DCHECK(string->IsInternalizedString()); | |
| 673 } | |
| 674 | |
| 675 bool IsMatch(Object* string) OVERRIDE { | |
| 676 // We know that all entries in a hash table have their hash computed. | |
| 677 // Use that knowledge for a fast failure path. | |
| 678 if (hash_ != HashForObject(string)) return false; | |
| 679 // We want to compare the content of two internalized strings here. | |
| 680 return string_->SlowEquals(String::cast(string)); | |
| 681 } | |
| 682 | |
| 683 uint32_t Hash() OVERRIDE { return hash_; } | |
| 684 | |
| 685 uint32_t HashForObject(Object* key) OVERRIDE { | |
| 686 return String::cast(key)->Hash(); | |
| 687 } | |
| 688 | |
| 689 MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate) | |
| 690 OVERRIDE { | |
| 691 return handle(string_, isolate); | |
| 692 } | |
| 693 | |
| 694 String* string_; | |
| 695 uint32_t hash_; | |
| 696 }; | |
| 697 | |
| 698 | |
| 699 HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) { | |
| 700 if (obj->IsString()) { | |
| 701 String* string = String::cast(obj); | |
| 702 // Uninitialize hash field as the hash seed may have changed. | |
| 703 string->set_hash_field(String::kEmptyHashField); | |
| 704 if (string->IsInternalizedString()) { | |
| 705 DisallowHeapAllocation no_gc; | |
| 706 HandleScope scope(isolate_); | |
| 707 StringTableInsertionKey key(string); | |
| 708 String* canonical = *StringTable::LookupKey(isolate_, &key); | |
| 709 string->SetForwardedInternalizedString(canonical); | |
| 710 return canonical; | |
| 711 } | |
| 712 } else if (obj->IsScript()) { | |
| 713 Script::cast(obj)->set_id(isolate_->heap()->NextScriptId()); | |
| 714 } | |
| 715 return obj; | |
| 716 } | |
| 717 | |
| 718 | |
| 719 HeapObject* Deserializer::GetBackReferencedObject(int space) { | |
| 720 HeapObject* obj; | |
| 721 BackReference back_reference(source_.GetInt()); | |
| 722 if (space == LO_SPACE) { | |
| 723 CHECK(back_reference.chunk_index() == 0); | |
| 724 uint32_t index = back_reference.large_object_index(); | |
| 725 obj = deserialized_large_objects_[index]; | |
| 726 } else { | |
| 727 DCHECK(space < kNumberOfPreallocatedSpaces); | |
| 728 uint32_t chunk_index = back_reference.chunk_index(); | |
| 729 DCHECK_LE(chunk_index, current_chunk_[space]); | |
| 730 uint32_t chunk_offset = back_reference.chunk_offset(); | |
| 731 obj = HeapObject::FromAddress(reservations_[space][chunk_index].start + | |
| 732 chunk_offset); | |
| 733 } | |
| 734 if (deserializing_user_code() && obj->IsInternalizedString()) { | |
| 735 obj = String::cast(obj)->GetForwardedInternalizedString(); | |
| 736 } | |
| 737 hot_objects_.Add(obj); | |
| 738 return obj; | |
| 739 } | |
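| // A back reference is decoded from a single integer in the byte stream: | |
| // for LO_SPACE it holds an index into deserialized_large_objects_; | |
| // otherwise a chunk index and offset into the space's reservation. | |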
| 740 | |
| 741 | |
| 742 // This routine writes the new object into the pointer provided (the | |
| 743 // routine itself returns nothing). The reason for this strange interface | |
| 744 // is that otherwise the object is written very late, which means the | |
| 745 // FreeSpace map is not set up by the time we need to use it to mark the | |
| 746 // space at the end of a page free. | |
| 747 void Deserializer::ReadObject(int space_number, Object** write_back) { | |
| 748 Address address; | |
| 749 HeapObject* obj; | |
| 750 int next_int = source_.GetInt(); | |
| 751 | |
| 752 bool double_align = false; | |
| 753 #ifndef V8_HOST_ARCH_64_BIT | |
| 754 double_align = next_int == kDoubleAlignmentSentinel; | |
| 755 if (double_align) next_int = source_.GetInt(); | |
| 756 #endif | |
| 757 | |
| 758 DCHECK_NE(kDoubleAlignmentSentinel, next_int); | |
| 759 int size = next_int << kObjectAlignmentBits; | |
| 760 int reserved_size = size + (double_align ? kPointerSize : 0); | |
| 761 address = Allocate(space_number, reserved_size); | |
| 762 obj = HeapObject::FromAddress(address); | |
| 763 if (double_align) { | |
| 764 obj = isolate_->heap()->DoubleAlignForDeserialization(obj, reserved_size); | |
| 765 address = obj->address(); | |
| 766 } | |
| 767 | |
| 768 isolate_->heap()->OnAllocationEvent(obj, size); | |
| 769 Object** current = reinterpret_cast<Object**>(address); | |
| 770 Object** limit = current + (size >> kPointerSizeLog2); | |
| 771 if (FLAG_log_snapshot_positions) { | |
| 772 LOG(isolate_, SnapshotPositionEvent(address, source_.position())); | |
| 773 } | |
| 774 ReadData(current, limit, space_number, address); | |
| 775 | |
| 776 // TODO(mvstanton): consider treating the heap()->allocation_sites_list() | |
| 777 // as a (weak) root. If this root is relocated correctly, | |
| 778 // RelinkAllocationSite() isn't necessary. | |
| 779 if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj)); | |
| 780 | |
| 781 // Fix up strings from serialized user code. | |
| 782 if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj); | |
| 783 | |
| 784 Object* write_back_obj = obj; | |
| 785 UnalignedCopy(write_back, &write_back_obj); | |
| 786 #ifdef DEBUG | |
| 787 if (obj->IsCode()) { | |
| 788 DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE); | |
| 789 #ifdef VERIFY_HEAP | |
| 790 obj->ObjectVerify(); | |
| 791 #endif // VERIFY_HEAP | |
| 792 } else { | |
| 793 DCHECK(space_number != CODE_SPACE); | |
| 794 } | |
| 795 #endif // DEBUG | |
| 796 } | |
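| // Note that object sizes are transmitted in words (hence the shift by | |
| // kObjectAlignmentBits), and kDoubleAlignmentSentinel prefixes objects that | |
| // need double alignment on non-64-bit hosts. | |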
| 797 | |
| 798 | |
| 799 // We know the space requirements before deserialization and can | |
| 800 // pre-allocate that reserved space. During deserialization, all we need | |
| 801 // to do is to bump up the pointer for each space in the reserved | |
| 802 // space. This is also used for fixing back references. | |
| 803 // We may have to split up the pre-allocation into several chunks | |
| 804 // because it would not fit onto a single page. We do not have to keep | |
| 805 // track of when to move to the next chunk. An opcode will signal this. | |
| 806 // Since multiple large objects cannot be folded into one large object | |
| 807 // space allocation, we have to do an actual allocation when deserializing | |
| 808 // each large object. Instead of tracking offset for back references, we | |
| 809 // reference large objects by index. | |
| 810 Address Deserializer::Allocate(int space_index, int size) { | |
| 811 if (space_index == LO_SPACE) { | |
| 812 AlwaysAllocateScope scope(isolate_); | |
| 813 LargeObjectSpace* lo_space = isolate_->heap()->lo_space(); | |
| 814 Executability exec = static_cast<Executability>(source_.Get()); | |
| 815 AllocationResult result = lo_space->AllocateRaw(size, exec); | |
| 816 HeapObject* obj = HeapObject::cast(result.ToObjectChecked()); | |
| 817 deserialized_large_objects_.Add(obj); | |
| 818 return obj->address(); | |
| 819 } else { | |
| 820 DCHECK(space_index < kNumberOfPreallocatedSpaces); | |
| 821 Address address = high_water_[space_index]; | |
| 822 DCHECK_NOT_NULL(address); | |
| 823 high_water_[space_index] += size; | |
| 824 #ifdef DEBUG | |
| 825 // Assert that the current reserved chunk is still big enough. | |
| 826 const Heap::Reservation& reservation = reservations_[space_index]; | |
| 827 int chunk_index = current_chunk_[space_index]; | |
| 828 CHECK_LE(high_water_[space_index], reservation[chunk_index].end); | |
| 829 #endif | |
| 830 return address; | |
| 831 } | |
| 832 } | |
| 833 | |
| 834 | |
| 835 void Deserializer::ReadData(Object** current, Object** limit, int source_space, | |
| 836 Address current_object_address) { | |
| 837 Isolate* const isolate = isolate_; | |
| 838 // Write barrier support costs around 1% in startup time. In fact there | |
| 839 // are no new space objects in current boot snapshots, so it's not needed, | |
| 840 // but that may change. | |
| 841 bool write_barrier_needed = | |
| 842 (current_object_address != NULL && source_space != NEW_SPACE && | |
| 843 source_space != CODE_SPACE); | |
| 844 while (current < limit) { | |
| 845 byte data = source_.Get(); | |
| 846 switch (data) { | |
| 847 #define CASE_STATEMENT(where, how, within, space_number) \ | |
| 848 case where + how + within + space_number: \ | |
| 849 STATIC_ASSERT((where & ~kWhereMask) == 0); \ | |
| 850 STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \ | |
| 851 STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \ | |
| 852 STATIC_ASSERT((space_number & ~kSpaceMask) == 0); | |
| 853 | |
| 854 #define CASE_BODY(where, how, within, space_number_if_any) \ | |
| 855 { \ | |
| 856 bool emit_write_barrier = false; \ | |
| 857 bool current_was_incremented = false; \ | |
| 858 int space_number = space_number_if_any == kAnyOldSpace \ | |
| 859 ? (data & kSpaceMask) \ | |
| 860 : space_number_if_any; \ | |
| 861 if (where == kNewObject && how == kPlain && within == kStartOfObject) { \ | |
| 862 ReadObject(space_number, current); \ | |
| 863 emit_write_barrier = (space_number == NEW_SPACE); \ | |
| 864 } else { \ | |
| 865 Object* new_object = NULL; /* May not be a real Object pointer. */ \ | |
| 866 if (where == kNewObject) { \ | |
| 867 ReadObject(space_number, &new_object); \ | |
| 868 } else if (where == kBackref) { \ | |
| 869 emit_write_barrier = (space_number == NEW_SPACE); \ | |
| 870 new_object = GetBackReferencedObject(data & kSpaceMask); \ | |
| 871 } else if (where == kBackrefWithSkip) { \ | |
| 872 int skip = source_.GetInt(); \ | |
| 873 current = reinterpret_cast<Object**>( \ | |
| 874 reinterpret_cast<Address>(current) + skip); \ | |
| 875 emit_write_barrier = (space_number == NEW_SPACE); \ | |
| 876 new_object = GetBackReferencedObject(data & kSpaceMask); \ | |
| 877 } else if (where == kRootArray) { \ | |
| 878 int root_id = source_.GetInt(); \ | |
| 879 new_object = isolate->heap()->roots_array_start()[root_id]; \ | |
| 880 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ | |
| 881 } else if (where == kPartialSnapshotCache) { \ | |
| 882 int cache_index = source_.GetInt(); \ | |
| 883 new_object = isolate->partial_snapshot_cache()->at(cache_index); \ | |
| 884 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ | |
| 885 } else if (where == kExternalReference) { \ | |
| 886 int skip = source_.GetInt(); \ | |
| 887 current = reinterpret_cast<Object**>( \ | |
| 888 reinterpret_cast<Address>(current) + skip); \ | |
| 889 int reference_id = source_.GetInt(); \ | |
| 890 Address address = external_reference_table_->address(reference_id); \ | |
| 891 new_object = reinterpret_cast<Object*>(address); \ | |
| 892 } else if (where == kAttachedReference) { \ | |
| 893 int index = source_.GetInt(); \ | |
| 894 DCHECK(deserializing_user_code() || index == kGlobalProxyReference); \ | |
| 895 new_object = *attached_objects_[index]; \ | |
| 896 emit_write_barrier = isolate->heap()->InNewSpace(new_object); \ | |
| 897 } else { \ | |
| 898 DCHECK(where == kBuiltin); \ | |
| 899 DCHECK(deserializing_user_code()); \ | |
| 900 int builtin_id = source_.GetInt(); \ | |
| 901 DCHECK_LE(0, builtin_id); \ | |
| 902 DCHECK_LT(builtin_id, Builtins::builtin_count); \ | |
| 903 Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \ | |
| 904 new_object = isolate->builtins()->builtin(name); \ | |
| 905 emit_write_barrier = false; \ | |
| 906 } \ | |
| 907 if (within == kInnerPointer) { \ | |
| 908 if (space_number != CODE_SPACE || new_object->IsCode()) { \ | |
| 909 Code* new_code_object = reinterpret_cast<Code*>(new_object); \ | |
| 910 new_object = \ | |
| 911 reinterpret_cast<Object*>(new_code_object->instruction_start()); \ | |
| 912 } else { \ | |
| 913 DCHECK(space_number == CODE_SPACE); \ | |
| 914 Cell* cell = Cell::cast(new_object); \ | |
| 915 new_object = reinterpret_cast<Object*>(cell->ValueAddress()); \ | |
| 916 } \ | |
| 917 } \ | |
| 918 if (how == kFromCode) { \ | |
| 919 Address location_of_branch_data = reinterpret_cast<Address>(current); \ | |
| 920 Assembler::deserialization_set_special_target_at( \ | |
| 921 location_of_branch_data, \ | |
| 922 Code::cast(HeapObject::FromAddress(current_object_address)), \ | |
| 923 reinterpret_cast<Address>(new_object)); \ | |
| 924 location_of_branch_data += Assembler::kSpecialTargetSize; \ | |
| 925 current = reinterpret_cast<Object**>(location_of_branch_data); \ | |
| 926 current_was_incremented = true; \ | |
| 927 } else { \ | |
| 928 UnalignedCopy(current, &new_object); \ | |
| 929 } \ | |
| 930 } \ | |
| 931 if (emit_write_barrier && write_barrier_needed) { \ | |
| 932 Address current_address = reinterpret_cast<Address>(current); \ | |
| 933 isolate->heap()->RecordWrite( \ | |
| 934 current_object_address, \ | |
| 935 static_cast<int>(current_address - current_object_address)); \ | |
| 936 } \ | |
| 937 if (!current_was_incremented) { \ | |
| 938 current++; \ | |
| 939 } \ | |
| 940 break; \ | |
| 941 } | |
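| // Each opcode byte is the sum where + how + within + space_number, so one | |
| // CASE_BODY can serve several CASE_STATEMENT labels that fall through to | |
| // it. | |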
| 942 | |
| 943 // This generates a case and a body for the new space (which has to do extra | |
| 944 // write barrier handling) and handles the other spaces with fall-through cases | |
| 945 // and one body. | |
| 946 #define ALL_SPACES(where, how, within) \ | |
| 947 CASE_STATEMENT(where, how, within, NEW_SPACE) \ | |
| 948 CASE_BODY(where, how, within, NEW_SPACE) \ | |
| 949 CASE_STATEMENT(where, how, within, OLD_SPACE) \ | |
| 950 CASE_STATEMENT(where, how, within, CODE_SPACE) \ | |
| 951 CASE_STATEMENT(where, how, within, MAP_SPACE) \ | |
| 952 CASE_STATEMENT(where, how, within, LO_SPACE) \ | |
| 953 CASE_BODY(where, how, within, kAnyOldSpace) | |
| 954 | |
| 955 #define FOUR_CASES(byte_code) \ | |
| 956 case byte_code: \ | |
| 957 case byte_code + 1: \ | |
| 958 case byte_code + 2: \ | |
| 959 case byte_code + 3: | |
| 960 | |
| 961 #define SIXTEEN_CASES(byte_code) \ | |
| 962 FOUR_CASES(byte_code) \ | |
| 963 FOUR_CASES(byte_code + 4) \ | |
| 964 FOUR_CASES(byte_code + 8) \ | |
| 965 FOUR_CASES(byte_code + 12) | |
| 966 | |
| 967 // Deserialize a new object and write a pointer to it to the current | |
| 968 // object. | |
| 969 ALL_SPACES(kNewObject, kPlain, kStartOfObject) | |
| 970 // Support for direct instruction pointers in functions. It's an inner | |
| 971 // pointer because it points at the entry point, not at the start of the | |
| 972 // code object. | |
| 973 CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE) | |
| 974 CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE) | |
| 975 // Deserialize a new code object and write a pointer to its first | |
| 976 // instruction to the current code object. | |
| 977 ALL_SPACES(kNewObject, kFromCode, kInnerPointer) | |
| 978 // Find a recently deserialized object using its offset from the current | |
| 979 // allocation point and write a pointer to it to the current object. | |
| 980 ALL_SPACES(kBackref, kPlain, kStartOfObject) | |
| 981 ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject) | |
| 982 #if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ | |
| 983 defined(V8_TARGET_ARCH_PPC) || V8_OOL_CONSTANT_POOL | |
| 984 // Deserialize a new object from pointer found in code and write | |
| 985 // a pointer to it to the current object. Required only for MIPS, PPC or | |
| 986 // ARM with ool constant pool, and omitted on the other architectures | |
| 987 // because it is fully unrolled and would cause bloat. | |
| 988 ALL_SPACES(kNewObject, kFromCode, kStartOfObject) | |
| 989 // Find a recently deserialized code object using its offset from the | |
| 990 // current allocation point and write a pointer to it to the current | |
| 991 // object. Required only for MIPS, PPC or ARM with ool constant pool. | |
| 992 ALL_SPACES(kBackref, kFromCode, kStartOfObject) | |
| 993 ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject) | |
| 994 #endif | |
| 995 // Find a recently deserialized code object using its offset from the | |
| 996 // current allocation point and write a pointer to its first instruction | |
| 997 // to the current code object or the instruction pointer in a function | |
| 998 // object. | |
| 999 ALL_SPACES(kBackref, kFromCode, kInnerPointer) | |
| 1000 ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer) | |
| 1001 ALL_SPACES(kBackref, kPlain, kInnerPointer) | |
| 1002 ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer) | |
| 1003 // Find an object in the roots array and write a pointer to it to the | |
| 1004 // current object. | |
| 1005 CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0) | |
| 1006 CASE_BODY(kRootArray, kPlain, kStartOfObject, 0) | |
| 1007 #if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \ | |
| 1008 defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) | |
| 1009 // Find an object in the roots array and write a pointer to it in code. | |
| 1010 CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0) | |
| 1011 CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0) | |
| 1012 #endif | |
| 1013 // Find an object in the partial snapshot cache and write a pointer to it | |
| 1014 // to the current object. | |
| 1015 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0) | |
| 1016 CASE_BODY(kPartialSnapshotCache, kPlain, kStartOfObject, 0) | |
| 1017 // Find a code entry in the partial snapshot cache and | |
| 1018 // write a pointer to it to the current object. | |
| 1019 CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0) | |
| 1020 CASE_BODY(kPartialSnapshotCache, kPlain, kInnerPointer, 0) | |
| 1021 // Find an external reference and write a pointer to it to the current | |
| 1022 // object. | |
| 1023 CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0) | |
| 1024 CASE_BODY(kExternalReference, kPlain, kStartOfObject, 0) | |
| 1025 // Find an external reference and write a pointer to it in the current | |
| 1026 // code object. | |
| 1027 CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0) | |
| 1028 CASE_BODY(kExternalReference, kFromCode, kStartOfObject, 0) | |
| 1029 // Find an object in the attached references and write a pointer to it to | |
| 1030 // the current object. | |
| 1031 CASE_STATEMENT(kAttachedReference, kPlain, kStartOfObject, 0) | |
| 1032 CASE_BODY(kAttachedReference, kPlain, kStartOfObject, 0) | |
| 1033 CASE_STATEMENT(kAttachedReference, kPlain, kInnerPointer, 0) | |
| 1034 CASE_BODY(kAttachedReference, kPlain, kInnerPointer, 0) | |
| 1035 CASE_STATEMENT(kAttachedReference, kFromCode, kInnerPointer, 0) | |
| 1036 CASE_BODY(kAttachedReference, kFromCode, kInnerPointer, 0) | |
| 1037 // Find a builtin and write a pointer to it to the current object. | |
| 1038 CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0) | |
| 1039 CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0) | |
| 1040 CASE_STATEMENT(kBuiltin, kPlain, kInnerPointer, 0) | |
| 1041 CASE_BODY(kBuiltin, kPlain, kInnerPointer, 0) | |
| 1042 CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0) | |
| 1043 CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0) | |
| 1044 | |
| 1045 #undef CASE_STATEMENT | |
| 1046 #undef CASE_BODY | |
| 1047 #undef ALL_SPACES | |
| 1048 | |
| 1049 case kSkip: { | |
| 1050 int size = source_.GetInt(); | |
| 1051 current = reinterpret_cast<Object**>( | |
| 1052 reinterpret_cast<intptr_t>(current) + size); | |
| 1053 break; | |
| 1054 } | |
| 1055 | |
| 1056 case kInternalReferenceEncoded: | |
| 1057 case kInternalReference: { | |
| 1058 // An internal reference address is not encoded via a skip, but as an | |
| 1059 // offset from the code entry. | |
| 1060 int pc_offset = source_.GetInt(); | |
| 1061 int target_offset = source_.GetInt(); | |
| 1062 Code* code = | |
| 1063 Code::cast(HeapObject::FromAddress(current_object_address)); | |
| 1064 DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size()); | |
| 1065 DCHECK(0 <= target_offset && target_offset <= code->instruction_size()); | |
| 1066 Address pc = code->entry() + pc_offset; | |
| 1067 Address target = code->entry() + target_offset; | |
| 1068 Assembler::deserialization_set_target_internal_reference_at( | |
| 1069 pc, target, data == kInternalReference | |
| 1070 ? RelocInfo::INTERNAL_REFERENCE | |
| 1071 : RelocInfo::INTERNAL_REFERENCE_ENCODED); | |
| 1072 break; | |
| 1073 } | |
| 1074 | |
| 1075 case kNop: | |
| 1076 break; | |
| 1077 | |
| 1078 case kNextChunk: { | |
| 1079 int space = source_.Get(); | |
| 1080 DCHECK(space < kNumberOfPreallocatedSpaces); | |
| 1081 int chunk_index = current_chunk_[space]; | |
| 1082 const Heap::Reservation& reservation = reservations_[space]; | |
| 1083 // Make sure the current chunk is indeed exhausted. | |
| 1084 CHECK_EQ(reservation[chunk_index].end, high_water_[space]); | |
| 1085 // Move to next reserved chunk. | |
| 1086 chunk_index = ++current_chunk_[space]; | |
| 1087 CHECK_LT(chunk_index, reservation.length()); | |
| 1088 high_water_[space] = reservation[chunk_index].start; | |
| 1089 break; | |
| 1090 } | |
| 1091 | |
| 1092 case kSynchronize: | |
| 1093 // Getting here indicates a mismatch between the number of GC roots | |
| 1094 // when serializing and when deserializing. | |
| 1095 CHECK(false); | |
| 1096 break; | |
| 1097 | |
| 1098 case kNativesStringResource: { | |
| 1099 DCHECK(!isolate_->heap()->deserialization_complete()); | |
| 1100 int index = source_.Get(); | |
| 1101 Vector<const char> source_vector = Natives::GetScriptSource(index); | |
| 1102 NativesExternalStringResource* resource = | |
| 1103 new NativesExternalStringResource(source_vector.start(), | |
| 1104 source_vector.length()); | |
| 1105 Object* resource_obj = reinterpret_cast<Object*>(resource); | |
| 1106 UnalignedCopy(current++, &resource_obj); | |
| 1107 break; | |
| 1108 } | |
| 1109 | |
| 1110 // Deserialize raw data of variable length. | |
| 1111 case kVariableRawData: { | |
| 1112 int size_in_bytes = source_.GetInt(); | |
| 1113 byte* raw_data_out = reinterpret_cast<byte*>(current); | |
| 1114 source_.CopyRaw(raw_data_out, size_in_bytes); | |
| 1115 break; | |
| 1116 } | |
| 1117 | |
| 1118 case kVariableRepeat: { | |
| 1119 int repeats = source_.GetInt(); | |
| 1120 Object* object = current[-1]; | |
| 1121 DCHECK(!isolate->heap()->InNewSpace(object)); | |
| 1122 for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object); | |
| 1123 break; | |
| 1124 } | |
| 1125 | |
| 1126 STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots); | |
| 1127 STATIC_ASSERT(kNumberOfRootArrayConstants == 32); | |
| 1128 SIXTEEN_CASES(kRootArrayConstantsWithSkip) | |
| 1129 SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) { | |
| 1130 int skip = source_.GetInt(); | |
| 1131 current = reinterpret_cast<Object**>( | |
| 1132 reinterpret_cast<intptr_t>(current) + skip); | |
| 1133 // Fall through. | |
| 1134 } | |
| 1135 | |
| 1136 SIXTEEN_CASES(kRootArrayConstants) | |
| 1137 SIXTEEN_CASES(kRootArrayConstants + 16) { | |
| 1138 int root_id = data & kRootArrayConstantsMask; | |
| 1139 Object* object = isolate->heap()->roots_array_start()[root_id]; | |
| 1140 DCHECK(!isolate->heap()->InNewSpace(object)); | |
| 1141 UnalignedCopy(current++, &object); | |
| 1142 break; | |
| 1143 } | |
| 1144 | |
| 1145 STATIC_ASSERT(kNumberOfHotObjects == 8); | |
| 1146 FOUR_CASES(kHotObjectWithSkip) | |
| 1147 FOUR_CASES(kHotObjectWithSkip + 4) { | |
| 1148 int skip = source_.GetInt(); | |
| 1149 current = reinterpret_cast<Object**>( | |
| 1150 reinterpret_cast<Address>(current) + skip); | |
| 1151 // Fall through. | |
| 1152 } | |
| 1153 | |
| 1154 FOUR_CASES(kHotObject) | |
| 1155 FOUR_CASES(kHotObject + 4) { | |
| 1156 int index = data & kHotObjectMask; | |
| 1157 Object* hot_object = hot_objects_.Get(index); | |
| 1158 UnalignedCopy(current, &hot_object); | |
| 1159 if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) { | |
| 1160 Address current_address = reinterpret_cast<Address>(current); | |
| 1161 isolate->heap()->RecordWrite( | |
| 1162 current_object_address, | |
| 1163 static_cast<int>(current_address - current_object_address)); | |
| 1164 } | |
| 1165 current++; | |
| 1166 break; | |
| 1167 } | |
| 1168 | |
| 1169 // Deserialize raw data of fixed length from 1 to 32 words. | |
| 1170 STATIC_ASSERT(kNumberOfFixedRawData == 32); | |
| 1171 SIXTEEN_CASES(kFixedRawData) | |
| 1172 SIXTEEN_CASES(kFixedRawData + 16) { | |
| 1173 byte* raw_data_out = reinterpret_cast<byte*>(current); | |
| 1174 int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2; | |
| 1175 source_.CopyRaw(raw_data_out, size_in_bytes); | |
| 1176 current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes); | |
| 1177 break; | |
| 1178 } | |
| 1179 | |
| 1180 STATIC_ASSERT(kNumberOfFixedRepeat == 16); | |
| 1181 SIXTEEN_CASES(kFixedRepeat) { | |
| 1182 int repeats = data - kFixedRepeatStart; | |
| 1183 Object* object; | |
| 1184 UnalignedCopy(&object, current - 1); | |
| 1185 DCHECK(!isolate->heap()->InNewSpace(object)); | |
| 1186 for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object); | |
| 1187 break; | |
| 1188 } | |
| 1189 | |
| 1190 #undef SIXTEEN_CASES | |
| 1191 #undef FOUR_CASES | |
| 1192 | |
| 1193 default: | |
| 1194 CHECK(false); | |
| 1195 } | |
| 1196 } | |
| 1197 CHECK_EQ(limit, current); | |
| 1198 } | |
| 1199 | |
| 1200 | |
| 1201 Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink) | |
| 1202 : isolate_(isolate), | |
| 1203 sink_(sink), | |
| 1204 external_reference_encoder_(isolate), | |
| 1205 root_index_map_(isolate), | |
| 1206 code_address_map_(NULL), | |
| 1207 large_objects_total_size_(0), | |
| 1208 seen_large_objects_index_(0) { | |
| 1209 // The serializer is meant to be used only to generate initial heap images | |
| 1210 // from a context in which there is only one isolate. | |
| 1211 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) { | |
| 1212 pending_chunk_[i] = 0; | |
| 1213 max_chunk_size_[i] = static_cast<uint32_t>( | |
| 1214 MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i))); | |
| 1215 } | |
| 1216 } | |
| 1217 | |
| 1218 | |
| 1219 Serializer::~Serializer() { | |
| 1220 if (code_address_map_ != NULL) delete code_address_map_; | |
| 1221 } | |
| 1222 | |
| 1223 | |
| 1224 void StartupSerializer::SerializeStrongReferences() { | |
| 1225 Isolate* isolate = this->isolate(); | |
| 1226 // No active threads. | |
| 1227 CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse()); | |
| 1228 // No active or weak handles. | |
| 1229 CHECK(isolate->handle_scope_implementer()->blocks()->is_empty()); | |
| 1230 CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles()); | |
| 1231 CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles()); | |
| 1232 // We don't support serializing installed extensions. | |
| 1233 CHECK(!isolate->has_installed_extensions()); | |
| 1234 isolate->heap()->IterateSmiRoots(this); | |
| 1235 isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG); | |
| 1236 } | |
| 1237 | |
| 1238 | |
| 1239 void StartupSerializer::VisitPointers(Object** start, Object** end) { | |
| 1240 for (Object** current = start; current < end; current++) { | |
| 1241 if (start == isolate()->heap()->roots_array_start()) { | |
| 1242 root_index_wave_front_ = | |
| 1243 Max(root_index_wave_front_, static_cast<intptr_t>(current - start)); | |
| 1244 } | |
| 1245 if (ShouldBeSkipped(current)) { | |
| 1246 sink_->Put(kSkip, "Skip"); | |
| 1247 sink_->PutInt(kPointerSize, "SkipOneWord"); | |
| 1248 } else if ((*current)->IsSmi()) { | |
| 1249 sink_->Put(kOnePointerRawData, "Smi"); | |
| 1250 for (int i = 0; i < kPointerSize; i++) { | |
| 1251 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); | |
| 1252 } | |
| 1253 } else { | |
| 1254 SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0); | |
| 1255 } | |
| 1256 } | |
| 1257 } | |
| 1258 | |
| 1259 | |
| 1260 void PartialSerializer::Serialize(Object** o) { | |
| 1261 if ((*o)->IsContext()) { | |
| 1262 Context* context = Context::cast(*o); | |
| 1263 global_object_ = context->global_object(); | |
| 1264 back_reference_map()->AddGlobalProxy(context->global_proxy()); | |
| 1265 } | |
| 1266 VisitPointer(o); | |
| 1267 SerializeOutdatedContextsAsFixedArray(); | |
| 1268 Pad(); | |
| 1269 } | |
| 1270 | |
| 1271 | |
| 1272 void PartialSerializer::SerializeOutdatedContextsAsFixedArray() { | |
| 1273 int length = outdated_contexts_.length(); | |
| 1274 if (length == 0) { | |
| 1275 FixedArray* empty = isolate_->heap()->empty_fixed_array(); | |
| 1276 SerializeObject(empty, kPlain, kStartOfObject, 0); | |
| 1277 } else { | |
| 1278 // Serialize an imaginary fixed array containing outdated contexts. | |
| 1279 int size = FixedArray::SizeFor(length); | |
| 1280 Allocate(NEW_SPACE, size); | |
| 1281 sink_->Put(kNewObject + NEW_SPACE, "emulated FixedArray"); | |
| 1282 sink_->PutInt(size >> kObjectAlignmentBits, "FixedArray size in words"); | |
| 1283 Map* map = isolate_->heap()->fixed_array_map(); | |
| 1284 SerializeObject(map, kPlain, kStartOfObject, 0); | |
| 1285 Smi* length_smi = Smi::FromInt(length); | |
| 1286 sink_->Put(kOnePointerRawData, "Smi"); | |
| 1287 for (int i = 0; i < kPointerSize; i++) { | |
| 1288 sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte"); | |
| 1289 } | |
| 1290 for (int i = 0; i < length; i++) { | |
| 1291 BackReference back_ref = outdated_contexts_[i]; | |
| 1292 DCHECK(BackReferenceIsAlreadyAllocated(back_ref)); | |
| 1293 sink_->Put(kBackref + back_ref.space(), "BackRef"); | |
| 1294 sink_->PutInt(back_ref.reference(), "BackRefValue"); | |
| 1295 } | |
| 1296 } | |
| 1297 } | |
| 1298 | |
| 1299 | |
| 1300 bool Serializer::ShouldBeSkipped(Object** current) { | |
| 1301 Object** roots = isolate()->heap()->roots_array_start(); | |
| 1302 return current == &roots[Heap::kStoreBufferTopRootIndex] | |
| 1303 || current == &roots[Heap::kStackLimitRootIndex] | |
| 1304 || current == &roots[Heap::kRealStackLimitRootIndex]; | |
| 1305 } | |
| 1306 | |
| 1307 | |
| 1308 void Serializer::VisitPointers(Object** start, Object** end) { | |
| 1309 for (Object** current = start; current < end; current++) { | |
| 1310 if ((*current)->IsSmi()) { | |
| 1311 sink_->Put(kOnePointerRawData, "Smi"); | |
| 1312 for (int i = 0; i < kPointerSize; i++) { | |
| 1313 sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); | |
| 1314 } | |
| 1315 } else { | |
| 1316 SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0); | |
| 1317 } | |
| 1318 } | |
| 1319 } | |
| 1320 | |
| 1321 | |
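| // Sketch of the reservation list built below, assuming a space with two | |
| // completed chunks of sizes A and B plus a pending chunk P: | |
| //   [A] [B] [P*]   where * marks the last chunk of that space. | |
| // The other preallocated spaces follow in the same shape; the final entry | |
| // is the large object total size, also marked as last. | |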
| 1322 void Serializer::EncodeReservations( | |
| 1323 List<SerializedData::Reservation>* out) const { | |
| 1324 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) { | |
| 1325 for (int j = 0; j < completed_chunks_[i].length(); j++) { | |
| 1326 out->Add(SerializedData::Reservation(completed_chunks_[i][j])); | |
| 1327 } | |
| 1328 | |
| 1329 if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) { | |
| 1330 out->Add(SerializedData::Reservation(pending_chunk_[i])); | |
| 1331 } | |
| 1332 out->last().mark_as_last(); | |
| 1333 } | |
| 1334 | |
| 1335 out->Add(SerializedData::Reservation(large_objects_total_size_)); | |
| 1336 out->last().mark_as_last(); | |
| 1337 } | |
| 1338 | |
| 1339 | |
| 1340 // This ensures that the partial snapshot cache keeps things alive during GC and | |
| 1341 // tracks their movement. When it is called during serialization of the startup | |
| 1342 // snapshot, nothing happens. When the partial (context) snapshot is created, | |
| 1343 // this array is populated with the pointers that the partial snapshot will | |
| 1344 // need. As that happens we emit serialized objects to the startup snapshot | |
| 1345 // that correspond to the elements of this cache array. On deserialization we | |
| 1346 // therefore need to visit the cache array. This fills it up with pointers to | |
| 1347 // deserialized objects. | |
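| // For instance (illustrative): if the partial snapshot refers to an object | |
| // at cache index i, its byte stream contains "partial snapshot cache entry | |
| // i", so on deserialization slot i must already hold that startup object. | |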
| 1348 void SerializerDeserializer::Iterate(Isolate* isolate, | |
| 1349 ObjectVisitor* visitor) { | |
| 1350 if (isolate->serializer_enabled()) return; | |
| 1351 List<Object*>* cache = isolate->partial_snapshot_cache(); | |
| 1352 for (int i = 0;; ++i) { | |
| 1353 // Extend the array so it is ready to receive a value when deserializing. | |
| 1354 if (cache->length() <= i) cache->Add(Smi::FromInt(0)); | |
| 1355 visitor->VisitPointer(&cache->at(i)); | |
| 1356 // Sentinel is the undefined object, which is a root so it will not normally | |
| 1357 // be found in the cache. | |
| 1358 if (cache->at(i)->IsUndefined()) break; | |
| 1359 } | |
| 1360 } | |
| 1361 | |
| 1362 | |
| 1363 int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) { | |
| 1364 Isolate* isolate = this->isolate(); | |
| 1365 List<Object*>* cache = isolate->partial_snapshot_cache(); | |
| 1366 int new_index = cache->length(); | |
| 1367 | |
| 1368 int index = partial_cache_index_map_.LookupOrInsert(heap_object, new_index); | |
| 1369 if (index == PartialCacheIndexMap::kInvalidIndex) { | |
| 1370 // We didn't find the object in the cache. So we add it to the cache and | |
| 1371 // then visit the pointer so that it becomes part of the startup snapshot | |
| 1372 // and we can refer to it from the partial snapshot. | |
| 1373 cache->Add(heap_object); | |
| 1374 startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object)); | |
| 1375 // We don't recurse from the startup snapshot generator into the partial | |
| 1376 // snapshot generator. | |
| 1377 return new_index; | |
| 1378 } | |
| 1379 return index; | |
| 1380 } | |
| 1381 | |
| 1382 | |
| 1383 #ifdef DEBUG | |
| 1384 bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) { | |
| 1385 DCHECK(reference.is_valid()); | |
| 1386 DCHECK(!reference.is_source()); | |
| 1387 DCHECK(!reference.is_global_proxy()); | |
| 1388 AllocationSpace space = reference.space(); | |
| 1389 int chunk_index = reference.chunk_index(); | |
| 1390 if (space == LO_SPACE) { | |
| 1391 return chunk_index == 0 && | |
| 1392 reference.large_object_index() < seen_large_objects_index_; | |
| 1393 } else if (chunk_index == completed_chunks_[space].length()) { | |
| 1394 return reference.chunk_offset() < pending_chunk_[space]; | |
| 1395 } else { | |
| 1396 return chunk_index < completed_chunks_[space].length() && | |
| 1397 reference.chunk_offset() < completed_chunks_[space][chunk_index]; | |
| 1398 } | |
| 1399 } | |
| 1400 #endif // DEBUG | |
| 1401 | |
| 1402 | |
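| // A known object is emitted in one of three compact forms (sketch): | |
| //   kHotObject + index                one of the few most recent objects, | |
| //   kAttachedReference + ...          the source string or global proxy, | |
| //   kBackref[WithSkip] + space <ref>  anything already serialized and | |
| //                                     registered as a back reference. | |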
| 1403 bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code, | |
| 1404 WhereToPoint where_to_point, int skip) { | |
| 1405 if (how_to_code == kPlain && where_to_point == kStartOfObject) { | |
| 1406 // Encode a reference to a hot object by its index in the working set. | |
| 1407 int index = hot_objects_.Find(obj); | |
| 1408 if (index != HotObjectsList::kNotFound) { | |
| 1409 DCHECK(index >= 0 && index < kNumberOfHotObjects); | |
| 1410 if (FLAG_trace_serializer) { | |
| 1411 PrintF(" Encoding hot object %d:", index); | |
| 1412 obj->ShortPrint(); | |
| 1413 PrintF("\n"); | |
| 1414 } | |
| 1415 if (skip != 0) { | |
| 1416 sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip"); | |
| 1417 sink_->PutInt(skip, "HotObjectSkipDistance"); | |
| 1418 } else { | |
| 1419 sink_->Put(kHotObject + index, "HotObject"); | |
| 1420 } | |
| 1421 return true; | |
| 1422 } | |
| 1423 } | |
| 1424 BackReference back_reference = back_reference_map_.Lookup(obj); | |
| 1425 if (back_reference.is_valid()) { | |
| 1426 // Encode the location of an already deserialized object in order to write | |
| 1427 // its location into a later object. We can encode the location as an | |
| 1428 // offset from the start of the deserialized objects or as an offset | |
| 1429 // backwards from the current allocation pointer. | |
| 1430 if (back_reference.is_source()) { | |
| 1431 FlushSkip(skip); | |
| 1432 if (FLAG_trace_serializer) PrintF(" Encoding source object\n"); | |
| 1433 DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject); | |
| 1434 sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source"); | |
| 1435 sink_->PutInt(kSourceObjectReference, "kSourceObjectReference"); | |
| 1436 } else if (back_reference.is_global_proxy()) { | |
| 1437 FlushSkip(skip); | |
| 1438 if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n"); | |
| 1439 DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject); | |
| 1440 sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy"); | |
| 1441 sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference"); | |
| 1442 } else { | |
| 1443 if (FLAG_trace_serializer) { | |
| 1444 PrintF(" Encoding back reference to: "); | |
| 1445 obj->ShortPrint(); | |
| 1446 PrintF("\n"); | |
| 1447 } | |
| 1448 | |
| 1449 AllocationSpace space = back_reference.space(); | |
| 1450 if (skip == 0) { | |
| 1451 sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef"); | |
| 1452 } else { | |
| 1453 sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space, | |
| 1454 "BackRefWithSkip"); | |
| 1455 sink_->PutInt(skip, "BackRefSkipDistance"); | |
| 1456 } | |
| 1457 DCHECK(BackReferenceIsAlreadyAllocated(back_reference)); | |
| 1458 sink_->PutInt(back_reference.reference(), "BackRefValue"); | |
| 1459 | |
| 1460 hot_objects_.Add(obj); | |
| 1461 } | |
| 1462 return true; | |
| 1463 } | |
| 1464 return false; | |
| 1465 } | |
| 1466 | |
| 1467 | |
| 1468 void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code, | |
| 1469 WhereToPoint where_to_point, int skip) { | |
| 1470 DCHECK(!obj->IsJSFunction()); | |
| 1471 | |
| 1472 int root_index = root_index_map_.Lookup(obj); | |
| 1473 // We can only encode roots as such if they have already been serialized. | |
| 1474 // That applies to root indices below the wave front. | |
| 1475 if (root_index != RootIndexMap::kInvalidRootIndex && | |
| 1476 root_index < root_index_wave_front_) { | |
| 1477 PutRoot(root_index, obj, how_to_code, where_to_point, skip); | |
| 1478 return; | |
| 1479 } | |
| 1480 | |
| 1481 if (obj->IsCode() && Code::cast(obj)->kind() == Code::FUNCTION) { | |
| 1482 obj = isolate()->builtins()->builtin(Builtins::kCompileLazy); | |
| 1483 } | |
| 1484 | |
| 1485 if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return; | |
| 1486 | |
| 1487 FlushSkip(skip); | |
| 1488 | |
| 1489 // Object has not yet been serialized. Serialize it here. | |
| 1490 ObjectSerializer object_serializer(this, obj, sink_, how_to_code, | |
| 1491 where_to_point); | |
| 1492 object_serializer.Serialize(); | |
| 1493 } | |
| 1494 | |
| 1495 | |
| 1496 void StartupSerializer::SerializeWeakReferences() { | |
| 1497 // This phase comes right after the serialization (of the snapshot). | |
| 1498 // After we have done the partial serialization the partial snapshot cache | |
| 1499 // will contain some references needed to decode the partial snapshot. We | |
| 1500 // add one entry with 'undefined' which is the sentinel that the deserializer | |
| 1501 // uses to know it is done deserializing the array. | |
| 1502 Object* undefined = isolate()->heap()->undefined_value(); | |
| 1503 VisitPointer(&undefined); | |
| 1504 isolate()->heap()->IterateWeakRoots(this, VISIT_ALL); | |
| 1505 Pad(); | |
| 1506 } | |
| 1507 | |
| 1508 | |
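| // Root encoding (sketch): the first kNumberOfRootArrayConstants roots that | |
| // are not in new space fit in one opcode byte, | |
| //   kRootArrayConstants + root_index, | |
| // while any other root costs an opcode plus a variable-length root_index. | |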
| 1509 void Serializer::PutRoot(int root_index, | |
| 1510 HeapObject* object, | |
| 1511 SerializerDeserializer::HowToCode how_to_code, | |
| 1512 SerializerDeserializer::WhereToPoint where_to_point, | |
| 1513 int skip) { | |
| 1514 if (FLAG_trace_serializer) { | |
| 1515 PrintF(" Encoding root %d:", root_index); | |
| 1516 object->ShortPrint(); | |
| 1517 PrintF("\n"); | |
| 1518 } | |
| 1519 | |
| 1520 if (how_to_code == kPlain && where_to_point == kStartOfObject && | |
| 1521 root_index < kNumberOfRootArrayConstants && | |
| 1522 !isolate()->heap()->InNewSpace(object)) { | |
| 1523 if (skip == 0) { | |
| 1524 sink_->Put(kRootArrayConstants + root_index, "RootConstant"); | |
| 1525 } else { | |
| 1526 sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant"); | |
| 1527 sink_->PutInt(skip, "SkipInPutRoot"); | |
| 1528 } | |
| 1529 } else { | |
| 1530 FlushSkip(skip); | |
| 1531 sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization"); | |
| 1532 sink_->PutInt(root_index, "root_index"); | |
| 1533 } | |
| 1534 } | |
| 1535 | |
| 1536 | |
| 1537 void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code, | |
| 1538 WhereToPoint where_to_point, int skip) { | |
| 1539 if (obj->IsMap()) { | |
| 1540 // The code-caches link to context-specific code objects, which | |
| 1541 // the startup and context serializers cannot currently handle. | |
| 1542 DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array()); | |
| 1543 } | |
| 1544 | |
| 1545 // Replace typed arrays by undefined. | |
| 1546 if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value(); | |
| 1547 | |
| 1548 int root_index = root_index_map_.Lookup(obj); | |
| 1549 if (root_index != RootIndexMap::kInvalidRootIndex) { | |
| 1550 PutRoot(root_index, obj, how_to_code, where_to_point, skip); | |
| 1551 return; | |
| 1552 } | |
| 1553 | |
| 1554 if (ShouldBeInThePartialSnapshotCache(obj)) { | |
| 1555 FlushSkip(skip); | |
| 1556 | |
| 1557 int cache_index = PartialSnapshotCacheIndex(obj); | |
| 1558 sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point, | |
| 1559 "PartialSnapshotCache"); | |
| 1560 sink_->PutInt(cache_index, "partial_snapshot_cache_index"); | |
| 1561 return; | |
| 1562 } | |
| 1563 | |
| 1564 // Pointers from the partial snapshot to the objects in the startup snapshot | |
| 1565 // should go through the root array or through the partial snapshot cache. | |
| 1566 // If this is not the case you may have to add something to the root array. | |
| 1567 DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid()); | |
| 1568 // All the internalized strings that the partial snapshot needs should be | |
| 1569 // either in the root table or in the partial snapshot cache. | |
| 1570 DCHECK(!obj->IsInternalizedString()); | |
| 1571 | |
| 1572 if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return; | |
| 1573 | |
| 1574 FlushSkip(skip); | |
| 1575 | |
| 1576 // Clear literal boilerplates. | |
| 1577 if (obj->IsJSFunction() && !JSFunction::cast(obj)->shared()->bound()) { | |
| 1578 FixedArray* literals = JSFunction::cast(obj)->literals(); | |
| 1579 for (int i = 0; i < literals->length(); i++) literals->set_undefined(i); | |
| 1580 } | |
| 1581 | |
| 1582 // Object has not yet been serialized. Serialize it here. | |
| 1583 ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point); | |
| 1584 serializer.Serialize(); | |
| 1585 | |
| 1586 if (obj->IsContext() && | |
| 1587 Context::cast(obj)->global_object() == global_object_) { | |
| 1588 // Context refers to the current global object. This reference will | |
| 1589 // become outdated after deserialization. | |
| 1590 BackReference back_reference = back_reference_map_.Lookup(obj); | |
| 1591 DCHECK(back_reference.is_valid()); | |
| 1592 outdated_contexts_.Add(back_reference); | |
| 1593 } | |
| 1594 } | |
| 1595 | |
| 1596 | |
| 1597 void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space, | |
| 1598 int size, Map* map) { | |
| 1599 if (serializer_->code_address_map_) { | |
| 1600 const char* code_name = | |
| 1601 serializer_->code_address_map_->Lookup(object_->address()); | |
| 1602 LOG(serializer_->isolate_, | |
| 1603 CodeNameEvent(object_->address(), sink_->Position(), code_name)); | |
| 1604 LOG(serializer_->isolate_, | |
| 1605 SnapshotPositionEvent(object_->address(), sink_->Position())); | |
| 1606 } | |
| 1607 | |
| 1608 BackReference back_reference; | |
| 1609 if (space == LO_SPACE) { | |
| 1610 sink_->Put(kNewObject + reference_representation_ + space, | |
| 1611 "NewLargeObject"); | |
| 1612 sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords"); | |
| 1613 if (object_->IsCode()) { | |
| 1614 sink_->Put(EXECUTABLE, "executable large object"); | |
| 1615 } else { | |
| 1616 sink_->Put(NOT_EXECUTABLE, "not executable large object"); | |
| 1617 } | |
| 1618 back_reference = serializer_->AllocateLargeObject(size); | |
| 1619 } else { | |
| 1620 bool needs_double_align = false; | |
| 1621 if (object_->NeedsToEnsureDoubleAlignment()) { | |
| 1622 // Add wiggle room for double alignment padding. | |
| 1623 back_reference = serializer_->Allocate(space, size + kPointerSize); | |
| 1624 needs_double_align = true; | |
| 1625 } else { | |
| 1626 back_reference = serializer_->Allocate(space, size); | |
| 1627 } | |
| 1628 sink_->Put(kNewObject + reference_representation_ + space, "NewObject"); | |
| 1629 if (needs_double_align) | |
| 1630 sink_->PutInt(kDoubleAlignmentSentinel, "DoubleAlignSentinel"); | |
| 1631 int encoded_size = size >> kObjectAlignmentBits; | |
| 1632 DCHECK_NE(kDoubleAlignmentSentinel, encoded_size); | |
| 1633 sink_->PutInt(encoded_size, "ObjectSizeInWords"); | |
| 1634 } | |
| 1635 | |
| 1636 // Mark this object as already serialized. | |
| 1637 serializer_->back_reference_map()->Add(object_, back_reference); | |
| 1638 | |
| 1639 // Serialize the map (first word of the object). | |
| 1640 serializer_->SerializeObject(map, kPlain, kStartOfObject, 0); | |
| 1641 } | |
| 1642 | |
| 1643 | |
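| // Stream layout for an external string disguised as a sequential string | |
| // (sketch): prologue (map + size), kVariableRawData + length, the header | |
| // bytes past the map, the characters copied out of the external resource, | |
| // zero padding up to object alignment, and a trailing kSkip so the stream | |
| // position stays consistent with the original object. | |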
| 1644 void Serializer::ObjectSerializer::SerializeExternalString() { | |
| 1645 // Instead of serializing this as an external string, we serialize | |
| 1646 // an imaginary sequential string with the same content. | |
| 1647 Isolate* isolate = serializer_->isolate(); | |
| 1648 DCHECK(object_->IsExternalString()); | |
| 1649 DCHECK(object_->map() != isolate->heap()->native_source_string_map()); | |
| 1650 ExternalString* string = ExternalString::cast(object_); | |
| 1651 int length = string->length(); | |
| 1652 Map* map; | |
| 1653 int content_size; | |
| 1654 int allocation_size; | |
| 1655 const byte* resource; | |
| 1656 // Find the map and size for the imaginary sequential string. | |
| 1657 bool internalized = object_->IsInternalizedString(); | |
| 1658 if (object_->IsExternalOneByteString()) { | |
| 1659 map = internalized ? isolate->heap()->one_byte_internalized_string_map() | |
| 1660 : isolate->heap()->one_byte_string_map(); | |
| 1661 allocation_size = SeqOneByteString::SizeFor(length); | |
| 1662 content_size = length * kCharSize; | |
| 1663 resource = reinterpret_cast<const byte*>( | |
| 1664 ExternalOneByteString::cast(string)->resource()->data()); | |
| 1665 } else { | |
| 1666 map = internalized ? isolate->heap()->internalized_string_map() | |
| 1667 : isolate->heap()->string_map(); | |
| 1668 allocation_size = SeqTwoByteString::SizeFor(length); | |
| 1669 content_size = length * kShortSize; | |
| 1670 resource = reinterpret_cast<const byte*>( | |
| 1671 ExternalTwoByteString::cast(string)->resource()->data()); | |
| 1672 } | |
| 1673 | |
| 1674 AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize) | |
| 1675 ? LO_SPACE | |
| 1676 : OLD_SPACE; | |
| 1677 SerializePrologue(space, allocation_size, map); | |
| 1678 | |
| 1679 // Output the rest of the imaginary string. | |
| 1680 int bytes_to_output = allocation_size - HeapObject::kHeaderSize; | |
| 1681 | |
| 1682 // Output raw data header. Do not bother with common raw length cases here. | |
| 1683 sink_->Put(kVariableRawData, "RawDataForString"); | |
| 1684 sink_->PutInt(bytes_to_output, "length"); | |
| 1685 | |
| 1686 // Serialize string header (except for map). | |
| 1687 Address string_start = string->address(); | |
| 1688 for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) { | |
| 1689 sink_->PutSection(string_start[i], "StringHeader"); | |
| 1690 } | |
| 1691 | |
| 1692 // Serialize string content. | |
| 1693 sink_->PutRaw(resource, content_size, "StringContent"); | |
| 1694 | |
| 1695 // Since the allocation size is rounded up to object alignment, there | |
| 1696 // may be left-over bytes that need to be padded. | |
| 1697 int padding_size = allocation_size - SeqString::kHeaderSize - content_size; | |
| 1698 DCHECK(0 <= padding_size && padding_size < kObjectAlignment); | |
| 1699 for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding"); | |
| 1700 | |
| 1701 sink_->Put(kSkip, "SkipAfterString"); | |
| 1702 sink_->PutInt(bytes_to_output, "SkipDistance"); | |
| 1703 } | |
| 1704 | |
| 1705 | |
| 1706 void Serializer::ObjectSerializer::Serialize() { | |
| 1707 if (FLAG_trace_serializer) { | |
| 1708 PrintF(" Encoding heap object: "); | |
| 1709 object_->ShortPrint(); | |
| 1710 PrintF("\n"); | |
| 1711 } | |
| 1712 | |
| 1713 // We cannot serialize typed array objects correctly. | |
| 1714 DCHECK(!object_->IsJSTypedArray()); | |
| 1715 | |
| 1716 if (object_->IsScript()) { | |
| 1717 // Clear cached line ends. | |
| 1718 Object* undefined = serializer_->isolate()->heap()->undefined_value(); | |
| 1719 Script::cast(object_)->set_line_ends(undefined); | |
| 1720 } | |
| 1721 | |
| 1722 if (object_->IsExternalString()) { | |
| 1723 Heap* heap = serializer_->isolate()->heap(); | |
| 1724 if (object_->map() != heap->native_source_string_map()) { | |
| 1725 // Usually we cannot recreate resources for external strings. To work | |
| 1726 // around this, external strings are serialized to look like ordinary | |
| 1727 // sequential strings. | |
| 1728 // The exception are native source code strings, since we can recreate | |
| 1729 // their resources. In that case we fall through and leave it to | |
| 1730 // VisitExternalOneByteString further down. | |
| 1731 SerializeExternalString(); | |
| 1732 return; | |
| 1733 } | |
| 1734 } | |
| 1735 | |
| 1736 int size = object_->Size(); | |
| 1737 Map* map = object_->map(); | |
| 1738 AllocationSpace space = | |
| 1739 MemoryChunk::FromAddress(object_->address())->owner()->identity(); | |
| 1740 SerializePrologue(space, size, map); | |
| 1741 | |
| 1742 // Serialize the rest of the object. | |
| 1743 CHECK_EQ(0, bytes_processed_so_far_); | |
| 1744 bytes_processed_so_far_ = kPointerSize; | |
| 1745 | |
| 1746 object_->IterateBody(map->instance_type(), size, this); | |
| 1747 OutputRawData(object_->address() + size); | |
| 1748 } | |
| 1749 | |
| 1750 | |
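| // Runs of identical pointers are folded (sketch): N consecutive slots | |
| // holding the same immortal immovable root become kFixedRepeatStart + N, | |
| // or kVariableRepeat plus an explicit count for long runs. For instance, | |
| // a FixedArray freshly filled with undefined_value serializes its elements | |
| // as a single repeat entry instead of N back references. | |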
| 1751 void Serializer::ObjectSerializer::VisitPointers(Object** start, | |
| 1752 Object** end) { | |
| 1753 Object** current = start; | |
| 1754 while (current < end) { | |
| 1755 while (current < end && (*current)->IsSmi()) current++; | |
| 1756 if (current < end) OutputRawData(reinterpret_cast<Address>(current)); | |
| 1757 | |
| 1758 while (current < end && !(*current)->IsSmi()) { | |
| 1759 HeapObject* current_contents = HeapObject::cast(*current); | |
| 1760 int root_index = serializer_->root_index_map()->Lookup(current_contents); | |
| 1761 // Repeats are not subject to the write barrier so we can only use | |
| 1762 // immortal immovable root members. They are never in new space. | |
| 1763 if (current != start && root_index != RootIndexMap::kInvalidRootIndex && | |
| 1764 Heap::RootIsImmortalImmovable(root_index) && | |
| 1765 current_contents == current[-1]) { | |
| 1766 DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents)); | |
| 1767 int repeat_count = 1; | |
| 1768 while (&current[repeat_count] < end - 1 && | |
| 1769 current[repeat_count] == current_contents) { | |
| 1770 repeat_count++; | |
| 1771 } | |
| 1772 current += repeat_count; | |
| 1773 bytes_processed_so_far_ += repeat_count * kPointerSize; | |
| 1774 if (repeat_count > kNumberOfFixedRepeat) { | |
| 1775 sink_->Put(kVariableRepeat, "VariableRepeat"); | |
| 1776 sink_->PutInt(repeat_count, "repeat count"); | |
| 1777 } else { | |
| 1778 sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat"); | |
| 1779 } | |
| 1780 } else { | |
| 1781 serializer_->SerializeObject( | |
| 1782 current_contents, kPlain, kStartOfObject, 0); | |
| 1783 bytes_processed_so_far_ += kPointerSize; | |
| 1784 current++; | |
| 1785 } | |
| 1786 } | |
| 1787 } | |
| 1788 } | |
| 1789 | |
| 1790 | |
| 1791 void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) { | |
| 1792 // Out-of-line constant pool entries will be visited by the ConstantPoolArray. | |
| 1793 if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return; | |
| 1794 | |
| 1795 int skip = OutputRawData(rinfo->target_address_address(), | |
| 1796 kCanReturnSkipInsteadOfSkipping); | |
| 1797 HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain; | |
| 1798 Object* object = rinfo->target_object(); | |
| 1799 serializer_->SerializeObject(HeapObject::cast(object), how_to_code, | |
| 1800 kStartOfObject, skip); | |
| 1801 bytes_processed_so_far_ += rinfo->target_address_size(); | |
| 1802 } | |
| 1803 | |
| 1804 | |
| 1805 void Serializer::ObjectSerializer::VisitExternalReference(Address* p) { | |
| 1806 int skip = OutputRawData(reinterpret_cast<Address>(p), | |
| 1807 kCanReturnSkipInsteadOfSkipping); | |
| 1808 sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef"); | |
| 1809 sink_->PutInt(skip, "SkipB4ExternalRef"); | |
| 1810 Address target = *p; | |
| 1811 sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id"); | |
| 1812 bytes_processed_so_far_ += kPointerSize; | |
| 1813 } | |
| 1814 | |
| 1815 | |
| 1816 void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) { | |
| 1817 int skip = OutputRawData(rinfo->target_address_address(), | |
| 1818 kCanReturnSkipInsteadOfSkipping); | |
| 1819 HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain; | |
| 1820 sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef"); | |
| 1821 sink_->PutInt(skip, "SkipB4ExternalRef"); | |
| 1822 Address target = rinfo->target_external_reference(); | |
| 1823 sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id"); | |
| 1824 bytes_processed_so_far_ += rinfo->target_address_size(); | |
| 1825 } | |
| 1826 | |
| 1827 | |
| 1828 void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) { | |
| 1829 // We can only refer to internal references of code that has been output. | |
| 1830 DCHECK(is_code_object_ && code_has_been_output_); | |
| 1831 // We do not use a skip from the last patched pc to find the pc to patch, since | |
| 1832 // target_address_address may not return addresses in ascending order when | |
| 1833 // used for internal references. External references may be stored at the | |
| 1834 // end of the code in the constant pool, whereas internal references are | |
| 1835 // inline. That would cause the skip to be negative. Instead, we store the | |
| 1836 // offset from code entry. | |
| 1837 Address entry = Code::cast(object_)->entry(); | |
| 1838 intptr_t pc_offset = rinfo->target_internal_reference_address() - entry; | |
| 1839 intptr_t target_offset = rinfo->target_internal_reference() - entry; | |
| 1840 DCHECK(0 <= pc_offset && | |
| 1841 pc_offset <= Code::cast(object_)->instruction_size()); | |
| 1842 DCHECK(0 <= target_offset && | |
| 1843 target_offset <= Code::cast(object_)->instruction_size()); | |
| 1844 sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE | |
| 1845 ? kInternalReference | |
| 1846 : kInternalReferenceEncoded, | |
| 1847 "InternalRef"); | |
| 1848 sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address"); | |
| 1849 sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value"); | |
| 1850 } | |
| 1851 | |
| 1852 | |
| 1853 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) { | |
| 1854 int skip = OutputRawData(rinfo->target_address_address(), | |
| 1855 kCanReturnSkipInsteadOfSkipping); | |
| 1856 HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain; | |
| 1857 sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef"); | |
| 1858 sink_->PutInt(skip, "SkipB4ExternalRef"); | |
| 1859 Address target = rinfo->target_address(); | |
| 1860 sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id"); | |
| 1861 bytes_processed_so_far_ += rinfo->target_address_size(); | |
| 1862 } | |
| 1863 | |
| 1864 | |
| 1865 void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { | |
| 1866 // Out-of-line constant pool entries will be visited by the ConstantPoolArray. | |
| 1867 if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return; | |
| 1868 | |
| 1869 int skip = OutputRawData(rinfo->target_address_address(), | |
| 1870 kCanReturnSkipInsteadOfSkipping); | |
| 1871 Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address()); | |
| 1872 serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip); | |
| 1873 bytes_processed_so_far_ += rinfo->target_address_size(); | |
| 1874 } | |
| 1875 | |
| 1876 | |
| 1877 void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) { | |
| 1878 int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping); | |
| 1879 Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); | |
| 1880 serializer_->SerializeObject(object, kPlain, kInnerPointer, skip); | |
| 1881 bytes_processed_so_far_ += kPointerSize; | |
| 1882 } | |
| 1883 | |
| 1884 | |
| 1885 void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) { | |
| 1886 // Out-of-line constant pool entries will be visited by the ConstantPoolArray. | |
| 1887 if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return; | |
| 1888 | |
| 1889 int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping); | |
| 1890 Cell* object = Cell::cast(rinfo->target_cell()); | |
| 1891 serializer_->SerializeObject(object, kPlain, kInnerPointer, skip); | |
| 1892 bytes_processed_so_far_ += kPointerSize; | |
| 1893 } | |
| 1894 | |
| 1895 | |
| 1896 void Serializer::ObjectSerializer::VisitExternalOneByteString( | |
| 1897 v8::String::ExternalOneByteStringResource** resource_pointer) { | |
| 1898 Address references_start = reinterpret_cast<Address>(resource_pointer); | |
| 1899 OutputRawData(references_start); | |
| 1900 for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { | |
| 1901 Object* source = | |
| 1902 serializer_->isolate()->heap()->natives_source_cache()->get(i); | |
| 1903 if (!source->IsUndefined()) { | |
| 1904 ExternalOneByteString* string = ExternalOneByteString::cast(source); | |
| 1905 typedef v8::String::ExternalOneByteStringResource Resource; | |
| 1906 const Resource* resource = string->resource(); | |
| 1907 if (resource == *resource_pointer) { | |
| 1908 sink_->Put(kNativesStringResource, "NativesStringResource"); | |
| 1909 sink_->PutSection(i, "NativesStringResourceEnd"); | |
| 1910 bytes_processed_so_far_ += sizeof(resource); | |
| 1911 return; | |
| 1912 } | |
| 1913 } | |
| 1914 } | |
| 1915 // One of the strings in the natives cache should match the resource. We | |
| 1916 // don't expect any other kinds of external strings here. | |
| 1917 UNREACHABLE(); | |
| 1918 } | |
| 1919 | |
| 1920 | |
| 1921 Address Serializer::ObjectSerializer::PrepareCode() { | |
| 1922 // To make snapshots reproducible, we make a copy of the code object | |
| 1923 // and wipe all pointers in the copy, which we then serialize. | |
| 1924 Code* original = Code::cast(object_); | |
| 1925 Code* code = serializer_->CopyCode(original); | |
| 1926 // Code age headers are not serializable. | |
| 1927 code->MakeYoung(serializer_->isolate()); | |
| 1928 int mode_mask = RelocInfo::kCodeTargetMask | | |
| 1929 RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | | |
| 1930 RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | | |
| 1931 RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) | | |
| 1932 RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) | | |
| 1933 RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED); | |
| 1934 for (RelocIterator it(code, mode_mask); !it.done(); it.next()) { | |
| 1935 RelocInfo* rinfo = it.rinfo(); | |
| 1936 if (!(FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool())) { | |
| 1937 rinfo->WipeOut(); | |
| 1938 } | |
| 1939 } | |
| 1940 // We need to wipe out the header fields *after* wiping out the | |
| 1941 // relocations, because some of these fields are needed for the latter. | |
| 1942 code->WipeOutHeader(); | |
| 1943 return code->address(); | |
| 1944 } | |
| 1945 | |
| 1946 | |
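| // OutputRawData copies the bytes between the last visited pointer and | |
| // up_to verbatim. Small pointer-aligned runs use the one-byte form | |
| // kFixedRawDataStart + size_in_words; everything else (and always the | |
| // body of a code object) uses kVariableRawData plus an explicit length. | |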
| 1947 int Serializer::ObjectSerializer::OutputRawData( | |
| 1948 Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) { | |
| 1949 Address object_start = object_->address(); | |
| 1950 int base = bytes_processed_so_far_; | |
| 1951 int up_to_offset = static_cast<int>(up_to - object_start); | |
| 1952 int to_skip = up_to_offset - bytes_processed_so_far_; | |
| 1953 int bytes_to_output = to_skip; | |
| 1954 bytes_processed_so_far_ += to_skip; | |
| 1955 // This assert will fail if the reloc info gives us the target_address_address | |
| 1956 // locations in a non-ascending order. Luckily that doesn't happen. | |
| 1957 DCHECK(to_skip >= 0); | |
| 1958 bool outputting_code = false; | |
| 1959 if (to_skip != 0 && is_code_object_ && !code_has_been_output_) { | |
| 1960 // Output the code all at once and fix later. | |
| 1961 bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_; | |
| 1962 outputting_code = true; | |
| 1963 code_has_been_output_ = true; | |
| 1964 } | |
| 1965 if (bytes_to_output != 0 && (!is_code_object_ || outputting_code)) { | |
| 1966 if (!outputting_code && bytes_to_output == to_skip && | |
| 1967 IsAligned(bytes_to_output, kPointerAlignment) && | |
| 1968 bytes_to_output <= kNumberOfFixedRawData * kPointerSize) { | |
| 1969 int size_in_words = bytes_to_output >> kPointerSizeLog2; | |
| 1970 sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData"); | |
| 1971 to_skip = 0; // This instruction includes skip. | |
| 1972 } else { | |
| 1973 // We always end up here if we are outputting the code of a code object. | |
| 1974 sink_->Put(kVariableRawData, "VariableRawData"); | |
| 1975 sink_->PutInt(bytes_to_output, "length"); | |
| 1976 } | |
| 1977 | |
| 1978 if (is_code_object_) object_start = PrepareCode(); | |
| 1979 | |
| 1980 const char* description = is_code_object_ ? "Code" : "Byte"; | |
| 1981 #ifdef MEMORY_SANITIZER | |
| 1982 // Object sizes are usually rounded up with uninitialized padding space. | |
| 1983 MSAN_MEMORY_IS_INITIALIZED(object_start + base, bytes_to_output); | |
| 1984 #endif // MEMORY_SANITIZER | |
| 1985 sink_->PutRaw(object_start + base, bytes_to_output, description); | |
| 1986 } | |
| 1987 if (to_skip != 0 && return_skip == kIgnoringReturn) { | |
| 1988 sink_->Put(kSkip, "Skip"); | |
| 1989 sink_->PutInt(to_skip, "SkipDistance"); | |
| 1990 to_skip = 0; | |
| 1991 } | |
| 1992 return to_skip; | |
| 1993 } | |
| 1994 | |
| 1995 | |
| 1996 BackReference Serializer::AllocateLargeObject(int size) { | |
| 1997 // Large objects are allocated one-by-one when deserializing. We do not | |
| 1998 // have to keep track of multiple chunks. | |
| 1999 large_objects_total_size_ += size; | |
| 2000 return BackReference::LargeObjectReference(seen_large_objects_index_++); | |
| 2001 } | |
| 2002 | |
| 2003 | |
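| // Worked example for the chunk accounting below (illustrative numbers): | |
| // with a 16 KB page area, allocating 12 KB and then 8 KB emits kNextChunk, | |
| // records the 12 KB chunk as completed, and starts the 8 KB object at | |
| // offset 0 of pending chunk 1, which is what its back reference encodes. | |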
| 2004 BackReference Serializer::Allocate(AllocationSpace space, int size) { | |
| 2005 DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces); | |
| 2006 DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space))); | |
| 2007 uint32_t new_chunk_size = pending_chunk_[space] + size; | |
| 2008 if (new_chunk_size > max_chunk_size(space)) { | |
| 2009 // The new chunk size would not fit onto a single page. Complete the | |
| 2010 // current chunk and start a new one. | |
| 2011 sink_->Put(kNextChunk, "NextChunk"); | |
| 2012 sink_->Put(space, "NextChunkSpace"); | |
| 2013 completed_chunks_[space].Add(pending_chunk_[space]); | |
| 2014 DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex); | |
| 2015 pending_chunk_[space] = 0; | |
| 2016 new_chunk_size = size; | |
| 2017 } | |
| 2018 uint32_t offset = pending_chunk_[space]; | |
| 2019 pending_chunk_[space] = new_chunk_size; | |
| 2020 return BackReference::Reference(space, completed_chunks_[space].length(), | |
| 2021 offset); | |
| 2022 } | |
| 2023 | |
| 2024 | |
| 2025 void Serializer::Pad() { | |
| 2026 // The non-branching GetInt will read up to 3 bytes too far, so we need | |
| 2027 // to pad the snapshot to make sure we don't read over the end. | |
| 2028 for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) { | |
| 2029 sink_->Put(kNop, "Padding"); | |
| 2030 } | |
| 2031 // Pad up to pointer size for checksum. | |
| 2032 while (!IsAligned(sink_->Position(), kPointerAlignment)) { | |
| 2033 sink_->Put(kNop, "Padding"); | |
| 2034 } | |
| 2035 } | |
| 2036 | |
| 2037 | |
| 2038 void Serializer::InitializeCodeAddressMap() { | |
| 2039 isolate_->InitializeLoggingAndCounters(); | |
| 2040 code_address_map_ = new CodeAddressMap(isolate_); | |
| 2041 } | |
| 2042 | |
| 2043 | |
| 2044 Code* Serializer::CopyCode(Code* code) { | |
| 2045 code_buffer_.Rewind(0); // Clear buffer without deleting backing store. | |
| 2046 int size = code->CodeSize(); | |
| 2047 code_buffer_.AddAll(Vector<byte>(code->address(), size)); | |
| 2048 return Code::cast(HeapObject::FromAddress(&code_buffer_.first())); | |
| 2049 } | |
| 2050 | |
| 2051 | |
| 2052 ScriptData* CodeSerializer::Serialize(Isolate* isolate, | |
| 2053 Handle<SharedFunctionInfo> info, | |
| 2054 Handle<String> source) { | |
| 2055 base::ElapsedTimer timer; | |
| 2056 if (FLAG_profile_deserialization) timer.Start(); | |
| 2057 if (FLAG_trace_serializer) { | |
| 2058 PrintF("[Serializing from"); | |
| 2059 Object* script = info->script(); | |
| 2060 if (script->IsScript()) Script::cast(script)->name()->ShortPrint(); | |
| 2061 PrintF("]\n"); | |
| 2062 } | |
| 2063 | |
| 2064 // Serialize code object. | |
| 2065 SnapshotByteSink sink(info->code()->CodeSize() * 2); | |
| 2066 CodeSerializer cs(isolate, &sink, *source, info->code()); | |
| 2067 DisallowHeapAllocation no_gc; | |
| 2068 Object** location = Handle<Object>::cast(info).location(); | |
| 2069 cs.VisitPointer(location); | |
| 2070 cs.Pad(); | |
| 2071 | |
| 2072 SerializedCodeData data(sink.data(), cs); | |
| 2073 ScriptData* script_data = data.GetScriptData(); | |
| 2074 | |
| 2075 if (FLAG_profile_deserialization) { | |
| 2076 double ms = timer.Elapsed().InMillisecondsF(); | |
| 2077 int length = script_data->length(); | |
| 2078 PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms); | |
| 2079 } | |
| 2080 | |
| 2081 return script_data; | |
| 2082 } | |
| 2083 | |
| 2084 | |
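| // Code objects are dispatched on their kind (sketch): builtins are stored | |
| // as a builtin_index, stubs as a stub key resolved through the attached | |
| // objects list, ICs decay to one of those two forms, and FUNCTION code is | |
| // replaced by the CompileLazy builtin unless it is the toplevel code. | |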
| 2085 void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code, | |
| 2086 WhereToPoint where_to_point, int skip) { | |
| 2087 int root_index = root_index_map_.Lookup(obj); | |
| 2088 if (root_index != RootIndexMap::kInvalidRootIndex) { | |
| 2089 PutRoot(root_index, obj, how_to_code, where_to_point, skip); | |
| 2090 return; | |
| 2091 } | |
| 2092 | |
| 2093 if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return; | |
| 2094 | |
| 2095 FlushSkip(skip); | |
| 2096 | |
| 2097 if (obj->IsCode()) { | |
| 2098 Code* code_object = Code::cast(obj); | |
| 2099 switch (code_object->kind()) { | |
| 2100 case Code::OPTIMIZED_FUNCTION: // No optimized code compiled yet. | |
| 2101 case Code::HANDLER: // No handlers patched in yet. | |
| 2102 case Code::REGEXP: // No regexp literals initialized yet. | |
| 2103 case Code::NUMBER_OF_KINDS: // Pseudo enum value. | |
| 2104 CHECK(false); | |
| 2105 case Code::BUILTIN: | |
| 2106 SerializeBuiltin(code_object->builtin_index(), how_to_code, | |
| 2107 where_to_point); | |
| 2108 return; | |
| 2109 case Code::STUB: | |
| 2110 SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point); | |
| 2111 return; | |
| 2112 #define IC_KIND_CASE(KIND) case Code::KIND: | |
| 2113 IC_KIND_LIST(IC_KIND_CASE) | |
| 2114 #undef IC_KIND_CASE | |
| 2115 SerializeIC(code_object, how_to_code, where_to_point); | |
| 2116 return; | |
| 2117 case Code::FUNCTION: | |
| 2118 DCHECK(code_object->has_reloc_info_for_serialization()); | |
| 2119 // Only serialize the code for the toplevel function unless specified | |
| 2120 // by flag. Replace code of inner functions by the lazy compile builtin. | |
| 2121 // This is safe, as checked in Compiler::BuildFunctionInfo. | |
| 2122 if (code_object != main_code_ && !FLAG_serialize_inner) { | |
| 2123 SerializeBuiltin(Builtins::kCompileLazy, how_to_code, where_to_point); | |
| 2124 } else { | |
| 2125 SerializeGeneric(code_object, how_to_code, where_to_point); | |
| 2126 } | |
| 2127 return; | |
| 2128 } | |
| 2129 UNREACHABLE(); | |
| 2130 } | |
| 2131 | |
| 2132 // Past this point we should not see any (context-specific) maps anymore. | |
| 2133 CHECK(!obj->IsMap()); | |
| 2134 // There should be no references to the global object embedded. | |
| 2135 CHECK(!obj->IsJSGlobalProxy() && !obj->IsGlobalObject()); | |
| 2136 // There should be no hash table embedded. They would require rehashing. | |
| 2137 CHECK(!obj->IsHashTable()); | |
| 2138 // We expect no instantiated function objects or contexts. | |
| 2139 CHECK(!obj->IsJSFunction() && !obj->IsContext()); | |
| 2140 | |
| 2141 SerializeGeneric(obj, how_to_code, where_to_point); | |
| 2142 } | |
| 2143 | |
| 2144 | |
| 2145 void CodeSerializer::SerializeGeneric(HeapObject* heap_object, | |
| 2146 HowToCode how_to_code, | |
| 2147 WhereToPoint where_to_point) { | |
| 2148 if (heap_object->IsInternalizedString()) num_internalized_strings_++; | |
| 2149 | |
| 2150 // Object has not yet been serialized. Serialize it here. | |
| 2151 ObjectSerializer serializer(this, heap_object, sink_, how_to_code, | |
| 2152 where_to_point); | |
| 2153 serializer.Serialize(); | |
| 2154 } | |
| 2155 | |
| 2156 | |
| 2157 void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code, | |
| 2158 WhereToPoint where_to_point) { | |
| 2159 DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) || | |
| 2160 (how_to_code == kPlain && where_to_point == kInnerPointer) || | |
| 2161 (how_to_code == kFromCode && where_to_point == kInnerPointer)); | |
| 2162 DCHECK_LT(builtin_index, Builtins::builtin_count); | |
| 2163 DCHECK_LE(0, builtin_index); | |
| 2164 | |
| 2165 if (FLAG_trace_serializer) { | |
| 2166 PrintF(" Encoding builtin: %s\n", | |
| 2167 isolate()->builtins()->name(builtin_index)); | |
| 2168 } | |
| 2169 | |
| 2170 sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin"); | |
| 2171 sink_->PutInt(builtin_index, "builtin_index"); | |
| 2172 } | |
| 2173 | |
| 2174 | |
| 2175 void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code, | |
| 2176 WhereToPoint where_to_point) { | |
| 2177 DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) || | |
| 2178 (how_to_code == kPlain && where_to_point == kInnerPointer) || | |
| 2179 (how_to_code == kFromCode && where_to_point == kInnerPointer)); | |
| 2180 DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache); | |
| 2181 DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null()); | |
| 2182 | |
| 2183 int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex; | |
| 2184 | |
| 2185 if (FLAG_trace_serializer) { | |
| 2186 PrintF(" Encoding code stub %s as %d\n", | |
| 2187 CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key), false), | |
| 2188 index); | |
| 2189 } | |
| 2190 | |
| 2191 sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub"); | |
| 2192 sink_->PutInt(index, "CodeStub key"); | |
| 2193 } | |
| 2194 | |
| 2195 | |
| 2196 void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code, | |
| 2197 WhereToPoint where_to_point) { | |
| 2198 // The IC may be implemented as a stub. | |
| 2199 uint32_t stub_key = ic->stub_key(); | |
| 2200 if (stub_key != CodeStub::NoCacheKey()) { | |
| 2201 if (FLAG_trace_serializer) { | |
| 2202 PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind())); | |
| 2203 } | |
| 2204 SerializeCodeStub(stub_key, how_to_code, where_to_point); | |
| 2205 return; | |
| 2206 } | |
| 2207 // The IC may be implemented as a builtin. Only real builtins have an | |
| 2208 // actual builtin_index value attached (otherwise it's just garbage). | |
| 2209 // Compare to make sure we are really dealing with a builtin. | |
| 2210 int builtin_index = ic->builtin_index(); | |
| 2211 if (builtin_index < Builtins::builtin_count) { | |
| 2212 Builtins::Name name = static_cast<Builtins::Name>(builtin_index); | |
| 2213 Code* builtin = isolate()->builtins()->builtin(name); | |
| 2214 if (builtin == ic) { | |
| 2215 if (FLAG_trace_serializer) { | |
| 2216 PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind())); | |
| 2217 } | |
| 2218 DCHECK(ic->kind() == Code::KEYED_LOAD_IC || | |
| 2219 ic->kind() == Code::KEYED_STORE_IC); | |
| 2220 SerializeBuiltin(builtin_index, how_to_code, where_to_point); | |
| 2221 return; | |
| 2222 } | |
| 2223 } | |
| 2224 // The IC may also just be a piece of code kept in the non_monomorphic_cache. | |
| 2225 // In that case, just serialize as a normal code object. | |
| 2226 if (FLAG_trace_serializer) { | |
| 2227 PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind())); | |
| 2228 } | |
| 2229 DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC); | |
| 2230 SerializeGeneric(ic, how_to_code, where_to_point); | |
| 2231 } | |
| 2232 | |
| 2233 | |
| 2234 int CodeSerializer::AddCodeStubKey(uint32_t stub_key) { | |
| 2235 // TODO(yangguo): Maybe we need a hash table; this linear scan is O(n^2) overall. | |
| 2236 int index = 0; | |
| 2237 while (index < stub_keys_.length()) { | |
| 2238 if (stub_keys_[index] == stub_key) return index; | |
| 2239 index++; | |
| 2240 } | |
| 2241 stub_keys_.Add(stub_key); | |
| 2242 return index; | |
| 2243 } | |
| 2244 | |
| 2245 | |
| 2246 MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize( | |
| 2247 Isolate* isolate, ScriptData* cached_data, Handle<String> source) { | |
| 2248 base::ElapsedTimer timer; | |
| 2249 if (FLAG_profile_deserialization) timer.Start(); | |
| 2250 | |
| 2251 HandleScope scope(isolate); | |
| 2252 | |
| 2253 SmartPointer<SerializedCodeData> scd( | |
| 2254 SerializedCodeData::FromCachedData(isolate, cached_data, *source)); | |
| 2255 if (scd.is_empty()) { | |
| 2256 if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n"); | |
| 2257 DCHECK(cached_data->rejected()); | |
| 2258 return MaybeHandle<SharedFunctionInfo>(); | |
| 2259 } | |
| 2260 | |
| 2261 // Eagerly expand string table to avoid allocations during deserialization. | |
| 2262 StringTable::EnsureCapacityForDeserialization(isolate, | |
| 2263 scd->NumInternalizedStrings()); | |
| 2264 | |
| 2265 // Prepare and register list of attached objects. | |
| 2266 Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys(); | |
| 2267 Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New( | |
| 2268 code_stub_keys.length() + kCodeStubsBaseIndex); | |
| 2269 attached_objects[kSourceObjectIndex] = source; | |
| 2270 for (int i = 0; i < code_stub_keys.length(); i++) { | |
| 2271 attached_objects[i + kCodeStubsBaseIndex] = | |
| 2272 CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked(); | |
| 2273 } | |
| 2274 | |
| 2275 Deserializer deserializer(scd.get()); | |
| 2276 deserializer.SetAttachedObjects(attached_objects); | |
| 2277 | |
| 2278 // Deserialize. | |
| 2279 Handle<SharedFunctionInfo> result; | |
| 2280 if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) { | |
| 2281 // Deserializing may fail if the reservations cannot be fulfilled. | |
| 2282 if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n"); | |
| 2283 return MaybeHandle<SharedFunctionInfo>(); | |
| 2284 } | |
| 2285 deserializer.FlushICacheForNewCodeObjects(); | |
| 2286 | |
| 2287 if (FLAG_profile_deserialization) { | |
| 2288 double ms = timer.Elapsed().InMillisecondsF(); | |
| 2289 int length = cached_data->length(); | |
| 2290 PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms); | |
| 2291 } | |
| 2292 result->set_deserialized(true); | |
| 2293 | |
| 2294 if (isolate->logger()->is_logging_code_events() || | |
| 2295 isolate->cpu_profiler()->is_profiling()) { | |
| 2296 String* name = isolate->heap()->empty_string(); | |
| 2297 if (result->script()->IsScript()) { | |
| 2298 Script* script = Script::cast(result->script()); | |
| 2299 if (script->name()->IsString()) name = String::cast(script->name()); | |
| 2300 } | |
| 2301 isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG, result->code(), | |
| 2302 *result, NULL, name); | |
| 2303 } | |
| 2304 return scope.CloseAndEscape(result); | |
| 2305 } | |
| 2306 | |
| 2307 | |
| 2308 void SerializedData::AllocateData(int size) { | |
| 2309 DCHECK(!owns_data_); | |
| 2310 data_ = NewArray<byte>(size); | |
| 2311 size_ = size; | |
| 2312 owns_data_ = true; | |
| 2313 DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment)); | |
| 2314 } | |
| 2315 | |
| 2316 | |
| 2317 SnapshotData::SnapshotData(const Serializer& ser) { | |
| 2318 DisallowHeapAllocation no_gc; | |
| 2319 List<Reservation> reservations; | |
| 2320 ser.EncodeReservations(&reservations); | |
| 2321 const List<byte>& payload = ser.sink()->data(); | |
| 2322 | |
| 2323 // Calculate sizes. | |
| 2324 int reservation_size = reservations.length() * kInt32Size; | |
| 2325 int size = kHeaderSize + reservation_size + payload.length(); | |
| 2326 | |
| 2327 // Allocate backing store and create result data. | |
| 2328 AllocateData(size); | |
| 2329 | |
| 2330 // Set header values. | |
| 2331 SetMagicNumber(ser.isolate()); | |
| 2332 SetHeaderValue(kCheckSumOffset, Version::Hash()); | |
| 2333 SetHeaderValue(kNumReservationsOffset, reservations.length()); | |
| 2334 SetHeaderValue(kPayloadLengthOffset, payload.length()); | |
| 2335 | |
| 2336 // Copy reservation chunk sizes. | |
| 2337 CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()), | |
| 2338 reservation_size); | |
| 2339 | |
| 2340 // Copy serialized data. | |
| 2341 CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(), | |
| 2342 static_cast<size_t>(payload.length())); | |
| 2343 } | |
| 2344 | |
| 2345 | |
| 2346 bool SnapshotData::IsSane() { | |
| 2347 return GetHeaderValue(kCheckSumOffset) == Version::Hash(); | |
| 2348 } | |
| 2349 | |
| 2350 | |
| 2351 Vector<const SerializedData::Reservation> SnapshotData::Reservations() const { | |
| 2352 return Vector<const Reservation>( | |
| 2353 reinterpret_cast<const Reservation*>(data_ + kHeaderSize), | |
| 2354 GetHeaderValue(kNumReservationsOffset)); | |
| 2355 } | |
| 2356 | |
| 2357 | |
| 2358 Vector<const byte> SnapshotData::Payload() const { | |
| 2359 int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size; | |
| 2360 const byte* payload = data_ + kHeaderSize + reservations_size; | |
| 2361 int length = GetHeaderValue(kPayloadLengthOffset); | |
| 2362 DCHECK_EQ(data_ + size_, payload + length); | |
| 2363 return Vector<const byte>(payload, length); | |
| 2364 } | |
| 2365 | |
| 2366 | |
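| // Worked example for the checksum below (illustrative): for the two word | |
| // values {3, 5}, starting from a = 1, b = 0: | |
| //   after 3: a = 4, b = 4 | |
| //   after 5: a = 9, b = 13 | |
| // so Check(9, 13) succeeds for exactly that payload. | |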
| 2367 class Checksum { | |
| 2368 public: | |
| 2369 explicit Checksum(Vector<const byte> payload) { | |
| 2370 // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit. | |
| 2371 uintptr_t a = 1; | |
| 2372 uintptr_t b = 0; | |
| 2373 const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start()); | |
| 2374 DCHECK(IsAligned(payload.length(), kIntptrSize)); | |
| 2375 const uintptr_t* end = cur + payload.length() / kIntptrSize; | |
| 2376 while (cur < end) { | |
| 2377 // Unsigned overflow expected and intended. | |
| 2378 a += *cur++; | |
| 2379 b += a; | |
| 2380 } | |
| 2381 #if V8_HOST_ARCH_64_BIT | |
| 2382 a ^= a >> 32; | |
| 2383 b ^= b >> 32; | |
| 2384 #endif // V8_HOST_ARCH_64_BIT | |
| 2385 a_ = static_cast<uint32_t>(a); | |
| 2386 b_ = static_cast<uint32_t>(b); | |
| 2387 } | |
| 2388 | |
| 2389 bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; } | |
| 2390 | |
| 2391 uint32_t a() const { return a_; } | |
| 2392 uint32_t b() const { return b_; } | |
| 2393 | |
| 2394 private: | |
| 2395 uint32_t a_; | |
| 2396 uint32_t b_; | |
| 2397 | |
| 2398 DISALLOW_COPY_AND_ASSIGN(Checksum); | |
| 2399 }; | |
| 2400 | |
| 2401 | |
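| // On-disk layout produced below (sketch): | |
| //   [header: magic number, version/source/cpu/flag hashes, counts, | |
| //    payload checksums] | |
| //   [reservation sizes] [code stub keys] [padding to pointer alignment] | |
| //   [payload bytes] | |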
| 2402 SerializedCodeData::SerializedCodeData(const List<byte>& payload, | |
| 2403 const CodeSerializer& cs) { | |
| 2404 DisallowHeapAllocation no_gc; | |
| 2405 const List<uint32_t>* stub_keys = cs.stub_keys(); | |
| 2406 | |
| 2407 List<Reservation> reservations; | |
| 2408 cs.EncodeReservations(&reservations); | |
| 2409 | |
| 2410 // Calculate sizes. | |
| 2411 int reservation_size = reservations.length() * kInt32Size; | |
| 2412 int num_stub_keys = stub_keys->length(); | |
| 2413 int stub_keys_size = stub_keys->length() * kInt32Size; | |
| 2414 int payload_offset = kHeaderSize + reservation_size + stub_keys_size; | |
| 2415 int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset); | |
| 2416 int size = padded_payload_offset + payload.length(); | |
| 2417 | |
| 2418 // Allocate backing store and create result data. | |
| 2419 AllocateData(size); | |
| 2420 | |
| 2421 // Set header values. | |
| 2422 SetMagicNumber(cs.isolate()); | |
| 2423 SetHeaderValue(kVersionHashOffset, Version::Hash()); | |
| 2424 SetHeaderValue(kSourceHashOffset, SourceHash(cs.source())); | |
| 2425 SetHeaderValue(kCpuFeaturesOffset, | |
| 2426 static_cast<uint32_t>(CpuFeatures::SupportedFeatures())); | |
| 2427 SetHeaderValue(kFlagHashOffset, FlagList::Hash()); | |
| 2428 SetHeaderValue(kNumInternalizedStringsOffset, cs.num_internalized_strings()); | |
| 2429 SetHeaderValue(kNumReservationsOffset, reservations.length()); | |
| 2430 SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys); | |
| 2431 SetHeaderValue(kPayloadLengthOffset, payload.length()); | |
| 2432 | |
| 2433 Checksum checksum(payload.ToConstVector()); | |
| 2434 SetHeaderValue(kChecksum1Offset, checksum.a()); | |
| 2435 SetHeaderValue(kChecksum2Offset, checksum.b()); | |
| 2436 | |
| 2437 // Copy reservation chunk sizes. | |
| 2438 CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()), | |
| 2439 reservation_size); | |
| 2440 | |
| 2441 // Copy code stub keys. | |
| 2442 CopyBytes(data_ + kHeaderSize + reservation_size, | |
| 2443 reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size); | |
| 2444 | |
| 2445 memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset); | |
| 2446 | |
| 2447 // Copy serialized data. | |
| 2448 CopyBytes(data_ + padded_payload_offset, payload.begin(), | |
| 2449 static_cast<size_t>(payload.length())); | |
| 2450 } | |
| 2451 | |
| 2452 | |
| 2453 SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck( | |
| 2454 Isolate* isolate, String* source) const { | |
| 2455 uint32_t magic_number = GetMagicNumber(); | |
| 2456 uint32_t version_hash = GetHeaderValue(kVersionHashOffset); | |
| 2457 uint32_t source_hash = GetHeaderValue(kSourceHashOffset); | |
| 2458 uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset); | |
| 2459 uint32_t flags_hash = GetHeaderValue(kFlagHashOffset); | |
| 2460 uint32_t c1 = GetHeaderValue(kChecksum1Offset); | |
| 2461 uint32_t c2 = GetHeaderValue(kChecksum2Offset); | |
| 2462 if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH; | |
| 2463 if (version_hash != Version::Hash()) return VERSION_MISMATCH; | |
| 2464 if (source_hash != SourceHash(source)) return SOURCE_MISMATCH; | |
| 2465 if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) { | |
| 2466 return CPU_FEATURES_MISMATCH; | |
| 2467 } | |
| 2468 if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH; | |
| 2469 if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH; | |
| 2470 return CHECK_SUCCESS; | |
| 2471 } | |
| 2472 | |
| 2473 | |
| 2474 // Returns the ScriptData object, relinquishing ownership of it to the caller. | |
| 2475 ScriptData* SerializedCodeData::GetScriptData() { | |
| 2476 DCHECK(owns_data_); | |
| 2477 ScriptData* result = new ScriptData(data_, size_); | |
| 2478 result->AcquireDataOwnership(); | |
| 2479 owns_data_ = false; | |
| 2480 data_ = NULL; | |
| 2481 return result; | |
| 2482 } | |
| 2483 | |
| 2484 | |
| 2485 Vector<const SerializedData::Reservation> SerializedCodeData::Reservations() | |
| 2486 const { | |
| 2487 return Vector<const Reservation>( | |
| 2488 reinterpret_cast<const Reservation*>(data_ + kHeaderSize), | |
| 2489 GetHeaderValue(kNumReservationsOffset)); | |
| 2490 } | |
| 2491 | |
| 2492 | |
| 2493 Vector<const byte> SerializedCodeData::Payload() const { | |
| 2494 int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size; | |
| 2495 int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size; | |
| 2496 int payload_offset = kHeaderSize + reservations_size + code_stubs_size; | |
| 2497 int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset); | |
| 2498 const byte* payload = data_ + padded_payload_offset; | |
| 2499 DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment)); | |
| 2500 int length = GetHeaderValue(kPayloadLengthOffset); | |
| 2501 DCHECK_EQ(data_ + size_, payload + length); | |
| 2502 return Vector<const byte>(payload, length); | |
| 2503 } | |
| 2504 | |
| 2505 | |
| 2506 int SerializedCodeData::NumInternalizedStrings() const { | |
| 2507 return GetHeaderValue(kNumInternalizedStringsOffset); | |
| 2508 } | |
| 2509 | |
| 2510 Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const { | |
| 2511 int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size; | |
| 2512 const byte* start = data_ + kHeaderSize + reservations_size; | |
| 2513 return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start), | |
| 2514 GetHeaderValue(kNumCodeStubKeysOffset)); | |
| 2515 } | |
| 2516 | |
| 2517 | |
| 2518 SerializedCodeData::SerializedCodeData(ScriptData* data) | |
| 2519 : SerializedData(const_cast<byte*>(data->data()), data->length()) {} | |
| 2520 | |
| 2521 | |
| 2522 SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate, | |
| 2523 ScriptData* cached_data, | |
| 2524 String* source) { | |
| 2525 DisallowHeapAllocation no_gc; | |
| 2526 SerializedCodeData* scd = new SerializedCodeData(cached_data); | |
| 2527 SanityCheckResult r = scd->SanityCheck(isolate, source); | |
| 2528 if (r == CHECK_SUCCESS) return scd; | |
| 2529 cached_data->Reject(); | |
| 2530 source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r); | |
| 2531 delete scd; | |
| 2532 return NULL; | |
| 2533 } | |
| 2534 | |
| 2535 } // namespace internal | |
| 2536 } // namespace v8 | |