1 // Copyright 2012 the V8 project authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "src/v8.h" | |
6 | |
7 #include "src/accessors.h" | |
8 #include "src/api.h" | |
9 #include "src/base/once.h" | |
10 #include "src/base/utils/random-number-generator.h" | |
11 #include "src/bootstrapper.h" | |
12 #include "src/codegen.h" | |
13 #include "src/compilation-cache.h" | |
14 #include "src/conversions.h" | |
15 #include "src/cpu-profiler.h" | |
16 #include "src/debug.h" | |
17 #include "src/deoptimizer.h" | |
18 #include "src/global-handles.h" | |
19 #include "src/heap-profiler.h" | |
20 #include "src/incremental-marking.h" | |
21 #include "src/isolate-inl.h" | |
22 #include "src/mark-compact.h" | |
23 #include "src/natives.h" | |
24 #include "src/objects-visiting-inl.h" | |
25 #include "src/objects-visiting.h" | |
26 #include "src/runtime-profiler.h" | |
27 #include "src/scopeinfo.h" | |
28 #include "src/snapshot.h" | |
29 #include "src/store-buffer.h" | |
30 #include "src/utils.h" | |
31 #include "src/v8threads.h" | |
32 #include "src/vm-state-inl.h" | |
33 | |
34 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP | |
35 #include "src/regexp-macro-assembler.h" // NOLINT | |
36 #include "src/arm/regexp-macro-assembler-arm.h" // NOLINT | |
37 #endif | |
38 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP | |
39 #include "src/regexp-macro-assembler.h" // NOLINT | |
40 #include "src/mips/regexp-macro-assembler-mips.h" // NOLINT | |
41 #endif | |
42 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP | |
43 #include "src/regexp-macro-assembler.h" | |
44 #include "src/mips64/regexp-macro-assembler-mips64.h" | |
45 #endif | |
46 | |
47 namespace v8 { | |
48 namespace internal { | |
49 | |
50 | |
51 Heap::Heap() | |
52 : amount_of_external_allocated_memory_(0), | |
53 amount_of_external_allocated_memory_at_last_global_gc_(0), | |
54 isolate_(NULL), | |
55 code_range_size_(0), | |
56 // semispace_size_ should be a power of 2 and old_generation_size_ should | |
57 // be a multiple of Page::kPageSize. | |
58 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), | |
59 max_semi_space_size_(8 * (kPointerSize / 4) * MB), | |
60 initial_semispace_size_(Page::kPageSize), | |
61 max_old_generation_size_(700ul * (kPointerSize / 4) * MB), | |
62 max_executable_size_(256ul * (kPointerSize / 4) * MB), | |
63 // Variables set based on semispace_size_ and old_generation_size_ in | |
64 // ConfigureHeap. | |
65 // Will be 4 * reserved_semispace_size_ to ensure that young | |
66 // generation can be aligned to its size. | |
67 maximum_committed_(0), | |
68 survived_since_last_expansion_(0), | |
69 sweep_generation_(0), | |
70 always_allocate_scope_depth_(0), | |
71 contexts_disposed_(0), | |
72 global_ic_age_(0), | |
73 flush_monomorphic_ics_(false), | |
74 scan_on_scavenge_pages_(0), | |
75 new_space_(this), | |
76 old_pointer_space_(NULL), | |
77 old_data_space_(NULL), | |
78 code_space_(NULL), | |
79 map_space_(NULL), | |
80 cell_space_(NULL), | |
81 property_cell_space_(NULL), | |
82 lo_space_(NULL), | |
83 gc_state_(NOT_IN_GC), | |
84 gc_post_processing_depth_(0), | |
85 allocations_count_(0), | |
86 raw_allocations_hash_(0), | |
87 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc), | |
88 ms_count_(0), | |
89 gc_count_(0), | |
90 remembered_unmapped_pages_index_(0), | |
91 unflattened_strings_length_(0), | |
92 #ifdef DEBUG | |
93 allocation_timeout_(0), | |
94 #endif // DEBUG | |
95 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit), | |
96 old_gen_exhausted_(false), | |
97 inline_allocation_disabled_(false), | |
98 store_buffer_rebuilder_(store_buffer()), | |
99 hidden_string_(NULL), | |
100 gc_safe_size_of_old_object_(NULL), | |
101 total_regexp_code_generated_(0), | |
102 tracer_(this), | |
103 high_survival_rate_period_length_(0), | |
104 promoted_objects_size_(0), | |
105 promotion_rate_(0), | |
106 semi_space_copied_object_size_(0), | |
107 semi_space_copied_rate_(0), | |
108 nodes_died_in_new_space_(0), | |
109 nodes_copied_in_new_space_(0), | |
110 nodes_promoted_(0), | |
111 maximum_size_scavenges_(0), | |
112 max_gc_pause_(0.0), | |
113 total_gc_time_ms_(0.0), | |
114 max_alive_after_gc_(0), | |
115 min_in_mutator_(kMaxInt), | |
116 marking_time_(0.0), | |
117 sweeping_time_(0.0), | |
118 mark_compact_collector_(this), | |
119 store_buffer_(this), | |
120 marking_(this), | |
121 incremental_marking_(this), | |
122 number_idle_notifications_(0), | |
123 last_idle_notification_gc_count_(0), | |
124 last_idle_notification_gc_count_init_(false), | |
125 mark_sweeps_since_idle_round_started_(0), | |
126 gc_count_at_last_idle_gc_(0), | |
127 scavenges_since_last_idle_round_(kIdleScavengeThreshold), | |
128 full_codegen_bytes_generated_(0), | |
129 crankshaft_codegen_bytes_generated_(0), | |
130 gcs_since_last_deopt_(0), | |
131 #ifdef VERIFY_HEAP | |
132 no_weak_object_verification_scope_depth_(0), | |
133 #endif | |
134 allocation_sites_scratchpad_length_(0), | |
135 promotion_queue_(this), | |
136 configured_(false), | |
137 external_string_table_(this), | |
138 chunks_queued_for_free_(NULL), | |
139 gc_callbacks_depth_(0) { | |
140 // Allow build-time customization of the max semispace size. Building | |
141 // V8 with snapshots and a non-default max semispace size is much | |
142 // easier if you can define it as part of the build environment. | |
143 #if defined(V8_MAX_SEMISPACE_SIZE) | |
144 max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; | |
145 #endif | |
146 | |
147 // Ensure old_generation_size_ is a multiple of kPageSize. | |
148 DCHECK(MB >= Page::kPageSize); | |
149 | |
150 memset(roots_, 0, sizeof(roots_[0]) * kRootListLength); | |
151 set_native_contexts_list(NULL); | |
152 set_array_buffers_list(Smi::FromInt(0)); | |
153 set_allocation_sites_list(Smi::FromInt(0)); | |
154 set_encountered_weak_collections(Smi::FromInt(0)); | |
155 // Put a dummy entry in the remembered pages so we can find the list in | |
156 // the minidump even if there are no real unmapped pages. | |
157 RememberUnmappedPage(NULL, false); | |
158 | |
159 ClearObjectStats(true); | |
160 } | |
161 | |
162 | |
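// Returns the combined capacity of all spaces except the large object
// space.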
163 intptr_t Heap::Capacity() { | |
164 if (!HasBeenSetUp()) return 0; | |
165 | |
166 return new_space_.Capacity() + | |
167 old_pointer_space_->Capacity() + | |
168 old_data_space_->Capacity() + | |
169 code_space_->Capacity() + | |
170 map_space_->Capacity() + | |
171 cell_space_->Capacity() + | |
172 property_cell_space_->Capacity(); | |
173 } | |
174 | |
175 | |
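// Returns the amount of memory committed for all spaces, including the
// large object space.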
176 intptr_t Heap::CommittedMemory() { | |
177 if (!HasBeenSetUp()) return 0; | |
178 | |
179 return new_space_.CommittedMemory() + | |
180 old_pointer_space_->CommittedMemory() + | |
181 old_data_space_->CommittedMemory() + | |
182 code_space_->CommittedMemory() + | |
183 map_space_->CommittedMemory() + | |
184 cell_space_->CommittedMemory() + | |
185 property_cell_space_->CommittedMemory() + | |
186 lo_space_->Size(); | |
187 } | |
188 | |
189 | |
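// Returns the committed memory that is currently backed by physical
// memory, summed over all spaces.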
190 size_t Heap::CommittedPhysicalMemory() { | |
191 if (!HasBeenSetUp()) return 0; | |
192 | |
193 return new_space_.CommittedPhysicalMemory() + | |
194 old_pointer_space_->CommittedPhysicalMemory() + | |
195 old_data_space_->CommittedPhysicalMemory() + | |
196 code_space_->CommittedPhysicalMemory() + | |
197 map_space_->CommittedPhysicalMemory() + | |
198 cell_space_->CommittedPhysicalMemory() + | |
199 property_cell_space_->CommittedPhysicalMemory() + | |
200 lo_space_->CommittedPhysicalMemory(); | |
201 } | |
202 | |
203 | |
204 intptr_t Heap::CommittedMemoryExecutable() { | |
205 if (!HasBeenSetUp()) return 0; | |
206 | |
207 return isolate()->memory_allocator()->SizeExecutable(); | |
208 } | |
209 | |
210 | |
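// Records a new high-water mark for committed memory if the current
// amount exceeds the previous maximum.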
211 void Heap::UpdateMaximumCommitted() { | |
212 if (!HasBeenSetUp()) return; | |
213 | |
214 intptr_t current_committed_memory = CommittedMemory(); | |
215 if (current_committed_memory > maximum_committed_) { | |
216 maximum_committed_ = current_committed_memory; | |
217 } | |
218 } | |
219 | |
220 | |
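// Returns the memory still available for allocation in all spaces except
// the large object space.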
221 intptr_t Heap::Available() { | |
222 if (!HasBeenSetUp()) return 0; | |
223 | |
224 return new_space_.Available() + | |
225 old_pointer_space_->Available() + | |
226 old_data_space_->Available() + | |
227 code_space_->Available() + | |
228 map_space_->Available() + | |
229 cell_space_->Available() + | |
230 property_cell_space_->Available(); | |
231 } | |
232 | |
233 | |
234 bool Heap::HasBeenSetUp() { | |
235 return old_pointer_space_ != NULL && | |
236 old_data_space_ != NULL && | |
237 code_space_ != NULL && | |
238 map_space_ != NULL && | |
239 cell_space_ != NULL && | |
240 property_cell_space_ != NULL && | |
241 lo_space_ != NULL; | |
242 } | |
243 | |
244 | |
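// Returns the size of an old-space object in a GC-safe way: marked
// objects are measured via the intrusive marking bookkeeping, unmarked
// objects via their map.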
245 int Heap::GcSafeSizeOfOldObject(HeapObject* object) { | |
246 if (IntrusiveMarking::IsMarked(object)) { | |
247 return IntrusiveMarking::SizeOfMarkedObject(object); | |
248 } | |
249 return object->SizeFromMap(object->map()); | |
250 } | |
251 | |
252 | |
253 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space, | |
254 const char** reason) { | |
255 // Is global GC requested? | |
256 if (space != NEW_SPACE) { | |
257 isolate_->counters()->gc_compactor_caused_by_request()->Increment(); | |
258 *reason = "GC in old space requested"; | |
259 return MARK_COMPACTOR; | |
260 } | |
261 | |
262 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) { | |
263 *reason = "GC in old space forced by flags"; | |
264 return MARK_COMPACTOR; | |
265 } | |
266 | |
267 // Is enough data promoted to justify a global GC? | |
268 if (OldGenerationAllocationLimitReached()) { | |
269 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment(); | |
270 *reason = "promotion limit reached"; | |
271 return MARK_COMPACTOR; | |
272 } | |
273 | |
274 // Have allocation in OLD and LO failed? | |
275 if (old_gen_exhausted_) { | |
276 isolate_->counters()-> | |
277 gc_compactor_caused_by_oldspace_exhaustion()->Increment(); | |
278 *reason = "old generations exhausted"; | |
279 return MARK_COMPACTOR; | |
280 } | |
281 | |
282 // Is there enough space left in OLD to guarantee that a scavenge can | |
283 // succeed? | |
284 // | |
285 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available | |
286 // for object promotion. It counts only the bytes that the memory | |
287 // allocator has not yet allocated from the OS and assigned to any space, | |
288 // and does not count available bytes already in the old space or code | |
289 // space. Undercounting is safe---we may get an unrequested full GC when | |
290 // a scavenge would have succeeded. | |
291 if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) { | |
292 isolate_->counters()-> | |
293 gc_compactor_caused_by_oldspace_exhaustion()->Increment(); | |
294 *reason = "scavenge might not succeed"; | |
295 return MARK_COMPACTOR; | |
296 } | |
297 | |
298 // Default | |
299 *reason = NULL; | |
300 return SCAVENGER; | |
301 } | |
302 | |
303 | |
304 // TODO(1238405): Combine the infrastructure for --heap-stats and | |
305 // --log-gc to avoid the complicated preprocessor and flag testing. | |
306 void Heap::ReportStatisticsBeforeGC() { | |
307 // Heap::ReportHeapStatistics will also log NewSpace statistics when | |
308 // --log-gc is set, so the following logic is used to avoid double | |
309 // logging. | |
310 #ifdef DEBUG | |
311 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics(); | |
312 if (FLAG_heap_stats) { | |
313 ReportHeapStatistics("Before GC"); | |
314 } else if (FLAG_log_gc) { | |
315 new_space_.ReportStatistics(); | |
316 } | |
317 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms(); | |
318 #else | |
319 if (FLAG_log_gc) { | |
320 new_space_.CollectStatistics(); | |
321 new_space_.ReportStatistics(); | |
322 new_space_.ClearHistograms(); | |
323 } | |
324 #endif // DEBUG | |
325 } | |
326 | |
327 | |
328 void Heap::PrintShortHeapStatistics() { | |
329 if (!FLAG_trace_gc_verbose) return; | |
330 PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX "d KB" | |
331 ", available: %6" V8_PTR_PREFIX "d KB\n", | |
332 isolate_->memory_allocator()->Size() / KB, | |
333 isolate_->memory_allocator()->Available() / KB); | |
334 PrintPID("New space, used: %6" V8_PTR_PREFIX "d KB" | |
335 ", available: %6" V8_PTR_PREFIX "d KB" | |
336 ", committed: %6" V8_PTR_PREFIX "d KB\n", | |
337 new_space_.Size() / KB, | |
338 new_space_.Available() / KB, | |
339 new_space_.CommittedMemory() / KB); | |
340 PrintPID("Old pointers, used: %6" V8_PTR_PREFIX "d KB" | |
341 ", available: %6" V8_PTR_PREFIX "d KB" | |
342 ", committed: %6" V8_PTR_PREFIX "d KB\n", | |
343 old_pointer_space_->SizeOfObjects() / KB, | |
344 old_pointer_space_->Available() / KB, | |
345 old_pointer_space_->CommittedMemory() / KB); | |
346 PrintPID("Old data space, used: %6" V8_PTR_PREFIX "d KB" | |
347 ", available: %6" V8_PTR_PREFIX "d KB" | |
348 ", committed: %6" V8_PTR_PREFIX "d KB\n", | |
349 old_data_space_->SizeOfObjects() / KB, | |
350 old_data_space_->Available() / KB, | |
351 old_data_space_->CommittedMemory() / KB); | |
352 PrintPID("Code space, used: %6" V8_PTR_PREFIX "d KB" | |
353 ", available: %6" V8_PTR_PREFIX "d KB" | |
354 ", committed: %6" V8_PTR_PREFIX "d KB\n", | |
355 code_space_->SizeOfObjects() / KB, | |
356 code_space_->Available() / KB, | |
357 code_space_->CommittedMemory() / KB); | |
358 PrintPID("Map space, used: %6" V8_PTR_PREFIX "d KB" | |
359 ", available: %6" V8_PTR_PREFIX "d KB" | |
360 ", committed: %6" V8_PTR_PREFIX "d KB\n", | |
361 map_space_->SizeOfObjects() / KB, | |
362 map_space_->Available() / KB, | |
363 map_space_->CommittedMemory() / KB); | |
364 PrintPID("Cell space, used: %6" V8_PTR_PREFIX "d KB" | |
365 ", available: %6" V8_PTR_PREFIX "d KB" | |
366 ", committed: %6" V8_PTR_PREFIX "d KB\n", | |
367 cell_space_->SizeOfObjects() / KB, | |
368 cell_space_->Available() / KB, | |
369 cell_space_->CommittedMemory() / KB); | |
370 PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB" | |
371 ", available: %6" V8_PTR_PREFIX "d KB" | |
372 ", committed: %6" V8_PTR_PREFIX "d KB\n", | |
373 property_cell_space_->SizeOfObjects() / KB, | |
374 property_cell_space_->Available() / KB, | |
375 property_cell_space_->CommittedMemory() / KB); | |
376 PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB" | |
377 ", available: %6" V8_PTR_PREFIX "d KB" | |
378 ", committed: %6" V8_PTR_PREFIX "d KB\n", | |
379 lo_space_->SizeOfObjects() / KB, | |
380 lo_space_->Available() / KB, | |
381 lo_space_->CommittedMemory() / KB); | |
382 PrintPID("All spaces, used: %6" V8_PTR_PREFIX "d KB" | |
383 ", available: %6" V8_PTR_PREFIX "d KB" | |
384 ", committed: %6" V8_PTR_PREFIX "d KB\n", | |
385 this->SizeOfObjects() / KB, | |
386 this->Available() / KB, | |
387 this->CommittedMemory() / KB); | |
388 PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n", | |
389 static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB)); | |
390 PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_); | |
391 } | |
392 | |
393 | |
394 // TODO(1238405): Combine the infrastructure for --heap-stats and | |
395 // --log-gc to avoid the complicated preprocessor and flag testing. | |
396 void Heap::ReportStatisticsAfterGC() { | |
397 // As before the GC, we use some complicated logic to ensure that | |
398 // NewSpace statistics are logged exactly once when --log-gc is turned on. | |
399 #if defined(DEBUG) | |
400 if (FLAG_heap_stats) { | |
401 new_space_.CollectStatistics(); | |
402 ReportHeapStatistics("After GC"); | |
403 } else if (FLAG_log_gc) { | |
404 new_space_.ReportStatistics(); | |
405 } | |
406 #else | |
407 if (FLAG_log_gc) new_space_.ReportStatistics(); | |
408 #endif // DEBUG | |
409 } | |
410 | |
411 | |
412 void Heap::GarbageCollectionPrologue() { | |
413 { AllowHeapAllocation for_the_first_part_of_prologue; | |
414 ClearJSFunctionResultCaches(); | |
415 gc_count_++; | |
416 unflattened_strings_length_ = 0; | |
417 | |
418 if (FLAG_flush_code && FLAG_flush_code_incrementally) { | |
419 mark_compact_collector()->EnableCodeFlushing(true); | |
420 } | |
421 | |
422 #ifdef VERIFY_HEAP | |
423 if (FLAG_verify_heap) { | |
424 Verify(); | |
425 } | |
426 #endif | |
427 } | |
428 | |
429 // Reset GC statistics. | |
430 promoted_objects_size_ = 0; | |
431 semi_space_copied_object_size_ = 0; | |
432 nodes_died_in_new_space_ = 0; | |
433 nodes_copied_in_new_space_ = 0; | |
434 nodes_promoted_ = 0; | |
435 | |
436 UpdateMaximumCommitted(); | |
437 | |
438 #ifdef DEBUG | |
439 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); | |
440 | |
441 if (FLAG_gc_verbose) Print(); | |
442 | |
443 ReportStatisticsBeforeGC(); | |
444 #endif // DEBUG | |
445 | |
446 store_buffer()->GCPrologue(); | |
447 | |
448 if (isolate()->concurrent_osr_enabled()) { | |
449 isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs(); | |
450 } | |
451 | |
452 if (new_space_.IsAtMaximumCapacity()) { | |
453 maximum_size_scavenges_++; | |
454 } else { | |
455 maximum_size_scavenges_ = 0; | |
456 } | |
457 CheckNewSpaceExpansionCriteria(); | |
458 } | |
459 | |
460 | |
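// Returns the total size of objects allocated, summed over all spaces.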
461 intptr_t Heap::SizeOfObjects() { | |
462 intptr_t total = 0; | |
463 AllSpaces spaces(this); | |
464 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { | |
465 total += space->SizeOfObjects(); | |
466 } | |
467 return total; | |
468 } | |
469 | |
470 | |
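// Walks the code space and clears inline caches of the given kind in all
// full-codegen and optimized code objects.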
471 void Heap::ClearAllICsByKind(Code::Kind kind) { | |
472 HeapObjectIterator it(code_space()); | |
473 | |
474 for (Object* object = it.Next(); object != NULL; object = it.Next()) { | |
475 Code* code = Code::cast(object); | |
476 Code::Kind current_kind = code->kind(); | |
477 if (current_kind == Code::FUNCTION || | |
478 current_kind == Code::OPTIMIZED_FUNCTION) { | |
479 code->ClearInlineCaches(kind); | |
480 } | |
481 } | |
482 } | |
483 | |
484 | |
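// Asks each paged space to repair its free lists; used after booting from
// a snapshot.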
485 void Heap::RepairFreeListsAfterBoot() { | |
486 PagedSpaces spaces(this); | |
487 for (PagedSpace* space = spaces.next(); | |
488 space != NULL; | |
489 space = spaces.next()) { | |
490 space->RepairFreeListsAfterBoot(); | |
491 } | |
492 } | |
493 | |
494 | |
495 void Heap::ProcessPretenuringFeedback() { | |
496 if (FLAG_allocation_site_pretenuring) { | |
497 int tenure_decisions = 0; | |
498 int dont_tenure_decisions = 0; | |
499 int allocation_mementos_found = 0; | |
500 int allocation_sites = 0; | |
501 int active_allocation_sites = 0; | |
502 | |
503 // If the scratchpad overflowed, we have to iterate over the allocation | |
504 // sites list. | |
505 // TODO(hpayer): We iterate over the whole list of allocation sites when | |
506 // we have grown to the maximum semi-space size, to deopt maybe-tenured | |
507 // allocation sites. We could hold the maybe-tenured allocation sites | |
508 // in a separate data structure if this is a performance problem. | |
509 bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites(); | |
510 bool use_scratchpad = | |
511 allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize && | |
512 !deopt_maybe_tenured; | |
513 | |
514 int i = 0; | |
515 Object* list_element = allocation_sites_list(); | |
516 bool trigger_deoptimization = false; | |
517 bool maximum_size_scavenge = MaximumSizeScavenge(); | |
518 while (use_scratchpad ? | |
519 i < allocation_sites_scratchpad_length_ : | |
520 list_element->IsAllocationSite()) { | |
521 AllocationSite* site = use_scratchpad ? | |
522 AllocationSite::cast(allocation_sites_scratchpad()->get(i)) : | |
523 AllocationSite::cast(list_element); | |
524 allocation_mementos_found += site->memento_found_count(); | |
525 if (site->memento_found_count() > 0) { | |
526 active_allocation_sites++; | |
527 if (site->DigestPretenuringFeedback(maximum_size_scavenge)) { | |
528 trigger_deoptimization = true; | |
529 } | |
530 if (site->GetPretenureMode() == TENURED) { | |
531 tenure_decisions++; | |
532 } else { | |
533 dont_tenure_decisions++; | |
534 } | |
535 allocation_sites++; | |
536 } | |
537 | |
538 if (deopt_maybe_tenured && site->IsMaybeTenure()) { | |
539 site->set_deopt_dependent_code(true); | |
540 trigger_deoptimization = true; | |
541 } | |
542 | |
543 if (use_scratchpad) { | |
544 i++; | |
545 } else { | |
546 list_element = site->weak_next(); | |
547 } | |
548 } | |
549 | |
550 if (trigger_deoptimization) { | |
551 isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); | |
552 } | |
553 | |
554 FlushAllocationSitesScratchpad(); | |
555 | |
556 if (FLAG_trace_pretenuring_statistics && | |
557 (allocation_mementos_found > 0 || | |
558 tenure_decisions > 0 || | |
559 dont_tenure_decisions > 0)) { | |
560 PrintF("GC: (mode, #visited allocation sites, #active allocation sites, " | |
561 "#mementos, #tenure decisions, #donttenure decisions) " | |
562 "(%s, %d, %d, %d, %d, %d)\n", | |
563 use_scratchpad ? "use scratchpad" : "use list", | |
564 allocation_sites, | |
565 active_allocation_sites, | |
566 allocation_mementos_found, | |
567 tenure_decisions, | |
568 dont_tenure_decisions); | |
569 } | |
570 } | |
571 } | |
572 | |
573 | |
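// Deoptimizes code that depends on allocation sites whose tenuring
// decision changed, then clears the per-site deopt flag.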
574 void Heap::DeoptMarkedAllocationSites() { | |
575 // TODO(hpayer): If iterating over the allocation sites list becomes a | |
576 // performance issue, use a cache heap data structure instead (similar to the | |
577 // allocation sites scratchpad). | |
578 Object* list_element = allocation_sites_list(); | |
579 while (list_element->IsAllocationSite()) { | |
580 AllocationSite* site = AllocationSite::cast(list_element); | |
581 if (site->deopt_dependent_code()) { | |
582 site->dependent_code()->MarkCodeForDeoptimization( | |
583 isolate_, | |
584 DependentCode::kAllocationSiteTenuringChangedGroup); | |
585 site->set_deopt_dependent_code(false); | |
586 } | |
587 list_element = site->weak_next(); | |
588 } | |
589 Deoptimizer::DeoptimizeMarkedCode(isolate_); | |
590 } | |
591 | |
592 | |
593 void Heap::GarbageCollectionEpilogue() { | |
594 store_buffer()->GCEpilogue(); | |
595 | |
596 // In release mode, we only zap the from space under heap verification. | |
597 if (Heap::ShouldZapGarbage()) { | |
598 ZapFromSpace(); | |
599 } | |
600 | |
601 // Process pretenuring feedback and update allocation sites. | |
602 ProcessPretenuringFeedback(); | |
603 | |
604 #ifdef VERIFY_HEAP | |
605 if (FLAG_verify_heap) { | |
606 Verify(); | |
607 } | |
608 #endif | |
609 | |
610 AllowHeapAllocation for_the_rest_of_the_epilogue; | |
611 | |
612 #ifdef DEBUG | |
613 if (FLAG_print_global_handles) isolate_->global_handles()->Print(); | |
614 if (FLAG_print_handles) PrintHandles(); | |
615 if (FLAG_gc_verbose) Print(); | |
616 if (FLAG_code_stats) ReportCodeStatistics("After GC"); | |
617 #endif | |
618 if (FLAG_deopt_every_n_garbage_collections > 0) { | |
619 // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that | |
620 // the topmost optimized frame can be deoptimized safely, because it | |
621 // might not have a lazy bailout point right after its current PC. | |
622 if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) { | |
623 Deoptimizer::DeoptimizeAll(isolate()); | |
624 gcs_since_last_deopt_ = 0; | |
625 } | |
626 } | |
627 | |
628 UpdateMaximumCommitted(); | |
629 | |
630 isolate_->counters()->alive_after_last_gc()->Set( | |
631 static_cast<int>(SizeOfObjects())); | |
632 | |
633 isolate_->counters()->string_table_capacity()->Set( | |
634 string_table()->Capacity()); | |
635 isolate_->counters()->number_of_symbols()->Set( | |
636 string_table()->NumberOfElements()); | |
637 | |
638 if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) { | |
639 isolate_->counters()->codegen_fraction_crankshaft()->AddSample( | |
640 static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) / | |
641 (crankshaft_codegen_bytes_generated_ | |
642 + full_codegen_bytes_generated_))); | |
643 } | |
644 | |
645 if (CommittedMemory() > 0) { | |
646 isolate_->counters()->external_fragmentation_total()->AddSample( | |
647 static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory())); | |
648 | |
649 isolate_->counters()->heap_fraction_new_space()-> | |
650 AddSample(static_cast<int>( | |
651 (new_space()->CommittedMemory() * 100.0) / CommittedMemory())); | |
652 isolate_->counters()->heap_fraction_old_pointer_space()->AddSample( | |
653 static_cast<int>( | |
654 (old_pointer_space()->CommittedMemory() * 100.0) / | |
655 CommittedMemory())); | |
656 isolate_->counters()->heap_fraction_old_data_space()->AddSample( | |
657 static_cast<int>( | |
658 (old_data_space()->CommittedMemory() * 100.0) / | |
659 CommittedMemory())); | |
660 isolate_->counters()->heap_fraction_code_space()-> | |
661 AddSample(static_cast<int>( | |
662 (code_space()->CommittedMemory() * 100.0) / CommittedMemory())); | |
663 isolate_->counters()->heap_fraction_map_space()->AddSample( | |
664 static_cast<int>( | |
665 (map_space()->CommittedMemory() * 100.0) / CommittedMemory())); | |
666 isolate_->counters()->heap_fraction_cell_space()->AddSample( | |
667 static_cast<int>( | |
668 (cell_space()->CommittedMemory() * 100.0) / CommittedMemory())); | |
669 isolate_->counters()->heap_fraction_property_cell_space()-> | |
670 AddSample(static_cast<int>( | |
671 (property_cell_space()->CommittedMemory() * 100.0) / | |
672 CommittedMemory())); | |
673 isolate_->counters()->heap_fraction_lo_space()-> | |
674 AddSample(static_cast<int>( | |
675 (lo_space()->CommittedMemory() * 100.0) / CommittedMemory())); | |
676 | |
677 isolate_->counters()->heap_sample_total_committed()->AddSample( | |
678 static_cast<int>(CommittedMemory() / KB)); | |
679 isolate_->counters()->heap_sample_total_used()->AddSample( | |
680 static_cast<int>(SizeOfObjects() / KB)); | |
681 isolate_->counters()->heap_sample_map_space_committed()->AddSample( | |
682 static_cast<int>(map_space()->CommittedMemory() / KB)); | |
683 isolate_->counters()->heap_sample_cell_space_committed()->AddSample( | |
684 static_cast<int>(cell_space()->CommittedMemory() / KB)); | |
685 isolate_->counters()-> | |
686 heap_sample_property_cell_space_committed()-> | |
687 AddSample(static_cast<int>( | |
688 property_cell_space()->CommittedMemory() / KB)); | |
689 isolate_->counters()->heap_sample_code_space_committed()->AddSample( | |
690 static_cast<int>(code_space()->CommittedMemory() / KB)); | |
691 | |
692 isolate_->counters()->heap_sample_maximum_committed()->AddSample( | |
693 static_cast<int>(MaximumCommittedMemory() / KB)); | |
694 } | |
695 | |
696 #define UPDATE_COUNTERS_FOR_SPACE(space) \ | |
697 isolate_->counters()->space##_bytes_available()->Set( \ | |
698 static_cast<int>(space()->Available())); \ | |
699 isolate_->counters()->space##_bytes_committed()->Set( \ | |
700 static_cast<int>(space()->CommittedMemory())); \ | |
701 isolate_->counters()->space##_bytes_used()->Set( \ | |
702 static_cast<int>(space()->SizeOfObjects())); | |
703 #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \ | |
704 if (space()->CommittedMemory() > 0) { \ | |
705 isolate_->counters()->external_fragmentation_##space()->AddSample( \ | |
706 static_cast<int>(100 - \ | |
707 (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \ | |
708 } | |
709 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \ | |
710 UPDATE_COUNTERS_FOR_SPACE(space) \ | |
711 UPDATE_FRAGMENTATION_FOR_SPACE(space) | |
712 | |
713 UPDATE_COUNTERS_FOR_SPACE(new_space) | |
714 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space) | |
715 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space) | |
716 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space) | |
717 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space) | |
718 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space) | |
719 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space) | |
720 UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space) | |
721 #undef UPDATE_COUNTERS_FOR_SPACE | |
722 #undef UPDATE_FRAGMENTATION_FOR_SPACE | |
723 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE | |
724 | |
725 #ifdef DEBUG | |
726 ReportStatisticsAfterGC(); | |
727 #endif // DEBUG | |
728 | |
729 // Remember the last top pointer so that we can later find out | |
730 // whether we allocated in new space since the last GC. | |
731 new_space_top_after_last_gc_ = new_space()->top(); | |
732 } | |
733 | |
734 | |
735 void Heap::CollectAllGarbage(int flags, | |
736 const char* gc_reason, | |
737 const v8::GCCallbackFlags gc_callback_flags) { | |
738 // Since we are ignoring the return value, the exact choice of space does | |
739 // not matter, so long as we do not specify NEW_SPACE, which would not | |
740 // cause a full GC. | |
741 mark_compact_collector_.SetFlags(flags); | |
742 CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags); | |
743 mark_compact_collector_.SetFlags(kNoGCFlags); | |
744 } | |
745 | |
746 | |
747 void Heap::CollectAllAvailableGarbage(const char* gc_reason) { | |
748 // Since we are ignoring the return value, the exact choice of space does | |
749 // not matter, so long as we do not specify NEW_SPACE, which would not | |
750 // cause a full GC. | |
751 // Major GC would invoke weak handle callbacks on weakly reachable | |
752 // handles, but won't collect weakly reachable objects until next | |
753 // major GC. Therefore if we collect aggressively and weak handle callback | |
754 // has been invoked, we rerun major GC to release objects which become | |
755 // garbage. | |
756 // Note: as weak callbacks can execute arbitrary code, we cannot | |
757 // hope that eventually there will be no weak callbacks invocations. | |
758 // Therefore stop recollecting after several attempts. | |
759 if (isolate()->concurrent_recompilation_enabled()) { | |
760 // The optimizing compiler may be unnecessarily holding on to memory. | |
761 DisallowHeapAllocation no_recursive_gc; | |
762 isolate()->optimizing_compiler_thread()->Flush(); | |
763 } | |
764 mark_compact_collector()->SetFlags(kMakeHeapIterableMask | | |
765 kReduceMemoryFootprintMask); | |
766 isolate_->compilation_cache()->Clear(); | |
767 const int kMaxNumberOfAttempts = 7; | |
768 const int kMinNumberOfAttempts = 2; | |
769 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { | |
770 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) && | |
771 attempt + 1 >= kMinNumberOfAttempts) { | |
772 break; | |
773 } | |
774 } | |
775 mark_compact_collector()->SetFlags(kNoGCFlags); | |
776 new_space_.Shrink(); | |
777 UncommitFromSpace(); | |
778 incremental_marking()->UncommitMarkingDeque(); | |
779 } | |
780 | |
781 | |
782 void Heap::EnsureFillerObjectAtTop() { | |
783 // There may be an allocation memento behind every object in new space. | |
784 // If we evacuate a new space that is not full, or if we are on the last | |
785 // page of the new space, then there may be uninitialized memory behind | |
786 // the top pointer of the new space page. We store a filler object there | |
787 // to identify the unused space. | |
788 Address from_top = new_space_.top(); | |
789 Address from_limit = new_space_.limit(); | |
790 if (from_top < from_limit) { | |
791 int remaining_in_page = static_cast<int>(from_limit - from_top); | |
792 CreateFillerObjectAt(from_top, remaining_in_page); | |
793 } | |
794 } | |
795 | |
796 | |
797 bool Heap::CollectGarbage(GarbageCollector collector, | |
798 const char* gc_reason, | |
799 const char* collector_reason, | |
800 const v8::GCCallbackFlags gc_callback_flags) { | |
801 // The VM is in the GC state until exiting this function. | |
802 VMState<GC> state(isolate_); | |
803 | |
804 #ifdef DEBUG | |
805 // Reset the allocation timeout to the GC interval, but make sure to | |
806 // allow at least a few allocations after a collection. The reason | |
807 // for this is that we have a lot of allocation sequences and we | |
808 // assume that a garbage collection will allow the subsequent | |
809 // allocation attempts to go through. | |
810 allocation_timeout_ = Max(6, FLAG_gc_interval); | |
811 #endif | |
812 | |
813 EnsureFillerObjectAtTop(); | |
814 | |
815 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) { | |
816 if (FLAG_trace_incremental_marking) { | |
817 PrintF("[IncrementalMarking] Scavenge during marking.\n"); | |
818 } | |
819 } | |
820 | |
821 if (collector == MARK_COMPACTOR && | |
822 !mark_compact_collector()->abort_incremental_marking() && | |
823 !incremental_marking()->IsStopped() && | |
824 !incremental_marking()->should_hurry() && | |
825 FLAG_incremental_marking_steps) { | |
826 // Make progress in incremental marking. | |
827 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB; | |
828 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge, | |
829 IncrementalMarking::NO_GC_VIA_STACK_GUARD); | |
830 if (!incremental_marking()->IsComplete() && !FLAG_gc_global) { | |
831 if (FLAG_trace_incremental_marking) { | |
832 PrintF("[IncrementalMarking] Delaying MarkSweep.\n"); | |
833 } | |
834 collector = SCAVENGER; | |
835 collector_reason = "incremental marking delaying mark-sweep"; | |
836 } | |
837 } | |
838 | |
839 bool next_gc_likely_to_collect_more = false; | |
840 | |
841 { | |
842 tracer()->Start(collector, gc_reason, collector_reason); | |
843 DCHECK(AllowHeapAllocation::IsAllowed()); | |
844 DisallowHeapAllocation no_allocation_during_gc; | |
845 GarbageCollectionPrologue(); | |
846 | |
847 { | |
848 HistogramTimerScope histogram_timer_scope( | |
849 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() | |
850 : isolate_->counters()->gc_compactor()); | |
851 next_gc_likely_to_collect_more = | |
852 PerformGarbageCollection(collector, gc_callback_flags); | |
853 } | |
854 | |
855 GarbageCollectionEpilogue(); | |
856 tracer()->Stop(); | |
857 } | |
858 | |
859 // Start incremental marking for the next cycle. The heap snapshot | |
860 // generator needs incremental marking to stay off after it aborted. | |
861 if (!mark_compact_collector()->abort_incremental_marking() && | |
862 incremental_marking()->IsStopped() && | |
863 incremental_marking()->WorthActivating() && | |
864 NextGCIsLikelyToBeFull()) { | |
865 incremental_marking()->Start(); | |
866 } | |
867 | |
868 return next_gc_likely_to_collect_more; | |
869 } | |
870 | |
871 | |
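// Notification that a context has been disposed: flushes queued
// concurrent recompilation work, forces monomorphic ICs to be flushed and
// ages the inline caches.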
872 int Heap::NotifyContextDisposed() { | |
873 if (isolate()->concurrent_recompilation_enabled()) { | |
874 // Flush the queued recompilation tasks. | |
875 isolate()->optimizing_compiler_thread()->Flush(); | |
876 } | |
877 flush_monomorphic_ics_ = true; | |
878 AgeInlineCaches(); | |
879 return ++contexts_disposed_; | |
880 } | |
881 | |
882 | |
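// Moves len elements of a FixedArray from src_index to dst_index,
// updating the write barrier for moved new-space pointers when the array
// itself is in old space.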
883 void Heap::MoveElements(FixedArray* array, | |
884 int dst_index, | |
885 int src_index, | |
886 int len) { | |
887 if (len == 0) return; | |
888 | |
889 DCHECK(array->map() != fixed_cow_array_map()); | |
890 Object** dst_objects = array->data_start() + dst_index; | |
891 MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize); | |
892 if (!InNewSpace(array)) { | |
893 for (int i = 0; i < len; i++) { | |
894 // TODO(hpayer): check store buffer for entries | |
895 if (InNewSpace(dst_objects[i])) { | |
896 RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i)); | |
897 } | |
898 } | |
899 } | |
900 incremental_marking()->RecordWrites(array); | |
901 } | |
902 | |
903 | |
904 #ifdef VERIFY_HEAP | |
905 // Helper class for verifying the string table. | |
906 class StringTableVerifier : public ObjectVisitor { | |
907 public: | |
908 void VisitPointers(Object** start, Object** end) { | |
909 // Visit all HeapObject pointers in [start, end). | |
910 for (Object** p = start; p < end; p++) { | |
911 if ((*p)->IsHeapObject()) { | |
912 // Check that the string is actually internalized. | |
913 CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || | |
914 (*p)->IsInternalizedString()); | |
915 } | |
916 } | |
917 } | |
918 }; | |
919 | |
920 | |
921 static void VerifyStringTable(Heap* heap) { | |
922 StringTableVerifier verifier; | |
923 heap->string_table()->IterateElements(&verifier); | |
924 } | |
925 #endif // VERIFY_HEAP | |
926 | |
927 | |
928 static bool AbortIncrementalMarkingAndCollectGarbage( | |
929 Heap* heap, | |
930 AllocationSpace space, | |
931 const char* gc_reason = NULL) { | |
932 heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask); | |
933 bool result = heap->CollectGarbage(space, gc_reason); | |
934 heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags); | |
935 return result; | |
936 } | |
937 | |
938 | |
939 void Heap::ReserveSpace(int *sizes, Address *locations_out) { | |
940 bool gc_performed = true; | |
941 int counter = 0; | |
942 static const int kThreshold = 20; | |
943 while (gc_performed && counter++ < kThreshold) { | |
944 gc_performed = false; | |
945 DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1); | |
946 for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) { | |
947 if (sizes[space] != 0) { | |
948 AllocationResult allocation; | |
949 if (space == NEW_SPACE) { | |
950 allocation = new_space()->AllocateRaw(sizes[space]); | |
951 } else { | |
952 allocation = paged_space(space)->AllocateRaw(sizes[space]); | |
953 } | |
954 FreeListNode* node; | |
955 if (!allocation.To(&node)) { | |
956 if (space == NEW_SPACE) { | |
957 Heap::CollectGarbage(NEW_SPACE, | |
958 "failed to reserve space in the new space"); | |
959 } else { | |
960 AbortIncrementalMarkingAndCollectGarbage( | |
961 this, | |
962 static_cast<AllocationSpace>(space), | |
963 "failed to reserve space in paged space"); | |
964 } | |
965 gc_performed = true; | |
966 break; | |
967 } else { | |
968 // Mark with a free list node, in case we have a GC before | |
969 // deserializing. | |
970 node->set_size(this, sizes[space]); | |
971 locations_out[space] = node->address(); | |
972 } | |
973 } | |
974 } | |
975 } | |
976 | |
977 if (gc_performed) { | |
978 // Failed to reserve the space after several attempts. | |
979 V8::FatalProcessOutOfMemory("Heap::ReserveSpace"); | |
980 } | |
981 } | |
982 | |
983 | |
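// Ensures that from-space is committed before a collection; dies with an
// out-of-memory error if committing fails.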
984 void Heap::EnsureFromSpaceIsCommitted() { | |
985 if (new_space_.CommitFromSpaceIfNeeded()) return; | |
986 | |
987 // Committing memory to from space failed. | |
988 // Memory is exhausted and we will die. | |
989 V8::FatalProcessOutOfMemory("Committing semi space failed."); | |
990 } | |
991 | |
992 | |
993 void Heap::ClearJSFunctionResultCaches() { | |
994 if (isolate_->bootstrapper()->IsActive()) return; | |
995 | |
996 Object* context = native_contexts_list(); | |
997 while (!context->IsUndefined()) { | |
998 // Get the caches for this context. GC can happen when the context | |
999 // is not fully initialized, so the caches can be undefined. | |
1000 Object* caches_or_undefined = | |
1001 Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX); | |
1002 if (!caches_or_undefined->IsUndefined()) { | |
1003 FixedArray* caches = FixedArray::cast(caches_or_undefined); | |
1004 // Clear the caches: | |
1005 int length = caches->length(); | |
1006 for (int i = 0; i < length; i++) { | |
1007 JSFunctionResultCache::cast(caches->get(i))->Clear(); | |
1008 } | |
1009 } | |
1010 // Get the next context: | |
1011 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); | |
1012 } | |
1013 } | |
1014 | |
1015 | |
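// Clears the normalized map cache of every native context, except during
// bootstrapping while no incremental marking is in progress.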
1016 void Heap::ClearNormalizedMapCaches() { | |
1017 if (isolate_->bootstrapper()->IsActive() && | |
1018 !incremental_marking()->IsMarking()) { | |
1019 return; | |
1020 } | |
1021 | |
1022 Object* context = native_contexts_list(); | |
1023 while (!context->IsUndefined()) { | |
1024 // GC can happen when the context is not fully initialized, | |
1025 // so the cache can be undefined. | |
1026 Object* cache = | |
1027 Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX); | |
1028 if (!cache->IsUndefined()) { | |
1029 NormalizedMapCache::cast(cache)->Clear(); | |
1030 } | |
1031 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); | |
1032 } | |
1033 } | |
1034 | |
1035 | |
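// Recomputes the promotion and semi-space copy rates relative to the new
// space size at the start of the GC and tracks how long the survival rate
// has stayed above the high threshold.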
1036 void Heap::UpdateSurvivalStatistics(int start_new_space_size) { | |
1037 if (start_new_space_size == 0) return; | |
1038 | |
1039 promotion_rate_ = | |
1040 (static_cast<double>(promoted_objects_size_) / | |
1041 static_cast<double>(start_new_space_size) * 100); | |
1042 | |
1043 semi_space_copied_rate_ = | |
1044 (static_cast<double>(semi_space_copied_object_size_) / | |
1045 static_cast<double>(start_new_space_size) * 100); | |
1046 | |
1047 double survival_rate = promotion_rate_ + semi_space_copied_rate_; | |
1048 | |
1049 if (survival_rate > kYoungSurvivalRateHighThreshold) { | |
1050 high_survival_rate_period_length_++; | |
1051 } else { | |
1052 high_survival_rate_period_length_ = 0; | |
1053 } | |
1054 } | |
1055 | |
1056 bool Heap::PerformGarbageCollection( | |
1057 GarbageCollector collector, | |
1058 const v8::GCCallbackFlags gc_callback_flags) { | |
1059 int freed_global_handles = 0; | |
1060 | |
1061 if (collector != SCAVENGER) { | |
1062 PROFILE(isolate_, CodeMovingGCEvent()); | |
1063 } | |
1064 | |
1065 #ifdef VERIFY_HEAP | |
1066 if (FLAG_verify_heap) { | |
1067 VerifyStringTable(this); | |
1068 } | |
1069 #endif | |
1070 | |
1071 GCType gc_type = | |
1072 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge; | |
1073 | |
1074 { GCCallbacksScope scope(this); | |
1075 if (scope.CheckReenter()) { | |
1076 AllowHeapAllocation allow_allocation; | |
1077 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | |
1078 VMState<EXTERNAL> state(isolate_); | |
1079 HandleScope handle_scope(isolate_); | |
1080 CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags); | |
1081 } | |
1082 } | |
1083 | |
1084 EnsureFromSpaceIsCommitted(); | |
1085 | |
1086 int start_new_space_size = Heap::new_space()->SizeAsInt(); | |
1087 | |
1088 if (IsHighSurvivalRate()) { | |
1089 // We speed up the incremental marker if it is running so that it | |
1090 // does not fall behind the rate of promotion, which would cause a | |
1091 // constantly growing old space. | |
1092 incremental_marking()->NotifyOfHighPromotionRate(); | |
1093 } | |
1094 | |
1095 if (collector == MARK_COMPACTOR) { | |
1096 // Perform mark-sweep with optional compaction. | |
1097 MarkCompact(); | |
1098 sweep_generation_++; | |
1099 // Temporarily set the limit for the case when | |
1100 // PostGarbageCollectionProcessing allocates and triggers GC. The real | |
1101 // limit is set after PostGarbageCollectionProcessing. | |
1102 old_generation_allocation_limit_ = | |
1103 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0); | |
1104 old_gen_exhausted_ = false; | |
1105 } else { | |
1106 Scavenge(); | |
1107 } | |
1108 | |
1109 UpdateSurvivalStatistics(start_new_space_size); | |
1110 | |
1111 isolate_->counters()->objs_since_last_young()->Set(0); | |
1112 | |
1113 // Callbacks that fire after this point might trigger nested GCs and | |
1114 // restart incremental marking, so the assertion can't be moved down. | |
1115 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped()); | |
1116 | |
1117 gc_post_processing_depth_++; | |
1118 { AllowHeapAllocation allow_allocation; | |
1119 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | |
1120 freed_global_handles = | |
1121 isolate_->global_handles()->PostGarbageCollectionProcessing(collector); | |
1122 } | |
1123 gc_post_processing_depth_--; | |
1124 | |
1125 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); | |
1126 | |
1127 // Update relocatables. | |
1128 Relocatable::PostGarbageCollectionProcessing(isolate_); | |
1129 | |
1130 if (collector == MARK_COMPACTOR) { | |
1131 // Register the amount of external allocated memory. | |
1132 amount_of_external_allocated_memory_at_last_global_gc_ = | |
1133 amount_of_external_allocated_memory_; | |
1134 old_generation_allocation_limit_ = | |
1135 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), | |
1136 freed_global_handles); | |
1137 } | |
1138 | |
1139 { GCCallbacksScope scope(this); | |
1140 if (scope.CheckReenter()) { | |
1141 AllowHeapAllocation allow_allocation; | |
1142 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | |
1143 VMState<EXTERNAL> state(isolate_); | |
1144 HandleScope handle_scope(isolate_); | |
1145 CallGCEpilogueCallbacks(gc_type, gc_callback_flags); | |
1146 } | |
1147 } | |
1148 | |
1149 #ifdef VERIFY_HEAP | |
1150 if (FLAG_verify_heap) { | |
1151 VerifyStringTable(this); | |
1152 } | |
1153 #endif | |
1154 | |
1155 return freed_global_handles > 0; | |
1156 } | |
1157 | |
1158 | |
1159 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { | |
1160 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { | |
1161 if (gc_type & gc_prologue_callbacks_[i].gc_type) { | |
1162 if (!gc_prologue_callbacks_[i].pass_isolate_) { | |
1163 v8::GCPrologueCallback callback = | |
1164 reinterpret_cast<v8::GCPrologueCallback>( | |
1165 gc_prologue_callbacks_[i].callback); | |
1166 callback(gc_type, flags); | |
1167 } else { | |
1168 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); | |
1169 gc_prologue_callbacks_[i].callback(isolate, gc_type, flags); | |
1170 } | |
1171 } | |
1172 } | |
1173 } | |
1174 | |
1175 | |
1176 void Heap::CallGCEpilogueCallbacks(GCType gc_type, | |
1177 GCCallbackFlags gc_callback_flags) { | |
1178 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { | |
1179 if (gc_type & gc_epilogue_callbacks_[i].gc_type) { | |
1180 if (!gc_epilogue_callbacks_[i].pass_isolate_) { | |
1181 v8::GCPrologueCallback callback = | |
1182 reinterpret_cast<v8::GCPrologueCallback>( | |
1183 gc_epilogue_callbacks_[i].callback); | |
1184 callback(gc_type, gc_callback_flags); | |
1185 } else { | |
1186 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); | |
1187 gc_epilogue_callbacks_[i].callback( | |
1188 isolate, gc_type, gc_callback_flags); | |
1189 } | |
1190 } | |
1191 } | |
1192 } | |
1193 | |
1194 | |
1195 void Heap::MarkCompact() { | |
1196 gc_state_ = MARK_COMPACT; | |
1197 LOG(isolate_, ResourceEvent("markcompact", "begin")); | |
1198 | |
1199 uint64_t size_of_objects_before_gc = SizeOfObjects(); | |
1200 | |
1201 mark_compact_collector_.Prepare(); | |
1202 | |
1203 ms_count_++; | |
1204 | |
1205 MarkCompactPrologue(); | |
1206 | |
1207 mark_compact_collector_.CollectGarbage(); | |
1208 | |
1209 LOG(isolate_, ResourceEvent("markcompact", "end")); | |
1210 | |
1211 gc_state_ = NOT_IN_GC; | |
1212 | |
1213 isolate_->counters()->objs_since_last_full()->Set(0); | |
1214 | |
1215 flush_monomorphic_ics_ = false; | |
1216 | |
1217 if (FLAG_allocation_site_pretenuring) { | |
1218 EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc); | |
1219 } | |
1220 } | |
1221 | |
1222 | |
1223 void Heap::MarkCompactPrologue() { | |
1224 // At any old GC clear the keyed lookup cache to enable collection of unused | |
1225 // maps. | |
1226 isolate_->keyed_lookup_cache()->Clear(); | |
1227 isolate_->context_slot_cache()->Clear(); | |
1228 isolate_->descriptor_lookup_cache()->Clear(); | |
1229 RegExpResultsCache::Clear(string_split_cache()); | |
1230 RegExpResultsCache::Clear(regexp_multiple_cache()); | |
1231 | |
1232 isolate_->compilation_cache()->MarkCompactPrologue(); | |
1233 | |
1234 CompletelyClearInstanceofCache(); | |
1235 | |
1236 FlushNumberStringCache(); | |
1237 if (FLAG_cleanup_code_caches_at_gc) { | |
1238 polymorphic_code_cache()->set_cache(undefined_value()); | |
1239 } | |
1240 | |
1241 ClearNormalizedMapCaches(); | |
1242 } | |
1243 | |
1244 | |
1245 // Helper class for copying HeapObjects | |
1246 class ScavengeVisitor: public ObjectVisitor { | |
1247 public: | |
1248 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {} | |
1249 | |
1250 void VisitPointer(Object** p) { ScavengePointer(p); } | |
1251 | |
1252 void VisitPointers(Object** start, Object** end) { | |
1253 // Copy all HeapObject pointers in [start, end) | |
1254 for (Object** p = start; p < end; p++) ScavengePointer(p); | |
1255 } | |
1256 | |
1257 private: | |
1258 void ScavengePointer(Object** p) { | |
1259 Object* object = *p; | |
1260 if (!heap_->InNewSpace(object)) return; | |
1261 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), | |
1262 reinterpret_cast<HeapObject*>(object)); | |
1263 } | |
1264 | |
1265 Heap* heap_; | |
1266 }; | |
1267 | |
1268 | |
1269 #ifdef VERIFY_HEAP | |
1270 // Visitor class to verify pointers in code or data space do not point into | |
1271 // new space. | |
1272 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor { | |
1273 public: | |
1274 explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {} | |
1275 void VisitPointers(Object** start, Object** end) { | |
1276 for (Object** current = start; current < end; current++) { | |
1277 if ((*current)->IsHeapObject()) { | |
1278 CHECK(!heap_->InNewSpace(HeapObject::cast(*current))); | |
1279 } | |
1280 } | |
1281 } | |
1282 | |
1283 private: | |
1284 Heap* heap_; | |
1285 }; | |
1286 | |
1287 | |
1288 static void VerifyNonPointerSpacePointers(Heap* heap) { | |
1289 // Verify that there are no pointers to new space in spaces where we | |
1290 // do not expect them. | |
1291 VerifyNonPointerSpacePointersVisitor v(heap); | |
1292 HeapObjectIterator code_it(heap->code_space()); | |
1293 for (HeapObject* object = code_it.Next(); | |
1294 object != NULL; object = code_it.Next()) | |
1295 object->Iterate(&v); | |
1296 | |
1297 // The old data space is normally swept conservatively, so the iterator | |
1298 // does not work there; in that case we skip the following check. | |
1299 if (heap->old_data_space()->swept_precisely()) { | |
1300 HeapObjectIterator data_it(heap->old_data_space()); | |
1301 for (HeapObject* object = data_it.Next(); | |
1302 object != NULL; object = data_it.Next()) | |
1303 object->Iterate(&v); | |
1304 } | |
1305 } | |
1306 #endif // VERIFY_HEAP | |
1307 | |
1308 | |
1309 void Heap::CheckNewSpaceExpansionCriteria() { | |
1310 if (new_space_.Capacity() < new_space_.MaximumCapacity() && | |
1311 survived_since_last_expansion_ > new_space_.Capacity()) { | |
1312 // Grow the size of new space if there is room to grow, enough data | |
1313 // has survived scavenge since the last expansion and we are not in | |
1314 // high promotion mode. | |
1315 new_space_.Grow(); | |
1316 survived_since_last_expansion_ = 0; | |
1317 } | |
1318 } | |
1319 | |
1320 | |
1321 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) { | |
1322 return heap->InNewSpace(*p) && | |
1323 !HeapObject::cast(*p)->map_word().IsForwardingAddress(); | |
1324 } | |
1325 | |
1326 | |
1327 void Heap::ScavengeStoreBufferCallback( | |
1328 Heap* heap, | |
1329 MemoryChunk* page, | |
1330 StoreBufferEvent event) { | |
1331 heap->store_buffer_rebuilder_.Callback(page, event); | |
1332 } | |
1333 | |
1334 | |
1335 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) { | |
1336 if (event == kStoreBufferStartScanningPagesEvent) { | |
1337 start_of_current_page_ = NULL; | |
1338 current_page_ = NULL; | |
1339 } else if (event == kStoreBufferScanningPageEvent) { | |
1340 if (current_page_ != NULL) { | |
1341 // If this page already overflowed the store buffer during this iteration. | |
1342 if (current_page_->scan_on_scavenge()) { | |
1343 // Then we should wipe out the entries that have been added for it. | |
1344 store_buffer_->SetTop(start_of_current_page_); | |
1345 } else if (store_buffer_->Top() - start_of_current_page_ >= | |
1346 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) { | |
1347 // Did we find too many pointers in the previous page? The heuristic is | |
1348 // that no page can take more than 1/5 of the remaining slots in the | |
1349 // store buffer. | |
1350 current_page_->set_scan_on_scavenge(true); | |
1351 store_buffer_->SetTop(start_of_current_page_); | |
1352 } else { | |
1353 // In this case the page we scanned took a reasonable number of slots in | |
1354 // the store buffer. It has now been rehabilitated and is no longer | |
1355 // marked scan_on_scavenge. | |
1356 DCHECK(!current_page_->scan_on_scavenge()); | |
1357 } | |
1358 } | |
1359 start_of_current_page_ = store_buffer_->Top(); | |
1360 current_page_ = page; | |
1361 } else if (event == kStoreBufferFullEvent) { | |
1362 // The current page overflowed the store buffer again. Wipe out its entries | |
1363 // in the store buffer and mark it scan-on-scavenge again. This may happen | |
1364 // several times while scanning. | |
1365 if (current_page_ == NULL) { | |
1366 // Store Buffer overflowed while scanning promoted objects. These are not | |
1367 // in any particular page, though they are likely to be clustered by the | |
1368 // allocation routines. | |
1369 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2); | |
1370 } else { | |
1371 // Store Buffer overflowed while scanning a particular old space page for | |
1372 // pointers to new space. | |
1373 DCHECK(current_page_ == page); | |
1374 DCHECK(page != NULL); | |
1375 current_page_->set_scan_on_scavenge(true); | |
1376 DCHECK(start_of_current_page_ != store_buffer_->Top()); | |
1377 store_buffer_->SetTop(start_of_current_page_); | |
1378 } | |
1379 } else { | |
1380 UNREACHABLE(); | |
1381 } | |
1382 } | |
1383 | |
1384 | |
1385 void PromotionQueue::Initialize() { | |
1386 // Assumes that a NewSpacePage exactly fits a number of promotion queue | |
1387 // entries (where each is a pair of intptr_t). This allows us to simplify | |
1388 // the test for when to switch pages. | |
1389 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) | |
1390 == 0); | |
1391 limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart()); | |
1392 front_ = rear_ = | |
1393 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); | |
1394 emergency_stack_ = NULL; | |
1395 guard_ = false; | |
1396 } | |
1397 | |
1398 | |
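// Moves the queue entries that lie on the page currently used for
// allocation onto a separately allocated emergency stack, so that
// allocation in to-space cannot overwrite them.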
1399 void PromotionQueue::RelocateQueueHead() { | |
1400 DCHECK(emergency_stack_ == NULL); | |
1401 | |
1402 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); | |
1403 intptr_t* head_start = rear_; | |
1404 intptr_t* head_end = | |
1405 Min(front_, reinterpret_cast<intptr_t*>(p->area_end())); | |
1406 | |
1407 int entries_count = | |
1408 static_cast<int>(head_end - head_start) / kEntrySizeInWords; | |
1409 | |
1410 emergency_stack_ = new List<Entry>(2 * entries_count); | |
1411 | |
1412 while (head_start != head_end) { | |
1413 int size = static_cast<int>(*(head_start++)); | |
1414 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++)); | |
1415 emergency_stack_->Add(Entry(obj, size)); | |
1416 } | |
1417 rear_ = head_end; | |
1418 } | |
1419 | |
1420 | |
1421 class ScavengeWeakObjectRetainer : public WeakObjectRetainer { | |
1422 public: | |
1423 explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { } | |
1424 | |
1425 virtual Object* RetainAs(Object* object) { | |
1426 if (!heap_->InFromSpace(object)) { | |
1427 return object; | |
1428 } | |
1429 | |
1430 MapWord map_word = HeapObject::cast(object)->map_word(); | |
1431 if (map_word.IsForwardingAddress()) { | |
1432 return map_word.ToForwardingAddress(); | |
1433 } | |
1434 return NULL; | |
1435 } | |
1436 | |
1437 private: | |
1438 Heap* heap_; | |
1439 }; | |
1440 | |
1441 | |
1442 void Heap::Scavenge() { | |
1443 RelocationLock relocation_lock(this); | |
1444 | |
1445 #ifdef VERIFY_HEAP | |
1446 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this); | |
1447 #endif | |
1448 | |
1449 gc_state_ = SCAVENGE; | |
1450 | |
1451 // Implements Cheney's copying algorithm | |
1452 LOG(isolate_, ResourceEvent("scavenge", "begin")); | |
1453 | |
1454 // Clear descriptor cache. | |
1455 isolate_->descriptor_lookup_cache()->Clear(); | |
1456 | |
1457 // Used for updating survived_since_last_expansion_ at function end. | |
1458 intptr_t survived_watermark = PromotedSpaceSizeOfObjects(); | |
1459 | |
1460 SelectScavengingVisitorsTable(); | |
1461 | |
1462 incremental_marking()->PrepareForScavenge(); | |
1463 | |
1464 // Flip the semispaces. After flipping, to space is empty, from space has | |
1465 // live objects. | |
1466 new_space_.Flip(); | |
1467 new_space_.ResetAllocationInfo(); | |
1468 | |
1469 // We need to sweep newly copied objects which can be either in the | |
1470 // to space or promoted to the old generation. For to-space | |
1471 // objects, we treat the bottom of the to space as a queue. Newly | |
1472 // copied and unswept objects lie between a 'front' mark and the | |
1473 // allocation pointer. | |
1474 // | |
1475 // Promoted objects can go into various old-generation spaces, and | |
1476 // can be allocated internally in the spaces (from the free list). | |
1477 // We treat the top of the to space as a queue of addresses of | |
1478 // promoted objects. The addresses of newly promoted and unswept | |
1479 // objects lie between a 'front' mark and a 'rear' mark that is | |
1480 // updated as a side effect of promoting an object. | |
1481 // | |
1482 // There is guaranteed to be enough room at the top of the to space | |
1483 // for the addresses of promoted objects: every object promoted | |
1484 // frees up its size in bytes from the top of the new space, and | |
1485 // objects are at least one pointer in size. | |
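// Rough picture of to-space during a scavenge (illustrative only):
//
//   ToSpaceStart()                                            ToSpaceEnd()
//   +-----------------+-----------------+---------+---------------------+
//   | processed copies| unswept copies  |  free   |  promotion queue    |
//   +-----------------+-----------------+---------+---------------------+
//   ^                 ^                 ^         ^
//   start             new_space_front   top()     queue grows downwards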
1486 Address new_space_front = new_space_.ToSpaceStart(); | |
1487 promotion_queue_.Initialize(); | |
1488 | |
1489 #ifdef DEBUG | |
1490 store_buffer()->Clean(); | |
1491 #endif | |
1492 | |
1493 ScavengeVisitor scavenge_visitor(this); | |
1494 // Copy roots. | |
1495 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); | |
1496 | |
1497 // Copy objects reachable from the old generation. | |
1498 { | |
1499 StoreBufferRebuildScope scope(this, | |
1500 store_buffer(), | |
1501 &ScavengeStoreBufferCallback); | |
1502 store_buffer()->IteratePointersToNewSpace(&ScavengeObject); | |
1503 } | |
1504 | |
1505 // Copy objects reachable from simple cells by scavenging cell values | |
1506 // directly. | |
1507 HeapObjectIterator cell_iterator(cell_space_); | |
1508 for (HeapObject* heap_object = cell_iterator.Next(); | |
1509 heap_object != NULL; | |
1510 heap_object = cell_iterator.Next()) { | |
1511 if (heap_object->IsCell()) { | |
1512 Cell* cell = Cell::cast(heap_object); | |
1513 Address value_address = cell->ValueAddress(); | |
1514 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); | |
1515 } | |
1516 } | |
1517 | |
1518 // Copy objects reachable from global property cells by scavenging global | |
1519 // property cell values directly. | |
1520 HeapObjectIterator js_global_property_cell_iterator(property_cell_space_); | |
1521 for (HeapObject* heap_object = js_global_property_cell_iterator.Next(); | |
1522 heap_object != NULL; | |
1523 heap_object = js_global_property_cell_iterator.Next()) { | |
1524 if (heap_object->IsPropertyCell()) { | |
1525 PropertyCell* cell = PropertyCell::cast(heap_object); | |
1526 Address value_address = cell->ValueAddress(); | |
1527 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); | |
1528 Address type_address = cell->TypeAddress(); | |
1529 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address)); | |
1530 } | |
1531 } | |
1532 | |
1533 // Copy objects reachable from the encountered weak collections list. | |
1534 scavenge_visitor.VisitPointer(&encountered_weak_collections_); | |
1535 | |
1536 // Copy objects reachable from the code flushing candidates list. | |
1537 MarkCompactCollector* collector = mark_compact_collector(); | |
1538 if (collector->is_code_flushing_enabled()) { | |
1539 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor); | |
1540 } | |
1541 | |
1542 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); | |
1543 | |
1544 while (isolate()->global_handles()->IterateObjectGroups( | |
1545 &scavenge_visitor, &IsUnscavengedHeapObject)) { | |
1546 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); | |
1547 } | |
1548 isolate()->global_handles()->RemoveObjectGroups(); | |
1549 isolate()->global_handles()->RemoveImplicitRefGroups(); | |
1550 | |
1551 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles( | |
1552 &IsUnscavengedHeapObject); | |
1553 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots( | |
1554 &scavenge_visitor); | |
1555 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); | |
1556 | |
1557 UpdateNewSpaceReferencesInExternalStringTable( | |
1558 &UpdateNewSpaceReferenceInExternalStringTableEntry); | |
1559 | |
1560 promotion_queue_.Destroy(); | |
1561 | |
1562 incremental_marking()->UpdateMarkingDequeAfterScavenge(); | |
1563 | |
1564 ScavengeWeakObjectRetainer weak_object_retainer(this); | |
1565 ProcessWeakReferences(&weak_object_retainer); | |
1566 | |
1567 DCHECK(new_space_front == new_space_.top()); | |
1568 | |
1569 // Set age mark. | |
1570 new_space_.set_age_mark(new_space_.top()); | |
1571 | |
1572 new_space_.LowerInlineAllocationLimit( | |
1573 new_space_.inline_allocation_limit_step()); | |
1574 | |
1575 // Update how much has survived scavenge. | |
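// Illustrative arithmetic (hypothetical numbers): if the promoted-space size
// grew from 40 MB to 41 MB during this scavenge and 2 MB of objects now sit
// in to-space, the counter below is incremented by 1 MB + 2 MB = 3 MB.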
1576 IncrementYoungSurvivorsCounter(static_cast<int>( | |
1577 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size())); | |
1578 | |
1579 LOG(isolate_, ResourceEvent("scavenge", "end")); | |
1580 | |
1581 gc_state_ = NOT_IN_GC; | |
1582 | |
1583 scavenges_since_last_idle_round_++; | |
1584 } | |
1585 | |
1586 | |
1587 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, | |
1588 Object** p) { | |
1589 MapWord first_word = HeapObject::cast(*p)->map_word(); | |
1590 | |
1591 if (!first_word.IsForwardingAddress()) { | |
1592 // An unreachable external string can be finalized. | |
1593 heap->FinalizeExternalString(String::cast(*p)); | |
1594 return NULL; | |
1595 } | |
1596 | |
1597 // String is still reachable. | |
1598 return String::cast(first_word.ToForwardingAddress()); | |
1599 } | |
1600 | |
1601 | |
1602 void Heap::UpdateNewSpaceReferencesInExternalStringTable( | |
1603 ExternalStringTableUpdaterCallback updater_func) { | |
1604 #ifdef VERIFY_HEAP | |
1605 if (FLAG_verify_heap) { | |
1606 external_string_table_.Verify(); | |
1607 } | |
1608 #endif | |
1609 | |
1610 if (external_string_table_.new_space_strings_.is_empty()) return; | |
1611 | |
1612 Object** start = &external_string_table_.new_space_strings_[0]; | |
1613 Object** end = start + external_string_table_.new_space_strings_.length(); | |
1614 Object** last = start; | |
1615 | |
1616 for (Object** p = start; p < end; ++p) { | |
1617 DCHECK(InFromSpace(*p)); | |
1618 String* target = updater_func(this, p); | |
1619 | |
1620 if (target == NULL) continue; | |
1621 | |
1622 DCHECK(target->IsExternalString()); | |
1623 | |
1624 if (InNewSpace(target)) { | |
1625 // String is still in new space. Update the table entry. | |
1626 *last = target; | |
1627 ++last; | |
1628 } else { | |
1629 // String got promoted. Move it to the old string list. | |
1630 external_string_table_.AddOldString(target); | |
1631 } | |
1632 } | |
1633 | |
1634 DCHECK(last <= end); | |
1635 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start)); | |
1636 } | |
1637 | |
1638 | |
1639 void Heap::UpdateReferencesInExternalStringTable( | |
1640 ExternalStringTableUpdaterCallback updater_func) { | |
1641 | |
1642 // Update old space string references. | |
1643 if (external_string_table_.old_space_strings_.length() > 0) { | |
1644 Object** start = &external_string_table_.old_space_strings_[0]; | |
1645 Object** end = start + external_string_table_.old_space_strings_.length(); | |
1646 for (Object** p = start; p < end; ++p) *p = updater_func(this, p); | |
1647 } | |
1648 | |
1649 UpdateNewSpaceReferencesInExternalStringTable(updater_func); | |
1650 } | |
1651 | |
1652 | |
1653 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { | |
1654 ProcessArrayBuffers(retainer); | |
1655 ProcessNativeContexts(retainer); | |
1656 // TODO(mvstanton): AllocationSites only need to be processed during | |
1657 // MARK_COMPACT, as they live in old space. Verify and address. | |
1658 ProcessAllocationSites(retainer); | |
1659 } | |
1660 | |
1661 | |
1662 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) { | |
1663 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer); | |
1664 // Update the head of the list of contexts. | |
1665 set_native_contexts_list(head); | |
1666 } | |
1667 | |
1668 | |
1669 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) { | |
1670 Object* array_buffer_obj = | |
1671 VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer); | |
1672 set_array_buffers_list(array_buffer_obj); | |
1673 } | |
1674 | |
1675 | |
1676 void Heap::TearDownArrayBuffers() { | |
1677 Object* undefined = undefined_value(); | |
1678 for (Object* o = array_buffers_list(); o != undefined;) { | |
1679 JSArrayBuffer* buffer = JSArrayBuffer::cast(o); | |
1680 Runtime::FreeArrayBuffer(isolate(), buffer); | |
1681 o = buffer->weak_next(); | |
1682 } | |
1683 set_array_buffers_list(undefined); | |
1684 } | |
1685 | |
1686 | |
1687 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) { | |
1688 Object* allocation_site_obj = | |
1689 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer); | |
1690 set_allocation_sites_list(allocation_site_obj); | |
1691 } | |
1692 | |
1693 | |
1694 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) { | |
1695 DisallowHeapAllocation no_allocation_scope; | |
1696 Object* cur = allocation_sites_list(); | |
1697 bool marked = false; | |
1698 while (cur->IsAllocationSite()) { | |
1699 AllocationSite* casted = AllocationSite::cast(cur); | |
1700 if (casted->GetPretenureMode() == flag) { | |
1701 casted->ResetPretenureDecision(); | |
1702 casted->set_deopt_dependent_code(true); | |
1703 marked = true; | |
1704 } | |
1705 cur = casted->weak_next(); | |
1706 } | |
1707 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); | |
1708 } | |
1709 | |
1710 | |
1711 void Heap::EvaluateOldSpaceLocalPretenuring( | |
1712 uint64_t size_of_objects_before_gc) { | |
1713 uint64_t size_of_objects_after_gc = SizeOfObjects(); | |
1714 double old_generation_survival_rate = | |
1715 (static_cast<double>(size_of_objects_after_gc) * 100) / | |
1716 static_cast<double>(size_of_objects_before_gc); | |
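// Worked example (hypothetical numbers): 8 MB of old-generation objects
// surviving out of 80 MB before GC gives a survival rate of 10.0 (percent).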
1717 | |
1718 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) { | |
1719 // Too many objects died in the old generation; pretenuring of the wrong | |
1720 // allocation sites may be the cause. We have to deopt all dependent | |
1721 // code registered in the allocation sites to re-evaluate our | |
1722 // pretenuring decisions. | |
1723 ResetAllAllocationSitesDependentCode(TENURED); | |
1724 if (FLAG_trace_pretenuring) { | |
1725 PrintF("Deopt all allocation sites dependent code due to low survival " | |
1726 "rate in the old generation %f\n", old_generation_survival_rate); | |
1727 } | |
1728 } | |
1729 } | |
1730 | |
1731 | |
1732 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { | |
1733 DisallowHeapAllocation no_allocation; | |
1734 // All external strings are listed in the external string table. | |
1735 | |
1736 class ExternalStringTableVisitorAdapter : public ObjectVisitor { | |
1737 public: | |
1738 explicit ExternalStringTableVisitorAdapter( | |
1739 v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {} | |
1740 virtual void VisitPointers(Object** start, Object** end) { | |
1741 for (Object** p = start; p < end; p++) { | |
1742 DCHECK((*p)->IsExternalString()); | |
1743 visitor_->VisitExternalString(Utils::ToLocal( | |
1744 Handle<String>(String::cast(*p)))); | |
1745 } | |
1746 } | |
1747 private: | |
1748 v8::ExternalResourceVisitor* visitor_; | |
1749 } external_string_table_visitor(visitor); | |
1750 | |
1751 external_string_table_.Iterate(&external_string_table_visitor); | |
1752 } | |
1753 | |
1754 | |
1755 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> { | |
1756 public: | |
1757 static inline void VisitPointer(Heap* heap, Object** p) { | |
1758 Object* object = *p; | |
1759 if (!heap->InNewSpace(object)) return; | |
1760 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), | |
1761 reinterpret_cast<HeapObject*>(object)); | |
1762 } | |
1763 }; | |
1764 | |
1765 | |
1766 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, | |
1767 Address new_space_front) { | |
1768 do { | |
1769 SemiSpace::AssertValidRange(new_space_front, new_space_.top()); | |
1770 // The addresses new_space_front and new_space_.top() define a | |
1771 // queue of unprocessed copied objects. Process them until the | |
1772 // queue is empty. | |
1773 while (new_space_front != new_space_.top()) { | |
1774 if (!NewSpacePage::IsAtEnd(new_space_front)) { | |
1775 HeapObject* object = HeapObject::FromAddress(new_space_front); | |
1776 new_space_front += | |
1777 NewSpaceScavenger::IterateBody(object->map(), object); | |
1778 } else { | |
1779 new_space_front = | |
1780 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start(); | |
1781 } | |
1782 } | |
1783 | |
1784 // Promote and process all the to-be-promoted objects. | |
1785 { | |
1786 StoreBufferRebuildScope scope(this, | |
1787 store_buffer(), | |
1788 &ScavengeStoreBufferCallback); | |
1789 while (!promotion_queue()->is_empty()) { | |
1790 HeapObject* target; | |
1791 int size; | |
1792 promotion_queue()->remove(&target, &size); | |
1793 | |
1794 // A promoted object might already be partially visited | |
1795 // during old space pointer iteration. Thus we search specifically | |
1796 // for pointers to the from semispace instead of looking for pointers | |
1797 // to new space. | |
1798 DCHECK(!target->IsMap()); | |
1799 IterateAndMarkPointersToFromSpace(target->address(), | |
1800 target->address() + size, | |
1801 &ScavengeObject); | |
1802 } | |
1803 } | |
1804 | |
1805 // Take another spin if there are now unswept objects in new space | |
1806 // (there are currently no more unswept promoted objects). | |
1807 } while (new_space_front != new_space_.top()); | |
1808 | |
1809 return new_space_front; | |
1810 } | |
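// In outline, the loop above runs to a fixed point (sketch, not real code):
//
//   do {
//     process copied objects in [new_space_front, top());  // may promote more
//     drain the promotion queue, re-scavenging promoted objects;
//   } while (the drained promotions copied new objects into to-space);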
1811 | |
1812 | |
1813 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & | |
1814 kDoubleAlignmentMask) == 0); // NOLINT | |
1815 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & | |
1816 kDoubleAlignmentMask) == 0); // NOLINT | |
1817 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset & | |
1818 kDoubleAlignmentMask) == 0); // NOLINT | |
1819 | |
1820 | |
1821 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, | |
1822 HeapObject* object, | |
1823 int size)); | |
1824 | |
1825 static HeapObject* EnsureDoubleAligned(Heap* heap, | |
1826 HeapObject* object, | |
1827 int size) { | |
1828 if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) { | |
1829 heap->CreateFillerObjectAt(object->address(), kPointerSize); | |
1830 return HeapObject::FromAddress(object->address() + kPointerSize); | |
1831 } else { | |
1832 heap->CreateFillerObjectAt(object->address() + size - kPointerSize, | |
1833 kPointerSize); | |
1834 return object; | |
1835 } | |
1836 } | |
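// Worked example (assuming a 32-bit build where kPointerSize == 4 and doubles
// need 8-byte alignment): callers over-allocate by one word, then
//   - if the object's address is only 4-byte aligned, the first word becomes a
//     one-word filler and the payload starts at the next 8-byte address;
//   - if the address is already 8-byte aligned, the object stays put and the
//     trailing spare word becomes the filler.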
1837 | |
1838 | |
1839 enum LoggingAndProfiling { | |
1840 LOGGING_AND_PROFILING_ENABLED, | |
1841 LOGGING_AND_PROFILING_DISABLED | |
1842 }; | |
1843 | |
1844 | |
1845 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS }; | |
1846 | |
1847 | |
1848 template<MarksHandling marks_handling, | |
1849 LoggingAndProfiling logging_and_profiling_mode> | |
1850 class ScavengingVisitor : public StaticVisitorBase { | |
1851 public: | |
1852 static void Initialize() { | |
1853 table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString); | |
1854 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString); | |
1855 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate); | |
1856 table_.Register(kVisitByteArray, &EvacuateByteArray); | |
1857 table_.Register(kVisitFixedArray, &EvacuateFixedArray); | |
1858 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray); | |
1859 table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray); | |
1860 table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array); | |
1861 | |
1862 table_.Register(kVisitNativeContext, | |
1863 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1864 template VisitSpecialized<Context::kSize>); | |
1865 | |
1866 table_.Register(kVisitConsString, | |
1867 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1868 template VisitSpecialized<ConsString::kSize>); | |
1869 | |
1870 table_.Register(kVisitSlicedString, | |
1871 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1872 template VisitSpecialized<SlicedString::kSize>); | |
1873 | |
1874 table_.Register(kVisitSymbol, | |
1875 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1876 template VisitSpecialized<Symbol::kSize>); | |
1877 | |
1878 table_.Register(kVisitSharedFunctionInfo, | |
1879 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1880 template VisitSpecialized<SharedFunctionInfo::kSize>); | |
1881 | |
1882 table_.Register(kVisitJSWeakCollection, | |
1883 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1884 Visit); | |
1885 | |
1886 table_.Register(kVisitJSArrayBuffer, | |
1887 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1888 Visit); | |
1889 | |
1890 table_.Register(kVisitJSTypedArray, | |
1891 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1892 Visit); | |
1893 | |
1894 table_.Register(kVisitJSDataView, | |
1895 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1896 Visit); | |
1897 | |
1898 table_.Register(kVisitJSRegExp, | |
1899 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1900 Visit); | |
1901 | |
1902 if (marks_handling == IGNORE_MARKS) { | |
1903 table_.Register(kVisitJSFunction, | |
1904 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
1905 template VisitSpecialized<JSFunction::kSize>); | |
1906 } else { | |
1907 table_.Register(kVisitJSFunction, &EvacuateJSFunction); | |
1908 } | |
1909 | |
1910 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, | |
1911 kVisitDataObject, | |
1912 kVisitDataObjectGeneric>(); | |
1913 | |
1914 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, | |
1915 kVisitJSObject, | |
1916 kVisitJSObjectGeneric>(); | |
1917 | |
1918 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, | |
1919 kVisitStruct, | |
1920 kVisitStructGeneric>(); | |
1921 } | |
1922 | |
1923 static VisitorDispatchTable<ScavengingCallback>* GetTable() { | |
1924 return &table_; | |
1925 } | |
1926 | |
1927 private: | |
1928 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT }; | |
1929 | |
1930 static void RecordCopiedObject(Heap* heap, HeapObject* obj) { | |
1931 bool should_record = false; | |
1932 #ifdef DEBUG | |
1933 should_record = FLAG_heap_stats; | |
1934 #endif | |
1935 should_record = should_record || FLAG_log_gc; | |
1936 if (should_record) { | |
1937 if (heap->new_space()->Contains(obj)) { | |
1938 heap->new_space()->RecordAllocation(obj); | |
1939 } else { | |
1940 heap->new_space()->RecordPromotion(obj); | |
1941 } | |
1942 } | |
1943 } | |
1944 | |
1945 // Helper function used by CopyObject to copy a source object to an | |
1946 // allocated target object and update the forwarding pointer in the source | |
1947 // object. Returns the target object. | |
1948 INLINE(static void MigrateObject(Heap* heap, | |
1949 HeapObject* source, | |
1950 HeapObject* target, | |
1951 int size)) { | |
1952 // If we migrate into to-space, then the to-space top pointer should be | |
1953 // right after the target object. Incorporate double alignment | |
1954 // over-allocation. | |
1955 DCHECK(!heap->InToSpace(target) || | |
1956 target->address() + size == heap->new_space()->top() || | |
1957 target->address() + size + kPointerSize == heap->new_space()->top()); | |
1958 | |
1959 // Make sure that we do not overwrite the promotion queue which is at | |
1960 // the end of to-space. | |
1961 DCHECK(!heap->InToSpace(target) || | |
1962 heap->promotion_queue()->IsBelowPromotionQueue( | |
1963 heap->new_space()->top())); | |
1964 | |
1965 // Copy the content of source to target. | |
1966 heap->CopyBlock(target->address(), source->address(), size); | |
1967 | |
1968 // Set the forwarding address. | |
1969 source->set_map_word(MapWord::FromForwardingAddress(target)); | |
1970 | |
1971 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) { | |
1972 // Update NewSpace stats if necessary. | |
1973 RecordCopiedObject(heap, target); | |
1974 heap->OnMoveEvent(target, source, size); | |
1975 } | |
1976 | |
1977 if (marks_handling == TRANSFER_MARKS) { | |
1978 if (Marking::TransferColor(source, target)) { | |
1979 MemoryChunk::IncrementLiveBytesFromGC(target->address(), size); | |
1980 } | |
1981 } | |
1982 } | |
1983 | |
1984 template<int alignment> | |
1985 static inline bool SemiSpaceCopyObject(Map* map, | |
1986 HeapObject** slot, | |
1987 HeapObject* object, | |
1988 int object_size) { | |
1989 Heap* heap = map->GetHeap(); | |
1990 | |
1991 int allocation_size = object_size; | |
1992 if (alignment != kObjectAlignment) { | |
1993 DCHECK(alignment == kDoubleAlignment); | |
1994 allocation_size += kPointerSize; | |
1995 } | |
1996 | |
1997 DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE)); | |
1998 AllocationResult allocation = | |
1999 heap->new_space()->AllocateRaw(allocation_size); | |
2000 | |
2001 HeapObject* target = NULL; // Initialization to please compiler. | |
2002 if (allocation.To(&target)) { | |
2003 if (alignment != kObjectAlignment) { | |
2004 target = EnsureDoubleAligned(heap, target, allocation_size); | |
2005 } | |
2006 | |
2007 // Order is important here: Set the promotion limit before migrating | |
2008 // the object. Otherwise we may end up overwriting promotion queue | |
2009 // entries when we migrate the object. | |
2010 heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); | |
2011 | |
2012 // Order is important: slot might be inside of the target if target | |
2013 // was allocated over a dead object and slot comes from the store | |
2014 // buffer. | |
2015 *slot = target; | |
2016 MigrateObject(heap, object, target, object_size); | |
2017 | |
2018 heap->IncrementSemiSpaceCopiedObjectSize(object_size); | |
2019 return true; | |
2020 } | |
2021 return false; | |
2022 } | |
2023 | |
2024 | |
2025 template<ObjectContents object_contents, int alignment> | |
2026 static inline bool PromoteObject(Map* map, | |
2027 HeapObject** slot, | |
2028 HeapObject* object, | |
2029 int object_size) { | |
2030 Heap* heap = map->GetHeap(); | |
2031 | |
2032 int allocation_size = object_size; | |
2033 if (alignment != kObjectAlignment) { | |
2034 DCHECK(alignment == kDoubleAlignment); | |
2035 allocation_size += kPointerSize; | |
2036 } | |
2037 | |
2038 AllocationResult allocation; | |
2039 if (object_contents == DATA_OBJECT) { | |
2040 DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); | |
2041 allocation = heap->old_data_space()->AllocateRaw(allocation_size); | |
2042 } else { | |
2043 DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); | |
2044 allocation = heap->old_pointer_space()->AllocateRaw(allocation_size); | |
2045 } | |
2046 | |
2047 HeapObject* target = NULL; // Initialization to please compiler. | |
2048 if (allocation.To(&target)) { | |
2049 if (alignment != kObjectAlignment) { | |
2050 target = EnsureDoubleAligned(heap, target, allocation_size); | |
2051 } | |
2052 | |
2053 // Order is important: slot might be inside of the target if target | |
2054 // was allocated over a dead object and slot comes from the store | |
2055 // buffer. | |
2056 *slot = target; | |
2057 MigrateObject(heap, object, target, object_size); | |
2058 | |
2059 if (object_contents == POINTER_OBJECT) { | |
2060 if (map->instance_type() == JS_FUNCTION_TYPE) { | |
2061 heap->promotion_queue()->insert( | |
2062 target, JSFunction::kNonWeakFieldsEndOffset); | |
2063 } else { | |
2064 heap->promotion_queue()->insert(target, object_size); | |
2065 } | |
2066 } | |
2067 heap->IncrementPromotedObjectsSize(object_size); | |
2068 return true; | |
2069 } | |
2070 return false; | |
2071 } | |
2072 | |
2073 | |
2074 template<ObjectContents object_contents, int alignment> | |
2075 static inline void EvacuateObject(Map* map, | |
2076 HeapObject** slot, | |
2077 HeapObject* object, | |
2078 int object_size) { | |
2079 SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); | |
2080 SLOW_DCHECK(object->Size() == object_size); | |
2081 Heap* heap = map->GetHeap(); | |
2082 | |
2083 if (!heap->ShouldBePromoted(object->address(), object_size)) { | |
2084 // A semi-space copy may fail due to fragmentation. In that case, we | |
2085 // try to promote the object. | |
2086 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) { | |
2087 return; | |
2088 } | |
2089 } | |
2090 | |
2091 if (PromoteObject<object_contents, alignment>( | |
2092 map, slot, object, object_size)) { | |
2093 return; | |
2094 } | |
2095 | |
2096 // If promotion failed, we try to copy the object to the other semi-space. | |
2097 if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return; | |
2098 | |
2099 UNREACHABLE(); | |
2100 } | |
2101 | |
2102 | |
2103 static inline void EvacuateJSFunction(Map* map, | |
2104 HeapObject** slot, | |
2105 HeapObject* object) { | |
2106 ObjectEvacuationStrategy<POINTER_OBJECT>:: | |
2107 template VisitSpecialized<JSFunction::kSize>(map, slot, object); | |
2108 | |
2109 HeapObject* target = *slot; | |
2110 MarkBit mark_bit = Marking::MarkBitFrom(target); | |
2111 if (Marking::IsBlack(mark_bit)) { | |
2112 // This object is black and it might not be rescanned by the marker. | |
2113 // We should explicitly record the code entry slot for compaction because | |
2114 // promotion queue processing (IterateAndMarkPointersToFromSpace) will | |
2115 // miss it as it is not HeapObject-tagged. | |
2116 Address code_entry_slot = | |
2117 target->address() + JSFunction::kCodeEntryOffset; | |
2118 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot)); | |
2119 map->GetHeap()->mark_compact_collector()-> | |
2120 RecordCodeEntrySlot(code_entry_slot, code); | |
2121 } | |
2122 } | |
2123 | |
2124 | |
2125 static inline void EvacuateFixedArray(Map* map, | |
2126 HeapObject** slot, | |
2127 HeapObject* object) { | |
2128 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); | |
2129 EvacuateObject<POINTER_OBJECT, kObjectAlignment>( | |
2130 map, slot, object, object_size); | |
2131 } | |
2132 | |
2133 | |
2134 static inline void EvacuateFixedDoubleArray(Map* map, | |
2135 HeapObject** slot, | |
2136 HeapObject* object) { | |
2137 int length = reinterpret_cast<FixedDoubleArray*>(object)->length(); | |
2138 int object_size = FixedDoubleArray::SizeFor(length); | |
2139 EvacuateObject<DATA_OBJECT, kDoubleAlignment>( | |
2140 map, slot, object, object_size); | |
2141 } | |
2142 | |
2143 | |
2144 static inline void EvacuateFixedTypedArray(Map* map, | |
2145 HeapObject** slot, | |
2146 HeapObject* object) { | |
2147 int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size(); | |
2148 EvacuateObject<DATA_OBJECT, kObjectAlignment>( | |
2149 map, slot, object, object_size); | |
2150 } | |
2151 | |
2152 | |
2153 static inline void EvacuateFixedFloat64Array(Map* map, | |
2154 HeapObject** slot, | |
2155 HeapObject* object) { | |
2156 int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size(); | |
2157 EvacuateObject<DATA_OBJECT, kDoubleAlignment>( | |
2158 map, slot, object, object_size); | |
2159 } | |
2160 | |
2161 | |
2162 static inline void EvacuateByteArray(Map* map, | |
2163 HeapObject** slot, | |
2164 HeapObject* object) { | |
2165 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize(); | |
2166 EvacuateObject<DATA_OBJECT, kObjectAlignment>( | |
2167 map, slot, object, object_size); | |
2168 } | |
2169 | |
2170 | |
2171 static inline void EvacuateSeqOneByteString(Map* map, | |
2172 HeapObject** slot, | |
2173 HeapObject* object) { | |
2174 int object_size = SeqOneByteString::cast(object)-> | |
2175 SeqOneByteStringSize(map->instance_type()); | |
2176 EvacuateObject<DATA_OBJECT, kObjectAlignment>( | |
2177 map, slot, object, object_size); | |
2178 } | |
2179 | |
2180 | |
2181 static inline void EvacuateSeqTwoByteString(Map* map, | |
2182 HeapObject** slot, | |
2183 HeapObject* object) { | |
2184 int object_size = SeqTwoByteString::cast(object)-> | |
2185 SeqTwoByteStringSize(map->instance_type()); | |
2186 EvacuateObject<DATA_OBJECT, kObjectAlignment>( | |
2187 map, slot, object, object_size); | |
2188 } | |
2189 | |
2190 | |
2191 static inline void EvacuateShortcutCandidate(Map* map, | |
2192 HeapObject** slot, | |
2193 HeapObject* object) { | |
2194 DCHECK(IsShortcutCandidate(map->instance_type())); | |
2195 | |
2196 Heap* heap = map->GetHeap(); | |
2197 | |
2198 if (marks_handling == IGNORE_MARKS && | |
2199 ConsString::cast(object)->unchecked_second() == | |
2200 heap->empty_string()) { | |
2201 HeapObject* first = | |
2202 HeapObject::cast(ConsString::cast(object)->unchecked_first()); | |
2203 | |
2204 *slot = first; | |
2205 | |
2206 if (!heap->InNewSpace(first)) { | |
2207 object->set_map_word(MapWord::FromForwardingAddress(first)); | |
2208 return; | |
2209 } | |
2210 | |
2211 MapWord first_word = first->map_word(); | |
2212 if (first_word.IsForwardingAddress()) { | |
2213 HeapObject* target = first_word.ToForwardingAddress(); | |
2214 | |
2215 *slot = target; | |
2216 object->set_map_word(MapWord::FromForwardingAddress(target)); | |
2217 return; | |
2218 } | |
2219 | |
2220 heap->DoScavengeObject(first->map(), slot, first); | |
2221 object->set_map_word(MapWord::FromForwardingAddress(*slot)); | |
2222 return; | |
2223 } | |
2224 | |
2225 int object_size = ConsString::kSize; | |
2226 EvacuateObject<POINTER_OBJECT, kObjectAlignment>( | |
2227 map, slot, object, object_size); | |
2228 } | |
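// Example of the shortcut above (illustrative): a cons string whose second
// part is the empty string, e.g. one that has already been flattened, is a
// shortcut candidate. When marks are ignored, the scavenger stores the first
// part directly into the slot and records a forwarding pointer to it, so the
// cons wrapper itself is never copied.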
2229 | |
2230 template<ObjectContents object_contents> | |
2231 class ObjectEvacuationStrategy { | |
2232 public: | |
2233 template<int object_size> | |
2234 static inline void VisitSpecialized(Map* map, | |
2235 HeapObject** slot, | |
2236 HeapObject* object) { | |
2237 EvacuateObject<object_contents, kObjectAlignment>( | |
2238 map, slot, object, object_size); | |
2239 } | |
2240 | |
2241 static inline void Visit(Map* map, | |
2242 HeapObject** slot, | |
2243 HeapObject* object) { | |
2244 int object_size = map->instance_size(); | |
2245 EvacuateObject<object_contents, kObjectAlignment>( | |
2246 map, slot, object, object_size); | |
2247 } | |
2248 }; | |
2249 | |
2250 static VisitorDispatchTable<ScavengingCallback> table_; | |
2251 }; | |
2252 | |
2253 | |
2254 template<MarksHandling marks_handling, | |
2255 LoggingAndProfiling logging_and_profiling_mode> | |
2256 VisitorDispatchTable<ScavengingCallback> | |
2257 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_; | |
2258 | |
2259 | |
2260 static void InitializeScavengingVisitorsTables() { | |
2261 ScavengingVisitor<TRANSFER_MARKS, | |
2262 LOGGING_AND_PROFILING_DISABLED>::Initialize(); | |
2263 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize(); | |
2264 ScavengingVisitor<TRANSFER_MARKS, | |
2265 LOGGING_AND_PROFILING_ENABLED>::Initialize(); | |
2266 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize(); | |
2267 } | |
2268 | |
2269 | |
2270 void Heap::SelectScavengingVisitorsTable() { | |
2271 bool logging_and_profiling = | |
2272 FLAG_verify_predictable || | |
2273 isolate()->logger()->is_logging() || | |
2274 isolate()->cpu_profiler()->is_profiling() || | |
2275 (isolate()->heap_profiler() != NULL && | |
2276 isolate()->heap_profiler()->is_tracking_object_moves()); | |
2277 | |
2278 if (!incremental_marking()->IsMarking()) { | |
2279 if (!logging_and_profiling) { | |
2280 scavenging_visitors_table_.CopyFrom( | |
2281 ScavengingVisitor<IGNORE_MARKS, | |
2282 LOGGING_AND_PROFILING_DISABLED>::GetTable()); | |
2283 } else { | |
2284 scavenging_visitors_table_.CopyFrom( | |
2285 ScavengingVisitor<IGNORE_MARKS, | |
2286 LOGGING_AND_PROFILING_ENABLED>::GetTable()); | |
2287 } | |
2288 } else { | |
2289 if (!logging_and_profiling) { | |
2290 scavenging_visitors_table_.CopyFrom( | |
2291 ScavengingVisitor<TRANSFER_MARKS, | |
2292 LOGGING_AND_PROFILING_DISABLED>::GetTable()); | |
2293 } else { | |
2294 scavenging_visitors_table_.CopyFrom( | |
2295 ScavengingVisitor<TRANSFER_MARKS, | |
2296 LOGGING_AND_PROFILING_ENABLED>::GetTable()); | |
2297 } | |
2298 | |
2299 if (incremental_marking()->IsCompacting()) { | |
2300 // When compacting, forbid short-circuiting of cons strings. | |
2301 // Scavenging code relies on the fact that a new space object | |
2302 // can't be evacuated into an evacuation candidate, but | |
2303 // short-circuiting violates this assumption. | |
2304 scavenging_visitors_table_.Register( | |
2305 StaticVisitorBase::kVisitShortcutCandidate, | |
2306 scavenging_visitors_table_.GetVisitorById( | |
2307 StaticVisitorBase::kVisitConsString)); | |
2308 } | |
2309 } | |
2310 } | |
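// Summary of the selection above (a 2x2 matrix of template instantiations):
//
//                         logging/profiling off       logging/profiling on
//   not marking           IGNORE_MARKS, DISABLED      IGNORE_MARKS, ENABLED
//   incremental marking   TRANSFER_MARKS, DISABLED    TRANSFER_MARKS, ENABLED
//
// In addition, while compacting, the shortcut-candidate visitor is redirected
// to the plain cons-string visitor so that short-circuiting never moves a
// reference into an evacuation candidate.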
2311 | |
2312 | |
2313 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { | |
2314 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object)); | |
2315 MapWord first_word = object->map_word(); | |
2316 SLOW_DCHECK(!first_word.IsForwardingAddress()); | |
2317 Map* map = first_word.ToMap(); | |
2318 map->GetHeap()->DoScavengeObject(map, p, object); | |
2319 } | |
2320 | |
2321 | |
2322 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type, | |
2323 int instance_size) { | |
2324 Object* result; | |
2325 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); | |
2326 if (!allocation.To(&result)) return allocation; | |
2327 | |
2328 // Map::cast cannot be used due to uninitialized map field. | |
2329 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map()); | |
2330 reinterpret_cast<Map*>(result)->set_instance_type(instance_type); | |
2331 reinterpret_cast<Map*>(result)->set_instance_size(instance_size); | |
2332 reinterpret_cast<Map*>(result)->set_visitor_id( | |
2333 StaticVisitorBase::GetVisitorId(instance_type, instance_size)); | |
2334 reinterpret_cast<Map*>(result)->set_inobject_properties(0); | |
2335 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0); | |
2336 reinterpret_cast<Map*>(result)->set_unused_property_fields(0); | |
2337 reinterpret_cast<Map*>(result)->set_bit_field(0); | |
2338 reinterpret_cast<Map*>(result)->set_bit_field2(0); | |
2339 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) | | |
2340 Map::OwnsDescriptors::encode(true); | |
2341 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3); | |
2342 return result; | |
2343 } | |
2344 | |
2345 | |
2346 AllocationResult Heap::AllocateMap(InstanceType instance_type, | |
2347 int instance_size, | |
2348 ElementsKind elements_kind) { | |
2349 HeapObject* result; | |
2350 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); | |
2351 if (!allocation.To(&result)) return allocation; | |
2352 | |
2353 result->set_map_no_write_barrier(meta_map()); | |
2354 Map* map = Map::cast(result); | |
2355 map->set_instance_type(instance_type); | |
2356 map->set_visitor_id( | |
2357 StaticVisitorBase::GetVisitorId(instance_type, instance_size)); | |
2358 map->set_prototype(null_value(), SKIP_WRITE_BARRIER); | |
2359 map->set_constructor(null_value(), SKIP_WRITE_BARRIER); | |
2360 map->set_instance_size(instance_size); | |
2361 map->set_inobject_properties(0); | |
2362 map->set_pre_allocated_property_fields(0); | |
2363 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER); | |
2364 map->set_dependent_code(DependentCode::cast(empty_fixed_array()), | |
2365 SKIP_WRITE_BARRIER); | |
2366 map->init_back_pointer(undefined_value()); | |
2367 map->set_unused_property_fields(0); | |
2368 map->set_instance_descriptors(empty_descriptor_array()); | |
2369 map->set_bit_field(0); | |
2370 map->set_bit_field2(1 << Map::kIsExtensible); | |
2371 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) | | |
2372 Map::OwnsDescriptors::encode(true); | |
2373 map->set_bit_field3(bit_field3); | |
2374 map->set_elements_kind(elements_kind); | |
2375 | |
2376 return map; | |
2377 } | |
2378 | |
2379 | |
2380 AllocationResult Heap::AllocateFillerObject(int size, | |
2381 bool double_align, | |
2382 AllocationSpace space) { | |
2383 HeapObject* obj; | |
2384 { AllocationResult allocation = AllocateRaw(size, space, space); | |
2385 if (!allocation.To(&obj)) return allocation; | |
2386 } | |
2387 #ifdef DEBUG | |
2388 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | |
2389 DCHECK(chunk->owner()->identity() == space); | |
2390 #endif | |
2391 CreateFillerObjectAt(obj->address(), size); | |
2392 return obj; | |
2393 } | |
2394 | |
2395 | |
2396 const Heap::StringTypeTable Heap::string_type_table[] = { | |
2397 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \ | |
2398 {type, size, k##camel_name##MapRootIndex}, | |
2399 STRING_TYPE_LIST(STRING_TYPE_ELEMENT) | |
2400 #undef STRING_TYPE_ELEMENT | |
2401 }; | |
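// For illustration (hypothetical list entry): a STRING_TYPE_LIST row such as
// (STRING_TYPE, kVariableSizeSentinel, string, String) would expand through
// STRING_TYPE_ELEMENT above to
//   {STRING_TYPE, kVariableSizeSentinel, kStringMapRootIndex},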
2402 | |
2403 | |
2404 const Heap::ConstantStringTable Heap::constant_string_table[] = { | |
2405 #define CONSTANT_STRING_ELEMENT(name, contents) \ | |
2406 {contents, k##name##RootIndex}, | |
2407 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT) | |
2408 #undef CONSTANT_STRING_ELEMENT | |
2409 }; | |
2410 | |
2411 | |
2412 const Heap::StructTable Heap::struct_table[] = { | |
2413 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \ | |
2414 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex }, | |
2415 STRUCT_LIST(STRUCT_TABLE_ELEMENT) | |
2416 #undef STRUCT_TABLE_ELEMENT | |
2417 }; | |
2418 | |
2419 | |
2420 bool Heap::CreateInitialMaps() { | |
2421 HeapObject* obj; | |
2422 { AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize); | |
2423 if (!allocation.To(&obj)) return false; | |
2424 } | |
2425 // Map::cast cannot be used due to uninitialized map field. | |
2426 Map* new_meta_map = reinterpret_cast<Map*>(obj); | |
2427 set_meta_map(new_meta_map); | |
2428 new_meta_map->set_map(new_meta_map); | |
2429 | |
2430 { // Partial map allocation | |
2431 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \ | |
2432 { Map* map; \ | |
2433 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \ | |
2434 set_##field_name##_map(map); \ | |
2435 } | |
2436 | |
2437 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array); | |
2438 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined); | |
2439 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null); | |
2440 ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel, | |
2441 constant_pool_array); | |
2442 | |
2443 #undef ALLOCATE_PARTIAL_MAP | |
2444 } | |
2445 | |
2446 // Allocate the empty array. | |
2447 { AllocationResult allocation = AllocateEmptyFixedArray(); | |
2448 if (!allocation.To(&obj)) return false; | |
2449 } | |
2450 set_empty_fixed_array(FixedArray::cast(obj)); | |
2451 | |
2452 { AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE); | |
2453 if (!allocation.To(&obj)) return false; | |
2454 } | |
2455 set_null_value(Oddball::cast(obj)); | |
2456 Oddball::cast(obj)->set_kind(Oddball::kNull); | |
2457 | |
2458 { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE); | |
2459 if (!allocation.To(&obj)) return false; | |
2460 } | |
2461 set_undefined_value(Oddball::cast(obj)); | |
2462 Oddball::cast(obj)->set_kind(Oddball::kUndefined); | |
2463 DCHECK(!InNewSpace(undefined_value())); | |
2464 | |
2465 // Set preliminary exception sentinel value before actually initializing it. | |
2466 set_exception(null_value()); | |
2467 | |
2468 // Allocate the empty descriptor array. | |
2469 { AllocationResult allocation = AllocateEmptyFixedArray(); | |
2470 if (!allocation.To(&obj)) return false; | |
2471 } | |
2472 set_empty_descriptor_array(DescriptorArray::cast(obj)); | |
2473 | |
2474 // Allocate the constant pool array. | |
2475 { AllocationResult allocation = AllocateEmptyConstantPoolArray(); | |
2476 if (!allocation.To(&obj)) return false; | |
2477 } | |
2478 set_empty_constant_pool_array(ConstantPoolArray::cast(obj)); | |
2479 | |
2480 // Fix the instance_descriptors for the existing maps. | |
2481 meta_map()->set_code_cache(empty_fixed_array()); | |
2482 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); | |
2483 meta_map()->init_back_pointer(undefined_value()); | |
2484 meta_map()->set_instance_descriptors(empty_descriptor_array()); | |
2485 | |
2486 fixed_array_map()->set_code_cache(empty_fixed_array()); | |
2487 fixed_array_map()->set_dependent_code( | |
2488 DependentCode::cast(empty_fixed_array())); | |
2489 fixed_array_map()->init_back_pointer(undefined_value()); | |
2490 fixed_array_map()->set_instance_descriptors(empty_descriptor_array()); | |
2491 | |
2492 undefined_map()->set_code_cache(empty_fixed_array()); | |
2493 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); | |
2494 undefined_map()->init_back_pointer(undefined_value()); | |
2495 undefined_map()->set_instance_descriptors(empty_descriptor_array()); | |
2496 | |
2497 null_map()->set_code_cache(empty_fixed_array()); | |
2498 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); | |
2499 null_map()->init_back_pointer(undefined_value()); | |
2500 null_map()->set_instance_descriptors(empty_descriptor_array()); | |
2501 | |
2502 constant_pool_array_map()->set_code_cache(empty_fixed_array()); | |
2503 constant_pool_array_map()->set_dependent_code( | |
2504 DependentCode::cast(empty_fixed_array())); | |
2505 constant_pool_array_map()->init_back_pointer(undefined_value()); | |
2506 constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array()); | |
2507 | |
2508 // Fix prototype object for existing maps. | |
2509 meta_map()->set_prototype(null_value()); | |
2510 meta_map()->set_constructor(null_value()); | |
2511 | |
2512 fixed_array_map()->set_prototype(null_value()); | |
2513 fixed_array_map()->set_constructor(null_value()); | |
2514 | |
2515 undefined_map()->set_prototype(null_value()); | |
2516 undefined_map()->set_constructor(null_value()); | |
2517 | |
2518 null_map()->set_prototype(null_value()); | |
2519 null_map()->set_constructor(null_value()); | |
2520 | |
2521 constant_pool_array_map()->set_prototype(null_value()); | |
2522 constant_pool_array_map()->set_constructor(null_value()); | |
2523 | |
2524 { // Map allocation | |
2525 #define ALLOCATE_MAP(instance_type, size, field_name) \ | |
2526 { Map* map; \ | |
2527 if (!AllocateMap((instance_type), size).To(&map)) return false; \ | |
2528 set_##field_name##_map(map); \ | |
2529 } | |
2530 | |
2531 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \ | |
2532 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name) | |
2533 | |
2534 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array) | |
2535 DCHECK(fixed_array_map() != fixed_cow_array_map()); | |
2536 | |
2537 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info) | |
2538 ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number) | |
2539 ALLOCATE_MAP( | |
2540 MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize, mutable_heap_number) | |
2541 ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol) | |
2542 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign) | |
2543 | |
2544 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole); | |
2545 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean); | |
2546 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized); | |
2547 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker); | |
2548 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel); | |
2549 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception); | |
2550 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception); | |
2551 | |
2552 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) { | |
2553 const StringTypeTable& entry = string_type_table[i]; | |
2554 { AllocationResult allocation = AllocateMap(entry.type, entry.size); | |
2555 if (!allocation.To(&obj)) return false; | |
2556 } | |
2557 // Mark cons string maps as unstable, because their objects can change | |
2558 // maps during GC. | |
2559 Map* map = Map::cast(obj); | |
2560 if (StringShape(entry.type).IsCons()) map->mark_unstable(); | |
2561 roots_[entry.index] = map; | |
2562 } | |
2563 | |
2564 ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string) | |
2565 undetectable_string_map()->set_is_undetectable(); | |
2566 | |
2567 ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string); | |
2568 undetectable_ascii_string_map()->set_is_undetectable(); | |
2569 | |
2570 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array) | |
2571 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array) | |
2572 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space) | |
2573 | |
2574 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \ | |
2575 ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \ | |
2576 external_##type##_array) | |
2577 | |
2578 TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP) | |
2579 #undef ALLOCATE_EXTERNAL_ARRAY_MAP | |
2580 | |
2581 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \ | |
2582 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, \ | |
2583 fixed_##type##_array) | |
2584 | |
2585 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP) | |
2586 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP | |
2587 | |
2588 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements) | |
2589 | |
2590 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code) | |
2591 | |
2592 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell) | |
2593 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell) | |
2594 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler) | |
2595 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler) | |
2596 | |
2597 | |
2598 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) { | |
2599 const StructTable& entry = struct_table[i]; | |
2600 Map* map; | |
2601 if (!AllocateMap(entry.type, entry.size).To(&map)) | |
2602 return false; | |
2603 roots_[entry.index] = map; | |
2604 } | |
2605 | |
2606 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table) | |
2607 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table) | |
2608 | |
2609 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context) | |
2610 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context) | |
2611 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context) | |
2612 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context) | |
2613 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context) | |
2614 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context) | |
2615 | |
2616 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context) | |
2617 native_context_map()->set_dictionary_map(true); | |
2618 native_context_map()->set_visitor_id( | |
2619 StaticVisitorBase::kVisitNativeContext); | |
2620 | |
2621 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize, | |
2622 shared_function_info) | |
2623 | |
2624 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, | |
2625 message_object) | |
2626 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, | |
2627 external) | |
2628 external_map()->set_is_extensible(false); | |
2629 #undef ALLOCATE_VARSIZE_MAP | |
2630 #undef ALLOCATE_MAP | |
2631 } | |
2632 | |
2633 { // Empty arrays | |
2634 { ByteArray* byte_array; | |
2635 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false; | |
2636 set_empty_byte_array(byte_array); | |
2637 } | |
2638 | |
2639 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \ | |
2640 { ExternalArray* obj; \ | |
2641 if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \ | |
2642 return false; \ | |
2643 set_empty_external_##type##_array(obj); \ | |
2644 } | |
2645 | |
2646 TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY) | |
2647 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY | |
2648 | |
2649 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \ | |
2650 { FixedTypedArrayBase* obj; \ | |
2651 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \ | |
2652 return false; \ | |
2653 set_empty_fixed_##type##_array(obj); \ | |
2654 } | |
2655 | |
2656 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY) | |
2657 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY | |
2658 } | |
2659 DCHECK(!InNewSpace(empty_fixed_array())); | |
2660 return true; | |
2661 } | |
2662 | |
2663 | |
2664 AllocationResult Heap::AllocateHeapNumber(double value, | |
2665 MutableMode mode, | |
2666 PretenureFlag pretenure) { | |
2667 // Statically ensure that it is safe to allocate heap numbers in paged | |
2668 // spaces. | |
2669 int size = HeapNumber::kSize; | |
2670 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize); | |
2671 | |
2672 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); | |
2673 | |
2674 HeapObject* result; | |
2675 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | |
2676 if (!allocation.To(&result)) return allocation; | |
2677 } | |
2678 | |
2679 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map(); | |
2680 HeapObject::cast(result)->set_map_no_write_barrier(map); | |
2681 HeapNumber::cast(result)->set_value(value); | |
2682 return result; | |
2683 } | |
2684 | |
2685 | |
2686 AllocationResult Heap::AllocateCell(Object* value) { | |
2687 int size = Cell::kSize; | |
2688 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize); | |
2689 | |
2690 HeapObject* result; | |
2691 { AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE); | |
2692 if (!allocation.To(&result)) return allocation; | |
2693 } | |
2694 result->set_map_no_write_barrier(cell_map()); | |
2695 Cell::cast(result)->set_value(value); | |
2696 return result; | |
2697 } | |
2698 | |
2699 | |
2700 AllocationResult Heap::AllocatePropertyCell() { | |
2701 int size = PropertyCell::kSize; | |
2702 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize); | |
2703 | |
2704 HeapObject* result; | |
2705 AllocationResult allocation = | |
2706 AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE); | |
2707 if (!allocation.To(&result)) return allocation; | |
2708 | |
2709 result->set_map_no_write_barrier(global_property_cell_map()); | |
2710 PropertyCell* cell = PropertyCell::cast(result); | |
2711 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()), | |
2712 SKIP_WRITE_BARRIER); | |
2713 cell->set_value(the_hole_value()); | |
2714 cell->set_type(HeapType::None()); | |
2715 return result; | |
2716 } | |
2717 | |
2718 | |
2719 void Heap::CreateApiObjects() { | |
2720 HandleScope scope(isolate()); | |
2721 Factory* factory = isolate()->factory(); | |
2722 Handle<Map> new_neander_map = | |
2723 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); | |
2724 | |
2725 // Don't use Smi-only elements optimizations for objects with the neander | |
2726 // map. There are too many cases where element values are set directly with a | |
2727 // bottleneck to trap the Smi-only -> fast elements transition, and there | |
2728 // appears to be no benefit in optimizing this case. | |
2729 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND); | |
2730 set_neander_map(*new_neander_map); | |
2731 | |
2732 Handle<JSObject> listeners = factory->NewNeanderObject(); | |
2733 Handle<FixedArray> elements = factory->NewFixedArray(2); | |
2734 elements->set(0, Smi::FromInt(0)); | |
2735 listeners->set_elements(*elements); | |
2736 set_message_listeners(*listeners); | |
2737 } | |
2738 | |
2739 | |
2740 void Heap::CreateJSEntryStub() { | |
2741 JSEntryStub stub(isolate()); | |
2742 set_js_entry_code(*stub.GetCode()); | |
2743 } | |
2744 | |
2745 | |
2746 void Heap::CreateJSConstructEntryStub() { | |
2747 JSConstructEntryStub stub(isolate()); | |
2748 set_js_construct_entry_code(*stub.GetCode()); | |
2749 } | |
2750 | |
2751 | |
2752 void Heap::CreateFixedStubs() { | |
2753 // Here we create roots for fixed stubs. They are needed at GC | |
2754 // for cooking and uncooking (check out frames.cc). | |
2755 // This eliminates the need to do a dictionary lookup in the | |
2756 // stub cache for these stubs. | |
2757 HandleScope scope(isolate()); | |
2758 | |
2759 // Create stubs that should be there, so we don't unexpectedly have to | |
2760 // create them if we need them during the creation of another stub. | |
2761 // Stub creation mixes raw pointers and handles in an unsafe manner so | |
2762 // we cannot create stubs while we are creating stubs. | |
2763 CodeStub::GenerateStubsAheadOfTime(isolate()); | |
2764 | |
2765 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on | |
2766 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub | |
2767 // is created. | |
2768 | |
2769 // gcc-4.4 has a problem generating correct code for the following snippet: | |
2770 // { JSEntryStub stub; | |
2771 // js_entry_code_ = *stub.GetCode(); | |
2772 // } | |
2773 // { JSConstructEntryStub stub; | |
2774 // js_construct_entry_code_ = *stub.GetCode(); | |
2775 // } | |
2776 // To work around the problem, make separate functions without inlining. | |
2777 Heap::CreateJSEntryStub(); | |
2778 Heap::CreateJSConstructEntryStub(); | |
2779 } | |
2780 | |
2781 | |
2782 void Heap::CreateInitialObjects() { | |
2783 HandleScope scope(isolate()); | |
2784 Factory* factory = isolate()->factory(); | |
2785 | |
2786 // The -0 value must be set before NewNumber works. | |
2787 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED)); | |
2788 DCHECK(std::signbit(minus_zero_value()->Number()) != 0); | |
2789 | |
2790 set_nan_value( | |
2791 *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED)); | |
2792 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED)); | |
2793 | |
2794 // The hole has not been created yet, but we want to put something | |
2795 // predictable in the gaps in the string table, so let's make that Smi zero. | |
2796 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0))); | |
2797 | |
2798 // Allocate initial string table. | |
2799 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize)); | |
2800 | |
2801 // Finish initializing oddballs after creating the string table. | |
2802 Oddball::Initialize(isolate(), | |
2803 factory->undefined_value(), | |
2804 "undefined", | |
2805 factory->nan_value(), | |
2806 Oddball::kUndefined); | |
2807 | |
2808 // Initialize the null_value. | |
2809 Oddball::Initialize(isolate(), | |
2810 factory->null_value(), | |
2811 "null", | |
2812 handle(Smi::FromInt(0), isolate()), | |
2813 Oddball::kNull); | |
2814 | |
2815 set_true_value(*factory->NewOddball(factory->boolean_map(), | |
2816 "true", | |
2817 handle(Smi::FromInt(1), isolate()), | |
2818 Oddball::kTrue)); | |
2819 | |
2820 set_false_value(*factory->NewOddball(factory->boolean_map(), | |
2821 "false", | |
2822 handle(Smi::FromInt(0), isolate()), | |
2823 Oddball::kFalse)); | |
2824 | |
2825 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), | |
2826 "hole", | |
2827 handle(Smi::FromInt(-1), isolate()), | |
2828 Oddball::kTheHole)); | |
2829 | |
2830 set_uninitialized_value( | |
2831 *factory->NewOddball(factory->uninitialized_map(), | |
2832 "uninitialized", | |
2833 handle(Smi::FromInt(-1), isolate()), | |
2834 Oddball::kUninitialized)); | |
2835 | |
2836 set_arguments_marker(*factory->NewOddball(factory->arguments_marker_map(), | |
2837 "arguments_marker", | |
2838 handle(Smi::FromInt(-4), isolate()), | |
2839 Oddball::kArgumentMarker)); | |
2840 | |
2841 set_no_interceptor_result_sentinel( | |
2842 *factory->NewOddball(factory->no_interceptor_result_sentinel_map(), | |
2843 "no_interceptor_result_sentinel", | |
2844 handle(Smi::FromInt(-2), isolate()), | |
2845 Oddball::kOther)); | |
2846 | |
2847 set_termination_exception( | |
2848 *factory->NewOddball(factory->termination_exception_map(), | |
2849 "termination_exception", | |
2850 handle(Smi::FromInt(-3), isolate()), | |
2851 Oddball::kOther)); | |
2852 | |
2853 set_exception( | |
2854 *factory->NewOddball(factory->exception_map(), | |
2855 "exception", | |
2856 handle(Smi::FromInt(-5), isolate()), | |
2857 Oddball::kException)); | |
2858 | |
2859 for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) { | |
2860 Handle<String> str = | |
2861 factory->InternalizeUtf8String(constant_string_table[i].contents); | |
2862 roots_[constant_string_table[i].index] = *str; | |
2863 } | |
2864 | |
2865 // Allocate the hidden string which is used to identify the hidden properties | |
2866 // in JSObjects. The hash code has a special value so that it will not match | |
2867 // the empty string when searching for the property. It cannot be part of the | |
2868 // loop above because it needs to be allocated manually with the special | |
2869 // hash code in place. The hash code for the hidden_string is zero to ensure | |
2870 // that it will always be at the first entry in property descriptors. | |
2871 hidden_string_ = *factory->NewOneByteInternalizedString( | |
2872 OneByteVector("", 0), String::kEmptyStringHash); | |
2873 | |
2874 // Create the code_stubs dictionary. The initial size is set to avoid | |
2875 // expanding the dictionary during bootstrapping. | |
2876 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128)); | |
2877 | |
2878 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size | |
2879 // is set to avoid expanding the dictionary during bootstrapping. | |
2880 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64)); | |
2881 | |
2882 set_polymorphic_code_cache(PolymorphicCodeCache::cast( | |
2883 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE))); | |
2884 | |
2885 set_instanceof_cache_function(Smi::FromInt(0)); | |
2886 set_instanceof_cache_map(Smi::FromInt(0)); | |
2887 set_instanceof_cache_answer(Smi::FromInt(0)); | |
2888 | |
2889 CreateFixedStubs(); | |
2890 | |
2891 // Allocate the dictionary of intrinsic function names. | |
2892 Handle<NameDictionary> intrinsic_names = | |
2893 NameDictionary::New(isolate(), Runtime::kNumFunctions); | |
2894 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names); | |
2895 set_intrinsic_function_names(*intrinsic_names); | |
2896 | |
2897 set_number_string_cache(*factory->NewFixedArray( | |
2898 kInitialNumberStringCacheSize * 2, TENURED)); | |
2899 | |
2900 // Allocate cache for single character one byte strings. | |
2901 set_single_character_string_cache(*factory->NewFixedArray( | |
2902 String::kMaxOneByteCharCode + 1, TENURED)); | |
2903 | |
2904 // Allocate cache for string split and regexp-multiple. | |
2905 set_string_split_cache(*factory->NewFixedArray( | |
2906 RegExpResultsCache::kRegExpResultsCacheSize, TENURED)); | |
2907 set_regexp_multiple_cache(*factory->NewFixedArray( | |
2908 RegExpResultsCache::kRegExpResultsCacheSize, TENURED)); | |
2909 | |
2910 // Allocate cache for external strings pointing to native source code. | |
2911 set_natives_source_cache(*factory->NewFixedArray( | |
2912 Natives::GetBuiltinsCount())); | |
2913 | |
2914 set_undefined_cell(*factory->NewCell(factory->undefined_value())); | |
2915 | |
2916 // The symbol registry is initialized lazily. | |
2917 set_symbol_registry(undefined_value()); | |
2918 | |
2919 // Allocate object to hold object observation state. | |
2920 set_observation_state(*factory->NewJSObjectFromMap( | |
2921 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize))); | |
2922 | |
2923 // Microtask queue uses the empty fixed array as a sentinel for "empty". | |
2924 // Number of queued microtasks stored in Isolate::pending_microtask_count(). | |
2925 set_microtask_queue(empty_fixed_array()); | |
2926 | |
2927 set_detailed_stack_trace_symbol(*factory->NewPrivateSymbol()); | |
2928 set_elements_transition_symbol(*factory->NewPrivateSymbol()); | |
2929 set_frozen_symbol(*factory->NewPrivateSymbol()); | |
2930 set_megamorphic_symbol(*factory->NewPrivateSymbol()); | |
2931 set_nonexistent_symbol(*factory->NewPrivateSymbol()); | |
2932 set_normal_ic_symbol(*factory->NewPrivateSymbol()); | |
2933 set_observed_symbol(*factory->NewPrivateSymbol()); | |
2934 set_stack_trace_symbol(*factory->NewPrivateSymbol()); | |
2935 set_uninitialized_symbol(*factory->NewPrivateSymbol()); | |
2936 | |
2937 Handle<SeededNumberDictionary> slow_element_dictionary = | |
2938 SeededNumberDictionary::New(isolate(), 0, TENURED); | |
2939 slow_element_dictionary->set_requires_slow_elements(); | |
2940 set_empty_slow_element_dictionary(*slow_element_dictionary); | |
2941 | |
2942 set_materialized_objects(*factory->NewFixedArray(0, TENURED)); | |
2943 | |
2944 // Handling of script id generation is in Factory::NewScript. | |
2945 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId)); | |
2946 | |
2947 set_allocation_sites_scratchpad(*factory->NewFixedArray( | |
2948 kAllocationSiteScratchpadSize, TENURED)); | |
2949 InitializeAllocationSitesScratchpad(); | |
2950 | |
2951 // Initialize keyed lookup cache. | |
2952 isolate_->keyed_lookup_cache()->Clear(); | |
2953 | |
2954 // Initialize context slot cache. | |
2955 isolate_->context_slot_cache()->Clear(); | |
2956 | |
2957 // Initialize descriptor cache. | |
2958 isolate_->descriptor_lookup_cache()->Clear(); | |
2959 | |
2960 // Initialize compilation cache. | |
2961 isolate_->compilation_cache()->Clear(); | |
2962 } | |
2963 | |
2964 | |
2965 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) { | |
2966 RootListIndex writable_roots[] = { | |
2967 kStoreBufferTopRootIndex, | |
2968 kStackLimitRootIndex, | |
2969 kNumberStringCacheRootIndex, | |
2970 kInstanceofCacheFunctionRootIndex, | |
2971 kInstanceofCacheMapRootIndex, | |
2972 kInstanceofCacheAnswerRootIndex, | |
2973 kCodeStubsRootIndex, | |
2974 kNonMonomorphicCacheRootIndex, | |
2975 kPolymorphicCodeCacheRootIndex, | |
2976 kLastScriptIdRootIndex, | |
2977 kEmptyScriptRootIndex, | |
2978 kRealStackLimitRootIndex, | |
2979 kArgumentsAdaptorDeoptPCOffsetRootIndex, | |
2980 kConstructStubDeoptPCOffsetRootIndex, | |
2981 kGetterStubDeoptPCOffsetRootIndex, | |
2982 kSetterStubDeoptPCOffsetRootIndex, | |
2983 kStringTableRootIndex, | |
2984 }; | |
2985 | |
2986 for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) { | |
2987 if (root_index == writable_roots[i]) | |
2988 return true; | |
2989 } | |
2990 return false; | |
2991 } | |
2992 | |
2993 | |
2994 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) { | |
2995 return !RootCanBeWrittenAfterInitialization(root_index) && | |
2996 !InNewSpace(roots_array_start()[root_index]); | |
2997 } | |
2998 | |
2999 | |
3000 Object* RegExpResultsCache::Lookup(Heap* heap, | |
3001 String* key_string, | |
3002 Object* key_pattern, | |
3003 ResultsCacheType type) { | |
3004 FixedArray* cache; | |
3005 if (!key_string->IsInternalizedString()) return Smi::FromInt(0); | |
3006 if (type == STRING_SPLIT_SUBSTRINGS) { | |
3007 DCHECK(key_pattern->IsString()); | |
3008 if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0); | |
3009 cache = heap->string_split_cache(); | |
3010 } else { | |
3011 DCHECK(type == REGEXP_MULTIPLE_INDICES); | |
3012 DCHECK(key_pattern->IsFixedArray()); | |
3013 cache = heap->regexp_multiple_cache(); | |
3014 } | |
3015 | |
3016 uint32_t hash = key_string->Hash(); | |
3017 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & | |
3018 ~(kArrayEntriesPerCacheEntry - 1)); | |
3019 if (cache->get(index + kStringOffset) == key_string && | |
3020 cache->get(index + kPatternOffset) == key_pattern) { | |
3021 return cache->get(index + kArrayOffset); | |
3022 } | |
3023 index = | |
3024 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1)); | |
3025 if (cache->get(index + kStringOffset) == key_string && | |
3026 cache->get(index + kPatternOffset) == key_pattern) { | |
3027 return cache->get(index + kArrayOffset); | |
3028 } | |
3029 return Smi::FromInt(0); | |
3030 } | |
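// A worked sketch of the bucket scheme above, assuming the illustrative
// values kRegExpResultsCacheSize = 0x100 and kArrayEntriesPerCacheEntry = 4:
// a key with hash 0x2B7 first probes bucket (0x2B7 & 0xFF) & ~0x3 = 0xB4,
// i.e. the slots [0xB4..0xB7] holding {string, pattern, array, unused}; on a
// miss it probes exactly one more bucket, (0xB4 + 4) & 0xFF = 0xB8, before
// giving up and returning Smi::FromInt(0) (the "not cached" sentinel).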
3031 | |
3032 | |
3033 void RegExpResultsCache::Enter(Isolate* isolate, | |
3034 Handle<String> key_string, | |
3035 Handle<Object> key_pattern, | |
3036 Handle<FixedArray> value_array, | |
3037 ResultsCacheType type) { | |
3038 Factory* factory = isolate->factory(); | |
3039 Handle<FixedArray> cache; | |
3040 if (!key_string->IsInternalizedString()) return; | |
3041 if (type == STRING_SPLIT_SUBSTRINGS) { | |
3042 DCHECK(key_pattern->IsString()); | |
3043 if (!key_pattern->IsInternalizedString()) return; | |
3044 cache = factory->string_split_cache(); | |
3045 } else { | |
3046 DCHECK(type == REGEXP_MULTIPLE_INDICES); | |
3047 DCHECK(key_pattern->IsFixedArray()); | |
3048 cache = factory->regexp_multiple_cache(); | |
3049 } | |
3050 | |
3051 uint32_t hash = key_string->Hash(); | |
3052 uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) & | |
3053 ~(kArrayEntriesPerCacheEntry - 1)); | |
3054 if (cache->get(index + kStringOffset) == Smi::FromInt(0)) { | |
3055 cache->set(index + kStringOffset, *key_string); | |
3056 cache->set(index + kPatternOffset, *key_pattern); | |
3057 cache->set(index + kArrayOffset, *value_array); | |
3058 } else { | |
3059 uint32_t index2 = | |
3060 ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1)); | |
3061 if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) { | |
3062 cache->set(index2 + kStringOffset, *key_string); | |
3063 cache->set(index2 + kPatternOffset, *key_pattern); | |
3064 cache->set(index2 + kArrayOffset, *value_array); | |
3065 } else { | |
3066 cache->set(index2 + kStringOffset, Smi::FromInt(0)); | |
3067 cache->set(index2 + kPatternOffset, Smi::FromInt(0)); | |
3068 cache->set(index2 + kArrayOffset, Smi::FromInt(0)); | |
3069 cache->set(index + kStringOffset, *key_string); | |
3070 cache->set(index + kPatternOffset, *key_pattern); | |
3071 cache->set(index + kArrayOffset, *value_array); | |
3072 } | |
3073 } | |
3074 // If the array is a reasonably short list of substrings, convert it into a | |
3075 // list of internalized strings. | |
3076 if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) { | |
3077 for (int i = 0; i < value_array->length(); i++) { | |
3078 Handle<String> str(String::cast(value_array->get(i)), isolate); | |
3079 Handle<String> internalized_str = factory->InternalizeString(str); | |
3080 value_array->set(i, *internalized_str); | |
3081 } | |
3082 } | |
3083 // Convert backing store to a copy-on-write array. | |
3084 value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map()); | |
3085 } | |
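// Eviction in Enter() mirrors the two-probe Lookup() above: if the primary
// bucket is occupied the secondary bucket is tried, and if that is occupied
// too the secondary bucket is cleared and the new entry simply overwrites
// the primary one, so insertion never degenerates into a longer probe chain.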
3086 | |
3087 | |
3088 void RegExpResultsCache::Clear(FixedArray* cache) { | |
3089 for (int i = 0; i < kRegExpResultsCacheSize; i++) { | |
3090 cache->set(i, Smi::FromInt(0)); | |
3091 } | |
3092 } | |
3093 | |
3094 | |
3095 int Heap::FullSizeNumberStringCacheLength() { | |
3096 // Compute the size of the number string cache based on the max new space size. | |
3097 // The number string cache has a minimum size based on twice the initial cache | |
3098 // size to ensure that it is bigger after being made 'full size'. | |
3099 int number_string_cache_size = max_semi_space_size_ / 512; | |
3100 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2, | |
3101 Min(0x4000, number_string_cache_size)); | |
3102 // There is a string and a number per entry so the length is twice the number | |
3103 // of entries. | |
3104 return number_string_cache_size * 2; | |
3105 } | |
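// For illustration, with a hypothetical max_semi_space_size_ of 8 MB the
// formula above gives 8 MB / 512 = 16384 entries; the clamp keeps that value
// within [kInitialNumberStringCacheSize * 2, 0x4000], and the returned
// length is 2 * 16384 = 32768 slots because each entry stores a number and
// its string.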
3106 | |
3107 | |
3108 void Heap::FlushNumberStringCache() { | |
3109 // Flush the number to string cache. | |
3110 int len = number_string_cache()->length(); | |
3111 for (int i = 0; i < len; i++) { | |
3112 number_string_cache()->set_undefined(i); | |
3113 } | |
3114 } | |
3115 | |
3116 | |
3117 void Heap::FlushAllocationSitesScratchpad() { | |
3118 for (int i = 0; i < allocation_sites_scratchpad_length_; i++) { | |
3119 allocation_sites_scratchpad()->set_undefined(i); | |
3120 } | |
3121 allocation_sites_scratchpad_length_ = 0; | |
3122 } | |
3123 | |
3124 | |
3125 void Heap::InitializeAllocationSitesScratchpad() { | |
3126 DCHECK(allocation_sites_scratchpad()->length() == | |
3127 kAllocationSiteScratchpadSize); | |
3128 for (int i = 0; i < kAllocationSiteScratchpadSize; i++) { | |
3129 allocation_sites_scratchpad()->set_undefined(i); | |
3130 } | |
3131 } | |
3132 | |
3133 | |
3134 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site, | |
3135 ScratchpadSlotMode mode) { | |
3136 if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) { | |
3137 // We cannot use the normal write-barrier because slots need to be | |
3138 // recorded with non-incremental marking as well. We have to explicitly | |
3139 // record the slot to take evacuation candidates into account. | |
3140 allocation_sites_scratchpad()->set( | |
3141 allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER); | |
3142 Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt( | |
3143 allocation_sites_scratchpad_length_); | |
3144 | |
3145 if (mode == RECORD_SCRATCHPAD_SLOT) { | |
3146 // We need to allow slots buffer overflow here since the evacuation | |
3147 // candidates are not part of the global list of old space pages and | |
3148 // releasing an evacuation candidate due to a slots buffer overflow | |
3149 // results in lost pages. | |
3150 mark_compact_collector()->RecordSlot( | |
3151 slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW); | |
3152 } | |
3153 allocation_sites_scratchpad_length_++; | |
3154 } | |
3155 } | |
3156 | |
3157 | |
3158 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) { | |
3159 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]); | |
3160 } | |
3161 | |
3162 | |
3163 Heap::RootListIndex Heap::RootIndexForExternalArrayType( | |
3164 ExternalArrayType array_type) { | |
3165 switch (array_type) { | |
3166 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ | |
3167 case kExternal##Type##Array: \ | |
3168 return kExternal##Type##ArrayMapRootIndex; | |
3169 | |
3170 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX) | |
3171 #undef ARRAY_TYPE_TO_ROOT_INDEX | |
3172 | |
3173 default: | |
3174 UNREACHABLE(); | |
3175 return kUndefinedValueRootIndex; | |
3176 } | |
3177 } | |
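// The TYPED_ARRAYS macro above expands to one case per typed-array kind; a
// single expansion looks roughly like
//   case kExternalUint8Array: return kExternalUint8ArrayMapRootIndex;
// so adding a new entry to TYPED_ARRAYS automatically extends this switch.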
3178 | |
3179 | |
3180 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) { | |
3181 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]); | |
3182 } | |
3183 | |
3184 | |
3185 Heap::RootListIndex Heap::RootIndexForFixedTypedArray( | |
3186 ExternalArrayType array_type) { | |
3187 switch (array_type) { | |
3188 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ | |
3189 case kExternal##Type##Array: \ | |
3190 return kFixed##Type##ArrayMapRootIndex; | |
3191 | |
3192 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX) | |
3193 #undef ARRAY_TYPE_TO_ROOT_INDEX | |
3194 | |
3195 default: | |
3196 UNREACHABLE(); | |
3197 return kUndefinedValueRootIndex; | |
3198 } | |
3199 } | |
3200 | |
3201 | |
3202 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray( | |
3203 ElementsKind elementsKind) { | |
3204 switch (elementsKind) { | |
3205 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ | |
3206 case EXTERNAL_##TYPE##_ELEMENTS: \ | |
3207 return kEmptyExternal##Type##ArrayRootIndex; | |
3208 | |
3209 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX) | |
3210 #undef ELEMENT_KIND_TO_ROOT_INDEX | |
3211 | |
3212 default: | |
3213 UNREACHABLE(); | |
3214 return kUndefinedValueRootIndex; | |
3215 } | |
3216 } | |
3217 | |
3218 | |
3219 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray( | |
3220 ElementsKind elementsKind) { | |
3221 switch (elementsKind) { | |
3222 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ | |
3223 case TYPE##_ELEMENTS: \ | |
3224 return kEmptyFixed##Type##ArrayRootIndex; | |
3225 | |
3226 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX) | |
3227 #undef ELEMENT_KIND_TO_ROOT_INDEX | |
3228 default: | |
3229 UNREACHABLE(); | |
3230 return kUndefinedValueRootIndex; | |
3231 } | |
3232 } | |
3233 | |
3234 | |
3235 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) { | |
3236 return ExternalArray::cast( | |
3237 roots_[RootIndexForEmptyExternalArray(map->elements_kind())]); | |
3238 } | |
3239 | |
3240 | |
3241 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) { | |
3242 return FixedTypedArrayBase::cast( | |
3243 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]); | |
3244 } | |
3245 | |
3246 | |
3247 AllocationResult Heap::AllocateForeign(Address address, | |
3248 PretenureFlag pretenure) { | |
3249 // Statically ensure that it is safe to allocate foreigns in paged spaces. | |
3250 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize); | |
3251 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; | |
3252 Foreign* result; | |
3253 AllocationResult allocation = Allocate(foreign_map(), space); | |
3254 if (!allocation.To(&result)) return allocation; | |
3255 result->set_foreign_address(address); | |
3256 return result; | |
3257 } | |
3258 | |
3259 | |
3260 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) { | |
3261 if (length < 0 || length > ByteArray::kMaxLength) { | |
3262 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); | |
3263 } | |
3264 int size = ByteArray::SizeFor(length); | |
3265 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); | |
3266 HeapObject* result; | |
3267 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | |
3268 if (!allocation.To(&result)) return allocation; | |
3269 } | |
3270 | |
3271 result->set_map_no_write_barrier(byte_array_map()); | |
3272 ByteArray::cast(result)->set_length(length); | |
3273 return result; | |
3274 } | |
3275 | |
3276 | |
3277 void Heap::CreateFillerObjectAt(Address addr, int size) { | |
3278 if (size == 0) return; | |
3279 HeapObject* filler = HeapObject::FromAddress(addr); | |
3280 if (size == kPointerSize) { | |
3281 filler->set_map_no_write_barrier(one_pointer_filler_map()); | |
3282 } else if (size == 2 * kPointerSize) { | |
3283 filler->set_map_no_write_barrier(two_pointer_filler_map()); | |
3284 } else { | |
3285 filler->set_map_no_write_barrier(free_space_map()); | |
3286 FreeSpace::cast(filler)->set_size(size); | |
3287 } | |
3288 } | |
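// For example, CreateFillerObjectAt(addr, 2 * kPointerSize) stamps a
// two_pointer_filler map at addr, while any larger gap becomes a FreeSpace
// object whose size field lets the sweeper skip over the hole.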
3289 | |
3290 | |
3291 bool Heap::CanMoveObjectStart(HeapObject* object) { | |
3292 Address address = object->address(); | |
3293 bool is_in_old_pointer_space = InOldPointerSpace(address); | |
3294 bool is_in_old_data_space = InOldDataSpace(address); | |
3295 | |
3296 if (lo_space()->Contains(object)) return false; | |
3297 | |
3298 Page* page = Page::FromAddress(address); | |
3299 // We can move the object start if at least one of the following holds: | |
3300 // (1) the object is not in old pointer or old data space, | |
3301 // (2) the page of the object was already swept, or | |
3302 // (3) the page was already concurrently swept. This case is an optimization | |
3303 // for concurrent sweeping. The WasSwept predicate for concurrently swept | |
3304 // pages is set after sweeping all pages. | |
3305 return (!is_in_old_pointer_space && !is_in_old_data_space) || | |
3306 page->WasSwept() || page->SweepingCompleted(); | |
3307 } | |
3308 | |
3309 | |
3310 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) { | |
3311 if (incremental_marking()->IsMarking() && | |
3312 Marking::IsBlack(Marking::MarkBitFrom(address))) { | |
3313 if (mode == FROM_GC) { | |
3314 MemoryChunk::IncrementLiveBytesFromGC(address, by); | |
3315 } else { | |
3316 MemoryChunk::IncrementLiveBytesFromMutator(address, by); | |
3317 } | |
3318 } | |
3319 } | |
3320 | |
3321 | |
3322 AllocationResult Heap::AllocateExternalArray(int length, | |
3323 ExternalArrayType array_type, | |
3324 void* external_pointer, | |
3325 PretenureFlag pretenure) { | |
3326 int size = ExternalArray::kAlignedSize; | |
3327 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); | |
3328 HeapObject* result; | |
3329 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | |
3330 if (!allocation.To(&result)) return allocation; | |
3331 } | |
3332 | |
3333 result->set_map_no_write_barrier( | |
3334 MapForExternalArrayType(array_type)); | |
3335 ExternalArray::cast(result)->set_length(length); | |
3336 ExternalArray::cast(result)->set_external_pointer(external_pointer); | |
3337 return result; | |
3338 } | |
3339 | |
3340 static void ForFixedTypedArray(ExternalArrayType array_type, | |
3341 int* element_size, | |
3342 ElementsKind* element_kind) { | |
3343 switch (array_type) { | |
3344 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \ | |
3345 case kExternal##Type##Array: \ | |
3346 *element_size = size; \ | |
3347 *element_kind = TYPE##_ELEMENTS; \ | |
3348 return; | |
3349 | |
3350 TYPED_ARRAYS(TYPED_ARRAY_CASE) | |
3351 #undef TYPED_ARRAY_CASE | |
3352 | |
3353 default: | |
3354 *element_size = 0; // Bogus | |
3355 *element_kind = UINT8_ELEMENTS; // Bogus | |
3356 UNREACHABLE(); | |
3357 } | |
3358 } | |
3359 | |
3360 | |
3361 AllocationResult Heap::AllocateFixedTypedArray(int length, | |
3362 ExternalArrayType array_type, | |
3363 PretenureFlag pretenure) { | |
3364 int element_size; | |
3365 ElementsKind elements_kind; | |
3366 ForFixedTypedArray(array_type, &element_size, &elements_kind); | |
3367 int size = OBJECT_POINTER_ALIGN( | |
3368 length * element_size + FixedTypedArrayBase::kDataOffset); | |
3369 #ifndef V8_HOST_ARCH_64_BIT | |
3370 if (array_type == kExternalFloat64Array) { | |
3371 size += kPointerSize; | |
3372 } | |
3373 #endif | |
3374 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); | |
3375 | |
3376 HeapObject* object; | |
3377 AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | |
3378 if (!allocation.To(&object)) return allocation; | |
3379 | |
3380 if (array_type == kExternalFloat64Array) { | |
3381 object = EnsureDoubleAligned(this, object, size); | |
3382 } | |
3383 | |
3384 object->set_map(MapForFixedTypedArray(array_type)); | |
3385 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object); | |
3386 elements->set_length(length); | |
3387 memset(elements->DataPtr(), 0, elements->DataSize()); | |
3388 return elements; | |
3389 } | |
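// Size sketch for the computation above (using the 8-byte Float64 element
// size): a kExternalFloat64Array of length 3 needs 3 * 8 bytes of data plus
// FixedTypedArrayBase::kDataOffset of header, rounded up by
// OBJECT_POINTER_ALIGN; on a 32-bit host an extra kPointerSize is reserved
// so EnsureDoubleAligned() can shift the object onto an 8-byte boundary
// without overrunning the allocation.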
3390 | |
3391 | |
3392 AllocationResult Heap::AllocateCode(int object_size, bool immovable) { | |
3393 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment)); | |
3394 AllocationResult allocation = | |
3395 AllocateRaw(object_size, CODE_SPACE, CODE_SPACE); | |
3396 | |
3397 HeapObject* result; | |
3398 if (!allocation.To(&result)) return allocation; | |
3399 | |
3400 if (immovable) { | |
3401 Address address = result->address(); | |
3402 // Code objects which should stay at a fixed address are allocated either | |
3403 // in the first page of code space (objects on the first page of each space | |
3404 // are never moved) or in large object space. | |
3405 if (!code_space_->FirstPage()->Contains(address) && | |
3406 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) { | |
3407 // Discard the first code allocation, which was on a page where it could | |
3408 // be moved. | |
3409 CreateFillerObjectAt(result->address(), object_size); | |
3410 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE); | |
3411 if (!allocation.To(&result)) return allocation; | |
3412 OnAllocationEvent(result, object_size); | |
3413 } | |
3414 } | |
3415 | |
3416 result->set_map_no_write_barrier(code_map()); | |
3417 Code* code = Code::cast(result); | |
3418 DCHECK(isolate_->code_range() == NULL || | |
3419 !isolate_->code_range()->valid() || | |
3420 isolate_->code_range()->contains(code->address())); | |
3421 code->set_gc_metadata(Smi::FromInt(0)); | |
3422 code->set_ic_age(global_ic_age_); | |
3423 return code; | |
3424 } | |
3425 | |
3426 | |
3427 AllocationResult Heap::CopyCode(Code* code) { | |
3428 AllocationResult allocation; | |
3429 HeapObject* new_constant_pool; | |
3430 if (FLAG_enable_ool_constant_pool && | |
3431 code->constant_pool() != empty_constant_pool_array()) { | |
3432 // Copy the constant pool, since edits to the copied code may modify | |
3433 // the constant pool. | |
3434 allocation = CopyConstantPoolArray(code->constant_pool()); | |
3435 if (!allocation.To(&new_constant_pool)) return allocation; | |
3436 } else { | |
3437 new_constant_pool = empty_constant_pool_array(); | |
3438 } | |
3439 | |
3440 HeapObject* result; | |
3441 // Allocate an object the same size as the code object. | |
3442 int obj_size = code->Size(); | |
3443 allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE); | |
3444 if (!allocation.To(&result)) return allocation; | |
3445 | |
3446 // Copy code object. | |
3447 Address old_addr = code->address(); | |
3448 Address new_addr = result->address(); | |
3449 CopyBlock(new_addr, old_addr, obj_size); | |
3450 Code* new_code = Code::cast(result); | |
3451 | |
3452 // Update the constant pool. | |
3453 new_code->set_constant_pool(new_constant_pool); | |
3454 | |
3455 // Relocate the copy. | |
3456 DCHECK(isolate_->code_range() == NULL || | |
3457 !isolate_->code_range()->valid() || | |
3458 isolate_->code_range()->contains(code->address())); | |
3459 new_code->Relocate(new_addr - old_addr); | |
3460 return new_code; | |
3461 } | |
3462 | |
3463 | |
3464 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) { | |
3465 // Allocate ByteArray and ConstantPoolArray before the Code object, so that we | |
3466 // do not risk leaving an uninitialized Code object (and breaking the heap). | |
3467 ByteArray* reloc_info_array; | |
3468 { AllocationResult allocation = | |
3469 AllocateByteArray(reloc_info.length(), TENURED); | |
3470 if (!allocation.To(&reloc_info_array)) return allocation; | |
3471 } | |
3472 HeapObject* new_constant_pool; | |
3473 if (FLAG_enable_ool_constant_pool && | |
3474 code->constant_pool() != empty_constant_pool_array()) { | |
3475 // Copy the constant pool, since edits to the copied code may modify | |
3476 // the constant pool. | |
3477 AllocationResult allocation = | |
3478 CopyConstantPoolArray(code->constant_pool()); | |
3479 if (!allocation.To(&new_constant_pool)) return allocation; | |
3480 } else { | |
3481 new_constant_pool = empty_constant_pool_array(); | |
3482 } | |
3483 | |
3484 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment); | |
3485 | |
3486 int new_obj_size = Code::SizeFor(new_body_size); | |
3487 | |
3488 Address old_addr = code->address(); | |
3489 | |
3490 size_t relocation_offset = | |
3491 static_cast<size_t>(code->instruction_end() - old_addr); | |
3492 | |
3493 HeapObject* result; | |
3494 AllocationResult allocation = | |
3495 AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE); | |
3496 if (!allocation.To(&result)) return allocation; | |
3497 | |
3498 // Copy code object. | |
3499 Address new_addr = result->address(); | |
3500 | |
3501 // Copy header and instructions. | |
3502 CopyBytes(new_addr, old_addr, relocation_offset); | |
3503 | |
3504 Code* new_code = Code::cast(result); | |
3505 new_code->set_relocation_info(reloc_info_array); | |
3506 | |
3507 // Update constant pool. | |
3508 new_code->set_constant_pool(new_constant_pool); | |
3509 | |
3510 // Copy patched rinfo. | |
3511 CopyBytes(new_code->relocation_start(), | |
3512 reloc_info.start(), | |
3513 static_cast<size_t>(reloc_info.length())); | |
3514 | |
3515 // Relocate the copy. | |
3516 DCHECK(isolate_->code_range() == NULL || | |
3517 !isolate_->code_range()->valid() || | |
3518 isolate_->code_range()->contains(code->address())); | |
3519 new_code->Relocate(new_addr - old_addr); | |
3520 | |
3521 #ifdef VERIFY_HEAP | |
3522 if (FLAG_verify_heap) code->ObjectVerify(); | |
3523 #endif | |
3524 return new_code; | |
3525 } | |
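// Note on the copy above: relocation_offset is the size of the Code header
// plus its instructions, so CopyBytes() duplicates exactly that prefix; the
// relocation data itself lives in a separate ByteArray, which is why a new
// reloc_info_array is allocated first and the patched reloc_info vector is
// copied into it afterwards. Relocate() then adjusts address-dependent
// entries by the displacement new_addr - old_addr.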
3526 | |
3527 | |
3528 void Heap::InitializeAllocationMemento(AllocationMemento* memento, | |
3529 AllocationSite* allocation_site) { | |
3530 memento->set_map_no_write_barrier(allocation_memento_map()); | |
3531 DCHECK(allocation_site->map() == allocation_site_map()); | |
3532 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER); | |
3533 if (FLAG_allocation_site_pretenuring) { | |
3534 allocation_site->IncrementMementoCreateCount(); | |
3535 } | |
3536 } | |
3537 | |
3538 | |
3539 AllocationResult Heap::Allocate(Map* map, AllocationSpace space, | |
3540 AllocationSite* allocation_site) { | |
3541 DCHECK(gc_state_ == NOT_IN_GC); | |
3542 DCHECK(map->instance_type() != MAP_TYPE); | |
3543 // If allocation failures are disallowed, we may allocate in a different | |
3544 // space when new space is full and the object is not a large object. | |
3545 AllocationSpace retry_space = | |
3546 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); | |
3547 int size = map->instance_size(); | |
3548 if (allocation_site != NULL) { | |
3549 size += AllocationMemento::kSize; | |
3550 } | |
3551 HeapObject* result; | |
3552 AllocationResult allocation = AllocateRaw(size, space, retry_space); | |
3553 if (!allocation.To(&result)) return allocation; | |
3554 // No need for write barrier since object is white and map is in old space. | |
3555 result->set_map_no_write_barrier(map); | |
3556 if (allocation_site != NULL) { | |
3557 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( | |
3558 reinterpret_cast<Address>(result) + map->instance_size()); | |
3559 InitializeAllocationMemento(alloc_memento, allocation_site); | |
3560 } | |
3561 return result; | |
3562 } | |
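// When an allocation_site is passed, the memento is not a separate
// allocation: the requested size is padded by AllocationMemento::kSize and
// the memento is written directly behind the object, e.g. for a hypothetical
// instance_size of 0x30 the memento starts at result + 0x30.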
3563 | |
3564 | |
3565 void Heap::InitializeJSObjectFromMap(JSObject* obj, | |
3566 FixedArray* properties, | |
3567 Map* map) { | |
3568 obj->set_properties(properties); | |
3569 obj->initialize_elements(); | |
3570 // TODO(1240798): Initialize the object's body using valid initial values | |
3571 // according to the object's initial map. For example, if the map's | |
3572 // instance type is JS_ARRAY_TYPE, the length field should be initialized | |
3573 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a | |
3574 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object | |
3575 // verification code has to cope with (temporarily) invalid objects. See, | |
3576 // for example, JSArray::JSArrayVerify. | |
3577 Object* filler; | |
3578 // We cannot always fill with one_pointer_filler_map because objects | |
3579 // created from API functions expect their internal fields to be initialized | |
3580 // with undefined_value. | |
3581 // Pre-allocated fields need to be initialized with undefined_value as well | |
3582 // so that object accesses before the constructor completes (e.g. in the | |
3583 // debugger) will not cause a crash. | |
3584 if (map->constructor()->IsJSFunction() && | |
3585 JSFunction::cast(map->constructor())-> | |
3586 IsInobjectSlackTrackingInProgress()) { | |
3587 // We might want to shrink the object later. | |
3588 DCHECK(obj->GetInternalFieldCount() == 0); | |
3589 filler = Heap::one_pointer_filler_map(); | |
3590 } else { | |
3591 filler = Heap::undefined_value(); | |
3592 } | |
3593 obj->InitializeBody(map, Heap::undefined_value(), filler); | |
3594 } | |
3595 | |
3596 | |
3597 AllocationResult Heap::AllocateJSObjectFromMap( | |
3598 Map* map, | |
3599 PretenureFlag pretenure, | |
3600 bool allocate_properties, | |
3601 AllocationSite* allocation_site) { | |
3602 // JSFunctions should be allocated using AllocateFunction to be | |
3603 // properly initialized. | |
3604 DCHECK(map->instance_type() != JS_FUNCTION_TYPE); | |
3605 | |
3606 // Both types of global objects should be allocated using | |
3607 // AllocateGlobalObject to be properly initialized. | |
3608 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE); | |
3609 DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE); | |
3610 | |
3611 // Allocate the backing storage for the properties. | |
3612 FixedArray* properties; | |
3613 if (allocate_properties) { | |
3614 int prop_size = map->InitialPropertiesLength(); | |
3615 DCHECK(prop_size >= 0); | |
3616 { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure); | |
3617 if (!allocation.To(&properties)) return allocation; | |
3618 } | |
3619 } else { | |
3620 properties = empty_fixed_array(); | |
3621 } | |
3622 | |
3623 // Allocate the JSObject. | |
3624 int size = map->instance_size(); | |
3625 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); | |
3626 JSObject* js_obj; | |
3627 AllocationResult allocation = Allocate(map, space, allocation_site); | |
3628 if (!allocation.To(&js_obj)) return allocation; | |
3629 | |
3630 // Initialize the JSObject. | |
3631 InitializeJSObjectFromMap(js_obj, properties, map); | |
3632 DCHECK(js_obj->HasFastElements() || | |
3633 js_obj->HasExternalArrayElements() || | |
3634 js_obj->HasFixedTypedArrayElements()); | |
3635 return js_obj; | |
3636 } | |
3637 | |
3638 | |
3639 AllocationResult Heap::AllocateJSObject(JSFunction* constructor, | |
3640 PretenureFlag pretenure, | |
3641 AllocationSite* allocation_site) { | |
3642 DCHECK(constructor->has_initial_map()); | |
3643 | |
3644 // Allocate the object based on the constructor's initial map. | |
3645 AllocationResult allocation = AllocateJSObjectFromMap( | |
3646 constructor->initial_map(), pretenure, true, allocation_site); | |
3647 #ifdef DEBUG | |
3648 // Make sure result is NOT a global object if valid. | |
3649 HeapObject* obj; | |
3650 DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject()); | |
3651 #endif | |
3652 return allocation; | |
3653 } | |
3654 | |
3655 | |
3656 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) { | |
3657 // Never used to copy functions. If functions need to be copied we | |
3658 // have to be careful to clear the literals array. | |
3659 SLOW_DCHECK(!source->IsJSFunction()); | |
3660 | |
3661 // Make the clone. | |
3662 Map* map = source->map(); | |
3663 int object_size = map->instance_size(); | |
3664 HeapObject* clone; | |
3665 | |
3666 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type())); | |
3667 | |
3668 WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; | |
3669 | |
3670 // If we're forced to always allocate, we use the general allocation | |
3671 // functions which may leave us with an object in old space. | |
3672 if (always_allocate()) { | |
3673 { AllocationResult allocation = | |
3674 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE); | |
3675 if (!allocation.To(&clone)) return allocation; | |
3676 } | |
3677 Address clone_address = clone->address(); | |
3678 CopyBlock(clone_address, | |
3679 source->address(), | |
3680 object_size); | |
3681 // Update write barrier for all fields that lie beyond the header. | |
3682 RecordWrites(clone_address, | |
3683 JSObject::kHeaderSize, | |
3684 (object_size - JSObject::kHeaderSize) / kPointerSize); | |
3685 } else { | |
3686 wb_mode = SKIP_WRITE_BARRIER; | |
3687 | |
3688 { int adjusted_object_size = site != NULL | |
3689 ? object_size + AllocationMemento::kSize | |
3690 : object_size; | |
3691 AllocationResult allocation = | |
3692 AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE); | |
3693 if (!allocation.To(&clone)) return allocation; | |
3694 } | |
3695 SLOW_DCHECK(InNewSpace(clone)); | |
3696 // Since we know the clone is allocated in new space, we can copy | |
3697 // the contents without worrying about updating the write barrier. | |
3698 CopyBlock(clone->address(), | |
3699 source->address(), | |
3700 object_size); | |
3701 | |
3702 if (site != NULL) { | |
3703 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( | |
3704 reinterpret_cast<Address>(clone) + object_size); | |
3705 InitializeAllocationMemento(alloc_memento, site); | |
3706 } | |
3707 } | |
3708 | |
3709 SLOW_DCHECK( | |
3710 JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind()); | |
3711 FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); | |
3712 FixedArray* properties = FixedArray::cast(source->properties()); | |
3713 // Update elements if necessary. | |
3714 if (elements->length() > 0) { | |
3715 FixedArrayBase* elem; | |
3716 { AllocationResult allocation; | |
3717 if (elements->map() == fixed_cow_array_map()) { | |
3718 allocation = FixedArray::cast(elements); | |
3719 } else if (source->HasFastDoubleElements()) { | |
3720 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements)); | |
3721 } else { | |
3722 allocation = CopyFixedArray(FixedArray::cast(elements)); | |
3723 } | |
3724 if (!allocation.To(&elem)) return allocation; | |
3725 } | |
3726 JSObject::cast(clone)->set_elements(elem, wb_mode); | |
3727 } | |
3728 // Update properties if necessary. | |
3729 if (properties->length() > 0) { | |
3730 FixedArray* prop; | |
3731 { AllocationResult allocation = CopyFixedArray(properties); | |
3732 if (!allocation.To(&prop)) return allocation; | |
3733 } | |
3734 JSObject::cast(clone)->set_properties(prop, wb_mode); | |
3735 } | |
3736 // Return the new clone. | |
3737 return clone; | |
3738 } | |
3739 | |
3740 | |
3741 static inline void WriteOneByteData(Vector<const char> vector, | |
3742 uint8_t* chars, | |
3743 int len) { | |
3744 // Only works for ASCII. | |
3745 DCHECK(vector.length() == len); | |
3746 MemCopy(chars, vector.start(), len); | |
3747 } | |
3748 | |
3749 static inline void WriteTwoByteData(Vector<const char> vector, | |
3750 uint16_t* chars, | |
3751 int len) { | |
3752 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start()); | |
3753 unsigned stream_length = vector.length(); | |
3754 while (stream_length != 0) { | |
3755 unsigned consumed = 0; | |
3756 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed); | |
3757 DCHECK(c != unibrow::Utf8::kBadChar); | |
3758 DCHECK(consumed <= stream_length); | |
3759 stream_length -= consumed; | |
3760 stream += consumed; | |
3761 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) { | |
3762 len -= 2; | |
3763 if (len < 0) break; | |
3764 *chars++ = unibrow::Utf16::LeadSurrogate(c); | |
3765 *chars++ = unibrow::Utf16::TrailSurrogate(c); | |
3766 } else { | |
3767 len -= 1; | |
3768 if (len < 0) break; | |
3769 *chars++ = c; | |
3770 } | |
3771 } | |
3772 DCHECK(stream_length == 0); | |
3773 DCHECK(len == 0); | |
3774 } | |
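// Decoding sketch for the loop above: a supplementary character such as
// U+1F600 arrives as four UTF-8 bytes (F0 9F 98 80) and is written back as
// the UTF-16 surrogate pair 0xD83D 0xDE00, consuming two slots of 'len';
// characters at or below kMaxNonSurrogateCharCode consume one slot each,
// which is why the final DCHECKs expect the stream and 'len' to be
// exhausted together.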
3775 | |
3776 | |
3777 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) { | |
3778 DCHECK(s->length() == len); | |
3779 String::WriteToFlat(s, chars, 0, len); | |
3780 } | |
3781 | |
3782 | |
3783 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) { | |
3784 DCHECK(s->length() == len); | |
3785 String::WriteToFlat(s, chars, 0, len); | |
3786 } | |
3787 | |
3788 | |
3789 template<bool is_one_byte, typename T> | |
3790 AllocationResult Heap::AllocateInternalizedStringImpl( | |
3791 T t, int chars, uint32_t hash_field) { | |
3792 DCHECK(chars >= 0); | |
3793 // Compute map and object size. | |
3794 int size; | |
3795 Map* map; | |
3796 | |
3797 DCHECK_LE(0, chars); | |
3798 DCHECK_GE(String::kMaxLength, chars); | |
3799 if (is_one_byte) { | |
3800 map = ascii_internalized_string_map(); | |
3801 size = SeqOneByteString::SizeFor(chars); | |
3802 } else { | |
3803 map = internalized_string_map(); | |
3804 size = SeqTwoByteString::SizeFor(chars); | |
3805 } | |
3806 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); | |
3807 | |
3808 // Allocate string. | |
3809 HeapObject* result; | |
3810 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | |
3811 if (!allocation.To(&result)) return allocation; | |
3812 } | |
3813 | |
3814 result->set_map_no_write_barrier(map); | |
3815 // Set length and hash fields of the allocated string. | |
3816 String* answer = String::cast(result); | |
3817 answer->set_length(chars); | |
3818 answer->set_hash_field(hash_field); | |
3819 | |
3820 DCHECK_EQ(size, answer->Size()); | |
3821 | |
3822 if (is_one_byte) { | |
3823 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars); | |
3824 } else { | |
3825 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars); | |
3826 } | |
3827 return answer; | |
3828 } | |
3829 | |
3830 | |
3831 // Need explicit instantiations. | |
3832 template | |
3833 AllocationResult Heap::AllocateInternalizedStringImpl<true>( | |
3834 String*, int, uint32_t); | |
3835 template | |
3836 AllocationResult Heap::AllocateInternalizedStringImpl<false>( | |
3837 String*, int, uint32_t); | |
3838 template | |
3839 AllocationResult Heap::AllocateInternalizedStringImpl<false>( | |
3840 Vector<const char>, int, uint32_t); | |
3841 | |
3842 | |
3843 AllocationResult Heap::AllocateRawOneByteString(int length, | |
3844 PretenureFlag pretenure) { | |
3845 DCHECK_LE(0, length); | |
3846 DCHECK_GE(String::kMaxLength, length); | |
3847 int size = SeqOneByteString::SizeFor(length); | |
3848 DCHECK(size <= SeqOneByteString::kMaxSize); | |
3849 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); | |
3850 | |
3851 HeapObject* result; | |
3852 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | |
3853 if (!allocation.To(&result)) return allocation; | |
3854 } | |
3855 | |
3856 // Partially initialize the object. | |
3857 result->set_map_no_write_barrier(ascii_string_map()); | |
3858 String::cast(result)->set_length(length); | |
3859 String::cast(result)->set_hash_field(String::kEmptyHashField); | |
3860 DCHECK_EQ(size, HeapObject::cast(result)->Size()); | |
3861 | |
3862 return result; | |
3863 } | |
3864 | |
3865 | |
3866 AllocationResult Heap::AllocateRawTwoByteString(int length, | |
3867 PretenureFlag pretenure) { | |
3868 DCHECK_LE(0, length); | |
3869 DCHECK_GE(String::kMaxLength, length); | |
3870 int size = SeqTwoByteString::SizeFor(length); | |
3871 DCHECK(size <= SeqTwoByteString::kMaxSize); | |
3872 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); | |
3873 | |
3874 HeapObject* result; | |
3875 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | |
3876 if (!allocation.To(&result)) return allocation; | |
3877 } | |
3878 | |
3879 // Partially initialize the object. | |
3880 result->set_map_no_write_barrier(string_map()); | |
3881 String::cast(result)->set_length(length); | |
3882 String::cast(result)->set_hash_field(String::kEmptyHashField); | |
3883 DCHECK_EQ(size, HeapObject::cast(result)->Size()); | |
3884 return result; | |
3885 } | |
3886 | |
3887 | |
3888 AllocationResult Heap::AllocateEmptyFixedArray() { | |
3889 int size = FixedArray::SizeFor(0); | |
3890 HeapObject* result; | |
3891 { AllocationResult allocation = | |
3892 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); | |
3893 if (!allocation.To(&result)) return allocation; | |
3894 } | |
3895 // Initialize the object. | |
3896 result->set_map_no_write_barrier(fixed_array_map()); | |
3897 FixedArray::cast(result)->set_length(0); | |
3898 return result; | |
3899 } | |
3900 | |
3901 | |
3902 AllocationResult Heap::AllocateEmptyExternalArray( | |
3903 ExternalArrayType array_type) { | |
3904 return AllocateExternalArray(0, array_type, NULL, TENURED); | |
3905 } | |
3906 | |
3907 | |
3908 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) { | |
3909 if (!InNewSpace(src)) { | |
3910 return src; | |
3911 } | |
3912 | |
3913 int len = src->length(); | |
3914 HeapObject* obj; | |
3915 { AllocationResult allocation = AllocateRawFixedArray(len, TENURED); | |
3916 if (!allocation.To(&obj)) return allocation; | |
3917 } | |
3918 obj->set_map_no_write_barrier(fixed_array_map()); | |
3919 FixedArray* result = FixedArray::cast(obj); | |
3920 result->set_length(len); | |
3921 | |
3922 // Copy the content | |
3923 DisallowHeapAllocation no_gc; | |
3924 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); | |
3925 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); | |
3926 | |
3927 // TODO(mvstanton): The map is set twice because of protection against calling | |
3928 // set() on a COW FixedArray. Issue v8:3221 was created to track this; once | |
3929 // it is resolved we might be able to remove this whole method. | |
3930 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map()); | |
3931 return result; | |
3932 } | |
3933 | |
3934 | |
3935 AllocationResult Heap::AllocateEmptyFixedTypedArray( | |
3936 ExternalArrayType array_type) { | |
3937 return AllocateFixedTypedArray(0, array_type, TENURED); | |
3938 } | |
3939 | |
3940 | |
3941 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) { | |
3942 int len = src->length(); | |
3943 HeapObject* obj; | |
3944 { AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED); | |
3945 if (!allocation.To(&obj)) return allocation; | |
3946 } | |
3947 if (InNewSpace(obj)) { | |
3948 obj->set_map_no_write_barrier(map); | |
3949 CopyBlock(obj->address() + kPointerSize, | |
3950 src->address() + kPointerSize, | |
3951 FixedArray::SizeFor(len) - kPointerSize); | |
3952 return obj; | |
3953 } | |
3954 obj->set_map_no_write_barrier(map); | |
3955 FixedArray* result = FixedArray::cast(obj); | |
3956 result->set_length(len); | |
3957 | |
3958 // Copy the content | |
3959 DisallowHeapAllocation no_gc; | |
3960 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc); | |
3961 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode); | |
3962 return result; | |
3963 } | |
3964 | |
3965 | |
3966 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, | |
3967 Map* map) { | |
3968 int len = src->length(); | |
3969 HeapObject* obj; | |
3970 { AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED); | |
3971 if (!allocation.To(&obj)) return allocation; | |
3972 } | |
3973 obj->set_map_no_write_barrier(map); | |
3974 CopyBlock( | |
3975 obj->address() + FixedDoubleArray::kLengthOffset, | |
3976 src->address() + FixedDoubleArray::kLengthOffset, | |
3977 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset); | |
3978 return obj; | |
3979 } | |
3980 | |
3981 | |
3982 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src, | |
3983 Map* map) { | |
3984 HeapObject* obj; | |
3985 if (src->is_extended_layout()) { | |
3986 ConstantPoolArray::NumberOfEntries small(src, | |
3987 ConstantPoolArray::SMALL_SECTION); | |
3988 ConstantPoolArray::NumberOfEntries extended(src, | |
3989 ConstantPoolArray::EXTENDED_SECTION); | |
3990 AllocationResult allocation = | |
3991 AllocateExtendedConstantPoolArray(small, extended); | |
3992 if (!allocation.To(&obj)) return allocation; | |
3993 } else { | |
3994 ConstantPoolArray::NumberOfEntries small(src, | |
3995 ConstantPoolArray::SMALL_SECTION); | |
3996 AllocationResult allocation = AllocateConstantPoolArray(small); | |
3997 if (!allocation.To(&obj)) return allocation; | |
3998 } | |
3999 obj->set_map_no_write_barrier(map); | |
4000 CopyBlock( | |
4001 obj->address() + ConstantPoolArray::kFirstEntryOffset, | |
4002 src->address() + ConstantPoolArray::kFirstEntryOffset, | |
4003 src->size() - ConstantPoolArray::kFirstEntryOffset); | |
4004 return obj; | |
4005 } | |
4006 | |
4007 | |
4008 AllocationResult Heap::AllocateRawFixedArray(int length, | |
4009 PretenureFlag pretenure) { | |
4010 if (length < 0 || length > FixedArray::kMaxLength) { | |
4011 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); | |
4012 } | |
4013 int size = FixedArray::SizeFor(length); | |
4014 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); | |
4015 | |
4016 return AllocateRaw(size, space, OLD_POINTER_SPACE); | |
4017 } | |
4018 | |
4019 | |
4020 AllocationResult Heap::AllocateFixedArrayWithFiller(int length, | |
4021 PretenureFlag pretenure, | |
4022 Object* filler) { | |
4023 DCHECK(length >= 0); | |
4024 DCHECK(empty_fixed_array()->IsFixedArray()); | |
4025 if (length == 0) return empty_fixed_array(); | |
4026 | |
4027 DCHECK(!InNewSpace(filler)); | |
4028 HeapObject* result; | |
4029 { AllocationResult allocation = AllocateRawFixedArray(length, pretenure); | |
4030 if (!allocation.To(&result)) return allocation; | |
4031 } | |
4032 | |
4033 result->set_map_no_write_barrier(fixed_array_map()); | |
4034 FixedArray* array = FixedArray::cast(result); | |
4035 array->set_length(length); | |
4036 MemsetPointer(array->data_start(), filler, length); | |
4037 return array; | |
4038 } | |
4039 | |
4040 | |
4041 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) { | |
4042 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value()); | |
4043 } | |
4044 | |
4045 | |
4046 AllocationResult Heap::AllocateUninitializedFixedArray(int length) { | |
4047 if (length == 0) return empty_fixed_array(); | |
4048 | |
4049 HeapObject* obj; | |
4050 { AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED); | |
4051 if (!allocation.To(&obj)) return allocation; | |
4052 } | |
4053 | |
4054 obj->set_map_no_write_barrier(fixed_array_map()); | |
4055 FixedArray::cast(obj)->set_length(length); | |
4056 return obj; | |
4057 } | |
4058 | |
4059 | |
4060 AllocationResult Heap::AllocateUninitializedFixedDoubleArray( | |
4061 int length, | |
4062 PretenureFlag pretenure) { | |
4063 if (length == 0) return empty_fixed_array(); | |
4064 | |
4065 HeapObject* elements; | |
4066 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure); | |
4067 if (!allocation.To(&elements)) return allocation; | |
4068 | |
4069 elements->set_map_no_write_barrier(fixed_double_array_map()); | |
4070 FixedDoubleArray::cast(elements)->set_length(length); | |
4071 return elements; | |
4072 } | |
4073 | |
4074 | |
4075 AllocationResult Heap::AllocateRawFixedDoubleArray(int length, | |
4076 PretenureFlag pretenure) { | |
4077 if (length < 0 || length > FixedDoubleArray::kMaxLength) { | |
4078 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); | |
4079 } | |
4080 int size = FixedDoubleArray::SizeFor(length); | |
4081 #ifndef V8_HOST_ARCH_64_BIT | |
4082 size += kPointerSize; | |
4083 #endif | |
4084 AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); | |
4085 | |
4086 HeapObject* object; | |
4087 { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE); | |
4088 if (!allocation.To(&object)) return allocation; | |
4089 } | |
4090 | |
4091 return EnsureDoubleAligned(this, object, size); | |
4092 } | |
4093 | |
4094 | |
4095 AllocationResult Heap::AllocateConstantPoolArray( | |
4096 const ConstantPoolArray::NumberOfEntries& small) { | |
4097 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType)); | |
4098 int size = ConstantPoolArray::SizeFor(small); | |
4099 #ifndef V8_HOST_ARCH_64_BIT | |
4100 size += kPointerSize; | |
4101 #endif | |
4102 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); | |
4103 | |
4104 HeapObject* object; | |
4105 { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE); | |
4106 if (!allocation.To(&object)) return allocation; | |
4107 } | |
4108 object = EnsureDoubleAligned(this, object, size); | |
4109 object->set_map_no_write_barrier(constant_pool_array_map()); | |
4110 | |
4111 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object); | |
4112 constant_pool->Init(small); | |
4113 constant_pool->ClearPtrEntries(isolate()); | |
4114 return constant_pool; | |
4115 } | |
4116 | |
4117 | |
4118 AllocationResult Heap::AllocateExtendedConstantPoolArray( | |
4119 const ConstantPoolArray::NumberOfEntries& small, | |
4120 const ConstantPoolArray::NumberOfEntries& extended) { | |
4121 CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType)); | |
4122 CHECK(extended.are_in_range(0, kMaxInt)); | |
4123 int size = ConstantPoolArray::SizeForExtended(small, extended); | |
4124 #ifndef V8_HOST_ARCH_64_BIT | |
4125 size += kPointerSize; | |
4126 #endif | |
4127 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); | |
4128 | |
4129 HeapObject* object; | |
4130 { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE); | |
4131 if (!allocation.To(&object)) return allocation; | |
4132 } | |
4133 object = EnsureDoubleAligned(this, object, size); | |
4134 object->set_map_no_write_barrier(constant_pool_array_map()); | |
4135 | |
4136 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object); | |
4137 constant_pool->InitExtended(small, extended); | |
4138 constant_pool->ClearPtrEntries(isolate()); | |
4139 return constant_pool; | |
4140 } | |
4141 | |
4142 | |
4143 AllocationResult Heap::AllocateEmptyConstantPoolArray() { | |
4144 ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0); | |
4145 int size = ConstantPoolArray::SizeFor(small); | |
4146 HeapObject* result; | |
4147 { AllocationResult allocation = | |
4148 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE); | |
4149 if (!allocation.To(&result)) return allocation; | |
4150 } | |
4151 result->set_map_no_write_barrier(constant_pool_array_map()); | |
4152 ConstantPoolArray::cast(result)->Init(small); | |
4153 return result; | |
4154 } | |
4155 | |
4156 | |
4157 AllocationResult Heap::AllocateSymbol() { | |
4158 // Statically ensure that it is safe to allocate symbols in paged spaces. | |
4159 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize); | |
4160 | |
4161 HeapObject* result; | |
4162 AllocationResult allocation = | |
4163 AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE); | |
4164 if (!allocation.To(&result)) return allocation; | |
4165 | |
4166 result->set_map_no_write_barrier(symbol_map()); | |
4167 | |
4168 // Generate a random hash value. | |
4169 int hash; | |
4170 int attempts = 0; | |
4171 do { | |
4172 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask; | |
4173 attempts++; | |
4174 } while (hash == 0 && attempts < 30); | |
4175 if (hash == 0) hash = 1; // never return 0 | |
4176 | |
4177 Symbol::cast(result)->set_hash_field( | |
4178 Name::kIsNotArrayIndexMask | (hash << Name::kHashShift)); | |
4179 Symbol::cast(result)->set_name(undefined_value()); | |
4180 Symbol::cast(result)->set_flags(Smi::FromInt(0)); | |
4181 | |
4182 DCHECK(!Symbol::cast(result)->is_private()); | |
4183 return result; | |
4184 } | |
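// Hash sketch for the code above: the loop draws random bits until it finds
// a non-zero hash (falling back to 1 after 30 attempts), and
// Name::kIsNotArrayIndexMask is OR-ed into the field so the stored hash is
// never interpreted as a cached array index.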
4185 | |
4186 | |
4187 AllocationResult Heap::AllocateStruct(InstanceType type) { | |
4188 Map* map; | |
4189 switch (type) { | |
4190 #define MAKE_CASE(NAME, Name, name) \ | |
4191 case NAME##_TYPE: map = name##_map(); break; | |
4192 STRUCT_LIST(MAKE_CASE) | |
4193 #undef MAKE_CASE | |
4194 default: | |
4195 UNREACHABLE(); | |
4196 return exception(); | |
4197 } | |
4198 int size = map->instance_size(); | |
4199 AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); | |
4200 Struct* result; | |
4201 { AllocationResult allocation = Allocate(map, space); | |
4202 if (!allocation.To(&result)) return allocation; | |
4203 } | |
4204 result->InitializeBody(size); | |
4205 return result; | |
4206 } | |
4207 | |
4208 | |
4209 bool Heap::IsHeapIterable() { | |
4210 // TODO(hpayer): This function is not correct. Allocation folding in old | |
4211 // space breaks the iterability. | |
4212 return (old_pointer_space()->swept_precisely() && | |
4213 old_data_space()->swept_precisely() && | |
4214 new_space_top_after_last_gc_ == new_space()->top()); | |
4215 } | |
4216 | |
4217 | |
4218 void Heap::MakeHeapIterable() { | |
4219 DCHECK(AllowHeapAllocation::IsAllowed()); | |
4220 if (!IsHeapIterable()) { | |
4221 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable"); | |
4222 } | |
4223 if (mark_compact_collector()->sweeping_in_progress()) { | |
4224 mark_compact_collector()->EnsureSweepingCompleted(); | |
4225 } | |
4226 DCHECK(IsHeapIterable()); | |
4227 } | |
4228 | |
4229 | |
4230 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) { | |
4231 incremental_marking()->Step(step_size, | |
4232 IncrementalMarking::NO_GC_VIA_STACK_GUARD); | |
4233 | |
4234 if (incremental_marking()->IsComplete()) { | |
4235 bool uncommit = false; | |
4236 if (gc_count_at_last_idle_gc_ == gc_count_) { | |
4237 // No GC since the last full GC, the mutator is probably not active. | |
4238 isolate_->compilation_cache()->Clear(); | |
4239 uncommit = true; | |
4240 } | |
4241 CollectAllGarbage(kReduceMemoryFootprintMask, | |
4242 "idle notification: finalize incremental"); | |
4243 mark_sweeps_since_idle_round_started_++; | |
4244 gc_count_at_last_idle_gc_ = gc_count_; | |
4245 if (uncommit) { | |
4246 new_space_.Shrink(); | |
4247 UncommitFromSpace(); | |
4248 } | |
4249 } | |
4250 } | |
4251 | |
4252 | |
4253 bool Heap::IdleNotification(int hint) { | |
4254 // If incremental marking is off, we do not perform idle notification. | |
4255 if (!FLAG_incremental_marking) return true; | |
4256 | |
4257 // Hints greater than this value indicate that | |
4258 // the embedder is requesting a lot of GC work. | |
4259 const int kMaxHint = 1000; | |
4260 const int kMinHintForIncrementalMarking = 10; | |
4261 // Minimal hint that allows a full GC. | |
4262 const int kMinHintForFullGC = 100; | |
4263 intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4; | |
4264 // The size factor is in range [5..250]. The numbers here are chosen from | |
4265 // experiments. If you change them, make sure to test with | |
4266 // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*". | |
4267 intptr_t step_size = | |
4268 size_factor * IncrementalMarking::kAllocatedThreshold; | |
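// Worked numbers for the two statements above: a hint of 1000 (or more)
// clamps to kMaxHint and yields size_factor 250, while a hint of 10 clamps
// up to 20 and yields the minimum size_factor of 5; the marking step size is
// then that factor times IncrementalMarking::kAllocatedThreshold.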
4269 | |
4270 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(hint); | |
4271 HistogramTimerScope idle_notification_scope( | |
4272 isolate_->counters()->gc_idle_notification()); | |
4273 | |
4274 if (contexts_disposed_ > 0) { | |
4275 contexts_disposed_ = 0; | |
4276 int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000); | |
4277 if (hint >= mark_sweep_time && !FLAG_expose_gc && | |
4278 incremental_marking()->IsStopped()) { | |
4279 HistogramTimerScope scope(isolate_->counters()->gc_context()); | |
4280 CollectAllGarbage(kReduceMemoryFootprintMask, | |
4281 "idle notification: contexts disposed"); | |
4282 } else { | |
4283 AdvanceIdleIncrementalMarking(step_size); | |
4284 } | |
4285 | |
4286 // After context disposal there is likely a lot of garbage remaining, reset | |
4287 // the idle notification counters in order to trigger more incremental GCs | |
4288 // on subsequent idle notifications. | |
4289 StartIdleRound(); | |
4290 return false; | |
4291 } | |
4292 | |
4293 // By doing small chunks of GC work in each IdleNotification we perform a | |
4294 // round of incremental GCs, and after that we wait until the mutator | |
4295 // creates enough garbage to justify a new round. | |
4296 // An incremental GC progresses as follows: | |
4297 // 1. many incremental marking steps, | |
4298 // 2. one old space mark-sweep-compact. | |
4299 // Use mark-sweep-compact events to count incremental GCs in a round. | |
4300 | |
4301 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) { | |
4302 if (EnoughGarbageSinceLastIdleRound()) { | |
4303 StartIdleRound(); | |
4304 } else { | |
4305 return true; | |
4306 } | |
4307 } | |
4308 | |
4309 int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound - | |
4310 mark_sweeps_since_idle_round_started_; | |
4311 | |
4312 if (incremental_marking()->IsStopped()) { | |
4313 // If there are no more than two GCs left in this idle round and we are | |
4314 // allowed to do a full GC, then make those GCs full in order to compact | |
4315 // the code space. | |
4316 // TODO(ulan): Once we enable code compaction for incremental marking, | |
4317 // we can get rid of this special case and always start incremental marking. | |
4318 if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) { | |
4319 CollectAllGarbage(kReduceMemoryFootprintMask, | |
4320 "idle notification: finalize idle round"); | |
4321 mark_sweeps_since_idle_round_started_++; | |
4322 } else if (hint > kMinHintForIncrementalMarking) { | |
4323 incremental_marking()->Start(); | |
4324 } | |
4325 } | |
4326 if (!incremental_marking()->IsStopped() && | |
4327 hint > kMinHintForIncrementalMarking) { | |
4328 AdvanceIdleIncrementalMarking(step_size); | |
4329 } | |
4330 | |
4331 if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) { | |
4332 FinishIdleRound(); | |
4333 return true; | |
4334 } | |
4335 | |
4336 // If IdleNotification is called with a large hint we will wait for | |
4337 // the sweeper threads here. | |
4338 if (hint >= kMinHintForFullGC && | |
4339 mark_compact_collector()->sweeping_in_progress()) { | |
4340 mark_compact_collector()->EnsureSweepingCompleted(); | |
4341 } | |
4342 | |
4343 return false; | |
4344 } | |
4345 | |
4346 | |
4347 #ifdef DEBUG | |
4348 | |
4349 void Heap::Print() { | |
4350 if (!HasBeenSetUp()) return; | |
4351 isolate()->PrintStack(stdout); | |
4352 AllSpaces spaces(this); | |
4353 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { | |
4354 space->Print(); | |
4355 } | |
4356 } | |
4357 | |
4358 | |
4359 void Heap::ReportCodeStatistics(const char* title) { | |
4360 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title); | |
4361 PagedSpace::ResetCodeStatistics(isolate()); | |
4362 // We do not look for code in new space, map space, or old space. If code | |
4363 // somehow ends up in those spaces, we would miss it here. | |
4364 code_space_->CollectCodeStatistics(); | |
4365 lo_space_->CollectCodeStatistics(); | |
4366 PagedSpace::ReportCodeStatistics(isolate()); | |
4367 } | |
4368 | |
4369 | |
4370 // This function expects that NewSpace's allocated objects histogram is | |
4371 // populated (via a call to CollectStatistics or else as a side effect of a | |
4372 // just-completed scavenge collection). | |
4373 void Heap::ReportHeapStatistics(const char* title) { | |
4374 USE(title); | |
4375 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", | |
4376 title, gc_count_); | |
4377 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n", | |
4378 old_generation_allocation_limit_); | |
4379 | |
4380 PrintF("\n"); | |
4381 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_)); | |
4382 isolate_->global_handles()->PrintStats(); | |
4383 PrintF("\n"); | |
4384 | |
4385 PrintF("Heap statistics : "); | |
4386 isolate_->memory_allocator()->ReportStatistics(); | |
4387 PrintF("To space : "); | |
4388 new_space_.ReportStatistics(); | |
4389 PrintF("Old pointer space : "); | |
4390 old_pointer_space_->ReportStatistics(); | |
4391 PrintF("Old data space : "); | |
4392 old_data_space_->ReportStatistics(); | |
4393 PrintF("Code space : "); | |
4394 code_space_->ReportStatistics(); | |
4395 PrintF("Map space : "); | |
4396 map_space_->ReportStatistics(); | |
4397 PrintF("Cell space : "); | |
4398 cell_space_->ReportStatistics(); | |
4399 PrintF("PropertyCell space : "); | |
4400 property_cell_space_->ReportStatistics(); | |
4401 PrintF("Large object space : "); | |
4402 lo_space_->ReportStatistics(); | |
4403 PrintF(">>>>>> ========================================= >>>>>>\n"); | |
4404 } | |
4405 | |
4406 #endif // DEBUG | |
4407 | |
4408 bool Heap::Contains(HeapObject* value) { | |
4409 return Contains(value->address()); | |
4410 } | |
4411 | |
4412 | |
4413 bool Heap::Contains(Address addr) { | |
4414 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false; | |
4415 return HasBeenSetUp() && | |
4416 (new_space_.ToSpaceContains(addr) || | |
4417 old_pointer_space_->Contains(addr) || | |
4418 old_data_space_->Contains(addr) || | |
4419 code_space_->Contains(addr) || | |
4420 map_space_->Contains(addr) || | |
4421 cell_space_->Contains(addr) || | |
4422 property_cell_space_->Contains(addr) || | |
4423 lo_space_->SlowContains(addr)); | |
4424 } | |
4425 | |
4426 | |
4427 bool Heap::InSpace(HeapObject* value, AllocationSpace space) { | |
4428 return InSpace(value->address(), space); | |
4429 } | |
4430 | |
4431 | |
4432 bool Heap::InSpace(Address addr, AllocationSpace space) { | |
4433 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false; | |
4434 if (!HasBeenSetUp()) return false; | |
4435 | |
4436 switch (space) { | |
4437 case NEW_SPACE: | |
4438 return new_space_.ToSpaceContains(addr); | |
4439 case OLD_POINTER_SPACE: | |
4440 return old_pointer_space_->Contains(addr); | |
4441 case OLD_DATA_SPACE: | |
4442 return old_data_space_->Contains(addr); | |
4443 case CODE_SPACE: | |
4444 return code_space_->Contains(addr); | |
4445 case MAP_SPACE: | |
4446 return map_space_->Contains(addr); | |
4447 case CELL_SPACE: | |
4448 return cell_space_->Contains(addr); | |
4449 case PROPERTY_CELL_SPACE: | |
4450 return property_cell_space_->Contains(addr); | |
4451 case LO_SPACE: | |
4452 return lo_space_->SlowContains(addr); | |
4453 case INVALID_SPACE: | |
4454 break; | |
4455 } | |
4456 UNREACHABLE(); | |
4457 return false; | |
4458 } | |
4459 | |
4460 | |
4461 #ifdef VERIFY_HEAP | |
4462 void Heap::Verify() { | |
4463 CHECK(HasBeenSetUp()); | |
4464 HandleScope scope(isolate()); | |
4465 | |
4466 store_buffer()->Verify(); | |
4467 | |
4468 if (mark_compact_collector()->sweeping_in_progress()) { | |
4469 // We have to wait here for the sweeper threads to have an iterable heap. | |
4470 mark_compact_collector()->EnsureSweepingCompleted(); | |
4471 } | |
4472 | |
4473 VerifyPointersVisitor visitor; | |
4474 IterateRoots(&visitor, VISIT_ONLY_STRONG); | |
4475 | |
4476 VerifySmisVisitor smis_visitor; | |
4477 IterateSmiRoots(&smis_visitor); | |
4478 | |
4479 new_space_.Verify(); | |
4480 | |
4481 old_pointer_space_->Verify(&visitor); | |
4482 map_space_->Verify(&visitor); | |
4483 | |
4484 VerifyPointersVisitor no_dirty_regions_visitor; | |
4485 old_data_space_->Verify(&no_dirty_regions_visitor); | |
4486 code_space_->Verify(&no_dirty_regions_visitor); | |
4487 cell_space_->Verify(&no_dirty_regions_visitor); | |
4488 property_cell_space_->Verify(&no_dirty_regions_visitor); | |
4489 | |
4490 lo_space_->Verify(); | |
4491 } | |
4492 #endif | |
4493 | |
4494 | |
4495 void Heap::ZapFromSpace() { | |
4496 NewSpacePageIterator it(new_space_.FromSpaceStart(), | |
4497 new_space_.FromSpaceEnd()); | |
4498 while (it.has_next()) { | |
4499 NewSpacePage* page = it.next(); | |
4500 for (Address cursor = page->area_start(), limit = page->area_end(); | |
4501 cursor < limit; | |
4502 cursor += kPointerSize) { | |
4503 Memory::Address_at(cursor) = kFromSpaceZapValue; | |
4504 } | |
4505 } | |
4506 } | |
4507 | |
4508 | |
4509 void Heap::IterateAndMarkPointersToFromSpace(Address start, | |
4510 Address end, | |
4511 ObjectSlotCallback callback) { | |
4512 Address slot_address = start; | |
4513 | |
4514 // We are not collecting slots on new space objects during mutation, | |
4515 // thus we have to scan for pointers to evacuation candidates when we | |
4516 // promote objects. But we should not record any slots in non-black | |
4517 // objects. A grey object's slots would be rescanned anyway. | |
4518 // A white object might not survive until the end of the collection, so | |
4519 // it would be a violation of the invariant to record its slots. | |
4520 bool record_slots = false; | |
4521 if (incremental_marking()->IsCompacting()) { | |
4522 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start)); | |
4523 record_slots = Marking::IsBlack(mark_bit); | |
4524 } | |
4525 | |
4526 while (slot_address < end) { | |
4527 Object** slot = reinterpret_cast<Object**>(slot_address); | |
4528 Object* object = *slot; | |
4529 // If the store buffer becomes overfull we mark pages as being exempt from | |
4530 // the store buffer. These pages are scanned to find pointers that point | |
4531 // to the new space. In that case we may hit newly promoted objects and | |
4532 // fix the pointers before the promotion queue gets to them. Thus the 'if'. | |
4533 if (object->IsHeapObject()) { | |
4534 if (Heap::InFromSpace(object)) { | |
4535 callback(reinterpret_cast<HeapObject**>(slot), | |
4536 HeapObject::cast(object)); | |
4537 Object* new_object = *slot; | |
4538 if (InNewSpace(new_object)) { | |
4539 SLOW_DCHECK(Heap::InToSpace(new_object)); | |
4540 SLOW_DCHECK(new_object->IsHeapObject()); | |
4541 store_buffer_.EnterDirectlyIntoStoreBuffer( | |
4542 reinterpret_cast<Address>(slot)); | |
4543 } | |
4544 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object)); | |
4545 } else if (record_slots && | |
4546 MarkCompactCollector::IsOnEvacuationCandidate(object)) { | |
4547 mark_compact_collector()->RecordSlot(slot, slot, object); | |
4548 } | |
4549 } | |
4550 slot_address += kPointerSize; | |
4551 } | |
4552 } | |
4553 | |
4554 | |
4555 #ifdef DEBUG | |
4556 typedef bool (*CheckStoreBufferFilter)(Object** addr); | |
4557 | |
4558 | |
4559 bool IsAMapPointerAddress(Object** addr) { | |
4560 uintptr_t a = reinterpret_cast<uintptr_t>(addr); | |
4561 int mod = a % Map::kSize; | |
4562 return mod >= Map::kPointerFieldsBeginOffset && | |
4563 mod < Map::kPointerFieldsEndOffset; | |
4564 } | |
4565 | |
4566 | |
4567 bool EverythingsAPointer(Object** addr) { | |
4568 return true; | |
4569 } | |
4570 | |
4571 | |
4572 static void CheckStoreBuffer(Heap* heap, | |
4573 Object** current, | |
4574 Object** limit, | |
4575 Object**** store_buffer_position, | |
4576 Object*** store_buffer_top, | |
4577 CheckStoreBufferFilter filter, | |
4578 Address special_garbage_start, | |
4579 Address special_garbage_end) { | |
4580 Map* free_space_map = heap->free_space_map(); | |
4581 for ( ; current < limit; current++) { | |
4582 Object* o = *current; | |
4583 Address current_address = reinterpret_cast<Address>(current); | |
4584 // Skip free space. | |
4585 if (o == free_space_map) { | |
4586 Address current_address = reinterpret_cast<Address>(current); | |
4587 FreeSpace* free_space = | |
4588 FreeSpace::cast(HeapObject::FromAddress(current_address)); | |
4589 int skip = free_space->Size(); | |
4590 DCHECK(current_address + skip <= reinterpret_cast<Address>(limit)); | |
4591 DCHECK(skip > 0); | |
4592 current_address += skip - kPointerSize; | |
4593 current = reinterpret_cast<Object**>(current_address); | |
4594 continue; | |
4595 } | |
4596 // Skip the current linear allocation space between top and limit which is | |
4597 // unmarked with the free space map, but can contain junk. | |
4598 if (current_address == special_garbage_start && | |
4599 special_garbage_end != special_garbage_start) { | |
4600 current_address = special_garbage_end - kPointerSize; | |
4601 current = reinterpret_cast<Object**>(current_address); | |
4602 continue; | |
4603 } | |
4604 if (!(*filter)(current)) continue; | |
4605 DCHECK(current_address < special_garbage_start || | |
4606 current_address >= special_garbage_end); | |
4607 DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue); | |
4608 // We have to check that the pointer does not point into new space | |
4609 // without trying to cast it to a heap object since the hash field of | |
4610 // a string can contain values like 1 and 3 which are tagged null | |
4611 // pointers. | |
4612 if (!heap->InNewSpace(o)) continue; | |
4613 while (**store_buffer_position < current && | |
4614 *store_buffer_position < store_buffer_top) { | |
4615 (*store_buffer_position)++; | |
4616 } | |
4617 if (**store_buffer_position != current || | |
4618 *store_buffer_position == store_buffer_top) { | |
4619 Object** obj_start = current; | |
4620 while (!(*obj_start)->IsMap()) obj_start--; | |
4621 UNREACHABLE(); | |
4622 } | |
4623 } | |
4624 } | |
4625 | |
4626 | |
4627 // Check that the store buffer contains all intergenerational pointers by | |
4628 // scanning a page and ensuring that all pointers to young space are in the | |
4629 // store buffer. | |
4630 void Heap::OldPointerSpaceCheckStoreBuffer() { | |
4631 OldSpace* space = old_pointer_space(); | |
4632 PageIterator pages(space); | |
4633 | |
4634 store_buffer()->SortUniq(); | |
4635 | |
4636 while (pages.has_next()) { | |
4637 Page* page = pages.next(); | |
4638 Object** current = reinterpret_cast<Object**>(page->area_start()); | |
4639 | |
4640 Address end = page->area_end(); | |
4641 | |
4642 Object*** store_buffer_position = store_buffer()->Start(); | |
4643 Object*** store_buffer_top = store_buffer()->Top(); | |
4644 | |
4645 Object** limit = reinterpret_cast<Object**>(end); | |
4646 CheckStoreBuffer(this, | |
4647 current, | |
4648 limit, | |
4649 &store_buffer_position, | |
4650 store_buffer_top, | |
4651 &EverythingsAPointer, | |
4652 space->top(), | |
4653 space->limit()); | |
4654 } | |
4655 } | |
4656 | |
4657 | |
4658 void Heap::MapSpaceCheckStoreBuffer() { | |
4659 MapSpace* space = map_space(); | |
4660 PageIterator pages(space); | |
4661 | |
4662 store_buffer()->SortUniq(); | |
4663 | |
4664 while (pages.has_next()) { | |
4665 Page* page = pages.next(); | |
4666 Object** current = reinterpret_cast<Object**>(page->area_start()); | |
4667 | |
4668 Address end = page->area_end(); | |
4669 | |
4670 Object*** store_buffer_position = store_buffer()->Start(); | |
4671 Object*** store_buffer_top = store_buffer()->Top(); | |
4672 | |
4673 Object** limit = reinterpret_cast<Object**>(end); | |
4674 CheckStoreBuffer(this, | |
4675 current, | |
4676 limit, | |
4677 &store_buffer_position, | |
4678 store_buffer_top, | |
4679 &IsAMapPointerAddress, | |
4680 space->top(), | |
4681 space->limit()); | |
4682 } | |
4683 } | |
4684 | |
4685 | |
4686 void Heap::LargeObjectSpaceCheckStoreBuffer() { | |
4687 LargeObjectIterator it(lo_space()); | |
4688 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { | |
4689 // We only have code, sequential strings, or fixed arrays in large | |
4690 // object space, and only fixed arrays can possibly contain pointers to | |
4691 // the young generation. | |
4692 if (object->IsFixedArray()) { | |
4693 Object*** store_buffer_position = store_buffer()->Start(); | |
4694 Object*** store_buffer_top = store_buffer()->Top(); | |
4695 Object** current = reinterpret_cast<Object**>(object->address()); | |
4696 Object** limit = | |
4697 reinterpret_cast<Object**>(object->address() + object->Size()); | |
4698 CheckStoreBuffer(this, | |
4699 current, | |
4700 limit, | |
4701 &store_buffer_position, | |
4702 store_buffer_top, | |
4703 &EverythingsAPointer, | |
4704 NULL, | |
4705 NULL); | |
4706 } | |
4707 } | |
4708 } | |
4709 #endif | |
4710 | |
4711 | |
4712 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { | |
4713 IterateStrongRoots(v, mode); | |
4714 IterateWeakRoots(v, mode); | |
4715 } | |
4716 | |
4717 | |
4718 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { | |
4719 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex])); | |
4720 v->Synchronize(VisitorSynchronization::kStringTable); | |
4721 if (mode != VISIT_ALL_IN_SCAVENGE && | |
4722 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) { | |
4723 // Scavenge collections have special processing for this. | |
4724 external_string_table_.Iterate(v); | |
4725 } | |
4726 v->Synchronize(VisitorSynchronization::kExternalStringsTable); | |
4727 } | |
4728 | |
4729 | |
4730 void Heap::IterateSmiRoots(ObjectVisitor* v) { | |
4731 // Acquire execution access since we are going to read stack limit values. | |
4732 ExecutionAccess access(isolate()); | |
4733 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]); | |
4734 v->Synchronize(VisitorSynchronization::kSmiRootList); | |
4735 } | |
4736 | |
4737 | |
4738 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { | |
4739 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]); | |
4740 v->Synchronize(VisitorSynchronization::kStrongRootList); | |
4741 | |
4742 v->VisitPointer(BitCast<Object**>(&hidden_string_)); | |
4743 v->Synchronize(VisitorSynchronization::kInternalizedString); | |
4744 | |
4745 isolate_->bootstrapper()->Iterate(v); | |
4746 v->Synchronize(VisitorSynchronization::kBootstrapper); | |
4747 isolate_->Iterate(v); | |
4748 v->Synchronize(VisitorSynchronization::kTop); | |
4749 Relocatable::Iterate(isolate_, v); | |
4750 v->Synchronize(VisitorSynchronization::kRelocatable); | |
4751 | |
4752 if (isolate_->deoptimizer_data() != NULL) { | |
4753 isolate_->deoptimizer_data()->Iterate(v); | |
4754 } | |
4755 v->Synchronize(VisitorSynchronization::kDebug); | |
4756 isolate_->compilation_cache()->Iterate(v); | |
4757 v->Synchronize(VisitorSynchronization::kCompilationCache); | |
4758 | |
4759 // Iterate over local handles in handle scopes. | |
4760 isolate_->handle_scope_implementer()->Iterate(v); | |
4761 isolate_->IterateDeferredHandles(v); | |
4762 v->Synchronize(VisitorSynchronization::kHandleScope); | |
4763 | |
4764 // Iterate over the builtin code objects and code stubs in the | |
4765 // heap. Note that it is not necessary to iterate over code objects | |
4766 // on scavenge collections. | |
4767 if (mode != VISIT_ALL_IN_SCAVENGE) { | |
4768 isolate_->builtins()->IterateBuiltins(v); | |
4769 } | |
4770 v->Synchronize(VisitorSynchronization::kBuiltins); | |
4771 | |
4772 // Iterate over global handles. | |
4773 switch (mode) { | |
4774 case VISIT_ONLY_STRONG: | |
4775 isolate_->global_handles()->IterateStrongRoots(v); | |
4776 break; | |
4777 case VISIT_ALL_IN_SCAVENGE: | |
4778 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v); | |
4779 break; | |
4780 case VISIT_ALL_IN_SWEEP_NEWSPACE: | |
4781 case VISIT_ALL: | |
4782 isolate_->global_handles()->IterateAllRoots(v); | |
4783 break; | |
4784 } | |
4785 v->Synchronize(VisitorSynchronization::kGlobalHandles); | |
4786 | |
4787 // Iterate over eternal handles. | |
4788 if (mode == VISIT_ALL_IN_SCAVENGE) { | |
4789 isolate_->eternal_handles()->IterateNewSpaceRoots(v); | |
4790 } else { | |
4791 isolate_->eternal_handles()->IterateAllRoots(v); | |
4792 } | |
4793 v->Synchronize(VisitorSynchronization::kEternalHandles); | |
4794 | |
4795 // Iterate over pointers being held by inactive threads. | |
4796 isolate_->thread_manager()->Iterate(v); | |
4797 v->Synchronize(VisitorSynchronization::kThreadManager); | |
4798 | |
4799 // Iterate over the pointers the Serialization/Deserialization code is | |
4800 // holding. | |
4801 // During garbage collection this keeps the partial snapshot cache alive. | |
4802 // During deserialization of the startup snapshot this creates the partial | |
4803 // snapshot cache and deserializes the objects it refers to. During | |
4804 // serialization this does nothing, since the partial snapshot cache is | |
4805 // empty. However the next thing we do is create the partial snapshot, | |
4806 // filling up the partial snapshot cache with objects it needs as we go. | |
4807 SerializerDeserializer::Iterate(isolate_, v); | |
4808 // We don't do a v->Synchronize call here, because in debug mode that will | |
4809 // output a flag to the snapshot. However at this point the serializer and | |
4810 // deserializer are deliberately a little unsynchronized (see above) so the | |
4811 // checking of the sync flag in the snapshot would fail. | |
4812 } | |
4813 | |
4814 | |
4815 // TODO(1236194): Since the heap size is configurable on the command line | |
4816 // and through the API, we should gracefully handle the case that the heap | |
4817 // size is not big enough to fit all the initial objects. | |
4818 bool Heap::ConfigureHeap(int max_semi_space_size, | |
4819 int max_old_space_size, | |
4820 int max_executable_size, | |
4821 size_t code_range_size) { | |
4822 if (HasBeenSetUp()) return false; | |
4823 | |
4824 // Overwrite default configuration. | |
4825 if (max_semi_space_size > 0) { | |
4826 max_semi_space_size_ = max_semi_space_size * MB; | |
4827 } | |
4828 if (max_old_space_size > 0) { | |
4829 max_old_generation_size_ = max_old_space_size * MB; | |
4830 } | |
4831 if (max_executable_size > 0) { | |
4832 max_executable_size_ = max_executable_size * MB; | |
4833 } | |
4834 | |
4835 // If max space size flags are specified overwrite the configuration. | |
4836 if (FLAG_max_semi_space_size > 0) { | |
4837 max_semi_space_size_ = FLAG_max_semi_space_size * MB; | |
4838 } | |
4839 if (FLAG_max_old_space_size > 0) { | |
4840 max_old_generation_size_ = FLAG_max_old_space_size * MB; | |
4841 } | |
4842 if (FLAG_max_executable_size > 0) { | |
4843 max_executable_size_ = FLAG_max_executable_size * MB; | |
4844 } | |
4845 | |
4846 if (FLAG_stress_compaction) { | |
4847 // This will cause more frequent GCs when stressing. | |
4848 max_semi_space_size_ = Page::kPageSize; | |
4849 } | |
4850 | |
4851 if (Snapshot::HaveASnapshotToStartFrom()) { | |
4852 // If we are using a snapshot we always reserve the default amount | |
4853 // of memory for each semispace because code in the snapshot has | |
4854 // write-barrier code that relies on the size and alignment of new | |
4855 // space. We therefore cannot use a larger max semispace size | |
4856 // than the default reserved semispace size. | |
4857 if (max_semi_space_size_ > reserved_semispace_size_) { | |
4858 max_semi_space_size_ = reserved_semispace_size_; | |
4859 if (FLAG_trace_gc) { | |
4860 PrintPID("Max semi-space size cannot be more than %d kbytes\n", | |
4861 reserved_semispace_size_ >> 10); | |
4862 } | |
4863 } | |
4864 } else { | |
4865 // If we are not using snapshots we reserve space for the actual | |
4866 // max semispace size. | |
4867 reserved_semispace_size_ = max_semi_space_size_; | |
4868 } | |
4869 | |
4870 // The max executable size must be less than or equal to the max old | |
4871 // generation size. | |
4872 if (max_executable_size_ > max_old_generation_size_) { | |
4873 max_executable_size_ = max_old_generation_size_; | |
4874 } | |
4875 | |
4876 // The new space size must be a power of two to support single-bit testing | |
4877 // for containment. | |
4878 max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_); | |
4879 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_); | |
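// For example, a 5 MB max semi-space size requested via flags or the API | |
// is rounded up to 8 MB here. | |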
4880 | |
4881 if (FLAG_min_semi_space_size > 0) { | |
4882 int initial_semispace_size = FLAG_min_semi_space_size * MB; | |
4883 if (initial_semispace_size > max_semi_space_size_) { | |
4884 initial_semispace_size_ = max_semi_space_size_; | |
4885 if (FLAG_trace_gc) { | |
4886 PrintPID("Min semi-space size cannot be more than the maximum" | |
4887 "semi-space size of %d MB\n", max_semi_space_size_); | |
4888 } | |
4889 } else { | |
4890 initial_semispace_size_ = initial_semispace_size; | |
4891 } | |
4892 } | |
4893 | |
4894 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_); | |
4895 | |
4896 // The old generation is paged and needs at least one page for each space. | |
4897 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | |
4898 max_old_generation_size_ = | |
4899 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize), | |
4900 max_old_generation_size_); | |
4901 | |
4902 // We rely on being able to allocate new arrays in paged spaces. | |
4903 DCHECK(Page::kMaxRegularHeapObjectSize >= | |
4904 (JSArray::kSize + | |
4905 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) + | |
4906 AllocationMemento::kSize)); | |
4907 | |
4908 code_range_size_ = code_range_size * MB; | |
4909 | |
4910 configured_ = true; | |
4911 return true; | |
4912 } | |
4913 | |
4914 | |
4915 bool Heap::ConfigureHeapDefault() { | |
4916 return ConfigureHeap(0, 0, 0, 0); | |
4917 } | |
4918 | |
4919 | |
4920 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) { | |
4921 *stats->start_marker = HeapStats::kStartMarker; | |
4922 *stats->end_marker = HeapStats::kEndMarker; | |
4923 *stats->new_space_size = new_space_.SizeAsInt(); | |
4924 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity()); | |
4925 *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects(); | |
4926 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity(); | |
4927 *stats->old_data_space_size = old_data_space_->SizeOfObjects(); | |
4928 *stats->old_data_space_capacity = old_data_space_->Capacity(); | |
4929 *stats->code_space_size = code_space_->SizeOfObjects(); | |
4930 *stats->code_space_capacity = code_space_->Capacity(); | |
4931 *stats->map_space_size = map_space_->SizeOfObjects(); | |
4932 *stats->map_space_capacity = map_space_->Capacity(); | |
4933 *stats->cell_space_size = cell_space_->SizeOfObjects(); | |
4934 *stats->cell_space_capacity = cell_space_->Capacity(); | |
4935 *stats->property_cell_space_size = property_cell_space_->SizeOfObjects(); | |
4936 *stats->property_cell_space_capacity = property_cell_space_->Capacity(); | |
4937 *stats->lo_space_size = lo_space_->Size(); | |
4938 isolate_->global_handles()->RecordStats(stats); | |
4939 *stats->memory_allocator_size = isolate()->memory_allocator()->Size(); | |
4940 *stats->memory_allocator_capacity = | |
4941 isolate()->memory_allocator()->Size() + | |
4942 isolate()->memory_allocator()->Available(); | |
4943 *stats->os_error = base::OS::GetLastError(); | |
4945 if (take_snapshot) { | |
4946 HeapIterator iterator(this); | |
4947 for (HeapObject* obj = iterator.next(); | |
4948 obj != NULL; | |
4949 obj = iterator.next()) { | |
4950 InstanceType type = obj->map()->instance_type(); | |
4951 DCHECK(0 <= type && type <= LAST_TYPE); | |
4952 stats->objects_per_type[type]++; | |
4953 stats->size_per_type[type] += obj->Size(); | |
4954 } | |
4955 } | |
4956 } | |
4957 | |
4958 | |
4959 intptr_t Heap::PromotedSpaceSizeOfObjects() { | |
4960 return old_pointer_space_->SizeOfObjects() | |
4961 + old_data_space_->SizeOfObjects() | |
4962 + code_space_->SizeOfObjects() | |
4963 + map_space_->SizeOfObjects() | |
4964 + cell_space_->SizeOfObjects() | |
4965 + property_cell_space_->SizeOfObjects() | |
4966 + lo_space_->SizeOfObjects(); | |
4967 } | |
4968 | |
4969 | |
4970 int64_t Heap::PromotedExternalMemorySize() { | |
4971 if (amount_of_external_allocated_memory_ | |
4972 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0; | |
4973 return amount_of_external_allocated_memory_ | |
4974 - amount_of_external_allocated_memory_at_last_global_gc_; | |
4975 } | |
4976 | |
4977 | |
4978 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size, | |
4979 int freed_global_handles) { | |
4980 const int kMaxHandles = 1000; | |
4981 const int kMinHandles = 100; | |
4982 double min_factor = 1.1; | |
4983 double max_factor = 4; | |
4984 // We set the old generation growing factor to 2 to grow the heap more | |
4985 // slowly on memory-constrained devices. | |
4986 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) { | |
4987 max_factor = 2; | |
4988 } | |
4989 // If there are many freed global handles, then the next full GC will | |
4990 // likely collect a lot of garbage. Choose the heap growing factor | |
4991 // depending on freed global handles. | |
4992 // TODO(ulan, hpayer): Take into account mutator utilization. | |
4993 double factor; | |
4994 if (freed_global_handles <= kMinHandles) { | |
4995 factor = max_factor; | |
4996 } else if (freed_global_handles >= kMaxHandles) { | |
4997 factor = min_factor; | |
4998 } else { | |
4999 // Compute factor using linear interpolation between points | |
5000 // (kMinHandles, max_factor) and (kMaxHandles, min_factor). | |
5001 factor = max_factor - | |
5002 (freed_global_handles - kMinHandles) * (max_factor - min_factor) / | |
5003 (kMaxHandles - kMinHandles); | |
5004 } | |
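// Illustrative example (hypothetical input): with kMinHandles = 100, | |
// kMaxHandles = 1000, min_factor = 1.1 and max_factor = 4, freeing 550 | |
// global handles gives | |
//   factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100) = 4 - 1.45 = 2.55. | |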
5005 | |
5006 if (FLAG_stress_compaction || | |
5007 mark_compact_collector()->reduce_memory_footprint_) { | |
5008 factor = min_factor; | |
5009 } | |
5010 | |
5011 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor); | |
5012 limit = Max(limit, kMinimumOldGenerationAllocationLimit); | |
5013 limit += new_space_.Capacity(); | |
5014 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; | |
5015 return Min(limit, halfway_to_the_max); | |
5016 } | |
5017 | |
5018 | |
5019 void Heap::EnableInlineAllocation() { | |
5020 if (!inline_allocation_disabled_) return; | |
5021 inline_allocation_disabled_ = false; | |
5022 | |
5023 // Update inline allocation limit for new space. | |
5024 new_space()->UpdateInlineAllocationLimit(0); | |
5025 } | |
5026 | |
5027 | |
5028 void Heap::DisableInlineAllocation() { | |
5029 if (inline_allocation_disabled_) return; | |
5030 inline_allocation_disabled_ = true; | |
5031 | |
5032 // Update inline allocation limit for new space. | |
5033 new_space()->UpdateInlineAllocationLimit(0); | |
5034 | |
5035 // Update inline allocation limit for old spaces. | |
5036 PagedSpaces spaces(this); | |
5037 for (PagedSpace* space = spaces.next(); | |
5038 space != NULL; | |
5039 space = spaces.next()) { | |
5040 space->EmptyAllocationInfo(); | |
5041 } | |
5042 } | |
5043 | |
5044 | |
5045 V8_DECLARE_ONCE(initialize_gc_once); | |
5046 | |
5047 static void InitializeGCOnce() { | |
5048 InitializeScavengingVisitorsTables(); | |
5049 NewSpaceScavenger::Initialize(); | |
5050 MarkCompactCollector::Initialize(); | |
5051 } | |
5052 | |
5053 | |
5054 bool Heap::SetUp() { | |
5055 #ifdef DEBUG | |
5056 allocation_timeout_ = FLAG_gc_interval; | |
5057 #endif | |
5058 | |
5059 // Initialize heap spaces and initial maps and objects. Whenever something | |
5060 // goes wrong, just return false. The caller should check the results and | |
5061 // call Heap::TearDown() to release allocated memory. | |
5062 // | |
5063 // If the heap is not yet configured (e.g. through the API), configure it. | |
5064 // Configuration is based on the flags new-space-size (really the semispace | |
5065 // size) and old-space-size if set or the initial values of semispace_size_ | |
5066 // and old_generation_size_ otherwise. | |
5067 if (!configured_) { | |
5068 if (!ConfigureHeapDefault()) return false; | |
5069 } | |
5070 | |
5071 base::CallOnce(&initialize_gc_once, &InitializeGCOnce); | |
5072 | |
5073 MarkMapPointersAsEncoded(false); | |
5074 | |
5075 // Set up memory allocator. | |
5076 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize())) | |
5077 return false; | |
5078 | |
5079 // Set up new space. | |
5080 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) { | |
5081 return false; | |
5082 } | |
5083 new_space_top_after_last_gc_ = new_space()->top(); | |
5084 | |
5085 // Initialize old pointer space. | |
5086 old_pointer_space_ = | |
5087 new OldSpace(this, | |
5088 max_old_generation_size_, | |
5089 OLD_POINTER_SPACE, | |
5090 NOT_EXECUTABLE); | |
5091 if (old_pointer_space_ == NULL) return false; | |
5092 if (!old_pointer_space_->SetUp()) return false; | |
5093 | |
5094 // Initialize old data space. | |
5095 old_data_space_ = | |
5096 new OldSpace(this, | |
5097 max_old_generation_size_, | |
5098 OLD_DATA_SPACE, | |
5099 NOT_EXECUTABLE); | |
5100 if (old_data_space_ == NULL) return false; | |
5101 if (!old_data_space_->SetUp()) return false; | |
5102 | |
5103 if (!isolate_->code_range()->SetUp(code_range_size_)) return false; | |
5104 | |
5105 // Initialize the code space, set its maximum capacity to the old | |
5106 // generation size. It needs executable memory. | |
5107 code_space_ = | |
5108 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); | |
5109 if (code_space_ == NULL) return false; | |
5110 if (!code_space_->SetUp()) return false; | |
5111 | |
5112 // Initialize map space. | |
5113 map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE); | |
5114 if (map_space_ == NULL) return false; | |
5115 if (!map_space_->SetUp()) return false; | |
5116 | |
5117 // Initialize simple cell space. | |
5118 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE); | |
5119 if (cell_space_ == NULL) return false; | |
5120 if (!cell_space_->SetUp()) return false; | |
5121 | |
5122 // Initialize global property cell space. | |
5123 property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_, | |
5124 PROPERTY_CELL_SPACE); | |
5125 if (property_cell_space_ == NULL) return false; | |
5126 if (!property_cell_space_->SetUp()) return false; | |
5127 | |
5128 // The large object space may contain code or data. We set the memory | |
5129 // to be non-executable here for safety, but this means we need to enable | |
5130 // it explicitly when allocating large code objects. | |
5131 lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE); | |
5132 if (lo_space_ == NULL) return false; | |
5133 if (!lo_space_->SetUp()) return false; | |
5134 | |
5135 // Set up the seed that is used to randomize the string hash function. | |
5136 DCHECK(hash_seed() == 0); | |
5137 if (FLAG_randomize_hashes) { | |
5138 if (FLAG_hash_seed == 0) { | |
5139 int rnd = isolate()->random_number_generator()->NextInt(); | |
5140 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask)); | |
5141 } else { | |
5142 set_hash_seed(Smi::FromInt(FLAG_hash_seed)); | |
5143 } | |
5144 } | |
5145 | |
5146 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); | |
5147 LOG(isolate_, IntPtrTEvent("heap-available", Available())); | |
5148 | |
5149 store_buffer()->SetUp(); | |
5150 | |
5151 mark_compact_collector()->SetUp(); | |
5152 | |
5153 return true; | |
5154 } | |
5155 | |
5156 | |
5157 bool Heap::CreateHeapObjects() { | |
5158 // Create initial maps. | |
5159 if (!CreateInitialMaps()) return false; | |
5160 CreateApiObjects(); | |
5161 | |
5162 // Create initial objects | |
5163 CreateInitialObjects(); | |
5164 CHECK_EQ(0, gc_count_); | |
5165 | |
5166 set_native_contexts_list(undefined_value()); | |
5167 set_array_buffers_list(undefined_value()); | |
5168 set_allocation_sites_list(undefined_value()); | |
5169 weak_object_to_code_table_ = undefined_value(); | |
5170 return true; | |
5171 } | |
5172 | |
5173 | |
5174 void Heap::SetStackLimits() { | |
5175 DCHECK(isolate_ != NULL); | |
5176 DCHECK(isolate_ == isolate()); | |
5177 // On 64 bit machines, pointers are generally out of range of Smis. We write | |
5178 // something that looks like an out of range Smi to the GC. | |
5179 | |
5180 // Set up the special root array entries containing the stack limits. | |
5181 // These are actually addresses, but the tag makes the GC ignore it. | |
5182 roots_[kStackLimitRootIndex] = | |
5183 reinterpret_cast<Object*>( | |
5184 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag); | |
5185 roots_[kRealStackLimitRootIndex] = | |
5186 reinterpret_cast<Object*>( | |
5187 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag); | |
5188 } | |
5189 | |
5190 | |
5191 void Heap::TearDown() { | |
5192 #ifdef VERIFY_HEAP | |
5193 if (FLAG_verify_heap) { | |
5194 Verify(); | |
5195 } | |
5196 #endif | |
5197 | |
5198 UpdateMaximumCommitted(); | |
5199 | |
5200 if (FLAG_print_cumulative_gc_stat) { | |
5201 PrintF("\n"); | |
5202 PrintF("gc_count=%d ", gc_count_); | |
5203 PrintF("mark_sweep_count=%d ", ms_count_); | |
5204 PrintF("max_gc_pause=%.1f ", get_max_gc_pause()); | |
5205 PrintF("total_gc_time=%.1f ", total_gc_time_ms_); | |
5206 PrintF("min_in_mutator=%.1f ", get_min_in_mutator()); | |
5207 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", | |
5208 get_max_alive_after_gc()); | |
5209 PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration()); | |
5210 PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration()); | |
5211 PrintF("\n\n"); | |
5212 } | |
5213 | |
5214 if (FLAG_print_max_heap_committed) { | |
5215 PrintF("\n"); | |
5216 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ", | |
5217 MaximumCommittedMemory()); | |
5218 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ", | |
5219 new_space_.MaximumCommittedMemory()); | |
5220 PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ", | |
5221 old_data_space_->MaximumCommittedMemory()); | |
5222 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ", | |
5223 old_pointer_space_->MaximumCommittedMemory()); | |
5224 PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ", | |
5225 old_pointer_space_->MaximumCommittedMemory()); | |
5226 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ", | |
5227 code_space_->MaximumCommittedMemory()); | |
5228 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ", | |
5229 map_space_->MaximumCommittedMemory()); | |
5230 PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ", | |
5231 cell_space_->MaximumCommittedMemory()); | |
5232 PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ", | |
5233 property_cell_space_->MaximumCommittedMemory()); | |
5234 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ", | |
5235 lo_space_->MaximumCommittedMemory()); | |
5236 PrintF("\n\n"); | |
5237 } | |
5238 | |
5239 if (FLAG_verify_predictable) { | |
5240 PrintAlloctionsHash(); | |
5241 } | |
5242 | |
5243 TearDownArrayBuffers(); | |
5244 | |
5245 isolate_->global_handles()->TearDown(); | |
5246 | |
5247 external_string_table_.TearDown(); | |
5248 | |
5249 mark_compact_collector()->TearDown(); | |
5250 | |
5251 new_space_.TearDown(); | |
5252 | |
5253 if (old_pointer_space_ != NULL) { | |
5254 old_pointer_space_->TearDown(); | |
5255 delete old_pointer_space_; | |
5256 old_pointer_space_ = NULL; | |
5257 } | |
5258 | |
5259 if (old_data_space_ != NULL) { | |
5260 old_data_space_->TearDown(); | |
5261 delete old_data_space_; | |
5262 old_data_space_ = NULL; | |
5263 } | |
5264 | |
5265 if (code_space_ != NULL) { | |
5266 code_space_->TearDown(); | |
5267 delete code_space_; | |
5268 code_space_ = NULL; | |
5269 } | |
5270 | |
5271 if (map_space_ != NULL) { | |
5272 map_space_->TearDown(); | |
5273 delete map_space_; | |
5274 map_space_ = NULL; | |
5275 } | |
5276 | |
5277 if (cell_space_ != NULL) { | |
5278 cell_space_->TearDown(); | |
5279 delete cell_space_; | |
5280 cell_space_ = NULL; | |
5281 } | |
5282 | |
5283 if (property_cell_space_ != NULL) { | |
5284 property_cell_space_->TearDown(); | |
5285 delete property_cell_space_; | |
5286 property_cell_space_ = NULL; | |
5287 } | |
5288 | |
5289 if (lo_space_ != NULL) { | |
5290 lo_space_->TearDown(); | |
5291 delete lo_space_; | |
5292 lo_space_ = NULL; | |
5293 } | |
5294 | |
5295 store_buffer()->TearDown(); | |
5296 incremental_marking()->TearDown(); | |
5297 | |
5298 isolate_->memory_allocator()->TearDown(); | |
5299 } | |
5300 | |
5301 | |
5302 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, | |
5303 GCType gc_type, | |
5304 bool pass_isolate) { | |
5305 DCHECK(callback != NULL); | |
5306 GCPrologueCallbackPair pair(callback, gc_type, pass_isolate); | |
5307 DCHECK(!gc_prologue_callbacks_.Contains(pair)); | |
5308 return gc_prologue_callbacks_.Add(pair); | |
5309 } | |
5310 | |
5311 | |
5312 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) { | |
5313 DCHECK(callback != NULL); | |
5314 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { | |
5315 if (gc_prologue_callbacks_[i].callback == callback) { | |
5316 gc_prologue_callbacks_.Remove(i); | |
5317 return; | |
5318 } | |
5319 } | |
5320 UNREACHABLE(); | |
5321 } | |
5322 | |
5323 | |
5324 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, | |
5325 GCType gc_type, | |
5326 bool pass_isolate) { | |
5327 DCHECK(callback != NULL); | |
5328 GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate); | |
5329 DCHECK(!gc_epilogue_callbacks_.Contains(pair)); | |
5330 return gc_epilogue_callbacks_.Add(pair); | |
5331 } | |
5332 | |
5333 | |
5334 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) { | |
5335 DCHECK(callback != NULL); | |
5336 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { | |
5337 if (gc_epilogue_callbacks_[i].callback == callback) { | |
5338 gc_epilogue_callbacks_.Remove(i); | |
5339 return; | |
5340 } | |
5341 } | |
5342 UNREACHABLE(); | |
5343 } | |
5344 | |
5345 | |
5346 // TODO(ishell): Find a better place for this. | |
5347 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj, | |
5348 Handle<DependentCode> dep) { | |
5349 DCHECK(!InNewSpace(*obj)); | |
5350 DCHECK(!InNewSpace(*dep)); | |
5351 // This handle scope keeps the table handle local to this function, which | |
5352 // allows us to safely skip write barriers in table update operations. | |
5353 HandleScope scope(isolate()); | |
5354 Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_), | |
5355 isolate()); | |
5356 table = WeakHashTable::Put(table, obj, dep); | |
5357 | |
5358 if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) { | |
5359 WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value()); | |
5360 } | |
5361 set_weak_object_to_code_table(*table); | |
5362 DCHECK_EQ(*dep, table->Lookup(obj)); | |
5363 } | |
5364 | |
5365 | |
5366 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) { | |
5367 Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj); | |
5368 if (dep->IsDependentCode()) return DependentCode::cast(dep); | |
5369 return DependentCode::cast(empty_fixed_array()); | |
5370 } | |
5371 | |
5372 | |
5373 void Heap::EnsureWeakObjectToCodeTable() { | |
5374 if (!weak_object_to_code_table()->IsHashTable()) { | |
5375 set_weak_object_to_code_table(*WeakHashTable::New( | |
5376 isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY, TENURED)); | |
5377 } | |
5378 } | |
5379 | |
5380 | |
5381 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) { | |
5382 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot); | |
5383 } | |
5384 | |
5385 #ifdef DEBUG | |
5386 | |
5387 class PrintHandleVisitor: public ObjectVisitor { | |
5388 public: | |
5389 void VisitPointers(Object** start, Object** end) { | |
5390 for (Object** p = start; p < end; p++) | |
5391 PrintF(" handle %p to %p\n", | |
5392 reinterpret_cast<void*>(p), | |
5393 reinterpret_cast<void*>(*p)); | |
5394 } | |
5395 }; | |
5396 | |
5397 | |
5398 void Heap::PrintHandles() { | |
5399 PrintF("Handles:\n"); | |
5400 PrintHandleVisitor v; | |
5401 isolate_->handle_scope_implementer()->Iterate(&v); | |
5402 } | |
5403 | |
5404 #endif | |
5405 | |
5406 | |
5407 Space* AllSpaces::next() { | |
5408 switch (counter_++) { | |
5409 case NEW_SPACE: | |
5410 return heap_->new_space(); | |
5411 case OLD_POINTER_SPACE: | |
5412 return heap_->old_pointer_space(); | |
5413 case OLD_DATA_SPACE: | |
5414 return heap_->old_data_space(); | |
5415 case CODE_SPACE: | |
5416 return heap_->code_space(); | |
5417 case MAP_SPACE: | |
5418 return heap_->map_space(); | |
5419 case CELL_SPACE: | |
5420 return heap_->cell_space(); | |
5421 case PROPERTY_CELL_SPACE: | |
5422 return heap_->property_cell_space(); | |
5423 case LO_SPACE: | |
5424 return heap_->lo_space(); | |
5425 default: | |
5426 return NULL; | |
5427 } | |
5428 } | |
5429 | |
5430 | |
5431 PagedSpace* PagedSpaces::next() { | |
5432 switch (counter_++) { | |
5433 case OLD_POINTER_SPACE: | |
5434 return heap_->old_pointer_space(); | |
5435 case OLD_DATA_SPACE: | |
5436 return heap_->old_data_space(); | |
5437 case CODE_SPACE: | |
5438 return heap_->code_space(); | |
5439 case MAP_SPACE: | |
5440 return heap_->map_space(); | |
5441 case CELL_SPACE: | |
5442 return heap_->cell_space(); | |
5443 case PROPERTY_CELL_SPACE: | |
5444 return heap_->property_cell_space(); | |
5445 default: | |
5446 return NULL; | |
5447 } | |
5448 } | |
5449 | |
5450 | |
5451 | |
5452 OldSpace* OldSpaces::next() { | |
5453 switch (counter_++) { | |
5454 case OLD_POINTER_SPACE: | |
5455 return heap_->old_pointer_space(); | |
5456 case OLD_DATA_SPACE: | |
5457 return heap_->old_data_space(); | |
5458 case CODE_SPACE: | |
5459 return heap_->code_space(); | |
5460 default: | |
5461 return NULL; | |
5462 } | |
5463 } | |
5464 | |
5465 | |
5466 SpaceIterator::SpaceIterator(Heap* heap) | |
5467 : heap_(heap), | |
5468 current_space_(FIRST_SPACE), | |
5469 iterator_(NULL), | |
5470 size_func_(NULL) { | |
5471 } | |
5472 | |
5473 | |
5474 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func) | |
5475 : heap_(heap), | |
5476 current_space_(FIRST_SPACE), | |
5477 iterator_(NULL), | |
5478 size_func_(size_func) { | |
5479 } | |
5480 | |
5481 | |
5482 SpaceIterator::~SpaceIterator() { | |
5483 // Delete active iterator if any. | |
5484 delete iterator_; | |
5485 } | |
5486 | |
5487 | |
5488 bool SpaceIterator::has_next() { | |
5489 // Iterate until no more spaces. | |
5490 return current_space_ != LAST_SPACE; | |
5491 } | |
5492 | |
5493 | |
5494 ObjectIterator* SpaceIterator::next() { | |
5495 if (iterator_ != NULL) { | |
5496 delete iterator_; | |
5497 iterator_ = NULL; | |
5498 // Move to the next space | |
5499 current_space_++; | |
5500 if (current_space_ > LAST_SPACE) { | |
5501 return NULL; | |
5502 } | |
5503 } | |
5504 | |
5505 // Return iterator for the new current space. | |
5506 return CreateIterator(); | |
5507 } | |
5508 | |
5509 | |
5510 // Create an iterator for the space to iterate. | |
5511 ObjectIterator* SpaceIterator::CreateIterator() { | |
5512 DCHECK(iterator_ == NULL); | |
5513 | |
5514 switch (current_space_) { | |
5515 case NEW_SPACE: | |
5516 iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_); | |
5517 break; | |
5518 case OLD_POINTER_SPACE: | |
5519 iterator_ = | |
5520 new HeapObjectIterator(heap_->old_pointer_space(), size_func_); | |
5521 break; | |
5522 case OLD_DATA_SPACE: | |
5523 iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_); | |
5524 break; | |
5525 case CODE_SPACE: | |
5526 iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_); | |
5527 break; | |
5528 case MAP_SPACE: | |
5529 iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_); | |
5530 break; | |
5531 case CELL_SPACE: | |
5532 iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_); | |
5533 break; | |
5534 case PROPERTY_CELL_SPACE: | |
5535 iterator_ = new HeapObjectIterator(heap_->property_cell_space(), | |
5536 size_func_); | |
5537 break; | |
5538 case LO_SPACE: | |
5539 iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_); | |
5540 break; | |
5541 } | |
5542 | |
5543 // Return the newly allocated iterator. | |
5544 DCHECK(iterator_ != NULL); | |
5545 return iterator_; | |
5546 } | |
5547 | |
5548 | |
5549 class HeapObjectsFilter { | |
5550 public: | |
5551 virtual ~HeapObjectsFilter() {} | |
5552 virtual bool SkipObject(HeapObject* object) = 0; | |
5553 }; | |
5554 | |
5555 | |
5556 class UnreachableObjectsFilter : public HeapObjectsFilter { | |
5557 public: | |
5558 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) { | |
5559 MarkReachableObjects(); | |
5560 } | |
5561 | |
5562 ~UnreachableObjectsFilter() { | |
5563 heap_->mark_compact_collector()->ClearMarkbits(); | |
5564 } | |
5565 | |
5566 bool SkipObject(HeapObject* object) { | |
5567 MarkBit mark_bit = Marking::MarkBitFrom(object); | |
5568 return !mark_bit.Get(); | |
5569 } | |
5570 | |
5571 private: | |
5572 class MarkingVisitor : public ObjectVisitor { | |
5573 public: | |
5574 MarkingVisitor() : marking_stack_(10) {} | |
5575 | |
5576 void VisitPointers(Object** start, Object** end) { | |
5577 for (Object** p = start; p < end; p++) { | |
5578 if (!(*p)->IsHeapObject()) continue; | |
5579 HeapObject* obj = HeapObject::cast(*p); | |
5580 MarkBit mark_bit = Marking::MarkBitFrom(obj); | |
5581 if (!mark_bit.Get()) { | |
5582 mark_bit.Set(); | |
5583 marking_stack_.Add(obj); | |
5584 } | |
5585 } | |
5586 } | |
5587 | |
5588 void TransitiveClosure() { | |
5589 while (!marking_stack_.is_empty()) { | |
5590 HeapObject* obj = marking_stack_.RemoveLast(); | |
5591 obj->Iterate(this); | |
5592 } | |
5593 } | |
5594 | |
5595 private: | |
5596 List<HeapObject*> marking_stack_; | |
5597 }; | |
5598 | |
5599 void MarkReachableObjects() { | |
5600 MarkingVisitor visitor; | |
5601 heap_->IterateRoots(&visitor, VISIT_ALL); | |
5602 visitor.TransitiveClosure(); | |
5603 } | |
5604 | |
5605 Heap* heap_; | |
5606 DisallowHeapAllocation no_allocation_; | |
5607 }; | |
5608 | |
5609 | |
5610 HeapIterator::HeapIterator(Heap* heap) | |
5611 : make_heap_iterable_helper_(heap), | |
5612 no_heap_allocation_(), | |
5613 heap_(heap), | |
5614 filtering_(HeapIterator::kNoFiltering), | |
5615 filter_(NULL) { | |
5616 Init(); | |
5617 } | |
5618 | |
5619 | |
5620 HeapIterator::HeapIterator(Heap* heap, | |
5621 HeapIterator::HeapObjectsFiltering filtering) | |
5622 : make_heap_iterable_helper_(heap), | |
5623 no_heap_allocation_(), | |
5624 heap_(heap), | |
5625 filtering_(filtering), | |
5626 filter_(NULL) { | |
5627 Init(); | |
5628 } | |
5629 | |
5630 | |
5631 HeapIterator::~HeapIterator() { | |
5632 Shutdown(); | |
5633 } | |
5634 | |
5635 | |
5636 void HeapIterator::Init() { | |
5637 // Start the iteration. | |
5638 space_iterator_ = new SpaceIterator(heap_); | |
5639 switch (filtering_) { | |
5640 case kFilterUnreachable: | |
5641 filter_ = new UnreachableObjectsFilter(heap_); | |
5642 break; | |
5643 default: | |
5644 break; | |
5645 } | |
5646 object_iterator_ = space_iterator_->next(); | |
5647 } | |
5648 | |
5649 | |
5650 void HeapIterator::Shutdown() { | |
5651 #ifdef DEBUG | |
5652 // Assert that in filtering mode we have iterated through all | |
5653 // objects. Otherwise, heap will be left in an inconsistent state. | |
5654 if (filtering_ != kNoFiltering) { | |
5655 DCHECK(object_iterator_ == NULL); | |
5656 } | |
5657 #endif | |
5658 // Make sure the last iterator is deallocated. | |
5659 delete space_iterator_; | |
5660 space_iterator_ = NULL; | |
5661 object_iterator_ = NULL; | |
5662 delete filter_; | |
5663 filter_ = NULL; | |
5664 } | |
5665 | |
5666 | |
5667 HeapObject* HeapIterator::next() { | |
5668 if (filter_ == NULL) return NextObject(); | |
5669 | |
5670 HeapObject* obj = NextObject(); | |
5671 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject(); | |
5672 return obj; | |
5673 } | |
5674 | |
5675 | |
5676 HeapObject* HeapIterator::NextObject() { | |
5677 // No iterator means we are done. | |
5678 if (object_iterator_ == NULL) return NULL; | |
5679 | |
5680 if (HeapObject* obj = object_iterator_->next_object()) { | |
5681 // If the current iterator has more objects we are fine. | |
5682 return obj; | |
5683 } else { | |
5684 // Go through the spaces looking for one that has objects. | |
5685 while (space_iterator_->has_next()) { | |
5686 object_iterator_ = space_iterator_->next(); | |
5687 if (HeapObject* obj = object_iterator_->next_object()) { | |
5688 return obj; | |
5689 } | |
5690 } | |
5691 } | |
5692 // Done with the last space. | |
5693 object_iterator_ = NULL; | |
5694 return NULL; | |
5695 } | |
5696 | |
5697 | |
5698 void HeapIterator::reset() { | |
5699 // Restart the iterator. | |
5700 Shutdown(); | |
5701 Init(); | |
5702 } | |
5703 | |
5704 | |
5705 #ifdef DEBUG | |
5706 | |
5707 Object* const PathTracer::kAnyGlobalObject = NULL; | |
5708 | |
5709 class PathTracer::MarkVisitor: public ObjectVisitor { | |
5710 public: | |
5711 explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {} | |
5712 void VisitPointers(Object** start, Object** end) { | |
5713 // Scan all HeapObject pointers in [start, end) | |
5714 for (Object** p = start; !tracer_->found() && (p < end); p++) { | |
5715 if ((*p)->IsHeapObject()) | |
5716 tracer_->MarkRecursively(p, this); | |
5717 } | |
5718 } | |
5719 | |
5720 private: | |
5721 PathTracer* tracer_; | |
5722 }; | |
5723 | |
5724 | |
5725 class PathTracer::UnmarkVisitor: public ObjectVisitor { | |
5726 public: | |
5727 explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {} | |
5728 void VisitPointers(Object** start, Object** end) { | |
5729 // Scan all HeapObject pointers in [start, end) | |
5730 for (Object** p = start; p < end; p++) { | |
5731 if ((*p)->IsHeapObject()) | |
5732 tracer_->UnmarkRecursively(p, this); | |
5733 } | |
5734 } | |
5735 | |
5736 private: | |
5737 PathTracer* tracer_; | |
5738 }; | |
5739 | |
5740 | |
5741 void PathTracer::VisitPointers(Object** start, Object** end) { | |
5742 bool done = ((what_to_find_ == FIND_FIRST) && found_target_); | |
5743 // Visit all HeapObject pointers in [start, end) | |
5744 for (Object** p = start; !done && (p < end); p++) { | |
5745 if ((*p)->IsHeapObject()) { | |
5746 TracePathFrom(p); | |
5747 done = ((what_to_find_ == FIND_FIRST) && found_target_); | |
5748 } | |
5749 } | |
5750 } | |
5751 | |
5752 | |
5753 void PathTracer::Reset() { | |
5754 found_target_ = false; | |
5755 object_stack_.Clear(); | |
5756 } | |
5757 | |
5758 | |
5759 void PathTracer::TracePathFrom(Object** root) { | |
5760 DCHECK((search_target_ == kAnyGlobalObject) || | |
5761 search_target_->IsHeapObject()); | |
5762 found_target_in_trace_ = false; | |
5763 Reset(); | |
5764 | |
5765 MarkVisitor mark_visitor(this); | |
5766 MarkRecursively(root, &mark_visitor); | |
5767 | |
5768 UnmarkVisitor unmark_visitor(this); | |
5769 UnmarkRecursively(root, &unmark_visitor); | |
5770 | |
5771 ProcessResults(); | |
5772 } | |
5773 | |
5774 | |
5775 static bool SafeIsNativeContext(HeapObject* obj) { | |
5776 return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map(); | |
5777 } | |
5778 | |
5779 | |
5780 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) { | |
5781 if (!(*p)->IsHeapObject()) return; | |
5782 | |
5783 HeapObject* obj = HeapObject::cast(*p); | |
5784 | |
5785 MapWord map_word = obj->map_word(); | |
5786 if (!map_word.ToMap()->IsHeapObject()) return; // visited before | |
5787 | |
5788 if (found_target_in_trace_) return; // stop if target found | |
5789 object_stack_.Add(obj); | |
5790 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) || | |
5791 (obj == search_target_)) { | |
5792 found_target_in_trace_ = true; | |
5793 found_target_ = true; | |
5794 return; | |
5795 } | |
5796 | |
5797 bool is_native_context = SafeIsNativeContext(obj); | |
5798 | |
5799 // not visited yet | |
5800 Map* map = Map::cast(map_word.ToMap()); | |
5801 | |
5802 MapWord marked_map_word = | |
5803 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag); | |
5804 obj->set_map_word(marked_map_word); | |
5805 | |
5806 // Scan the object body. | |
5807 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) { | |
5808 // This is specialized to scan Context's properly. | |
5809 Object** start = reinterpret_cast<Object**>(obj->address() + | |
5810 Context::kHeaderSize); | |
5811 Object** end = reinterpret_cast<Object**>(obj->address() + | |
5812 Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize); | |
5813 mark_visitor->VisitPointers(start, end); | |
5814 } else { | |
5815 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor); | |
5816 } | |
5817 | |
5818 // Scan the map after the body because the body is a lot more interesting | |
5819 // when doing leak detection. | |
5820 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor); | |
5821 | |
5822 if (!found_target_in_trace_) { // don't pop if found the target | |
5823 object_stack_.RemoveLast(); | |
5824 } | |
5825 } | |
5826 | |
5827 | |
5828 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) { | |
5829 if (!(*p)->IsHeapObject()) return; | |
5830 | |
5831 HeapObject* obj = HeapObject::cast(*p); | |
5832 | |
5833 MapWord map_word = obj->map_word(); | |
5834 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already | |
5835 | |
5836 MapWord unmarked_map_word = | |
5837 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag); | |
5838 obj->set_map_word(unmarked_map_word); | |
5839 | |
5840 Map* map = Map::cast(unmarked_map_word.ToMap()); | |
5841 | |
5842 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor); | |
5843 | |
5844 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor); | |
5845 } | |
5846 | |
5847 | |
5848 void PathTracer::ProcessResults() { | |
5849 if (found_target_) { | |
5850 OFStream os(stdout); | |
5851 os << "=====================================\n" | |
5852 << "==== Path to object ====\n" | |
5853 << "=====================================\n\n"; | |
5854 | |
5855 DCHECK(!object_stack_.is_empty()); | |
5856 for (int i = 0; i < object_stack_.length(); i++) { | |
5857 if (i > 0) os << "\n |\n |\n V\n\n"; | |
5858 object_stack_[i]->Print(os); | |
5859 } | |
5860 os << "=====================================\n"; | |
5861 } | |
5862 } | |
5863 | |
5864 | |
5865 // Triggers a depth-first traversal of reachable objects from one | |
5866 // given root object and finds a path to a specific heap object and | |
5867 // prints it. | |
5868 void Heap::TracePathToObjectFrom(Object* target, Object* root) { | |
5869 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL); | |
5870 tracer.VisitPointer(&root); | |
5871 } | |
5872 | |
5873 | |
5874 // Triggers a depth-first traversal of reachable objects from roots | |
5875 // and finds a path to a specific heap object and prints it. | |
5876 void Heap::TracePathToObject(Object* target) { | |
5877 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL); | |
5878 IterateRoots(&tracer, VISIT_ONLY_STRONG); | |
5879 } | |
5880 | |
5881 | |
5882 // Triggers a depth-first traversal of reachable objects from roots | |
5883 // and finds a path to any global object and prints it. Useful for | |
5884 // determining the source for leaks of global objects. | |
5885 void Heap::TracePathToGlobal() { | |
5886 PathTracer tracer(PathTracer::kAnyGlobalObject, | |
5887 PathTracer::FIND_ALL, | |
5888 VISIT_ALL); | |
5889 IterateRoots(&tracer, VISIT_ONLY_STRONG); | |
5890 } | |
5891 #endif | |
5892 | |
5893 | |
5894 void Heap::UpdateCumulativeGCStatistics(double duration, | |
5895 double spent_in_mutator, | |
5896 double marking_time) { | |
5897 if (FLAG_print_cumulative_gc_stat) { | |
5898 total_gc_time_ms_ += duration; | |
5899 max_gc_pause_ = Max(max_gc_pause_, duration); | |
5900 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects()); | |
5901 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator); | |
5902 } else if (FLAG_trace_gc_verbose) { | |
5903 total_gc_time_ms_ += duration; | |
5904 } | |
5905 | |
5906 marking_time_ += marking_time; | |
5907 } | |
5908 | |
5909 | |
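// The cache index below is derived from the low 32 bits of the map's address,
// shifted right by kMapHashShift (discarding low bits that carry little
// entropy because of object alignment), xor-ed with the name's hash and
// masked with kCapacityMask so the result stays inside the table. A rough
// illustration with made-up values, assuming a shift of 2 for readability:
//
//   map address  0x12345670  ->  0x048d159c after the shift
//   name->Hash() 0x000000a7  ->  xor gives  0x048d153b
//   the mask then keeps just enough low bits to index keys_.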
5910 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) { | |
5911 DisallowHeapAllocation no_gc; | |
5912 // Uses only lower 32 bits if pointers are larger. | |
5913 uintptr_t addr_hash = | |
5914 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift; | |
5915 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask); | |
5916 } | |
5917 | |
5918 | |
5919 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) { | |
5920 DisallowHeapAllocation no_gc; | |
5921 int index = (Hash(map, name) & kHashMask); | |
5922 for (int i = 0; i < kEntriesPerBucket; i++) { | |
5923 Key& key = keys_[index + i]; | |
5924 if ((key.map == *map) && key.name->Equals(*name)) { | |
5925 return field_offsets_[index + i]; | |
5926 } | |
5927 } | |
5928 return kNotFound; | |
5929 } | |
5930 | |
5931 | |
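// Update tries to reuse a free slot in the bucket (indicated by a NULL map,
// as written by Clear()); if none is found it shifts every entry in the
// bucket down by one position and writes the new (map, name) pair at position
// zero, so Lookup probes the most recently inserted entry first. Illustrative
// bucket contents, assuming two entries per bucket for brevity (the real size
// is kEntriesPerBucket):
//
//   before Update(mapC, "z"):  [ (mapA, "x"), (mapB, "y") ]
//   after  Update(mapC, "z"):  [ (mapC, "z"), (mapA, "x") ]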
5932 void KeyedLookupCache::Update(Handle<Map> map, | |
5933 Handle<Name> name, | |
5934 int field_offset) { | |
5935 DisallowHeapAllocation no_gc; | |
5936 if (!name->IsUniqueName()) { | |
5937 if (!StringTable::InternalizeStringIfExists(name->GetIsolate(), | |
5938 Handle<String>::cast(name)). | |
5939 ToHandle(&name)) { | |
5940 return; | |
5941 } | |
5942 } | |
5943 // This cache is cleared only between mark compact passes, so we expect the | |
5944 // cache to only contain old space names. | |
5945 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name)); | |
5946 | |
5947 int index = (Hash(map, name) & kHashMask); | |
5948 // After a GC there will be free slots, so we use them in order (this may | |
5949 // help to get the most frequently used one in position 0). | |
5950   for (int i = 0; i < kEntriesPerBucket; i++) { | |
5951 Key& key = keys_[index]; | |
5952 Object* free_entry_indicator = NULL; | |
5953 if (key.map == free_entry_indicator) { | |
5954 key.map = *map; | |
5955 key.name = *name; | |
5956 field_offsets_[index + i] = field_offset; | |
5957 return; | |
5958 } | |
5959 } | |
5960 // No free entry found in this bucket, so we move them all down one and | |
5961 // put the new entry at position zero. | |
5962 for (int i = kEntriesPerBucket - 1; i > 0; i--) { | |
5963 Key& key = keys_[index + i]; | |
5964 Key& key2 = keys_[index + i - 1]; | |
5965 key = key2; | |
5966 field_offsets_[index + i] = field_offsets_[index + i - 1]; | |
5967 } | |
5968 | |
5969 // Write the new first entry. | |
5970 Key& key = keys_[index]; | |
5971 key.map = *map; | |
5972 key.name = *name; | |
5973 field_offsets_[index] = field_offset; | |
5974 } | |
5975 | |
5976 | |
5977 void KeyedLookupCache::Clear() { | |
5978 for (int index = 0; index < kLength; index++) keys_[index].map = NULL; | |
5979 } | |
5980 | |
5981 | |
5982 void DescriptorLookupCache::Clear() { | |
5983 for (int index = 0; index < kLength; index++) keys_[index].source = NULL; | |
5984 } | |
5985 | |
5986 | |
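// CleanUp compacts both string lists in place: entries that were replaced by
// the hole sentinel during GC are dropped, strings that are no longer in new
// space are promoted from new_space_strings_ to old_space_strings_, and both
// backing stores are rewound and trimmed afterwards.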
5987 void ExternalStringTable::CleanUp() { | |
5988 int last = 0; | |
5989 for (int i = 0; i < new_space_strings_.length(); ++i) { | |
5990 if (new_space_strings_[i] == heap_->the_hole_value()) { | |
5991 continue; | |
5992 } | |
5993 DCHECK(new_space_strings_[i]->IsExternalString()); | |
5994 if (heap_->InNewSpace(new_space_strings_[i])) { | |
5995 new_space_strings_[last++] = new_space_strings_[i]; | |
5996 } else { | |
5997 old_space_strings_.Add(new_space_strings_[i]); | |
5998 } | |
5999 } | |
6000 new_space_strings_.Rewind(last); | |
6001 new_space_strings_.Trim(); | |
6002 | |
6003 last = 0; | |
6004 for (int i = 0; i < old_space_strings_.length(); ++i) { | |
6005 if (old_space_strings_[i] == heap_->the_hole_value()) { | |
6006 continue; | |
6007 } | |
6008 DCHECK(old_space_strings_[i]->IsExternalString()); | |
6009 DCHECK(!heap_->InNewSpace(old_space_strings_[i])); | |
6010 old_space_strings_[last++] = old_space_strings_[i]; | |
6011 } | |
6012 old_space_strings_.Rewind(last); | |
6013 old_space_strings_.Trim(); | |
6014 #ifdef VERIFY_HEAP | |
6015 if (FLAG_verify_heap) { | |
6016 Verify(); | |
6017 } | |
6018 #endif | |
6019 } | |
6020 | |
6021 | |
6022 void ExternalStringTable::TearDown() { | |
6023 for (int i = 0; i < new_space_strings_.length(); ++i) { | |
6024 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i])); | |
6025 } | |
6026 new_space_strings_.Free(); | |
6027 for (int i = 0; i < old_space_strings_.length(); ++i) { | |
6028 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i])); | |
6029 } | |
6030 old_space_strings_.Free(); | |
6031 } | |
6032 | |
6033 | |
6034 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) { | |
6035 chunk->set_next_chunk(chunks_queued_for_free_); | |
6036 chunks_queued_for_free_ = chunk; | |
6037 } | |
6038 | |
6039 | |
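// FreeQueuedChunks runs in two passes over the queued list: the first pass
// flags every chunk as ABOUT_TO_BE_FREED and, for large-object chunks, stamps
// fake page-sized MemoryChunk headers into their interior so the store buffer
// can still classify interior slots; the second pass, after the store buffer
// has been compacted and filtered, actually releases the chunks.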
6040 void Heap::FreeQueuedChunks() { | |
6041 if (chunks_queued_for_free_ == NULL) return; | |
6042 MemoryChunk* next; | |
6043 MemoryChunk* chunk; | |
6044 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { | |
6045 next = chunk->next_chunk(); | |
6046 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); | |
6047 | |
6048 if (chunk->owner()->identity() == LO_SPACE) { | |
6049 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress. | |
6050 // If FromAnyPointerAddress encounters a slot that belongs to a large | |
6051 // chunk queued for deletion it will fail to find the chunk because | |
6052       // it tries to perform a search in the list of pages owned by the large | |
6053       // object space, and queued chunks were detached from that list. | |
6054       // To work around this we split the large chunk into normal kPageSize-aligned | |
6055       // pieces and initialize the size, owner and flags fields of every piece. | |
6056 // If FromAnyPointerAddress encounters a slot that belongs to one of | |
6057 // these smaller pieces it will treat it as a slot on a normal Page. | |
6058 Address chunk_end = chunk->address() + chunk->size(); | |
6059 MemoryChunk* inner = MemoryChunk::FromAddress( | |
6060 chunk->address() + Page::kPageSize); | |
6061 MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1); | |
6062 while (inner <= inner_last) { | |
6063         // The size of a large chunk is always a multiple of | |
6064         // OS::AllocateAlignment(), so there is always | |
6065         // enough space for a fake MemoryChunk header. | |
6066 Address area_end = Min(inner->address() + Page::kPageSize, chunk_end); | |
6067 // Guard against overflow. | |
6068 if (area_end < inner->address()) area_end = chunk_end; | |
6069 inner->SetArea(inner->address(), area_end); | |
6070 inner->set_size(Page::kPageSize); | |
6071 inner->set_owner(lo_space()); | |
6072 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); | |
6073 inner = MemoryChunk::FromAddress( | |
6074 inner->address() + Page::kPageSize); | |
6075 } | |
6076 } | |
6077 } | |
6078 isolate_->heap()->store_buffer()->Compact(); | |
6079 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); | |
6080 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { | |
6081 next = chunk->next_chunk(); | |
6082 isolate_->memory_allocator()->Free(chunk); | |
6083 } | |
6084 chunks_queued_for_free_ = NULL; | |
6085 } | |
6086 | |
6087 | |
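// The unmapped-page ring buffer keeps tagged copies of recently unmapped page
// addresses so they can be recognized in crash dumps. Because chunk addresses
// are page aligned, their low bits are zero and the xor below effectively
// writes a readable marker into them. Illustrative only, assuming a 1 MB page
// size and a page at 0x7f3200400000:
//
//   compacted page:  0x7f3200400000 ^ 0xc1ead  ->  0x7f32004c1ead  ("cleared")
//   unmapped page:   0x7f3200400000 ^ 0x1d1ed  ->  0x7f320041d1ed  ("I died")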
6088 void Heap::RememberUnmappedPage(Address page, bool compacted) { | |
6089 uintptr_t p = reinterpret_cast<uintptr_t>(page); | |
6090 // Tag the page pointer to make it findable in the dump file. | |
6091 if (compacted) { | |
6092 p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared. | |
6093 } else { | |
6094 p ^= 0x1d1ed & (Page::kPageSize - 1); // I died. | |
6095 } | |
6096 remembered_unmapped_pages_[remembered_unmapped_pages_index_] = | |
6097 reinterpret_cast<Address>(p); | |
6098 remembered_unmapped_pages_index_++; | |
6099 remembered_unmapped_pages_index_ %= kRememberedUnmappedPages; | |
6100 } | |
6101 | |
6102 | |
6103 void Heap::ClearObjectStats(bool clear_last_time_stats) { | |
6104 memset(object_counts_, 0, sizeof(object_counts_)); | |
6105 memset(object_sizes_, 0, sizeof(object_sizes_)); | |
6106 if (clear_last_time_stats) { | |
6107 memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_)); | |
6108 memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_)); | |
6109 } | |
6110 } | |
6111 | |
6112 | |
6113 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER; | |
6114 | |
6115 | |
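// CheckpointObjectStats publishes per-type object counts and sizes as deltas:
// each counter is incremented by the value gathered since the last checkpoint
// and decremented by the value recorded at the previous one, so the external
// counter always reflects the latest snapshot without ever being reset. A
// rough sketch of the arithmetic for a single counter:
//
//   counter state before:        base + last_time       (e.g. base + 10)
//   Increment(current = 7);      base + last_time + 7
//   Decrement(last_time = 10);   base + 7
//
// Afterwards the current arrays are copied into the *_last_time_ arrays and
// cleared for the next interval.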
6116 void Heap::CheckpointObjectStats() { | |
6117 base::LockGuard<base::Mutex> lock_guard( | |
6118 checkpoint_object_stats_mutex.Pointer()); | |
6119 Counters* counters = isolate()->counters(); | |
6120 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ | |
6121 counters->count_of_##name()->Increment( \ | |
6122 static_cast<int>(object_counts_[name])); \ | |
6123 counters->count_of_##name()->Decrement( \ | |
6124 static_cast<int>(object_counts_last_time_[name])); \ | |
6125 counters->size_of_##name()->Increment( \ | |
6126 static_cast<int>(object_sizes_[name])); \ | |
6127 counters->size_of_##name()->Decrement( \ | |
6128 static_cast<int>(object_sizes_last_time_[name])); | |
6129 INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) | |
6130 #undef ADJUST_LAST_TIME_OBJECT_COUNT | |
6131 int index; | |
6132 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ | |
6133 index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \ | |
6134 counters->count_of_CODE_TYPE_##name()->Increment( \ | |
6135 static_cast<int>(object_counts_[index])); \ | |
6136 counters->count_of_CODE_TYPE_##name()->Decrement( \ | |
6137 static_cast<int>(object_counts_last_time_[index])); \ | |
6138 counters->size_of_CODE_TYPE_##name()->Increment( \ | |
6139 static_cast<int>(object_sizes_[index])); \ | |
6140 counters->size_of_CODE_TYPE_##name()->Decrement( \ | |
6141 static_cast<int>(object_sizes_last_time_[index])); | |
6142 CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) | |
6143 #undef ADJUST_LAST_TIME_OBJECT_COUNT | |
6144 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ | |
6145 index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \ | |
6146 counters->count_of_FIXED_ARRAY_##name()->Increment( \ | |
6147 static_cast<int>(object_counts_[index])); \ | |
6148 counters->count_of_FIXED_ARRAY_##name()->Decrement( \ | |
6149 static_cast<int>(object_counts_last_time_[index])); \ | |
6150 counters->size_of_FIXED_ARRAY_##name()->Increment( \ | |
6151 static_cast<int>(object_sizes_[index])); \ | |
6152 counters->size_of_FIXED_ARRAY_##name()->Decrement( \ | |
6153 static_cast<int>(object_sizes_last_time_[index])); | |
6154 FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) | |
6155 #undef ADJUST_LAST_TIME_OBJECT_COUNT | |
6156 #define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ | |
6157 index = \ | |
6158 FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \ | |
6159 counters->count_of_CODE_AGE_##name()->Increment( \ | |
6160 static_cast<int>(object_counts_[index])); \ | |
6161 counters->count_of_CODE_AGE_##name()->Decrement( \ | |
6162 static_cast<int>(object_counts_last_time_[index])); \ | |
6163 counters->size_of_CODE_AGE_##name()->Increment( \ | |
6164 static_cast<int>(object_sizes_[index])); \ | |
6165 counters->size_of_CODE_AGE_##name()->Decrement( \ | |
6166 static_cast<int>(object_sizes_last_time_[index])); | |
6167 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | |
6168 #undef ADJUST_LAST_TIME_OBJECT_COUNT | |
6169 | |
6170 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | |
6171 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | |
6172 ClearObjectStats(); | |
6173 } | |
6174 | |
6175 } } // namespace v8::internal | |