Chromium Code Reviews

Side by Side Diff: src/heap.cc

Issue 7945009: Merge experimental/gc branch to the bleeding_edge. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 3 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 18 matching lines...)
29 29
30 #include "accessors.h" 30 #include "accessors.h"
31 #include "api.h" 31 #include "api.h"
32 #include "bootstrapper.h" 32 #include "bootstrapper.h"
33 #include "codegen.h" 33 #include "codegen.h"
34 #include "compilation-cache.h" 34 #include "compilation-cache.h"
35 #include "debug.h" 35 #include "debug.h"
36 #include "deoptimizer.h" 36 #include "deoptimizer.h"
37 #include "global-handles.h" 37 #include "global-handles.h"
38 #include "heap-profiler.h" 38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
39 #include "liveobjectlist-inl.h" 40 #include "liveobjectlist-inl.h"
40 #include "mark-compact.h" 41 #include "mark-compact.h"
41 #include "natives.h" 42 #include "natives.h"
42 #include "objects-visiting.h" 43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
43 #include "runtime-profiler.h" 45 #include "runtime-profiler.h"
44 #include "scopeinfo.h" 46 #include "scopeinfo.h"
45 #include "snapshot.h" 47 #include "snapshot.h"
48 #include "store-buffer.h"
46 #include "v8threads.h" 49 #include "v8threads.h"
47 #include "vm-state-inl.h" 50 #include "vm-state-inl.h"
48 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP 51 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
49 #include "regexp-macro-assembler.h" 52 #include "regexp-macro-assembler.h"
50 #include "arm/regexp-macro-assembler-arm.h" 53 #include "arm/regexp-macro-assembler-arm.h"
51 #endif 54 #endif
52 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP 55 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
53 #include "regexp-macro-assembler.h" 56 #include "regexp-macro-assembler.h"
54 #include "mips/regexp-macro-assembler-mips.h" 57 #include "mips/regexp-macro-assembler-mips.h"
55 #endif 58 #endif
56 59
57 namespace v8 { 60 namespace v8 {
58 namespace internal { 61 namespace internal {
59 62
60 63
61 static const intptr_t kMinimumPromotionLimit = 2 * MB;
62 static const intptr_t kMinimumAllocationLimit = 8 * MB;
63
64
65 static Mutex* gc_initializer_mutex = OS::CreateMutex(); 64 static Mutex* gc_initializer_mutex = OS::CreateMutex();
66 65
67 66
68 Heap::Heap() 67 Heap::Heap()
69 : isolate_(NULL), 68 : isolate_(NULL),
70 // semispace_size_ should be a power of 2 and old_generation_size_ should be 69 // semispace_size_ should be a power of 2 and old_generation_size_ should be
71 // a multiple of Page::kPageSize. 70 // a multiple of Page::kPageSize.
72 #if defined(ANDROID) 71 #if defined(ANDROID)
73 reserved_semispace_size_(2*MB), 72 #define LUMP_OF_MEMORY (128 * KB)
74 max_semispace_size_(2*MB),
75 initial_semispace_size_(128*KB),
76 max_old_generation_size_(192*MB),
77 max_executable_size_(max_old_generation_size_),
78 code_range_size_(0), 73 code_range_size_(0),
79 #elif defined(V8_TARGET_ARCH_X64) 74 #elif defined(V8_TARGET_ARCH_X64)
80 reserved_semispace_size_(16*MB), 75 #define LUMP_OF_MEMORY (2 * MB)
81 max_semispace_size_(16*MB),
82 initial_semispace_size_(1*MB),
83 max_old_generation_size_(1400*MB),
84 max_executable_size_(256*MB),
85 code_range_size_(512*MB), 76 code_range_size_(512*MB),
86 #else 77 #else
87 reserved_semispace_size_(8*MB), 78 #define LUMP_OF_MEMORY MB
88 max_semispace_size_(8*MB),
89 initial_semispace_size_(512*KB),
90 max_old_generation_size_(700*MB),
91 max_executable_size_(128*MB),
92 code_range_size_(0), 79 code_range_size_(0),
93 #endif 80 #endif
81 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
82 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
83 initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
84 max_old_generation_size_(1400ul * LUMP_OF_MEMORY),
85 max_executable_size_(256l * LUMP_OF_MEMORY),
86
94 // Variables set based on semispace_size_ and old_generation_size_ in 87 // Variables set based on semispace_size_ and old_generation_size_ in
95 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_) 88 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
96 // Will be 4 * reserved_semispace_size_ to ensure that young 89 // Will be 4 * reserved_semispace_size_ to ensure that young
97 // generation can be aligned to its size. 90 // generation can be aligned to its size.
98 survived_since_last_expansion_(0), 91 survived_since_last_expansion_(0),
99 sweep_generation_(0), 92 sweep_generation_(0),
100 always_allocate_scope_depth_(0), 93 always_allocate_scope_depth_(0),
101 linear_allocation_scope_depth_(0), 94 linear_allocation_scope_depth_(0),
102 contexts_disposed_(0), 95 contexts_disposed_(0),
96 scan_on_scavenge_pages_(0),
103 new_space_(this), 97 new_space_(this),
104 old_pointer_space_(NULL), 98 old_pointer_space_(NULL),
105 old_data_space_(NULL), 99 old_data_space_(NULL),
106 code_space_(NULL), 100 code_space_(NULL),
107 map_space_(NULL), 101 map_space_(NULL),
108 cell_space_(NULL), 102 cell_space_(NULL),
109 lo_space_(NULL), 103 lo_space_(NULL),
110 gc_state_(NOT_IN_GC), 104 gc_state_(NOT_IN_GC),
111 gc_post_processing_depth_(0), 105 gc_post_processing_depth_(0),
112 mc_count_(0),
113 ms_count_(0), 106 ms_count_(0),
114 gc_count_(0), 107 gc_count_(0),
115 unflattened_strings_length_(0), 108 unflattened_strings_length_(0),
116 #ifdef DEBUG 109 #ifdef DEBUG
117 allocation_allowed_(true), 110 allocation_allowed_(true),
118 allocation_timeout_(0), 111 allocation_timeout_(0),
119 disallow_allocation_failure_(false), 112 disallow_allocation_failure_(false),
120 debug_utils_(NULL), 113 debug_utils_(NULL),
121 #endif // DEBUG 114 #endif // DEBUG
122 old_gen_promotion_limit_(kMinimumPromotionLimit), 115 old_gen_promotion_limit_(kMinimumPromotionLimit),
123 old_gen_allocation_limit_(kMinimumAllocationLimit), 116 old_gen_allocation_limit_(kMinimumAllocationLimit),
117 old_gen_limit_factor_(1),
118 size_of_old_gen_at_last_old_space_gc_(0),
124 external_allocation_limit_(0), 119 external_allocation_limit_(0),
125 amount_of_external_allocated_memory_(0), 120 amount_of_external_allocated_memory_(0),
126 amount_of_external_allocated_memory_at_last_global_gc_(0), 121 amount_of_external_allocated_memory_at_last_global_gc_(0),
127 old_gen_exhausted_(false), 122 old_gen_exhausted_(false),
123 store_buffer_rebuilder_(store_buffer()),
128 hidden_symbol_(NULL), 124 hidden_symbol_(NULL),
129 global_gc_prologue_callback_(NULL), 125 global_gc_prologue_callback_(NULL),
130 global_gc_epilogue_callback_(NULL), 126 global_gc_epilogue_callback_(NULL),
131 gc_safe_size_of_old_object_(NULL), 127 gc_safe_size_of_old_object_(NULL),
132 total_regexp_code_generated_(0), 128 total_regexp_code_generated_(0),
133 tracer_(NULL), 129 tracer_(NULL),
134 young_survivors_after_last_gc_(0), 130 young_survivors_after_last_gc_(0),
135 high_survival_rate_period_length_(0), 131 high_survival_rate_period_length_(0),
136 survival_rate_(0), 132 survival_rate_(0),
137 previous_survival_rate_trend_(Heap::STABLE), 133 previous_survival_rate_trend_(Heap::STABLE),
138 survival_rate_trend_(Heap::STABLE), 134 survival_rate_trend_(Heap::STABLE),
139 max_gc_pause_(0), 135 max_gc_pause_(0),
140 max_alive_after_gc_(0), 136 max_alive_after_gc_(0),
141 min_in_mutator_(kMaxInt), 137 min_in_mutator_(kMaxInt),
142 alive_after_last_gc_(0), 138 alive_after_last_gc_(0),
143 last_gc_end_timestamp_(0.0), 139 last_gc_end_timestamp_(0.0),
144 page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED), 140 store_buffer_(this),
141 marking_(this),
142 incremental_marking_(this),
145 number_idle_notifications_(0), 143 number_idle_notifications_(0),
146 last_idle_notification_gc_count_(0), 144 last_idle_notification_gc_count_(0),
147 last_idle_notification_gc_count_init_(false), 145 last_idle_notification_gc_count_init_(false),
148 configured_(false), 146 configured_(false),
149 is_safe_to_read_maps_(true) { 147 last_empty_page_was_given_back_to_the_os_(false),
148 chunks_queued_for_free_(NULL) {
150 // Allow build-time customization of the max semispace size. Building 149 // Allow build-time customization of the max semispace size. Building
151 // V8 with snapshots and a non-default max semispace size is much 150 // V8 with snapshots and a non-default max semispace size is much
152 // easier if you can define it as part of the build environment. 151 // easier if you can define it as part of the build environment.
153 #if defined(V8_MAX_SEMISPACE_SIZE) 152 #if defined(V8_MAX_SEMISPACE_SIZE)
154 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; 153 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
155 #endif 154 #endif
156 155
157 intptr_t max_virtual = OS::MaxVirtualMemory(); 156 intptr_t max_virtual = OS::MaxVirtualMemory();
158 157
159 if (max_virtual > 0) { 158 if (max_virtual > 0) {
(...skipping 57 matching lines...)
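For concreteness, the LUMP_OF_MEMORY-based initializers in the new column above work out as follows on x64, where LUMP_OF_MEMORY is 2 * MB (assuming Page::kPageSize does not exceed 2 MB on that target, so the Max() calls resolve to LUMP_OF_MEMORY):

    reserved_semispace_size_  = 8 * 2 MB    = 16 MB    (same as the old hard-coded value)
    max_semispace_size_       = 8 * 2 MB    = 16 MB    (same)
    initial_semispace_size_   =     2 MB               (was 1 MB)
    max_old_generation_size_  = 1400 * 2 MB = 2800 MB  (was 1400 MB)
    max_executable_size_      = 256 * 2 MB  = 512 MB   (was 256 MB)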
217 return old_pointer_space_ != NULL && 216 return old_pointer_space_ != NULL &&
218 old_data_space_ != NULL && 217 old_data_space_ != NULL &&
219 code_space_ != NULL && 218 code_space_ != NULL &&
220 map_space_ != NULL && 219 map_space_ != NULL &&
221 cell_space_ != NULL && 220 cell_space_ != NULL &&
222 lo_space_ != NULL; 221 lo_space_ != NULL;
223 } 222 }
224 223
225 224
226 int Heap::GcSafeSizeOfOldObject(HeapObject* object) { 225 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
227 ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects. 226 if (IntrusiveMarking::IsMarked(object)) {
228 ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded()); 227 return IntrusiveMarking::SizeOfMarkedObject(object);
229 MapWord map_word = object->map_word(); 228 }
230 map_word.ClearMark(); 229 return object->SizeFromMap(object->map());
231 map_word.ClearOverflow();
232 return object->SizeFromMap(map_word.ToMap());
233 } 230 }
234 231
235 232
236 int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
237 ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
238 ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
239 uint32_t marker = Memory::uint32_at(object->address());
240 if (marker == MarkCompactCollector::kSingleFreeEncoding) {
241 return kIntSize;
242 } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
243 return Memory::int_at(object->address() + kIntSize);
244 } else {
245 MapWord map_word = object->map_word();
246 Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
247 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
248 return object->SizeFromMap(map);
249 }
250 }
251
252
253 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) { 233 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
254 // Is global GC requested? 234 // Is global GC requested?
255 if (space != NEW_SPACE || FLAG_gc_global) { 235 if (space != NEW_SPACE || FLAG_gc_global) {
256 isolate_->counters()->gc_compactor_caused_by_request()->Increment(); 236 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
257 return MARK_COMPACTOR; 237 return MARK_COMPACTOR;
258 } 238 }
259 239
260 // Is enough data promoted to justify a global GC? 240 // Is enough data promoted to justify a global GC?
261 if (OldGenerationPromotionLimitReached()) { 241 if (OldGenerationPromotionLimitReached()) {
262 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment(); 242 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
(...skipping 130 matching lines...)
393 } 373 }
394 374
395 if (FLAG_gc_verbose) Print(); 375 if (FLAG_gc_verbose) Print();
396 #endif // DEBUG 376 #endif // DEBUG
397 377
398 #if defined(DEBUG) 378 #if defined(DEBUG)
399 ReportStatisticsBeforeGC(); 379 ReportStatisticsBeforeGC();
400 #endif // DEBUG 380 #endif // DEBUG
401 381
402 LiveObjectList::GCPrologue(); 382 LiveObjectList::GCPrologue();
383 store_buffer()->GCPrologue();
403 } 384 }
404 385
405 intptr_t Heap::SizeOfObjects() { 386 intptr_t Heap::SizeOfObjects() {
406 intptr_t total = 0; 387 intptr_t total = 0;
407 AllSpaces spaces; 388 AllSpaces spaces;
408 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { 389 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
409 total += space->SizeOfObjects(); 390 total += space->SizeOfObjects();
410 } 391 }
411 return total; 392 return total;
412 } 393 }
413 394
414 void Heap::GarbageCollectionEpilogue() { 395 void Heap::GarbageCollectionEpilogue() {
396 store_buffer()->GCEpilogue();
415 LiveObjectList::GCEpilogue(); 397 LiveObjectList::GCEpilogue();
416 #ifdef DEBUG 398 #ifdef DEBUG
417 allow_allocation(true); 399 allow_allocation(true);
418 ZapFromSpace(); 400 ZapFromSpace();
419 401
420 if (FLAG_verify_heap) { 402 if (FLAG_verify_heap) {
421 Verify(); 403 Verify();
422 } 404 }
423 405
424 if (FLAG_print_global_handles) isolate_->global_handles()->Print(); 406 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
(...skipping 11 matching lines...)
436 symbol_table()->NumberOfElements()); 418 symbol_table()->NumberOfElements());
437 #if defined(DEBUG) 419 #if defined(DEBUG)
438 ReportStatisticsAfterGC(); 420 ReportStatisticsAfterGC();
439 #endif // DEBUG 421 #endif // DEBUG
440 #ifdef ENABLE_DEBUGGER_SUPPORT 422 #ifdef ENABLE_DEBUGGER_SUPPORT
441 isolate_->debug()->AfterGarbageCollection(); 423 isolate_->debug()->AfterGarbageCollection();
442 #endif // ENABLE_DEBUGGER_SUPPORT 424 #endif // ENABLE_DEBUGGER_SUPPORT
443 } 425 }
444 426
445 427
446 void Heap::CollectAllGarbage(bool force_compaction) { 428 void Heap::CollectAllGarbage(int flags) {
447 // Since we are ignoring the return value, the exact choice of space does 429 // Since we are ignoring the return value, the exact choice of space does
448 // not matter, so long as we do not specify NEW_SPACE, which would not 430 // not matter, so long as we do not specify NEW_SPACE, which would not
449 // cause a full GC. 431 // cause a full GC.
450 mark_compact_collector_.SetForceCompaction(force_compaction); 432 mark_compact_collector_.SetFlags(flags);
451 CollectGarbage(OLD_POINTER_SPACE); 433 CollectGarbage(OLD_POINTER_SPACE);
452 mark_compact_collector_.SetForceCompaction(false); 434 mark_compact_collector_.SetFlags(kNoGCFlags);
453 } 435 }
454 436
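The bool force_compaction parameter is replaced by an int bitmask of GC flags. A minimal sketch of the new call pattern, using the kNoGCFlags and kMakeHeapIterableMask constants that appear elsewhere in this patch (their values and any other flags are not shown in this file, so the Heap::-qualified names are an assumption based on their unqualified use inside Heap methods above):

    // Old API: heap->CollectAllGarbage(true);   // force compaction
    // New API: behaviour is requested via flags.
    heap->CollectAllGarbage(Heap::kNoGCFlags);            // plain full GC
    heap->CollectAllGarbage(Heap::kMakeHeapIterableMask); // full GC that leaves the heap iterable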
455 437
456 void Heap::CollectAllAvailableGarbage() { 438 void Heap::CollectAllAvailableGarbage() {
457 // Since we are ignoring the return value, the exact choice of space does 439 // Since we are ignoring the return value, the exact choice of space does
458 // not matter, so long as we do not specify NEW_SPACE, which would not 440 // not matter, so long as we do not specify NEW_SPACE, which would not
459 // cause a full GC. 441 // cause a full GC.
460 mark_compact_collector()->SetForceCompaction(true);
461
462 // Major GC would invoke weak handle callbacks on weakly reachable 442 // Major GC would invoke weak handle callbacks on weakly reachable
463 // handles, but won't collect weakly reachable objects until next 443 // handles, but won't collect weakly reachable objects until next
464 // major GC. Therefore if we collect aggressively and weak handle callback 444 // major GC. Therefore if we collect aggressively and weak handle callback
465 // has been invoked, we rerun major GC to release objects which become 445 // has been invoked, we rerun major GC to release objects which become
466 // garbage. 446 // garbage.
467 // Note: as weak callbacks can execute arbitrary code, we cannot 447 // Note: as weak callbacks can execute arbitrary code, we cannot
468 // hope that eventually there will be no weak callback invocations. 448 // hope that eventually there will be no weak callback invocations.
469 // Therefore stop recollecting after several attempts. 449 // Therefore stop recollecting after several attempts.
450 mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
470 const int kMaxNumberOfAttempts = 7; 451 const int kMaxNumberOfAttempts = 7;
471 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { 452 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
472 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) { 453 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
473 break; 454 break;
474 } 455 }
475 } 456 }
476 mark_compact_collector()->SetForceCompaction(false); 457 mark_compact_collector()->SetFlags(kNoGCFlags);
477 } 458 }
478 459
479 460
480 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) { 461 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
481 // The VM is in the GC state until exiting this function. 462 // The VM is in the GC state until exiting this function.
482 VMState state(isolate_, GC); 463 VMState state(isolate_, GC);
483 464
484 #ifdef DEBUG 465 #ifdef DEBUG
485 // Reset the allocation timeout to the GC interval, but make sure to 466 // Reset the allocation timeout to the GC interval, but make sure to
486 // allow at least a few allocations after a collection. The reason 467 // allow at least a few allocations after a collection. The reason
487 // for this is that we have a lot of allocation sequences and we 468 // for this is that we have a lot of allocation sequences and we
488 // assume that a garbage collection will allow the subsequent 469 // assume that a garbage collection will allow the subsequent
489 // allocation attempts to go through. 470 // allocation attempts to go through.
490 allocation_timeout_ = Max(6, FLAG_gc_interval); 471 allocation_timeout_ = Max(6, FLAG_gc_interval);
491 #endif 472 #endif
492 473
474 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
475 if (FLAG_trace_incremental_marking) {
476 PrintF("[IncrementalMarking] Scavenge during marking.\n");
477 }
478 }
479
480 if (collector == MARK_COMPACTOR &&
481 !mark_compact_collector()->PreciseSweepingRequired() &&
482 !incremental_marking()->IsStopped() &&
483 !incremental_marking()->should_hurry() &&
484 FLAG_incremental_marking_steps) {
485 if (FLAG_trace_incremental_marking) {
486 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
487 }
488 collector = SCAVENGER;
489 }
490
493 bool next_gc_likely_to_collect_more = false; 491 bool next_gc_likely_to_collect_more = false;
494 492
495 { GCTracer tracer(this); 493 { GCTracer tracer(this);
496 GarbageCollectionPrologue(); 494 GarbageCollectionPrologue();
497 // The GC count was incremented in the prologue. Tell the tracer about 495 // The GC count was incremented in the prologue. Tell the tracer about
498 // it. 496 // it.
499 tracer.set_gc_count(gc_count_); 497 tracer.set_gc_count(gc_count_);
500 498
501 // Tell the tracer which collector we've selected. 499 // Tell the tracer which collector we've selected.
502 tracer.set_collector(collector); 500 tracer.set_collector(collector);
503 501
504 HistogramTimer* rate = (collector == SCAVENGER) 502 HistogramTimer* rate = (collector == SCAVENGER)
505 ? isolate_->counters()->gc_scavenger() 503 ? isolate_->counters()->gc_scavenger()
506 : isolate_->counters()->gc_compactor(); 504 : isolate_->counters()->gc_compactor();
507 rate->Start(); 505 rate->Start();
508 next_gc_likely_to_collect_more = 506 next_gc_likely_to_collect_more =
509 PerformGarbageCollection(collector, &tracer); 507 PerformGarbageCollection(collector, &tracer);
510 rate->Stop(); 508 rate->Stop();
511 509
512 GarbageCollectionEpilogue(); 510 GarbageCollectionEpilogue();
513 } 511 }
514 512
513 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
514 if (incremental_marking()->IsStopped()) {
515 if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
516 incremental_marking()->Start();
517 }
518 }
519
515 return next_gc_likely_to_collect_more; 520 return next_gc_likely_to_collect_more;
516 } 521 }
517 522
518 523
519 void Heap::PerformScavenge() { 524 void Heap::PerformScavenge() {
520 GCTracer tracer(this); 525 GCTracer tracer(this);
521 PerformGarbageCollection(SCAVENGER, &tracer); 526 if (incremental_marking()->IsStopped()) {
527 PerformGarbageCollection(SCAVENGER, &tracer);
528 } else {
529 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
530 }
522 } 531 }
523 532
524 533
525 #ifdef DEBUG 534 #ifdef DEBUG
526 // Helper class for verifying the symbol table. 535 // Helper class for verifying the symbol table.
527 class SymbolTableVerifier : public ObjectVisitor { 536 class SymbolTableVerifier : public ObjectVisitor {
528 public: 537 public:
529 void VisitPointers(Object** start, Object** end) { 538 void VisitPointers(Object** start, Object** end) {
530 // Visit all HeapObject pointers in [start, end). 539 // Visit all HeapObject pointers in [start, end).
531 for (Object** p = start; p < end; p++) { 540 for (Object** p = start; p < end; p++) {
(...skipping 71 matching lines...)
603 } 612 }
604 } 613 }
605 } 614 }
606 615
607 616
608 void Heap::EnsureFromSpaceIsCommitted() { 617 void Heap::EnsureFromSpaceIsCommitted() {
609 if (new_space_.CommitFromSpaceIfNeeded()) return; 618 if (new_space_.CommitFromSpaceIfNeeded()) return;
610 619
611 // Committing memory to from space failed. 620 // Committing memory to from space failed.
612 // Try shrinking and try again. 621 // Try shrinking and try again.
613 PagedSpaces spaces;
614 for (PagedSpace* space = spaces.next();
615 space != NULL;
616 space = spaces.next()) {
617 space->RelinkPageListInChunkOrder(true);
618 }
619
620 Shrink(); 622 Shrink();
621 if (new_space_.CommitFromSpaceIfNeeded()) return; 623 if (new_space_.CommitFromSpaceIfNeeded()) return;
622 624
623 // Committing memory to from space failed again. 625 // Committing memory to from space failed again.
624 // Memory is exhausted and we will die. 626 // Memory is exhausted and we will die.
625 V8::FatalProcessOutOfMemory("Committing semi space failed."); 627 V8::FatalProcessOutOfMemory("Committing semi space failed.");
626 } 628 }
627 629
628 630
629 void Heap::ClearJSFunctionResultCaches() { 631 void Heap::ClearJSFunctionResultCaches() {
(...skipping 10 matching lines...)
640 JSFunctionResultCache::cast(caches->get(i))->Clear(); 642 JSFunctionResultCache::cast(caches->get(i))->Clear();
641 } 643 }
642 // Get the next context: 644 // Get the next context:
643 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); 645 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
644 } 646 }
645 } 647 }
646 648
647 649
648 650
649 void Heap::ClearNormalizedMapCaches() { 651 void Heap::ClearNormalizedMapCaches() {
650 if (isolate_->bootstrapper()->IsActive()) return; 652 if (isolate_->bootstrapper()->IsActive() &&
653 !incremental_marking()->IsMarking()) {
654 return;
655 }
651 656
652 Object* context = global_contexts_list_; 657 Object* context = global_contexts_list_;
653 while (!context->IsUndefined()) { 658 while (!context->IsUndefined()) {
654 Context::cast(context)->normalized_map_cache()->Clear(); 659 Context::cast(context)->normalized_map_cache()->Clear();
655 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); 660 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
656 } 661 }
657 } 662 }
658 663
659 664
660 #ifdef DEBUG
661
662 enum PageWatermarkValidity {
663 ALL_VALID,
664 ALL_INVALID
665 };
666
667 static void VerifyPageWatermarkValidity(PagedSpace* space,
668 PageWatermarkValidity validity) {
669 PageIterator it(space, PageIterator::PAGES_IN_USE);
670 bool expected_value = (validity == ALL_VALID);
671 while (it.has_next()) {
672 Page* page = it.next();
673 ASSERT(page->IsWatermarkValid() == expected_value);
674 }
675 }
676 #endif
677
678 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) { 665 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
679 double survival_rate = 666 double survival_rate =
680 (static_cast<double>(young_survivors_after_last_gc_) * 100) / 667 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
681 start_new_space_size; 668 start_new_space_size;
682 669
683 if (survival_rate > kYoungSurvivalRateThreshold) { 670 if (survival_rate > kYoungSurvivalRateThreshold) {
684 high_survival_rate_period_length_++; 671 high_survival_rate_period_length_++;
685 } else { 672 } else {
686 high_survival_rate_period_length_ = 0; 673 high_survival_rate_period_length_ = 0;
687 } 674 }
(...skipping 32 matching lines...)
720 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { 707 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
721 if (gc_type & gc_prologue_callbacks_[i].gc_type) { 708 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
722 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); 709 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
723 } 710 }
724 } 711 }
725 712
726 EnsureFromSpaceIsCommitted(); 713 EnsureFromSpaceIsCommitted();
727 714
728 int start_new_space_size = Heap::new_space()->SizeAsInt(); 715 int start_new_space_size = Heap::new_space()->SizeAsInt();
729 716
717 if (IsHighSurvivalRate()) {
718 // We speed up the incremental marker if it is running so that it
719 // does not fall behind the rate of promotion, which would cause a
720 // constantly growing old space.
721 incremental_marking()->NotifyOfHighPromotionRate();
722 }
723
730 if (collector == MARK_COMPACTOR) { 724 if (collector == MARK_COMPACTOR) {
731 // Perform mark-sweep with optional compaction. 725 // Perform mark-sweep with optional compaction.
732 MarkCompact(tracer); 726 MarkCompact(tracer);
733 sweep_generation_++; 727 sweep_generation_++;
734 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() && 728 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
735 IsStableOrIncreasingSurvivalTrend(); 729 IsStableOrIncreasingSurvivalTrend();
736 730
737 UpdateSurvivalRateTrend(start_new_space_size); 731 UpdateSurvivalRateTrend(start_new_space_size);
738 732
739 intptr_t old_gen_size = PromotedSpaceSize(); 733 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
740 old_gen_promotion_limit_ =
741 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
742 old_gen_allocation_limit_ =
743 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
744 734
745 if (high_survival_rate_during_scavenges && 735 if (high_survival_rate_during_scavenges &&
746 IsStableOrIncreasingSurvivalTrend()) { 736 IsStableOrIncreasingSurvivalTrend()) {
747 // Stable high survival rates of young objects both during partial and 737 // Stable high survival rates of young objects both during partial and
748 // full collection indicate that the mutator is either building or modifying 738 // full collection indicate that the mutator is either building or modifying
749 // a structure with a long lifetime. 739 // a structure with a long lifetime.
750 // In this case we aggressively raise old generation memory limits to 740 // In this case we aggressively raise old generation memory limits to
751 // postpone subsequent mark-sweep collection and thus trade memory 741 // postpone subsequent mark-sweep collection and thus trade memory
752 // space for the mutation speed. 742 // space for the mutation speed.
753 old_gen_promotion_limit_ *= 2; 743 old_gen_limit_factor_ = 2;
754 old_gen_allocation_limit_ *= 2; 744 } else {
745 old_gen_limit_factor_ = 1;
755 } 746 }
756 747
748 old_gen_promotion_limit_ =
749 OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
750 old_gen_allocation_limit_ =
751 OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
752
757 old_gen_exhausted_ = false; 753 old_gen_exhausted_ = false;
758 } else { 754 } else {
759 tracer_ = tracer; 755 tracer_ = tracer;
760 Scavenge(); 756 Scavenge();
761 tracer_ = NULL; 757 tracer_ = NULL;
762 758
763 UpdateSurvivalRateTrend(start_new_space_size); 759 UpdateSurvivalRateTrend(start_new_space_size);
764 } 760 }
765 761
766 isolate_->counters()->objs_since_last_young()->Set(0); 762 isolate_->counters()->objs_since_last_young()->Set(0);
767 763
768 gc_post_processing_depth_++; 764 gc_post_processing_depth_++;
769 { DisableAssertNoAllocation allow_allocation; 765 { DisableAssertNoAllocation allow_allocation;
770 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); 766 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
771 next_gc_likely_to_collect_more = 767 next_gc_likely_to_collect_more =
772 isolate_->global_handles()->PostGarbageCollectionProcessing(collector); 768 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
773 } 769 }
774 gc_post_processing_depth_--; 770 gc_post_processing_depth_--;
775 771
776 // Update relocatables. 772 // Update relocatables.
777 Relocatable::PostGarbageCollectionProcessing(); 773 Relocatable::PostGarbageCollectionProcessing();
778 774
779 if (collector == MARK_COMPACTOR) { 775 if (collector == MARK_COMPACTOR) {
780 // Register the amount of external allocated memory. 776 // Register the amount of external allocated memory.
781 amount_of_external_allocated_memory_at_last_global_gc_ = 777 amount_of_external_allocated_memory_at_last_global_gc_ =
782 amount_of_external_allocated_memory_; 778 amount_of_external_allocated_memory_;
783 } 779 }
784 780
785 GCCallbackFlags callback_flags = tracer->is_compacting() 781 GCCallbackFlags callback_flags = kNoGCCallbackFlags;
786 ? kGCCallbackFlagCompacted
787 : kNoGCCallbackFlags;
788 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { 782 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
789 if (gc_type & gc_epilogue_callbacks_[i].gc_type) { 783 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
790 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags); 784 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
791 } 785 }
792 } 786 }
793 787
794 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) { 788 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
795 ASSERT(!allocation_allowed_); 789 ASSERT(!allocation_allowed_);
796 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); 790 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
797 global_gc_epilogue_callback_(); 791 global_gc_epilogue_callback_();
798 } 792 }
799 VerifySymbolTable(); 793 VerifySymbolTable();
800 794
801 return next_gc_likely_to_collect_more; 795 return next_gc_likely_to_collect_more;
802 } 796 }
803 797
804 798
805 void Heap::MarkCompact(GCTracer* tracer) { 799 void Heap::MarkCompact(GCTracer* tracer) {
806 gc_state_ = MARK_COMPACT; 800 gc_state_ = MARK_COMPACT;
807 LOG(isolate_, ResourceEvent("markcompact", "begin")); 801 LOG(isolate_, ResourceEvent("markcompact", "begin"));
808 802
809 mark_compact_collector_.Prepare(tracer); 803 mark_compact_collector_.Prepare(tracer);
810 804
811 bool is_compacting = mark_compact_collector_.IsCompacting(); 805 ms_count_++;
806 tracer->set_full_gc_count(ms_count_);
812 807
813 if (is_compacting) { 808 MarkCompactPrologue();
814 mc_count_++;
815 } else {
816 ms_count_++;
817 }
818 tracer->set_full_gc_count(mc_count_ + ms_count_);
819 809
820 MarkCompactPrologue(is_compacting);
821
822 is_safe_to_read_maps_ = false;
823 mark_compact_collector_.CollectGarbage(); 810 mark_compact_collector_.CollectGarbage();
824 is_safe_to_read_maps_ = true;
825 811
826 LOG(isolate_, ResourceEvent("markcompact", "end")); 812 LOG(isolate_, ResourceEvent("markcompact", "end"));
827 813
828 gc_state_ = NOT_IN_GC; 814 gc_state_ = NOT_IN_GC;
829 815
830 Shrink(); 816 Shrink();
831 817
832 isolate_->counters()->objs_since_last_full()->Set(0); 818 isolate_->counters()->objs_since_last_full()->Set(0);
833 819
834 contexts_disposed_ = 0; 820 contexts_disposed_ = 0;
835 } 821 }
836 822
837 823
838 void Heap::MarkCompactPrologue(bool is_compacting) { 824 void Heap::MarkCompactPrologue() {
839 // At any old GC clear the keyed lookup cache to enable collection of unused 825 // At any old GC clear the keyed lookup cache to enable collection of unused
840 // maps. 826 // maps.
841 isolate_->keyed_lookup_cache()->Clear(); 827 isolate_->keyed_lookup_cache()->Clear();
842 isolate_->context_slot_cache()->Clear(); 828 isolate_->context_slot_cache()->Clear();
843 isolate_->descriptor_lookup_cache()->Clear(); 829 isolate_->descriptor_lookup_cache()->Clear();
844 StringSplitCache::Clear(string_split_cache()); 830 StringSplitCache::Clear(string_split_cache());
845 831
846 isolate_->compilation_cache()->MarkCompactPrologue(); 832 isolate_->compilation_cache()->MarkCompactPrologue();
847 833
848 CompletelyClearInstanceofCache(); 834 CompletelyClearInstanceofCache();
849 835
850 if (is_compacting) FlushNumberStringCache(); 836 // TODO(1605) select heuristic for flushing NumberString cache with
837 // FlushNumberStringCache
851 if (FLAG_cleanup_code_caches_at_gc) { 838 if (FLAG_cleanup_code_caches_at_gc) {
852 polymorphic_code_cache()->set_cache(undefined_value()); 839 polymorphic_code_cache()->set_cache(undefined_value());
853 } 840 }
854 841
855 ClearNormalizedMapCaches(); 842 ClearNormalizedMapCaches();
856 } 843 }
857 844
858 845
859 Object* Heap::FindCodeObject(Address a) { 846 Object* Heap::FindCodeObject(Address a) {
860 Object* obj = NULL; // Initialization to please compiler. 847 Object* obj = NULL; // Initialization to please compiler.
(...skipping 43 matching lines...)
904 } 891 }
905 } 892 }
906 }; 893 };
907 894
908 895
909 static void VerifyNonPointerSpacePointers() { 896 static void VerifyNonPointerSpacePointers() {
910 // Verify that there are no pointers to new space in spaces where we 897 // Verify that there are no pointers to new space in spaces where we
911 // do not expect them. 898 // do not expect them.
912 VerifyNonPointerSpacePointersVisitor v; 899 VerifyNonPointerSpacePointersVisitor v;
913 HeapObjectIterator code_it(HEAP->code_space()); 900 HeapObjectIterator code_it(HEAP->code_space());
914 for (HeapObject* object = code_it.next(); 901 for (HeapObject* object = code_it.Next();
915 object != NULL; object = code_it.next()) 902 object != NULL; object = code_it.Next())
916 object->Iterate(&v); 903 object->Iterate(&v);
917 904
918 HeapObjectIterator data_it(HEAP->old_data_space()); 905 // The old data space was normally swept conservatively so that the iterator
919 for (HeapObject* object = data_it.next(); 906 // doesn't work, so we normally skip the next bit.
920 object != NULL; object = data_it.next()) 907 if (!HEAP->old_data_space()->was_swept_conservatively()) {
921 object->Iterate(&v); 908 HeapObjectIterator data_it(HEAP->old_data_space());
909 for (HeapObject* object = data_it.Next();
910 object != NULL; object = data_it.Next())
911 object->Iterate(&v);
912 }
922 } 913 }
923 #endif 914 #endif
924 915
925 916
926 void Heap::CheckNewSpaceExpansionCriteria() { 917 void Heap::CheckNewSpaceExpansionCriteria() {
927 if (new_space_.Capacity() < new_space_.MaximumCapacity() && 918 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
928 survived_since_last_expansion_ > new_space_.Capacity()) { 919 survived_since_last_expansion_ > new_space_.Capacity()) {
929 // Grow the size of new space if there is room to grow and enough 920 // Grow the size of new space if there is room to grow and enough
930 // data has survived scavenge since the last expansion. 921 // data has survived scavenge since the last expansion.
931 new_space_.Grow(); 922 new_space_.Grow();
932 survived_since_last_expansion_ = 0; 923 survived_since_last_expansion_ = 0;
933 } 924 }
934 } 925 }
935 926
936 927
937 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) { 928 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
938 return heap->InNewSpace(*p) && 929 return heap->InNewSpace(*p) &&
939 !HeapObject::cast(*p)->map_word().IsForwardingAddress(); 930 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
940 } 931 }
941 932
942 933
934 void Heap::ScavengeStoreBufferCallback(
935 Heap* heap,
936 MemoryChunk* page,
937 StoreBufferEvent event) {
938 heap->store_buffer_rebuilder_.Callback(page, event);
939 }
940
941
942 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
943 if (event == kStoreBufferStartScanningPagesEvent) {
944 start_of_current_page_ = NULL;
945 current_page_ = NULL;
946 } else if (event == kStoreBufferScanningPageEvent) {
947 if (current_page_ != NULL) {
948 // If this page already overflowed the store buffer during this iteration.
949 if (current_page_->scan_on_scavenge()) {
950 // Then we should wipe out the entries that have been added for it.
951 store_buffer_->SetTop(start_of_current_page_);
952 } else if (store_buffer_->Top() - start_of_current_page_ >=
953 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
954 // Did we find too many pointers in the previous page? The heuristic is
955 // that no page can take more than 1/5 of the remaining slots in the store
956 // buffer.
957 current_page_->set_scan_on_scavenge(true);
958 store_buffer_->SetTop(start_of_current_page_);
959 } else {
960 // In this case the page we scanned took a reasonable number of slots in
961 // the store buffer. It has now been rehabilitated and is no longer
962 // marked scan_on_scavenge.
963 ASSERT(!current_page_->scan_on_scavenge());
964 }
965 }
966 start_of_current_page_ = store_buffer_->Top();
967 current_page_ = page;
968 } else if (event == kStoreBufferFullEvent) {
969 // The current page overflowed the store buffer again. Wipe out its entries
970 // in the store buffer and mark it scan-on-scavenge again. This may happen
971 // several times while scanning.
972 if (current_page_ == NULL) {
973 // Store Buffer overflowed while scanning promoted objects. These are not
974 // in any particular page, though they are likely to be clustered by the
975 // allocation routines.
976 store_buffer_->HandleFullness();
977 } else {
978 // Store Buffer overflowed while scanning a particular old space page for
979 // pointers to new space.
980 ASSERT(current_page_ == page);
981 ASSERT(page != NULL);
982 current_page_->set_scan_on_scavenge(true);
983 ASSERT(start_of_current_page_ != store_buffer_->Top());
984 store_buffer_->SetTop(start_of_current_page_);
985 }
986 } else {
987 UNREACHABLE();
988 }
989 }
990
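A quick check of the "1/5" figure in the heuristic above, using hypothetical numbers: suppose 100 store-buffer slots are free when scanning of a page begins (Limit() - start_of_current_page_ == 100) and the page has produced e entries so far, so Top() - start_of_current_page_ == e and Limit() - Top() == 100 - e. The trigger

    e >= (100 - e) >> 2

first fires at e == 20, i.e. once the page has consumed one fifth of the slots that were available when its scan began, which is what the comment describes.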
991
943 void Heap::Scavenge() { 992 void Heap::Scavenge() {
944 #ifdef DEBUG 993 #ifdef DEBUG
945 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); 994 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
946 #endif 995 #endif
947 996
948 gc_state_ = SCAVENGE; 997 gc_state_ = SCAVENGE;
949 998
950 SwitchScavengingVisitorsTableIfProfilingWasEnabled();
951
952 Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
953 #ifdef DEBUG
954 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
955 VerifyPageWatermarkValidity(map_space_, ALL_VALID);
956 #endif
957
958 // We do not update an allocation watermark of the top page during linear
959 // allocation to avoid overhead. So to maintain the watermark invariant
960 // we have to manually cache the watermark and mark the top page as having an
961 // invalid watermark. This guarantees that dirty regions iteration will use a
962 // correct watermark even if a linear allocation happens.
963 old_pointer_space_->FlushTopPageWatermark();
964 map_space_->FlushTopPageWatermark();
965
966 // Implements Cheney's copying algorithm 999 // Implements Cheney's copying algorithm
967 LOG(isolate_, ResourceEvent("scavenge", "begin")); 1000 LOG(isolate_, ResourceEvent("scavenge", "begin"));
968 1001
969 // Clear descriptor cache. 1002 // Clear descriptor cache.
970 isolate_->descriptor_lookup_cache()->Clear(); 1003 isolate_->descriptor_lookup_cache()->Clear();
971 1004
972 // Used for updating survived_since_last_expansion_ at function end. 1005 // Used for updating survived_since_last_expansion_ at function end.
973 intptr_t survived_watermark = PromotedSpaceSize(); 1006 intptr_t survived_watermark = PromotedSpaceSize();
974 1007
975 CheckNewSpaceExpansionCriteria(); 1008 CheckNewSpaceExpansionCriteria();
976 1009
1010 SelectScavengingVisitorsTable();
1011
1012 incremental_marking()->PrepareForScavenge();
1013
1014 old_pointer_space()->AdvanceSweeper(new_space_.Size());
1015 old_data_space()->AdvanceSweeper(new_space_.Size());
1016
977 // Flip the semispaces. After flipping, to space is empty, from space has 1017 // Flip the semispaces. After flipping, to space is empty, from space has
978 // live objects. 1018 // live objects.
979 new_space_.Flip(); 1019 new_space_.Flip();
980 new_space_.ResetAllocationInfo(); 1020 new_space_.ResetAllocationInfo();
981 1021
982 // We need to sweep newly copied objects which can be either in the 1022 // We need to sweep newly copied objects which can be either in the
983 // to space or promoted to the old generation. For to-space 1023 // to space or promoted to the old generation. For to-space
984 // objects, we treat the bottom of the to space as a queue. Newly 1024 // objects, we treat the bottom of the to space as a queue. Newly
985 // copied and unswept objects lie between a 'front' mark and the 1025 // copied and unswept objects lie between a 'front' mark and the
986 // allocation pointer. 1026 // allocation pointer.
987 // 1027 //
988 // Promoted objects can go into various old-generation spaces, and 1028 // Promoted objects can go into various old-generation spaces, and
989 // can be allocated internally in the spaces (from the free list). 1029 // can be allocated internally in the spaces (from the free list).
990 // We treat the top of the to space as a queue of addresses of 1030 // We treat the top of the to space as a queue of addresses of
991 // promoted objects. The addresses of newly promoted and unswept 1031 // promoted objects. The addresses of newly promoted and unswept
992 // objects lie between a 'front' mark and a 'rear' mark that is 1032 // objects lie between a 'front' mark and a 'rear' mark that is
993 // updated as a side effect of promoting an object. 1033 // updated as a side effect of promoting an object.
994 // 1034 //
995 // There is guaranteed to be enough room at the top of the to space 1035 // There is guaranteed to be enough room at the top of the to space
996 // for the addresses of promoted objects: every object promoted 1036 // for the addresses of promoted objects: every object promoted
997 // frees up its size in bytes from the top of the new space, and 1037 // frees up its size in bytes from the top of the new space, and
998 // objects are at least one pointer in size. 1038 // objects are at least one pointer in size.
999 Address new_space_front = new_space_.ToSpaceLow(); 1039 Address new_space_front = new_space_.ToSpaceStart();
1000 promotion_queue_.Initialize(new_space_.ToSpaceHigh()); 1040 promotion_queue_.Initialize(new_space_.ToSpaceEnd());
1001 1041
1002 is_safe_to_read_maps_ = false; 1042 #ifdef DEBUG
1043 store_buffer()->Clean();
1044 #endif
1045
1003 ScavengeVisitor scavenge_visitor(this); 1046 ScavengeVisitor scavenge_visitor(this);
1004 // Copy roots. 1047 // Copy roots.
1005 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); 1048 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1006 1049
1007 // Copy objects reachable from the old generation. By definition, 1050 // Copy objects reachable from the old generation.
1008 // there are no intergenerational pointers in code or data spaces. 1051 {
1009 IterateDirtyRegions(old_pointer_space_, 1052 StoreBufferRebuildScope scope(this,
1010 &Heap::IteratePointersInDirtyRegion, 1053 store_buffer(),
1011 &ScavengePointer, 1054 &ScavengeStoreBufferCallback);
1012 WATERMARK_CAN_BE_INVALID); 1055 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1013 1056 }
1014 IterateDirtyRegions(map_space_,
1015 &IteratePointersInDirtyMapsRegion,
1016 &ScavengePointer,
1017 WATERMARK_CAN_BE_INVALID);
1018
1019 lo_space_->IterateDirtyRegions(&ScavengePointer);
1020 1057
1021 // Copy objects reachable from cells by scavenging cell values directly. 1058 // Copy objects reachable from cells by scavenging cell values directly.
1022 HeapObjectIterator cell_iterator(cell_space_); 1059 HeapObjectIterator cell_iterator(cell_space_);
1023 for (HeapObject* cell = cell_iterator.next(); 1060 for (HeapObject* cell = cell_iterator.Next();
1024 cell != NULL; cell = cell_iterator.next()) { 1061 cell != NULL; cell = cell_iterator.Next()) {
1025 if (cell->IsJSGlobalPropertyCell()) { 1062 if (cell->IsJSGlobalPropertyCell()) {
1026 Address value_address = 1063 Address value_address =
1027 reinterpret_cast<Address>(cell) + 1064 reinterpret_cast<Address>(cell) +
1028 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 1065 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1029 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 1066 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1030 } 1067 }
1031 } 1068 }
1032 1069
1033 // Scavenge object reachable from the global contexts list directly. 1070 // Scavenge object reachable from the global contexts list directly.
1034 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_)); 1071 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
1035 1072
1036 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); 1073 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1037 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles( 1074 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1038 &IsUnscavengedHeapObject); 1075 &IsUnscavengedHeapObject);
1039 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots( 1076 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1040 &scavenge_visitor); 1077 &scavenge_visitor);
1041 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); 1078 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1042 1079
1043 1080
1044 UpdateNewSpaceReferencesInExternalStringTable( 1081 UpdateNewSpaceReferencesInExternalStringTable(
1045 &UpdateNewSpaceReferenceInExternalStringTableEntry); 1082 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1046 1083
1047 LiveObjectList::UpdateReferencesForScavengeGC(); 1084 LiveObjectList::UpdateReferencesForScavengeGC();
1048 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge(); 1085 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1086 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1049 1087
1050 ASSERT(new_space_front == new_space_.top()); 1088 ASSERT(new_space_front == new_space_.top());
1051 1089
1052 is_safe_to_read_maps_ = true;
1053
1054 // Set age mark. 1090 // Set age mark.
1055 new_space_.set_age_mark(new_space_.top()); 1091 new_space_.set_age_mark(new_space_.top());
1056 1092
1093 new_space_.LowerInlineAllocationLimit(
1094 new_space_.inline_allocation_limit_step());
1095
1057 // Update how much has survived scavenge. 1096 // Update how much has survived scavenge.
1058 IncrementYoungSurvivorsCounter(static_cast<int>( 1097 IncrementYoungSurvivorsCounter(static_cast<int>(
1059 (PromotedSpaceSize() - survived_watermark) + new_space_.Size())); 1098 (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
1060 1099
1061 LOG(isolate_, ResourceEvent("scavenge", "end")); 1100 LOG(isolate_, ResourceEvent("scavenge", "end"));
1062 1101
1063 gc_state_ = NOT_IN_GC; 1102 gc_state_ = NOT_IN_GC;
1064 } 1103 }
1065 1104
1066 1105
(...skipping 38 matching lines...)
1105 // String got promoted. Move it to the old string list. 1144 // String got promoted. Move it to the old string list.
1106 external_string_table_.AddOldString(target); 1145 external_string_table_.AddOldString(target);
1107 } 1146 }
1108 } 1147 }
1109 1148
1110 ASSERT(last <= end); 1149 ASSERT(last <= end);
1111 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start)); 1150 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1112 } 1151 }
1113 1152
1114 1153
1154 void Heap::UpdateReferencesInExternalStringTable(
1155 ExternalStringTableUpdaterCallback updater_func) {
1156
1157 // Update old space string references.
1158 if (external_string_table_.old_space_strings_.length() > 0) {
1159 Object** start = &external_string_table_.old_space_strings_[0];
1160 Object** end = start + external_string_table_.old_space_strings_.length();
1161 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1162 }
1163
1164 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1165 }
1166
1167
1115 static Object* ProcessFunctionWeakReferences(Heap* heap, 1168 static Object* ProcessFunctionWeakReferences(Heap* heap,
1116 Object* function, 1169 Object* function,
1117 WeakObjectRetainer* retainer) { 1170 WeakObjectRetainer* retainer) {
1118 Object* head = heap->undefined_value(); 1171 Object* undefined = heap->undefined_value();
1172 Object* head = undefined;
1119 JSFunction* tail = NULL; 1173 JSFunction* tail = NULL;
1120 Object* candidate = function; 1174 Object* candidate = function;
1121 while (candidate != heap->undefined_value()) { 1175 while (candidate != undefined) {
1122 // Check whether to keep the candidate in the list. 1176 // Check whether to keep the candidate in the list.
1123 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate); 1177 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1124 Object* retain = retainer->RetainAs(candidate); 1178 Object* retain = retainer->RetainAs(candidate);
1125 if (retain != NULL) { 1179 if (retain != NULL) {
1126 if (head == heap->undefined_value()) { 1180 if (head == undefined) {
1127 // First element in the list. 1181 // First element in the list.
1128 head = candidate_function; 1182 head = retain;
1129 } else { 1183 } else {
1130 // Subsequent elements in the list. 1184 // Subsequent elements in the list.
1131 ASSERT(tail != NULL); 1185 ASSERT(tail != NULL);
1132 tail->set_next_function_link(candidate_function); 1186 tail->set_next_function_link(retain);
1133 } 1187 }
1134 // Retained function is new tail. 1188 // Retained function is new tail.
1189 candidate_function = reinterpret_cast<JSFunction*>(retain);
1135 tail = candidate_function; 1190 tail = candidate_function;
1191
1192 ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1193
1194 if (retain == undefined) break;
1136 } 1195 }
1196
1137 // Move to next element in the list. 1197 // Move to next element in the list.
1138 candidate = candidate_function->next_function_link(); 1198 candidate = candidate_function->next_function_link();
1139 } 1199 }
1140 1200
1141 // Terminate the list if there are one or more elements. 1201 // Terminate the list if there are one or more elements.
1142 if (tail != NULL) { 1202 if (tail != NULL) {
1143 tail->set_next_function_link(heap->undefined_value()); 1203 tail->set_next_function_link(undefined);
1144 } 1204 }
1145 1205
1146 return head; 1206 return head;
1147 } 1207 }
1148 1208
1149 1209
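For context, a minimal sketch of the retainer interface consumed by ProcessFunctionWeakReferences above and ProcessWeakReferences below. This is an illustrative stub, not part of the patch; the real WeakObjectRetainer implementations live in GC code outside this file, and only the RetainAs() call visible in the diff is taken from the source:

    // Hypothetical retainer that keeps every element of the weak list alive.
    class KeepAllRetainer : public WeakObjectRetainer {
     public:
      // Return NULL to drop the element, or the (possibly relocated) object to
      // keep it; the loops above relink the list via next_function_link /
      // NEXT_CONTEXT_LINK using this return value.
      virtual Object* RetainAs(Object* object) { return object; }
    };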
1150 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { 1210 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1151 Object* head = undefined_value(); 1211 Object* undefined = undefined_value();
1212 Object* head = undefined;
1152 Context* tail = NULL; 1213 Context* tail = NULL;
1153 Object* candidate = global_contexts_list_; 1214 Object* candidate = global_contexts_list_;
1154 while (candidate != undefined_value()) { 1215 while (candidate != undefined) {
1155 // Check whether to keep the candidate in the list. 1216 // Check whether to keep the candidate in the list.
1156 Context* candidate_context = reinterpret_cast<Context*>(candidate); 1217 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1157 Object* retain = retainer->RetainAs(candidate); 1218 Object* retain = retainer->RetainAs(candidate);
1158 if (retain != NULL) { 1219 if (retain != NULL) {
1159 if (head == undefined_value()) { 1220 if (head == undefined) {
1160 // First element in the list. 1221 // First element in the list.
1161 head = candidate_context; 1222 head = retain;
1162 } else { 1223 } else {
1163 // Subsequent elements in the list. 1224 // Subsequent elements in the list.
1164 ASSERT(tail != NULL); 1225 ASSERT(tail != NULL);
1165 tail->set_unchecked(this, 1226 tail->set_unchecked(this,
1166 Context::NEXT_CONTEXT_LINK, 1227 Context::NEXT_CONTEXT_LINK,
1167 candidate_context, 1228 retain,
1168 UPDATE_WRITE_BARRIER); 1229 UPDATE_WRITE_BARRIER);
1169 } 1230 }
1170 // Retained context is new tail. 1231 // Retained context is new tail.
1232 candidate_context = reinterpret_cast<Context*>(retain);
1171 tail = candidate_context; 1233 tail = candidate_context;
1172 1234
1235 if (retain == undefined) break;
1236
1173 // Process the weak list of optimized functions for the context. 1237 // Process the weak list of optimized functions for the context.
1174 Object* function_list_head = 1238 Object* function_list_head =
1175 ProcessFunctionWeakReferences( 1239 ProcessFunctionWeakReferences(
1176 this, 1240 this,
1177 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST), 1241 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1178 retainer); 1242 retainer);
1179 candidate_context->set_unchecked(this, 1243 candidate_context->set_unchecked(this,
1180 Context::OPTIMIZED_FUNCTIONS_LIST, 1244 Context::OPTIMIZED_FUNCTIONS_LIST,
1181 function_list_head, 1245 function_list_head,
1182 UPDATE_WRITE_BARRIER); 1246 UPDATE_WRITE_BARRIER);
1183 } 1247 }
1248
1184 // Move to next element in the list. 1249 // Move to next element in the list.
1185 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK); 1250 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1186 } 1251 }
1187 1252
1188 // Terminate the list if there are one or more elements. 1253 // Terminate the list if there are one or more elements.
1189 if (tail != NULL) { 1254 if (tail != NULL) {
1190 tail->set_unchecked(this, 1255 tail->set_unchecked(this,
1191 Context::NEXT_CONTEXT_LINK, 1256 Context::NEXT_CONTEXT_LINK,
1192 Heap::undefined_value(), 1257 Heap::undefined_value(),
1193 UPDATE_WRITE_BARRIER); 1258 UPDATE_WRITE_BARRIER);
(...skipping 11 matching lines...)
1205 if (!heap->InNewSpace(object)) return; 1270 if (!heap->InNewSpace(object)) return;
1206 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), 1271 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1207 reinterpret_cast<HeapObject*>(object)); 1272 reinterpret_cast<HeapObject*>(object));
1208 } 1273 }
1209 }; 1274 };
1210 1275
1211 1276
1212 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, 1277 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1213 Address new_space_front) { 1278 Address new_space_front) {
1214 do { 1279 do {
1215 ASSERT(new_space_front <= new_space_.top()); 1280 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1216
1217 // The addresses new_space_front and new_space_.top() define a 1281 // The addresses new_space_front and new_space_.top() define a
1218 // queue of unprocessed copied objects. Process them until the 1282 // queue of unprocessed copied objects. Process them until the
1219 // queue is empty. 1283 // queue is empty.
1220 while (new_space_front < new_space_.top()) { 1284 while (new_space_front != new_space_.top()) {
1221 HeapObject* object = HeapObject::FromAddress(new_space_front); 1285 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1222 new_space_front += NewSpaceScavenger::IterateBody(object->map(), object); 1286 HeapObject* object = HeapObject::FromAddress(new_space_front);
1287 new_space_front +=
1288 NewSpaceScavenger::IterateBody(object->map(), object);
1289 } else {
1290 new_space_front =
1291 NewSpacePage::FromLimit(new_space_front)->next_page()->body();
1292 }
1223 } 1293 }
1224 1294
1225 // Promote and process all the to-be-promoted objects. 1295 // Promote and process all the to-be-promoted objects.
1226 while (!promotion_queue_.is_empty()) { 1296 {
1227 HeapObject* target; 1297 StoreBufferRebuildScope scope(this,
1228 int size; 1298 store_buffer(),
1229 promotion_queue_.remove(&target, &size); 1299 &ScavengeStoreBufferCallback);
1300 while (!promotion_queue()->is_empty()) {
1301 HeapObject* target;
1302 int size;
1303 promotion_queue()->remove(&target, &size);
1230 1304
1231 // Promoted object might be already partially visited 1305 // Promoted object might be already partially visited
1232 // during dirty regions iteration. Thus we search specifically 1306 // during old space pointer iteration. Thus we search specifically
1233 // for pointers into the from semispace instead of looking for pointers 1307 // for pointers into the from semispace instead of looking for pointers
1234 // to new space. 1308 // to new space.
1235 ASSERT(!target->IsMap()); 1309 ASSERT(!target->IsMap());
1236 IterateAndMarkPointersToFromSpace(target->address(), 1310 IterateAndMarkPointersToFromSpace(target->address(),
1237 target->address() + size, 1311 target->address() + size,
1238 &ScavengePointer); 1312 &ScavengeObject);
1313 }
1239 } 1314 }
1240 1315
1241 // Take another spin if there are now unswept objects in new space 1316 // Take another spin if there are now unswept objects in new space
1242 // (there are currently no more unswept promoted objects). 1317 // (there are currently no more unswept promoted objects).
1243 } while (new_space_front < new_space_.top()); 1318 } while (new_space_front != new_space_.top());
1244 1319
1245 return new_space_front; 1320 return new_space_front;
1246 } 1321 }
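For readers skimming the diff: the loop above is a Cheney-style breadth-first copy. new_space_front and new_space_.top() bound an implicit queue of copied-but-unscanned objects, and the promotion queue is drained separately for objects that left new space. A minimal, self-contained sketch of that queue structure (toy types, not the real V8 interfaces) might look like this:

#include <cstddef>
#include <unordered_map>
#include <vector>

// Toy heap: objects are nodes holding indices of other from-space nodes.
struct ToyObj { std::vector<int> refs; };

void ToyScavenge(const std::vector<ToyObj>& from_space,
                 const std::vector<int>& roots,
                 std::vector<ToyObj>* to_space) {
  std::unordered_map<int, int> forwarded;  // from-space index -> to-space index
  auto copy = [&](int id) {
    auto it = forwarded.find(id);
    if (it != forwarded.end()) return it->second;  // already evacuated
    to_space->push_back(from_space[id]);           // bump the "allocation top"
    int new_id = static_cast<int>(to_space->size()) - 1;
    forwarded[id] = new_id;
    return new_id;
  };
  for (int root : roots) copy(root);
  // [scan, to_space->size()) plays the role of [new_space_front, top()):
  // everything in that window is copied but not yet scanned.
  for (size_t scan = 0; scan < to_space->size(); ++scan) {
    for (size_t i = 0; i < (*to_space)[scan].refs.size(); ++i) {
      int evacuated = copy((*to_space)[scan].refs[i]);  // may grow *to_space
      (*to_space)[scan].refs[i] = evacuated;
    }
  }
}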
1247 1322
1248 1323
1249 enum LoggingAndProfiling { 1324 enum LoggingAndProfiling {
1250 LOGGING_AND_PROFILING_ENABLED, 1325 LOGGING_AND_PROFILING_ENABLED,
1251 LOGGING_AND_PROFILING_DISABLED 1326 LOGGING_AND_PROFILING_DISABLED
1252 }; 1327 };
1253 1328
1254 1329
1255 typedef void (*ScavengingCallback)(Map* map, 1330 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1256 HeapObject** slot,
1257 HeapObject* object);
1258 1331
1259 1332
1260 static Atomic32 scavenging_visitors_table_mode_; 1333 template<MarksHandling marks_handling,
1261 static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; 1334 LoggingAndProfiling logging_and_profiling_mode>
1262
1263
1264 INLINE(static void DoScavengeObject(Map* map,
1265 HeapObject** slot,
1266 HeapObject* obj));
1267
1268
1269 void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
1270 scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
1271 }
1272
1273
1274 template<LoggingAndProfiling logging_and_profiling_mode>
1275 class ScavengingVisitor : public StaticVisitorBase { 1335 class ScavengingVisitor : public StaticVisitorBase {
1276 public: 1336 public:
1277 static void Initialize() { 1337 static void Initialize() {
1278 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString); 1338 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1279 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString); 1339 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1280 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate); 1340 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1281 table_.Register(kVisitByteArray, &EvacuateByteArray); 1341 table_.Register(kVisitByteArray, &EvacuateByteArray);
1282 table_.Register(kVisitFixedArray, &EvacuateFixedArray); 1342 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1283 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray); 1343 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1284 1344
(...skipping 14 matching lines...)
1299 template VisitSpecialized<SharedFunctionInfo::kSize>); 1359 template VisitSpecialized<SharedFunctionInfo::kSize>);
1300 1360
1301 table_.Register(kVisitJSWeakMap, 1361 table_.Register(kVisitJSWeakMap,
1302 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1362 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1303 Visit); 1363 Visit);
1304 1364
1305 table_.Register(kVisitJSRegExp, 1365 table_.Register(kVisitJSRegExp,
1306 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1366 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1307 Visit); 1367 Visit);
1308 1368
1309 table_.Register(kVisitJSFunction, 1369 if (marks_handling == IGNORE_MARKS) {
1310 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1370 table_.Register(kVisitJSFunction,
1311 template VisitSpecialized<JSFunction::kSize>); 1371 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1372 template VisitSpecialized<JSFunction::kSize>);
1373 } else {
1374 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1375 }
1312 1376
1313 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, 1377 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1314 kVisitDataObject, 1378 kVisitDataObject,
1315 kVisitDataObjectGeneric>(); 1379 kVisitDataObjectGeneric>();
1316 1380
1317 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, 1381 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1318 kVisitJSObject, 1382 kVisitJSObject,
1319 kVisitJSObjectGeneric>(); 1383 kVisitJSObjectGeneric>();
1320 1384
1321 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, 1385 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
(...skipping 44 matching lines...)
1366 Isolate* isolate = heap->isolate(); 1430 Isolate* isolate = heap->isolate();
1367 if (isolate->logger()->is_logging() || 1431 if (isolate->logger()->is_logging() ||
1368 CpuProfiler::is_profiling(isolate)) { 1432 CpuProfiler::is_profiling(isolate)) {
1369 if (target->IsSharedFunctionInfo()) { 1433 if (target->IsSharedFunctionInfo()) {
1370 PROFILE(isolate, SharedFunctionInfoMoveEvent( 1434 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1371 source->address(), target->address())); 1435 source->address(), target->address()));
1372 } 1436 }
1373 } 1437 }
1374 } 1438 }
1375 1439
1440 if (marks_handling == TRANSFER_MARKS) {
1441 if (Marking::TransferColor(source, target)) {
1442 MemoryChunk::IncrementLiveBytes(target->address(), size);
1443 }
1444 }
1445
1376 return target; 1446 return target;
1377 } 1447 }
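The TRANSFER_MARKS branch above keeps incremental marking consistent across object moves: if the old copy was already marked, the new copy must end up marked too, and the live-byte counter of the destination page must grow by the object size. A hedged toy model of that bookkeeping (identifiers invented for illustration):

#include <cstddef>
#include <unordered_map>
#include <unordered_set>

// Toy mark state: ids of objects the incremental marker has turned black,
// plus per-page live-byte counters keyed by an invented page id.
struct ToyMarkState {
  std::unordered_set<int> black;
  std::unordered_map<int, size_t> live_bytes_per_page;
};

// Mirrors the shape of Marking::TransferColor + IncrementLiveBytes above:
// the color follows the object, and the accounting on the target page is
// updated only when a marked object actually moved there.
void TransferMarkOnMove(ToyMarkState* state, int source_id, int target_id,
                        int target_page, size_t size) {
  if (state->black.erase(source_id) > 0) {
    state->black.insert(target_id);
    state->live_bytes_per_page[target_page] += size;
  }
}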
1378 1448
1379
1380 template<ObjectContents object_contents, SizeRestriction size_restriction> 1449 template<ObjectContents object_contents, SizeRestriction size_restriction>
1381 static inline void EvacuateObject(Map* map, 1450 static inline void EvacuateObject(Map* map,
1382 HeapObject** slot, 1451 HeapObject** slot,
1383 HeapObject* object, 1452 HeapObject* object,
1384 int object_size) { 1453 int object_size) {
1385 ASSERT((size_restriction != SMALL) || 1454 ASSERT((size_restriction != SMALL) ||
1386 (object_size <= Page::kMaxHeapObjectSize)); 1455 (object_size <= Page::kMaxHeapObjectSize));
1387 ASSERT(object->Size() == object_size); 1456 ASSERT(object->Size() == object_size);
1388 1457
1389 Heap* heap = map->heap(); 1458 Heap* heap = map->GetHeap();
1390 if (heap->ShouldBePromoted(object->address(), object_size)) { 1459 if (heap->ShouldBePromoted(object->address(), object_size)) {
1391 MaybeObject* maybe_result; 1460 MaybeObject* maybe_result;
1392 1461
1393 if ((size_restriction != SMALL) && 1462 if ((size_restriction != SMALL) &&
1394 (object_size > Page::kMaxHeapObjectSize)) { 1463 (object_size > Page::kMaxHeapObjectSize)) {
1395 maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size); 1464 maybe_result = heap->lo_space()->AllocateRaw(object_size,
1465 NOT_EXECUTABLE);
1396 } else { 1466 } else {
1397 if (object_contents == DATA_OBJECT) { 1467 if (object_contents == DATA_OBJECT) {
1398 maybe_result = heap->old_data_space()->AllocateRaw(object_size); 1468 maybe_result = heap->old_data_space()->AllocateRaw(object_size);
1399 } else { 1469 } else {
1400 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size); 1470 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
1401 } 1471 }
1402 } 1472 }
1403 1473
1404 Object* result = NULL; // Initialization to please compiler. 1474 Object* result = NULL; // Initialization to please compiler.
1405 if (maybe_result->ToObject(&result)) { 1475 if (maybe_result->ToObject(&result)) {
1406 HeapObject* target = HeapObject::cast(result); 1476 HeapObject* target = HeapObject::cast(result);
1407 *slot = MigrateObject(heap, object, target, object_size); 1477 *slot = MigrateObject(heap, object, target, object_size);
1408 1478
1409 if (object_contents == POINTER_OBJECT) { 1479 if (object_contents == POINTER_OBJECT) {
1410 heap->promotion_queue()->insert(target, object_size); 1480 heap->promotion_queue()->insert(target, object_size);
1411 } 1481 }
1412 1482
1413 heap->tracer()->increment_promoted_objects_size(object_size); 1483 heap->tracer()->increment_promoted_objects_size(object_size);
1414 return; 1484 return;
1415 } 1485 }
1416 } 1486 }
1417 Object* result = 1487 MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
1418 heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked(); 1488 Object* result = allocation->ToObjectUnchecked();
1489
1419 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size); 1490 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
1420 return; 1491 return;
1421 } 1492 }
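EvacuateObject above reduces to a destination choice: promotion candidates go to the large-object space when oversized and to an old space otherwise, promoted pointer objects are queued so their fields get rescanned, and everything else (including failed promotions, which this sketch ignores) stays in new space. A simplified sketch of just that decision, conflating the old data and old pointer spaces:

#include <cstddef>
#include <deque>

// Invented stand-ins for Page::kMaxHeapObjectSize and the space set.
constexpr size_t kMaxRegularObjectSize = 8 * 1024;

enum class Destination { NEW_SPACE, OLD_SPACE, LARGE_OBJECT_SPACE };

Destination ChooseDestination(bool should_be_promoted, size_t object_size,
                              bool has_pointer_fields,
                              std::deque<int>* promotion_queue, int object_id) {
  if (should_be_promoted) {
    // Pointer-carrying survivors must be revisited later to fix up any
    // remaining new-space references (cf. promotion_queue()->insert above).
    if (has_pointer_fields) promotion_queue->push_back(object_id);
    return object_size > kMaxRegularObjectSize ? Destination::LARGE_OBJECT_SPACE
                                               : Destination::OLD_SPACE;
  }
  return Destination::NEW_SPACE;  // stays in the semispaces for now
}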
1422 1493
1423 1494
1495 static inline void EvacuateJSFunction(Map* map,
1496 HeapObject** slot,
1497 HeapObject* object) {
1498 ObjectEvacuationStrategy<POINTER_OBJECT>::
1499 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1500
1501 HeapObject* target = *slot;
1502 MarkBit mark_bit = Marking::MarkBitFrom(target);
1503 if (Marking::IsBlack(mark_bit)) {
1504 // This object is black and it might not be rescanned by the marker.
1505 // We should explicitly record the code entry slot for compaction because
1506 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1507 // miss it as it is not HeapObject-tagged.
1508 Address code_entry_slot =
1509 target->address() + JSFunction::kCodeEntryOffset;
1510 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1511 map->GetHeap()->mark_compact_collector()->
1512 RecordCodeEntrySlot(code_entry_slot, code);
1513 }
1514 }
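EvacuateJSFunction is needed because the code entry field holds a raw address rather than a tagged pointer, so the generic pointer scan of a promoted, already-black function would never see it; the slot has to be recorded by hand for the compacting collector. A toy illustration of that idea, with a made-up slot buffer:

#include <cstdint>
#include <vector>

// Invented slot buffer; the real collector records slots per memory chunk.
struct ToySlotBuffer {
  std::vector<uintptr_t> code_entry_slots;
  void RecordCodeEntrySlot(uintptr_t slot) { code_entry_slots.push_back(slot); }
};

void OnFunctionPromoted(ToySlotBuffer* slots, bool holder_is_black,
                        uintptr_t function_address,
                        uintptr_t code_entry_offset) {
  // A black holder will not be rescanned by the marker, so remember the
  // untagged code entry slot explicitly.
  if (holder_is_black) {
    slots->RecordCodeEntrySlot(function_address + code_entry_offset);
  }
}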
1515
1516
1424 static inline void EvacuateFixedArray(Map* map, 1517 static inline void EvacuateFixedArray(Map* map,
1425 HeapObject** slot, 1518 HeapObject** slot,
1426 HeapObject* object) { 1519 HeapObject* object) {
1427 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); 1520 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1428 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, 1521 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1429 slot, 1522 slot,
1430 object, 1523 object,
1431 object_size); 1524 object_size);
1432 } 1525 }
1433 1526
(...skipping 38 matching lines...)
1472 1565
1473 static inline bool IsShortcutCandidate(int type) { 1566 static inline bool IsShortcutCandidate(int type) {
1474 return ((type & kShortcutTypeMask) == kShortcutTypeTag); 1567 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1475 } 1568 }
1476 1569
1477 static inline void EvacuateShortcutCandidate(Map* map, 1570 static inline void EvacuateShortcutCandidate(Map* map,
1478 HeapObject** slot, 1571 HeapObject** slot,
1479 HeapObject* object) { 1572 HeapObject* object) {
1480 ASSERT(IsShortcutCandidate(map->instance_type())); 1573 ASSERT(IsShortcutCandidate(map->instance_type()));
1481 1574
1482 if (ConsString::cast(object)->unchecked_second() == 1575 Heap* heap = map->GetHeap();
1483 map->heap()->empty_string()) { 1576
1577 if (marks_handling == IGNORE_MARKS &&
1578 ConsString::cast(object)->unchecked_second() ==
1579 heap->empty_string()) {
1484 HeapObject* first = 1580 HeapObject* first =
1485 HeapObject::cast(ConsString::cast(object)->unchecked_first()); 1581 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1486 1582
1487 *slot = first; 1583 *slot = first;
1488 1584
1489 if (!map->heap()->InNewSpace(first)) { 1585 if (!heap->InNewSpace(first)) {
1490 object->set_map_word(MapWord::FromForwardingAddress(first)); 1586 object->set_map_word(MapWord::FromForwardingAddress(first));
1491 return; 1587 return;
1492 } 1588 }
1493 1589
1494 MapWord first_word = first->map_word(); 1590 MapWord first_word = first->map_word();
1495 if (first_word.IsForwardingAddress()) { 1591 if (first_word.IsForwardingAddress()) {
1496 HeapObject* target = first_word.ToForwardingAddress(); 1592 HeapObject* target = first_word.ToForwardingAddress();
1497 1593
1498 *slot = target; 1594 *slot = target;
1499 object->set_map_word(MapWord::FromForwardingAddress(target)); 1595 object->set_map_word(MapWord::FromForwardingAddress(target));
1500 return; 1596 return;
1501 } 1597 }
1502 1598
1503 DoScavengeObject(first->map(), slot, first); 1599 heap->DoScavengeObject(first->map(), slot, first);
1504 object->set_map_word(MapWord::FromForwardingAddress(*slot)); 1600 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1505 return; 1601 return;
1506 } 1602 }
1507 1603
1508 int object_size = ConsString::kSize; 1604 int object_size = ConsString::kSize;
1509 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size); 1605 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
1510 } 1606 }
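EvacuateShortcutCandidate unwraps degenerate cons strings: when the second part is the empty string (and marks need not be preserved), the slot is redirected to the first part and the cons cell is forwarded to it instead of being copied. A small stand-alone model of the shortcut, using invented string types:

#include <memory>
#include <string>

// Toy string: either flat text, or a cons of two parts.
struct ToyString {
  std::string flat;
  std::shared_ptr<ToyString> first;
  std::shared_ptr<ToyString> second;
};

// Returns what a slot should point to after scavenging: the cons node
// itself, or its first part when the second part is the empty string.
std::shared_ptr<ToyString> Shortcut(const std::shared_ptr<ToyString>& s) {
  bool second_is_empty = s->second && !s->second->first &&
                         !s->second->second && s->second->flat.empty();
  if (s->first && second_is_empty) {
    return s->first;  // drop the degenerate cons wrapper
  }
  return s;
}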
1511 1607
1512 template<ObjectContents object_contents> 1608 template<ObjectContents object_contents>
1513 class ObjectEvacuationStrategy { 1609 class ObjectEvacuationStrategy {
(...skipping 10 matching lines...)
1524 HeapObject* object) { 1620 HeapObject* object) {
1525 int object_size = map->instance_size(); 1621 int object_size = map->instance_size();
1526 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); 1622 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1527 } 1623 }
1528 }; 1624 };
1529 1625
1530 static VisitorDispatchTable<ScavengingCallback> table_; 1626 static VisitorDispatchTable<ScavengingCallback> table_;
1531 }; 1627 };
1532 1628
1533 1629
1534 template<LoggingAndProfiling logging_and_profiling_mode> 1630 template<MarksHandling marks_handling,
1631 LoggingAndProfiling logging_and_profiling_mode>
1535 VisitorDispatchTable<ScavengingCallback> 1632 VisitorDispatchTable<ScavengingCallback>
1536 ScavengingVisitor<logging_and_profiling_mode>::table_; 1633 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
1537 1634
1538 1635
1539 static void InitializeScavengingVisitorsTables() { 1636 static void InitializeScavengingVisitorsTables() {
1540 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize(); 1637 ScavengingVisitor<TRANSFER_MARKS,
1541 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize(); 1638 LOGGING_AND_PROFILING_DISABLED>::Initialize();
1542 scavenging_visitors_table_.CopyFrom( 1639 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
1543 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable()); 1640 ScavengingVisitor<TRANSFER_MARKS,
1544 scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED; 1641 LOGGING_AND_PROFILING_ENABLED>::Initialize();
1642 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
1545 } 1643 }
1546 1644
1547 1645
1548 void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() { 1646 void Heap::SelectScavengingVisitorsTable() {
1549 if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) { 1647 bool logging_and_profiling =
1550 // Table was already updated by some isolate. 1648 isolate()->logger()->is_logging() ||
1551 return;
1552 }
1553
1554 if (isolate()->logger()->is_logging() |
1555 CpuProfiler::is_profiling(isolate()) || 1649 CpuProfiler::is_profiling(isolate()) ||
1556 (isolate()->heap_profiler() != NULL && 1650 (isolate()->heap_profiler() != NULL &&
1557 isolate()->heap_profiler()->is_profiling())) { 1651 isolate()->heap_profiler()->is_profiling());
1558 // If one of the isolates is doing scavenge at this moment of time
1559 // it might see this table in an inconsitent state when
1560 // some of the callbacks point to
1561 // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
1562 // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
1563 // However this does not lead to any bugs as such isolate does not have
1564 // profiling enabled and any isolate with enabled profiling is guaranteed
1565 // to see the table in the consistent state.
1566 scavenging_visitors_table_.CopyFrom(
1567 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
1568 1652
1569 // We use Release_Store to prevent reordering of this write before writes 1653 if (!incremental_marking()->IsMarking()) {
1570 // to the table. 1654 if (!logging_and_profiling) {
1571 Release_Store(&scavenging_visitors_table_mode_, 1655 scavenging_visitors_table_.CopyFrom(
1572 LOGGING_AND_PROFILING_ENABLED); 1656 ScavengingVisitor<IGNORE_MARKS,
1657 LOGGING_AND_PROFILING_DISABLED>::GetTable());
1658 } else {
1659 scavenging_visitors_table_.CopyFrom(
1660 ScavengingVisitor<IGNORE_MARKS,
1661 LOGGING_AND_PROFILING_ENABLED>::GetTable());
1662 }
1663 } else {
1664 if (!logging_and_profiling) {
1665 scavenging_visitors_table_.CopyFrom(
1666 ScavengingVisitor<TRANSFER_MARKS,
1667 LOGGING_AND_PROFILING_DISABLED>::GetTable());
1668 } else {
1669 scavenging_visitors_table_.CopyFrom(
1670 ScavengingVisitor<TRANSFER_MARKS,
1671 LOGGING_AND_PROFILING_ENABLED>::GetTable());
1672 }
1573 } 1673 }
1574 } 1674 }
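SelectScavengingVisitorsTable only picks one of the four statically initialized tables, so per-object dispatch during the scavenge itself carries no branches on marking or profiling state. The sketch below shrinks the table to a single callback to show the template-specialization pattern; the enum values and function names are placeholders:

#include <cstdio>

typedef void (*ScavengeCallback)();

enum MarksMode { KEEP_MARKS, DROP_MARKS };
enum LogMode { LOG_ON, LOG_OFF };

template <MarksMode marks, LogMode logging>
struct ToyVisitor {
  static void Visit() {
    std::printf("marks=%d logging=%d\n", marks == KEEP_MARKS, logging == LOG_ON);
  }
};

// Chosen once per scavenge, analogous to copying a whole dispatch table.
ScavengeCallback SelectCallback(bool incremental_marking, bool profiling) {
  if (incremental_marking) {
    return profiling ? &ToyVisitor<KEEP_MARKS, LOG_ON>::Visit
                     : &ToyVisitor<KEEP_MARKS, LOG_OFF>::Visit;
  }
  return profiling ? &ToyVisitor<DROP_MARKS, LOG_ON>::Visit
                   : &ToyVisitor<DROP_MARKS, LOG_OFF>::Visit;
}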
1575 1675
1576 1676
1577 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { 1677 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1578 ASSERT(HEAP->InFromSpace(object)); 1678 ASSERT(HEAP->InFromSpace(object));
1579 MapWord first_word = object->map_word(); 1679 MapWord first_word = object->map_word();
1580 ASSERT(!first_word.IsForwardingAddress()); 1680 ASSERT(!first_word.IsForwardingAddress());
1581 Map* map = first_word.ToMap(); 1681 Map* map = first_word.ToMap();
1582 DoScavengeObject(map, p, object); 1682 map->GetHeap()->DoScavengeObject(map, p, object);
1583 } 1683 }
1584 1684
1585 1685
1586 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type, 1686 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1587 int instance_size) { 1687 int instance_size) {
1588 Object* result; 1688 Object* result;
1589 { MaybeObject* maybe_result = AllocateRawMap(); 1689 { MaybeObject* maybe_result = AllocateRawMap();
1590 if (!maybe_result->ToObject(&result)) return maybe_result; 1690 if (!maybe_result->ToObject(&result)) return maybe_result;
1591 } 1691 }
1592 1692
(...skipping 107 matching lines...)
1700 if (!maybe_obj->ToObject(&obj)) return false; 1800 if (!maybe_obj->ToObject(&obj)) return false;
1701 } 1801 }
1702 set_oddball_map(Map::cast(obj)); 1802 set_oddball_map(Map::cast(obj));
1703 1803
1704 // Allocate the empty array. 1804 // Allocate the empty array.
1705 { MaybeObject* maybe_obj = AllocateEmptyFixedArray(); 1805 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1706 if (!maybe_obj->ToObject(&obj)) return false; 1806 if (!maybe_obj->ToObject(&obj)) return false;
1707 } 1807 }
1708 set_empty_fixed_array(FixedArray::cast(obj)); 1808 set_empty_fixed_array(FixedArray::cast(obj));
1709 1809
1710 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE); 1810 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
1711 if (!maybe_obj->ToObject(&obj)) return false; 1811 if (!maybe_obj->ToObject(&obj)) return false;
1712 } 1812 }
1713 set_null_value(obj); 1813 set_null_value(obj);
1714 Oddball::cast(obj)->set_kind(Oddball::kNull); 1814 Oddball::cast(obj)->set_kind(Oddball::kNull);
1715 1815
1716 // Allocate the empty descriptor array. 1816 // Allocate the empty descriptor array.
1717 { MaybeObject* maybe_obj = AllocateEmptyFixedArray(); 1817 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1718 if (!maybe_obj->ToObject(&obj)) return false; 1818 if (!maybe_obj->ToObject(&obj)) return false;
1719 } 1819 }
1720 set_empty_descriptor_array(DescriptorArray::cast(obj)); 1820 set_empty_descriptor_array(DescriptorArray::cast(obj));
(...skipping 70 matching lines...)
1791 if (!maybe_obj->ToObject(&obj)) return false; 1891 if (!maybe_obj->ToObject(&obj)) return false;
1792 } 1892 }
1793 set_fixed_double_array_map(Map::cast(obj)); 1893 set_fixed_double_array_map(Map::cast(obj));
1794 1894
1795 { MaybeObject* maybe_obj = 1895 { MaybeObject* maybe_obj =
1796 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel); 1896 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1797 if (!maybe_obj->ToObject(&obj)) return false; 1897 if (!maybe_obj->ToObject(&obj)) return false;
1798 } 1898 }
1799 set_byte_array_map(Map::cast(obj)); 1899 set_byte_array_map(Map::cast(obj));
1800 1900
1901 { MaybeObject* maybe_obj =
1902 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
1903 if (!maybe_obj->ToObject(&obj)) return false;
1904 }
1905 set_free_space_map(Map::cast(obj));
1906
1801 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED); 1907 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1802 if (!maybe_obj->ToObject(&obj)) return false; 1908 if (!maybe_obj->ToObject(&obj)) return false;
1803 } 1909 }
1804 set_empty_byte_array(ByteArray::cast(obj)); 1910 set_empty_byte_array(ByteArray::cast(obj));
1805 1911
1806 { MaybeObject* maybe_obj = 1912 { MaybeObject* maybe_obj =
1807 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize); 1913 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
1808 if (!maybe_obj->ToObject(&obj)) return false; 1914 if (!maybe_obj->ToObject(&obj)) return false;
1809 } 1915 }
1810 set_external_pixel_array_map(Map::cast(obj)); 1916 set_external_pixel_array_map(Map::cast(obj));
(...skipping 180 matching lines...)
1991 HeapObject::cast(result)->set_map(global_property_cell_map()); 2097 HeapObject::cast(result)->set_map(global_property_cell_map());
1992 JSGlobalPropertyCell::cast(result)->set_value(value); 2098 JSGlobalPropertyCell::cast(result)->set_value(value);
1993 return result; 2099 return result;
1994 } 2100 }
1995 2101
1996 2102
1997 MaybeObject* Heap::CreateOddball(const char* to_string, 2103 MaybeObject* Heap::CreateOddball(const char* to_string,
1998 Object* to_number, 2104 Object* to_number,
1999 byte kind) { 2105 byte kind) {
2000 Object* result; 2106 Object* result;
2001 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE); 2107 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2002 if (!maybe_result->ToObject(&result)) return maybe_result; 2108 if (!maybe_result->ToObject(&result)) return maybe_result;
2003 } 2109 }
2004 return Oddball::cast(result)->Initialize(to_string, to_number, kind); 2110 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2005 } 2111 }
2006 2112
2007 2113
2008 bool Heap::CreateApiObjects() { 2114 bool Heap::CreateApiObjects() {
2009 Object* obj; 2115 Object* obj;
2010 2116
2011 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); 2117 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
(...skipping 64 matching lines...)
2076 if (!maybe_obj->ToObject(&obj)) return false; 2182 if (!maybe_obj->ToObject(&obj)) return false;
2077 } 2183 }
2078 set_minus_zero_value(obj); 2184 set_minus_zero_value(obj);
2079 ASSERT(signbit(minus_zero_value()->Number()) != 0); 2185 ASSERT(signbit(minus_zero_value()->Number()) != 0);
2080 2186
2081 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED); 2187 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2082 if (!maybe_obj->ToObject(&obj)) return false; 2188 if (!maybe_obj->ToObject(&obj)) return false;
2083 } 2189 }
2084 set_nan_value(obj); 2190 set_nan_value(obj);
2085 2191
2086 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE); 2192 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2087 if (!maybe_obj->ToObject(&obj)) return false; 2193 if (!maybe_obj->ToObject(&obj)) return false;
2088 } 2194 }
2089 set_undefined_value(obj); 2195 set_undefined_value(obj);
2090 Oddball::cast(obj)->set_kind(Oddball::kUndefined); 2196 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2091 ASSERT(!InNewSpace(undefined_value())); 2197 ASSERT(!InNewSpace(undefined_value()));
2092 2198
2093 // Allocate initial symbol table. 2199 // Allocate initial symbol table.
2094 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize); 2200 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2095 if (!maybe_obj->ToObject(&obj)) return false; 2201 if (!maybe_obj->ToObject(&obj)) return false;
2096 } 2202 }
(...skipping 801 matching lines...)
2898 if (length < 0 || length > ByteArray::kMaxLength) { 3004 if (length < 0 || length > ByteArray::kMaxLength) {
2899 return Failure::OutOfMemoryException(); 3005 return Failure::OutOfMemoryException();
2900 } 3006 }
2901 if (pretenure == NOT_TENURED) { 3007 if (pretenure == NOT_TENURED) {
2902 return AllocateByteArray(length); 3008 return AllocateByteArray(length);
2903 } 3009 }
2904 int size = ByteArray::SizeFor(length); 3010 int size = ByteArray::SizeFor(length);
2905 Object* result; 3011 Object* result;
2906 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace()) 3012 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2907 ? old_data_space_->AllocateRaw(size) 3013 ? old_data_space_->AllocateRaw(size)
2908 : lo_space_->AllocateRaw(size); 3014 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
2909 if (!maybe_result->ToObject(&result)) return maybe_result; 3015 if (!maybe_result->ToObject(&result)) return maybe_result;
2910 } 3016 }
2911 3017
2912 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); 3018 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2913 reinterpret_cast<ByteArray*>(result)->set_length(length); 3019 reinterpret_cast<ByteArray*>(result)->set_length(length);
2914 return result; 3020 return result;
2915 } 3021 }
2916 3022
2917 3023
2918 MaybeObject* Heap::AllocateByteArray(int length) { 3024 MaybeObject* Heap::AllocateByteArray(int length) {
(...skipping 15 matching lines...)
2934 3040
2935 3041
2936 void Heap::CreateFillerObjectAt(Address addr, int size) { 3042 void Heap::CreateFillerObjectAt(Address addr, int size) {
2937 if (size == 0) return; 3043 if (size == 0) return;
2938 HeapObject* filler = HeapObject::FromAddress(addr); 3044 HeapObject* filler = HeapObject::FromAddress(addr);
2939 if (size == kPointerSize) { 3045 if (size == kPointerSize) {
2940 filler->set_map(one_pointer_filler_map()); 3046 filler->set_map(one_pointer_filler_map());
2941 } else if (size == 2 * kPointerSize) { 3047 } else if (size == 2 * kPointerSize) {
2942 filler->set_map(two_pointer_filler_map()); 3048 filler->set_map(two_pointer_filler_map());
2943 } else { 3049 } else {
2944 filler->set_map(byte_array_map()); 3050 filler->set_map(free_space_map());
2945 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size)); 3051 FreeSpace::cast(filler)->set_size(size);
2946 } 3052 }
2947 } 3053 }
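CreateFillerObjectAt keeps the heap parseable by plugging a hole with one of three fillers chosen purely by size: a one-word filler, a two-word filler, or a FreeSpace object that records its own length. A compact sketch of the choice, with a hypothetical enum:

#include <cstddef>

constexpr size_t kToyPointerSize = sizeof(void*);

enum class FillerKind { ONE_POINTER, TWO_POINTER, FREE_SPACE };

FillerKind ChooseFiller(size_t hole_size_in_bytes) {
  if (hole_size_in_bytes == kToyPointerSize) return FillerKind::ONE_POINTER;
  if (hole_size_in_bytes == 2 * kToyPointerSize) return FillerKind::TWO_POINTER;
  // Larger holes become a FreeSpace object carrying its size, so heap
  // iteration and the sweeper can step over them.
  return FillerKind::FREE_SPACE;
}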
2948 3054
2949 3055
2950 MaybeObject* Heap::AllocateExternalArray(int length, 3056 MaybeObject* Heap::AllocateExternalArray(int length,
2951 ExternalArrayType array_type, 3057 ExternalArrayType array_type,
2952 void* external_pointer, 3058 void* external_pointer,
2953 PretenureFlag pretenure) { 3059 PretenureFlag pretenure) {
2954 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; 3060 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2955 Object* result; 3061 Object* result;
(...skipping 25 matching lines...)
2981 } 3087 }
2982 3088
2983 // Compute size. 3089 // Compute size.
2984 int body_size = RoundUp(desc.instr_size, kObjectAlignment); 3090 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
2985 int obj_size = Code::SizeFor(body_size); 3091 int obj_size = Code::SizeFor(body_size);
2986 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment)); 3092 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
2987 MaybeObject* maybe_result; 3093 MaybeObject* maybe_result;
2988 // Large code objects and code objects which should stay at a fixed address 3094 // Large code objects and code objects which should stay at a fixed address
2989 // are allocated in large object space. 3095 // are allocated in large object space.
2990 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) { 3096 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
2991 maybe_result = lo_space_->AllocateRawCode(obj_size); 3097 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
2992 } else { 3098 } else {
2993 maybe_result = code_space_->AllocateRaw(obj_size); 3099 maybe_result = code_space_->AllocateRaw(obj_size);
2994 } 3100 }
2995 3101
2996 Object* result; 3102 Object* result;
2997 if (!maybe_result->ToObject(&result)) return maybe_result; 3103 if (!maybe_result->ToObject(&result)) return maybe_result;
2998 3104
2999 // Initialize the object 3105 // Initialize the object
3000 HeapObject::cast(result)->set_map(code_map()); 3106 HeapObject::cast(result)->set_map(code_map());
3001 Code* code = Code::cast(result); 3107 Code* code = Code::cast(result);
(...skipping 24 matching lines...)
3026 #endif 3132 #endif
3027 return code; 3133 return code;
3028 } 3134 }
3029 3135
3030 3136
3031 MaybeObject* Heap::CopyCode(Code* code) { 3137 MaybeObject* Heap::CopyCode(Code* code) {
3032 // Allocate an object the same size as the code object. 3138 // Allocate an object the same size as the code object.
3033 int obj_size = code->Size(); 3139 int obj_size = code->Size();
3034 MaybeObject* maybe_result; 3140 MaybeObject* maybe_result;
3035 if (obj_size > MaxObjectSizeInPagedSpace()) { 3141 if (obj_size > MaxObjectSizeInPagedSpace()) {
3036 maybe_result = lo_space_->AllocateRawCode(obj_size); 3142 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3037 } else { 3143 } else {
3038 maybe_result = code_space_->AllocateRaw(obj_size); 3144 maybe_result = code_space_->AllocateRaw(obj_size);
3039 } 3145 }
3040 3146
3041 Object* result; 3147 Object* result;
3042 if (!maybe_result->ToObject(&result)) return maybe_result; 3148 if (!maybe_result->ToObject(&result)) return maybe_result;
3043 3149
3044 // Copy code object. 3150 // Copy code object.
3045 Address old_addr = code->address(); 3151 Address old_addr = code->address();
3046 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); 3152 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
(...skipping 22 matching lines...)
3069 3175
3070 int new_obj_size = Code::SizeFor(new_body_size); 3176 int new_obj_size = Code::SizeFor(new_body_size);
3071 3177
3072 Address old_addr = code->address(); 3178 Address old_addr = code->address();
3073 3179
3074 size_t relocation_offset = 3180 size_t relocation_offset =
3075 static_cast<size_t>(code->instruction_end() - old_addr); 3181 static_cast<size_t>(code->instruction_end() - old_addr);
3076 3182
3077 MaybeObject* maybe_result; 3183 MaybeObject* maybe_result;
3078 if (new_obj_size > MaxObjectSizeInPagedSpace()) { 3184 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
3079 maybe_result = lo_space_->AllocateRawCode(new_obj_size); 3185 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3080 } else { 3186 } else {
3081 maybe_result = code_space_->AllocateRaw(new_obj_size); 3187 maybe_result = code_space_->AllocateRaw(new_obj_size);
3082 } 3188 }
3083 3189
3084 Object* result; 3190 Object* result;
3085 if (!maybe_result->ToObject(&result)) return maybe_result; 3191 if (!maybe_result->ToObject(&result)) return maybe_result;
3086 3192
3087 // Copy code object. 3193 // Copy code object.
3088 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); 3194 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3089 3195
(...skipping 731 matching lines...)
3821 if (chars > SeqTwoByteString::kMaxLength) { 3927 if (chars > SeqTwoByteString::kMaxLength) {
3822 return Failure::OutOfMemoryException(); 3928 return Failure::OutOfMemoryException();
3823 } 3929 }
3824 map = symbol_map(); 3930 map = symbol_map();
3825 size = SeqTwoByteString::SizeFor(chars); 3931 size = SeqTwoByteString::SizeFor(chars);
3826 } 3932 }
3827 3933
3828 // Allocate string. 3934 // Allocate string.
3829 Object* result; 3935 Object* result;
3830 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace()) 3936 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3831 ? lo_space_->AllocateRaw(size) 3937 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
3832 : old_data_space_->AllocateRaw(size); 3938 : old_data_space_->AllocateRaw(size);
3833 if (!maybe_result->ToObject(&result)) return maybe_result; 3939 if (!maybe_result->ToObject(&result)) return maybe_result;
3834 } 3940 }
3835 3941
3836 reinterpret_cast<HeapObject*>(result)->set_map(map); 3942 reinterpret_cast<HeapObject*>(result)->set_map(map);
3837 // Set length and hash fields of the allocated string. 3943 // Set length and hash fields of the allocated string.
3838 String* answer = String::cast(result); 3944 String* answer = String::cast(result);
3839 answer->set_length(chars); 3945 answer->set_length(chars);
3840 answer->set_hash_field(hash_field); 3946 answer->set_hash_field(hash_field);
3841 3947
(...skipping 96 matching lines...)
3938 if (length < 0 || length > FixedArray::kMaxLength) { 4044 if (length < 0 || length > FixedArray::kMaxLength) {
3939 return Failure::OutOfMemoryException(); 4045 return Failure::OutOfMemoryException();
3940 } 4046 }
3941 ASSERT(length > 0); 4047 ASSERT(length > 0);
3942 // Use the general function if we're forced to always allocate. 4048 // Use the general function if we're forced to always allocate.
3943 if (always_allocate()) return AllocateFixedArray(length, TENURED); 4049 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3944 // Allocate the raw data for a fixed array. 4050 // Allocate the raw data for a fixed array.
3945 int size = FixedArray::SizeFor(length); 4051 int size = FixedArray::SizeFor(length);
3946 return size <= kMaxObjectSizeInNewSpace 4052 return size <= kMaxObjectSizeInNewSpace
3947 ? new_space_.AllocateRaw(size) 4053 ? new_space_.AllocateRaw(size)
3948 : lo_space_->AllocateRawFixedArray(size); 4054 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3949 } 4055 }
3950 4056
3951 4057
3952 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) { 4058 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3953 int len = src->length(); 4059 int len = src->length();
3954 Object* obj; 4060 Object* obj;
3955 { MaybeObject* maybe_obj = AllocateRawFixedArray(len); 4061 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3956 if (!maybe_obj->ToObject(&obj)) return maybe_obj; 4062 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3957 } 4063 }
3958 if (InNewSpace(obj)) { 4064 if (InNewSpace(obj)) {
(...skipping 310 matching lines...)
4269 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE; 4375 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
4270 Object* result; 4376 Object* result;
4271 { MaybeObject* maybe_result = Allocate(map, space); 4377 { MaybeObject* maybe_result = Allocate(map, space);
4272 if (!maybe_result->ToObject(&result)) return maybe_result; 4378 if (!maybe_result->ToObject(&result)) return maybe_result;
4273 } 4379 }
4274 Struct::cast(result)->InitializeBody(size); 4380 Struct::cast(result)->InitializeBody(size);
4275 return result; 4381 return result;
4276 } 4382 }
4277 4383
4278 4384
4385 bool Heap::IsHeapIterable() {
4386 return (!old_pointer_space()->was_swept_conservatively() &&
4387 !old_data_space()->was_swept_conservatively());
4388 }
4389
4390
4391 void Heap::EnsureHeapIsIterable() {
4392 ASSERT(IsAllocationAllowed());
4393 if (!IsHeapIterable()) {
4394 CollectAllGarbage(kMakeHeapIterableMask);
4395 }
4396 ASSERT(IsHeapIterable());
4397 }
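EnsureHeapIsIterable is a guard: old spaces that were swept conservatively cannot be walked object by object, so a full collection with kMakeHeapIterableMask is forced first. A toy shape of that guard, assuming an invented ToyHeap type:

struct ToyHeap {
  bool old_spaces_swept_conservatively = true;

  void CollectAllGarbage(/* kMakeHeapIterableMask */) {
    // A precise sweep restores iterability in this toy model.
    old_spaces_swept_conservatively = false;
  }
  bool IsIterable() const { return !old_spaces_swept_conservatively; }
  void EnsureIterable() {
    if (!IsIterable()) CollectAllGarbage();
  }
};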
4398
4399
4279 bool Heap::IdleNotification() { 4400 bool Heap::IdleNotification() {
4280 static const int kIdlesBeforeScavenge = 4; 4401 static const int kIdlesBeforeScavenge = 4;
4281 static const int kIdlesBeforeMarkSweep = 7; 4402 static const int kIdlesBeforeMarkSweep = 7;
4282 static const int kIdlesBeforeMarkCompact = 8; 4403 static const int kIdlesBeforeMarkCompact = 8;
4283 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1; 4404 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
4284 static const unsigned int kGCsBetweenCleanup = 4; 4405 static const unsigned int kGCsBetweenCleanup = 4;
4285 4406
4286 if (!last_idle_notification_gc_count_init_) { 4407 if (!last_idle_notification_gc_count_init_) {
4287 last_idle_notification_gc_count_ = gc_count_; 4408 last_idle_notification_gc_count_ = gc_count_;
4288 last_idle_notification_gc_count_init_ = true; 4409 last_idle_notification_gc_count_init_ = true;
(...skipping 10 matching lines...)
4299 number_idle_notifications_ = 4420 number_idle_notifications_ =
4300 Min(number_idle_notifications_ + 1, kMaxIdleCount); 4421 Min(number_idle_notifications_ + 1, kMaxIdleCount);
4301 } else { 4422 } else {
4302 number_idle_notifications_ = 0; 4423 number_idle_notifications_ = 0;
4303 last_idle_notification_gc_count_ = gc_count_; 4424 last_idle_notification_gc_count_ = gc_count_;
4304 } 4425 }
4305 4426
4306 if (number_idle_notifications_ == kIdlesBeforeScavenge) { 4427 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
4307 if (contexts_disposed_ > 0) { 4428 if (contexts_disposed_ > 0) {
4308 HistogramTimerScope scope(isolate_->counters()->gc_context()); 4429 HistogramTimerScope scope(isolate_->counters()->gc_context());
4309 CollectAllGarbage(false); 4430 CollectAllGarbage(kNoGCFlags);
4310 } else { 4431 } else {
4311 CollectGarbage(NEW_SPACE); 4432 CollectGarbage(NEW_SPACE);
4312 } 4433 }
4313 new_space_.Shrink(); 4434 new_space_.Shrink();
4314 last_idle_notification_gc_count_ = gc_count_; 4435 last_idle_notification_gc_count_ = gc_count_;
4315 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) { 4436 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
4316 // Before doing the mark-sweep collections we clear the 4437 // Before doing the mark-sweep collections we clear the
4317 // compilation cache to avoid hanging on to source code and 4438 // compilation cache to avoid hanging on to source code and
4318 // generated code for cached functions. 4439 // generated code for cached functions.
4319 isolate_->compilation_cache()->Clear(); 4440 isolate_->compilation_cache()->Clear();
4320 4441
4321 CollectAllGarbage(false); 4442 CollectAllGarbage(kNoGCFlags);
4322 new_space_.Shrink(); 4443 new_space_.Shrink();
4323 last_idle_notification_gc_count_ = gc_count_; 4444 last_idle_notification_gc_count_ = gc_count_;
4324 4445
4325 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) { 4446 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
4326 CollectAllGarbage(true); 4447 CollectAllGarbage(kNoGCFlags);
4327 new_space_.Shrink(); 4448 new_space_.Shrink();
4328 last_idle_notification_gc_count_ = gc_count_; 4449 last_idle_notification_gc_count_ = gc_count_;
4329 number_idle_notifications_ = 0; 4450 number_idle_notifications_ = 0;
4330 finished = true; 4451 finished = true;
4331 } else if (contexts_disposed_ > 0) { 4452 } else if (contexts_disposed_ > 0) {
4332 if (FLAG_expose_gc) { 4453 if (FLAG_expose_gc) {
4333 contexts_disposed_ = 0; 4454 contexts_disposed_ = 0;
4334 } else { 4455 } else {
4335 HistogramTimerScope scope(isolate_->counters()->gc_context()); 4456 HistogramTimerScope scope(isolate_->counters()->gc_context());
4336 CollectAllGarbage(false); 4457 CollectAllGarbage(kNoGCFlags);
4337 last_idle_notification_gc_count_ = gc_count_; 4458 last_idle_notification_gc_count_ = gc_count_;
4338 } 4459 }
4339 // If this is the first idle notification, we reset the 4460 // If this is the first idle notification, we reset the
4340 // notification count to avoid letting idle notifications for 4461 // notification count to avoid letting idle notifications for
4341 // context disposal garbage collections start a potentially too 4462 // context disposal garbage collections start a potentially too
4342 // aggressive idle GC cycle. 4463 // aggressive idle GC cycle.
4343 if (number_idle_notifications_ <= 1) { 4464 if (number_idle_notifications_ <= 1) {
4344 number_idle_notifications_ = 0; 4465 number_idle_notifications_ = 0;
4345 uncommit = false; 4466 uncommit = false;
4346 } 4467 }
4347 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) { 4468 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
4348 // If we have received more than kIdlesBeforeMarkCompact idle 4469 // If we have received more than kIdlesBeforeMarkCompact idle
4349 // notifications we do not perform any cleanup because we don't 4470 // notifications we do not perform any cleanup because we don't
4350 // expect to gain much by doing so. 4471 // expect to gain much by doing so.
4351 finished = true; 4472 finished = true;
4352 } 4473 }
4353 4474
4354 // Make sure that we have no pending context disposals and 4475 // Make sure that we have no pending context disposals and
4355 // conditionally uncommit from space. 4476 // conditionally uncommit from space.
4356 ASSERT(contexts_disposed_ == 0); 4477 ASSERT((contexts_disposed_ == 0) || incremental_marking()->IsMarking());
4357 if (uncommit) UncommitFromSpace(); 4478 if (uncommit) UncommitFromSpace();
4358 return finished; 4479 return finished;
4359 } 4480 }
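IdleNotification escalates as idle rounds accumulate: a scavenge first, then a full collection with the compilation cache cleared, then one final full collection after which the embedder is told that no further idle work helps. A rough sketch of just that threshold schedule, reusing the constants above but with an invented action enum:

enum class IdleAction { NONE, SCAVENGE, FULL_GC, FULL_GC_THEN_DONE, DONE };

IdleAction NextIdleAction(int idle_notifications) {
  const int kIdlesBeforeScavenge = 4;
  const int kIdlesBeforeMarkSweep = 7;
  const int kIdlesBeforeMarkCompact = 8;
  if (idle_notifications == kIdlesBeforeScavenge) return IdleAction::SCAVENGE;
  if (idle_notifications == kIdlesBeforeMarkSweep) return IdleAction::FULL_GC;
  if (idle_notifications == kIdlesBeforeMarkCompact) {
    return IdleAction::FULL_GC_THEN_DONE;
  }
  if (idle_notifications > kIdlesBeforeMarkCompact) return IdleAction::DONE;
  return IdleAction::NONE;  // the contexts_disposed_ special cases are omitted
}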
4360 4481
4361 4482
4362 #ifdef DEBUG 4483 #ifdef DEBUG
4363 4484
4364 void Heap::Print() { 4485 void Heap::Print() {
4365 if (!HasBeenSetup()) return; 4486 if (!HasBeenSetup()) return;
4366 isolate()->PrintStack(); 4487 isolate()->PrintStack();
(...skipping 14 matching lines...)
4381 } 4502 }
4382 4503
4383 4504
4384 // This function expects that NewSpace's allocated objects histogram is 4505 // This function expects that NewSpace's allocated objects histogram is
4385 // populated (via a call to CollectStatistics or else as a side effect of a 4506 // populated (via a call to CollectStatistics or else as a side effect of a
4386 // just-completed scavenge collection). 4507 // just-completed scavenge collection).
4387 void Heap::ReportHeapStatistics(const char* title) { 4508 void Heap::ReportHeapStatistics(const char* title) {
4388 USE(title); 4509 USE(title);
4389 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", 4510 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
4390 title, gc_count_); 4511 title, gc_count_);
4391 PrintF("mark-compact GC : %d\n", mc_count_);
4392 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n", 4512 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
4393 old_gen_promotion_limit_); 4513 old_gen_promotion_limit_);
4394 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n", 4514 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4395 old_gen_allocation_limit_); 4515 old_gen_allocation_limit_);
4516 PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
4396 4517
4397 PrintF("\n"); 4518 PrintF("\n");
4398 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles()); 4519 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
4399 isolate_->global_handles()->PrintStats(); 4520 isolate_->global_handles()->PrintStats();
4400 PrintF("\n"); 4521 PrintF("\n");
4401 4522
4402 PrintF("Heap statistics : "); 4523 PrintF("Heap statistics : ");
4403 isolate_->memory_allocator()->ReportStatistics(); 4524 isolate_->memory_allocator()->ReportStatistics();
4404 PrintF("To space : "); 4525 PrintF("To space : ");
4405 new_space_.ReportStatistics(); 4526 new_space_.ReportStatistics();
(...skipping 56 matching lines...)
4462 return cell_space_->Contains(addr); 4583 return cell_space_->Contains(addr);
4463 case LO_SPACE: 4584 case LO_SPACE:
4464 return lo_space_->SlowContains(addr); 4585 return lo_space_->SlowContains(addr);
4465 } 4586 }
4466 4587
4467 return false; 4588 return false;
4468 } 4589 }
4469 4590
4470 4591
4471 #ifdef DEBUG 4592 #ifdef DEBUG
4472 static void DummyScavengePointer(HeapObject** p) {
4473 }
4474
4475
4476 static void VerifyPointersUnderWatermark(
4477 PagedSpace* space,
4478 DirtyRegionCallback visit_dirty_region) {
4479 PageIterator it(space, PageIterator::PAGES_IN_USE);
4480
4481 while (it.has_next()) {
4482 Page* page = it.next();
4483 Address start = page->ObjectAreaStart();
4484 Address end = page->AllocationWatermark();
4485
4486 HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
4487 start,
4488 end,
4489 visit_dirty_region,
4490 &DummyScavengePointer);
4491 }
4492 }
4493
4494
4495 static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
4496 LargeObjectIterator it(space);
4497 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
4498 if (object->IsFixedArray()) {
4499 Address slot_address = object->address();
4500 Address end = object->address() + object->Size();
4501
4502 while (slot_address < end) {
4503 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
4504 // When we are not in GC the Heap::InNewSpace() predicate
4505 // checks that pointers which satisfy predicate point into
4506 // the active semispace.
4507 HEAP->InNewSpace(*slot);
4508 slot_address += kPointerSize;
4509 }
4510 }
4511 }
4512 }
4513
4514
4515 void Heap::Verify() { 4593 void Heap::Verify() {
4516 ASSERT(HasBeenSetup()); 4594 ASSERT(HasBeenSetup());
4517 4595
4596 store_buffer()->Verify();
4597
4518 VerifyPointersVisitor visitor; 4598 VerifyPointersVisitor visitor;
4519 IterateRoots(&visitor, VISIT_ONLY_STRONG); 4599 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4520 4600
4521 new_space_.Verify(); 4601 new_space_.Verify();
4522 4602
4523 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor; 4603 old_pointer_space_->Verify(&visitor);
4524 old_pointer_space_->Verify(&dirty_regions_visitor); 4604 map_space_->Verify(&visitor);
4525 map_space_->Verify(&dirty_regions_visitor);
4526
4527 VerifyPointersUnderWatermark(old_pointer_space_,
4528 &IteratePointersInDirtyRegion);
4529 VerifyPointersUnderWatermark(map_space_,
4530 &IteratePointersInDirtyMapsRegion);
4531 VerifyPointersUnderWatermark(lo_space_);
4532
4533 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4534 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4535 4605
4536 VerifyPointersVisitor no_dirty_regions_visitor; 4606 VerifyPointersVisitor no_dirty_regions_visitor;
4537 old_data_space_->Verify(&no_dirty_regions_visitor); 4607 old_data_space_->Verify(&no_dirty_regions_visitor);
4538 code_space_->Verify(&no_dirty_regions_visitor); 4608 code_space_->Verify(&no_dirty_regions_visitor);
4539 cell_space_->Verify(&no_dirty_regions_visitor); 4609 cell_space_->Verify(&no_dirty_regions_visitor);
4540 4610
4541 lo_space_->Verify(); 4611 lo_space_->Verify();
4542 } 4612 }
4613
4543 #endif // DEBUG 4614 #endif // DEBUG
4544 4615
4545 4616
4546 MaybeObject* Heap::LookupSymbol(Vector<const char> string) { 4617 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
4547 Object* symbol = NULL; 4618 Object* symbol = NULL;
4548 Object* new_table; 4619 Object* new_table;
4549 { MaybeObject* maybe_new_table = 4620 { MaybeObject* maybe_new_table =
4550 symbol_table()->LookupSymbol(string, &symbol); 4621 symbol_table()->LookupSymbol(string, &symbol);
4551 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table; 4622 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4552 } 4623 }
(...skipping 75 matching lines...)
4628 if (string->IsSymbol()) { 4699 if (string->IsSymbol()) {
4629 *symbol = string; 4700 *symbol = string;
4630 return true; 4701 return true;
4631 } 4702 }
4632 return symbol_table()->LookupSymbolIfExists(string, symbol); 4703 return symbol_table()->LookupSymbolIfExists(string, symbol);
4633 } 4704 }
4634 4705
4635 4706
4636 #ifdef DEBUG 4707 #ifdef DEBUG
4637 void Heap::ZapFromSpace() { 4708 void Heap::ZapFromSpace() {
4638 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure()); 4709 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4639 for (Address a = new_space_.FromSpaceLow(); 4710 new_space_.FromSpaceEnd());
4640 a < new_space_.FromSpaceHigh(); 4711 while (it.has_next()) {
4641 a += kPointerSize) { 4712 NewSpacePage* page = it.next();
4642 Memory::Address_at(a) = kFromSpaceZapValue; 4713 for (Address cursor = page->body(), limit = page->body_limit();
4714 cursor < limit;
4715 cursor += kPointerSize) {
4716 Memory::Address_at(cursor) = kFromSpaceZapValue;
4717 }
4643 } 4718 }
4644 } 4719 }
4645 #endif // DEBUG 4720 #endif // DEBUG
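ZapFromSpace above walks the pages of the now-inactive semispace and overwrites every word with kFromSpaceZapValue, so any stale pointer into from-space fails loudly in debug builds instead of silently reading garbage. A hedged miniature of the same idea, with a placeholder zap constant:

#include <cstdint>
#include <vector>

void ZapRegion(std::vector<uintptr_t>* from_space_words) {
  const uintptr_t kToyZapValue = 0xdeadbeef;  // placeholder, not V8's constant
  for (uintptr_t& word : *from_space_words) {
    word = kToyZapValue;  // recognizable pattern for post-mortem debugging
  }
}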
4646 4721
4647 4722
4648 bool Heap::IteratePointersInDirtyRegion(Heap* heap,
4649 Address start,
4650 Address end,
4651 ObjectSlotCallback copy_object_func) {
4652 Address slot_address = start;
4653 bool pointers_to_new_space_found = false;
4654
4655 while (slot_address < end) {
4656 Object** slot = reinterpret_cast<Object**>(slot_address);
4657 if (heap->InNewSpace(*slot)) {
4658 ASSERT((*slot)->IsHeapObject());
4659 copy_object_func(reinterpret_cast<HeapObject**>(slot));
4660 if (heap->InNewSpace(*slot)) {
4661 ASSERT((*slot)->IsHeapObject());
4662 pointers_to_new_space_found = true;
4663 }
4664 }
4665 slot_address += kPointerSize;
4666 }
4667 return pointers_to_new_space_found;
4668 }
4669
4670
4671 // Compute start address of the first map following given addr.
4672 static inline Address MapStartAlign(Address addr) {
4673 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4674 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4675 }
4676
4677
4678 // Compute end address of the first map preceding given addr.
4679 static inline Address MapEndAlign(Address addr) {
4680 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4681 return page + ((addr - page) / Map::kSize * Map::kSize);
4682 }
4683
4684
4685 static bool IteratePointersInDirtyMaps(Address start,
4686 Address end,
4687 ObjectSlotCallback copy_object_func) {
4688 ASSERT(MapStartAlign(start) == start);
4689 ASSERT(MapEndAlign(end) == end);
4690
4691 Address map_address = start;
4692 bool pointers_to_new_space_found = false;
4693
4694 Heap* heap = HEAP;
4695 while (map_address < end) {
4696 ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
4697 ASSERT(Memory::Object_at(map_address)->IsMap());
4698
4699 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4700 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4701
4702 if (Heap::IteratePointersInDirtyRegion(heap,
4703 pointer_fields_start,
4704 pointer_fields_end,
4705 copy_object_func)) {
4706 pointers_to_new_space_found = true;
4707 }
4708
4709 map_address += Map::kSize;
4710 }
4711
4712 return pointers_to_new_space_found;
4713 }
4714
4715
4716 bool Heap::IteratePointersInDirtyMapsRegion(
4717 Heap* heap,
4718 Address start,
4719 Address end,
4720 ObjectSlotCallback copy_object_func) {
4721 Address map_aligned_start = MapStartAlign(start);
4722 Address map_aligned_end = MapEndAlign(end);
4723
4724 bool contains_pointers_to_new_space = false;
4725
4726 if (map_aligned_start != start) {
4727 Address prev_map = map_aligned_start - Map::kSize;
4728 ASSERT(Memory::Object_at(prev_map)->IsMap());
4729
4730 Address pointer_fields_start =
4731 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4732
4733 Address pointer_fields_end =
4734 Min(prev_map + Map::kPointerFieldsEndOffset, end);
4735
4736 contains_pointers_to_new_space =
4737 IteratePointersInDirtyRegion(heap,
4738 pointer_fields_start,
4739 pointer_fields_end,
4740 copy_object_func)
4741 || contains_pointers_to_new_space;
4742 }
4743
4744 contains_pointers_to_new_space =
4745 IteratePointersInDirtyMaps(map_aligned_start,
4746 map_aligned_end,
4747 copy_object_func)
4748 || contains_pointers_to_new_space;
4749
4750 if (map_aligned_end != end) {
4751 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4752
4753 Address pointer_fields_start =
4754 map_aligned_end + Map::kPointerFieldsBeginOffset;
4755
4756 Address pointer_fields_end =
4757 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
4758
4759 contains_pointers_to_new_space =
4760 IteratePointersInDirtyRegion(heap,
4761 pointer_fields_start,
4762 pointer_fields_end,
4763 copy_object_func)
4764 || contains_pointers_to_new_space;
4765 }
4766
4767 return contains_pointers_to_new_space;
4768 }
4769
4770
4771 void Heap::IterateAndMarkPointersToFromSpace(Address start, 4723 void Heap::IterateAndMarkPointersToFromSpace(Address start,
4772 Address end, 4724 Address end,
4773 ObjectSlotCallback callback) { 4725 ObjectSlotCallback callback) {
4774 Address slot_address = start; 4726 Address slot_address = start;
4775 Page* page = Page::FromAddress(start); 4727
4776 4728 // We are not collecting slots on new space objects during mutation
4777 uint32_t marks = page->GetRegionMarks(); 4729 // thus we have to scan for pointers to evacuation candidates when we
4730 // promote objects. But we should not record any slots in non-black
4731 // objects. A grey object's slots would be rescanned anyway.
4732 // A white object might not survive until the end of the collection, so
4733 // it would be a violation of the invariant to record its slots.
4734 bool record_slots = false;
4735 if (incremental_marking()->IsCompacting()) {
4736 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4737 record_slots = Marking::IsBlack(mark_bit);
4738 }
4778 4739
4779 while (slot_address < end) { 4740 while (slot_address < end) {
4780 Object** slot = reinterpret_cast<Object**>(slot_address); 4741 Object** slot = reinterpret_cast<Object**>(slot_address);
4781 if (InFromSpace(*slot)) { 4742 Object* object = *slot;
4782 ASSERT((*slot)->IsHeapObject()); 4743 // If the store buffer becomes overfull we mark pages as being exempt from
4783 callback(reinterpret_cast<HeapObject**>(slot)); 4744 // the store buffer. These pages are scanned to find pointers that point
4784 if (InNewSpace(*slot)) { 4745 // to the new space. In that case we may hit newly promoted objects and
4785 ASSERT((*slot)->IsHeapObject()); 4746 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4786 marks |= page->GetRegionMaskForAddress(slot_address); 4747 if (object->IsHeapObject()) {
4748 if (Heap::InFromSpace(object)) {
4749 callback(reinterpret_cast<HeapObject**>(slot),
4750 HeapObject::cast(object));
4751 Object* new_object = *slot;
4752 if (InNewSpace(new_object)) {
4753 ASSERT(Heap::InToSpace(new_object));
4754 ASSERT(new_object->IsHeapObject());
4755 store_buffer_.EnterDirectlyIntoStoreBuffer(
4756 reinterpret_cast<Address>(slot));
4757 }
4758 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4759 } else if (record_slots &&
4760 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4761 mark_compact_collector()->RecordSlot(slot, slot, object);
4787 } 4762 }
4788 } 4763 }
4789 slot_address += kPointerSize; 4764 slot_address += kPointerSize;
4790 } 4765 }
4791 4766 }
4792 page->SetRegionMarks(marks); 4767
4793 } 4768
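IterateAndMarkPointersToFromSpace rescans the body of a freshly promoted object: from-space pointers are forwarded through the callback, and any slot that still refers to new space afterwards is entered directly into the store buffer; under compacting incremental marking, slots on evacuation candidates are additionally recorded for black holders. A toy rescan loop that keeps only the store-buffer half, with invented types:

#include <cstddef>
#include <vector>

// Each slot of the promoted object is modelled by a coarse state.
enum class SlotState { OLD, FROM_SPACE, NEW_SPACE };

void RescanPromotedObject(std::vector<SlotState>* slots,
                          std::vector<size_t>* store_buffer) {
  for (size_t i = 0; i < slots->size(); ++i) {
    if ((*slots)[i] == SlotState::FROM_SPACE) {
      // Stand-in for the ScavengeObject callback: assume the target was
      // copied but stayed in new space, so the slot must be remembered.
      (*slots)[i] = SlotState::NEW_SPACE;
      store_buffer->push_back(i);
    }
  }
}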
4794 4769 #ifdef DEBUG
4795 4770 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4796 uint32_t Heap::IterateDirtyRegions( 4771
4797 uint32_t marks, 4772
4798 Address area_start, 4773 bool IsAMapPointerAddress(Object** addr) {
4799 Address area_end, 4774 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4800 DirtyRegionCallback visit_dirty_region, 4775 int mod = a % Map::kSize;
4801 ObjectSlotCallback copy_object_func) { 4776 return mod >= Map::kPointerFieldsBeginOffset &&
4802 uint32_t newmarks = 0; 4777 mod < Map::kPointerFieldsEndOffset;
4803 uint32_t mask = 1; 4778 }
4804 4779
4805 if (area_start >= area_end) { 4780
4806 return newmarks; 4781 bool EverythingsAPointer(Object** addr) {
4807 } 4782 return true;
4808 4783 }
4809 Address region_start = area_start; 4784
4810 4785
4811 // area_start does not necessarily coincide with start of the first region. 4786 static void CheckStoreBuffer(Heap* heap,
4812 // Thus to calculate the beginning of the next region we have to align 4787 Object** current,
4813 // area_start by Page::kRegionSize. 4788 Object** limit,
4814 Address second_region = 4789 Object**** store_buffer_position,
4815 reinterpret_cast<Address>( 4790 Object*** store_buffer_top,
4816 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) & 4791 CheckStoreBufferFilter filter,
4817 ~Page::kRegionAlignmentMask); 4792 Address special_garbage_start,
4818 4793 Address special_garbage_end) {
4819 // Next region might be beyond area_end. 4794 Map* free_space_map = heap->free_space_map();
4820 Address region_end = Min(second_region, area_end); 4795 for ( ; current < limit; current++) {
4821 4796 Object* o = *current;
4822 if (marks & mask) { 4797 Address current_address = reinterpret_cast<Address>(current);
4823 if (visit_dirty_region(this, region_start, region_end, copy_object_func)) { 4798 // Skip free space.
4824 newmarks |= mask; 4799 if (o == free_space_map) {
4825 } 4800 Address current_address = reinterpret_cast<Address>(current);
4826 } 4801 FreeSpace* free_space =
4827 mask <<= 1; 4802 FreeSpace::cast(HeapObject::FromAddress(current_address));
4828 4803 int skip = free_space->Size();
4829 // Iterate subsequent regions which fully lay inside [area_start, area_end[. 4804 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
4830 region_start = region_end; 4805 ASSERT(skip > 0);
4831 region_end = region_start + Page::kRegionSize; 4806 current_address += skip - kPointerSize;
4832 4807 current = reinterpret_cast<Object**>(current_address);
4833 while (region_end <= area_end) { 4808 continue;
4834 if (marks & mask) { 4809 }
4835 if (visit_dirty_region(this, 4810 // Skip the current linear allocation space between top and limit which is
4836 region_start, 4811 // unmarked with the free space map, but can contain junk.
4837 region_end, 4812 if (current_address == special_garbage_start &&
4838 copy_object_func)) { 4813 special_garbage_end != special_garbage_start) {
4839 newmarks |= mask; 4814 current_address = special_garbage_end - kPointerSize;
4840 } 4815 current = reinterpret_cast<Object**>(current_address);
4841 } 4816 continue;
4842 4817 }
4843 region_start = region_end; 4818 if (!(*filter)(current)) continue;
4844 region_end = region_start + Page::kRegionSize; 4819 ASSERT(current_address < special_garbage_start ||
4845 4820 current_address >= special_garbage_end);
4846 mask <<= 1; 4821 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4847 } 4822 // We have to check that the pointer does not point into new space
4848 4823 // without trying to cast it to a heap object since the hash field of
4849 if (region_start != area_end) { 4824 // a string can contain values like 1 and 3 which are tagged null
4850 // A small piece of area left uniterated because area_end does not coincide 4825 // pointers.
4851 // with region end. Check whether region covering last part of area is 4826 if (!heap->InNewSpace(o)) continue;
4852 // dirty. 4827 while (**store_buffer_position < current &&
4853 if (marks & mask) { 4828 *store_buffer_position < store_buffer_top) {
4854 if (visit_dirty_region(this, region_start, area_end, copy_object_func)) { 4829 (*store_buffer_position)++;
4855 newmarks |= mask; 4830 }
4856 } 4831 if (**store_buffer_position != current ||
4857 } 4832 *store_buffer_position == store_buffer_top) {
4858 } 4833 Object** obj_start = current;
4859 4834 while (!(*obj_start)->IsMap()) obj_start--;
4860 return newmarks; 4835 UNREACHABLE();
4861 } 4836 }
4862 4837 }
4863 4838 }
4864 4839
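The debug check above walks a page and the sorted store buffer in lockstep, failing if it finds an old-to-new pointer whose slot address is not recorded. A simplified, self-contained model of that lockstep scan, assuming both inputs are plain address lists rather than V8's Object*** cursors:

#include <cstddef>
#include <cstdint>
#include <vector>

// Both inputs are in ascending address order: the slots because the page is
// scanned front to back, the store buffer because it was sorted and
// deduplicated first.
bool StoreBufferCoversAllSlots(
    const std::vector<uintptr_t>& slots_pointing_to_new_space,
    const std::vector<uintptr_t>& sorted_store_buffer) {
  std::size_t cursor = 0;
  for (uintptr_t slot : slots_pointing_to_new_space) {
    while (cursor < sorted_store_buffer.size() &&
           sorted_store_buffer[cursor] < slot) {
      ++cursor;  // advance the store-buffer cursor past smaller addresses
    }
    if (cursor == sorted_store_buffer.size() ||
        sorted_store_buffer[cursor] != slot) {
      return false;  // an old-to-new pointer is missing from the buffer
    }
  }
  return true;
}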
4865 void Heap::IterateDirtyRegions( 4840
4866 PagedSpace* space, 4841 // Check that the store buffer contains all intergenerational pointers by
4867 DirtyRegionCallback visit_dirty_region, 4842 // scanning a page and ensuring that all pointers to young space are in the
4868 ObjectSlotCallback copy_object_func, 4843 // store buffer.
4869 ExpectedPageWatermarkState expected_page_watermark_state) { 4844 void Heap::OldPointerSpaceCheckStoreBuffer() {
4870 4845 OldSpace* space = old_pointer_space();
4871 PageIterator it(space, PageIterator::PAGES_IN_USE); 4846 PageIterator pages(space);
4872 4847
4873 while (it.has_next()) { 4848 store_buffer()->SortUniq();
4874 Page* page = it.next(); 4849
4875 uint32_t marks = page->GetRegionMarks(); 4850 while (pages.has_next()) {
4876 4851 Page* page = pages.next();
4877 if (marks != Page::kAllRegionsCleanMarks) { 4852 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
4878 Address start = page->ObjectAreaStart(); 4853
4879 4854 Address end = page->ObjectAreaEnd();
4880 // Do not try to visit pointers beyond page allocation watermark. 4855
4881 // Page can contain garbage pointers there. 4856 Object*** store_buffer_position = store_buffer()->Start();
4882 Address end; 4857 Object*** store_buffer_top = store_buffer()->Top();
4883 4858
4884 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) || 4859 Object** limit = reinterpret_cast<Object**>(end);
4885 page->IsWatermarkValid()) { 4860 CheckStoreBuffer(this,
4886 end = page->AllocationWatermark(); 4861 current,
4887 } else { 4862 limit,
4888 end = page->CachedAllocationWatermark(); 4863 &store_buffer_position,
4889 } 4864 store_buffer_top,
4890 4865 &EverythingsAPointer,
4891 ASSERT(space == old_pointer_space_ || 4866 space->top(),
4892 (space == map_space_ && 4867 space->limit());
4893 ((page->ObjectAreaStart() - end) % Map::kSize == 0))); 4868 }
4894 4869 }
4895 page->SetRegionMarks(IterateDirtyRegions(marks, 4870
4896 start, 4871
4897 end, 4872 void Heap::MapSpaceCheckStoreBuffer() {
4898 visit_dirty_region, 4873 MapSpace* space = map_space();
4899 copy_object_func)); 4874 PageIterator pages(space);
4900 } 4875
4901 4876 store_buffer()->SortUniq();
4902 // Mark page watermark as invalid to maintain watermark validity invariant. 4877
4903 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details. 4878 while (pages.has_next()) {
4904 page->InvalidateWatermark(true); 4879 Page* page = pages.next();
4905 } 4880 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
4906 } 4881
4882 Address end = page->ObjectAreaEnd();
4883
4884 Object*** store_buffer_position = store_buffer()->Start();
4885 Object*** store_buffer_top = store_buffer()->Top();
4886
4887 Object** limit = reinterpret_cast<Object**>(end);
4888 CheckStoreBuffer(this,
4889 current,
4890 limit,
4891 &store_buffer_position,
4892 store_buffer_top,
4893 &IsAMapPointerAddress,
4894 space->top(),
4895 space->limit());
4896 }
4897 }
4898
4899
4900 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4901 LargeObjectIterator it(lo_space());
4902 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4903 // We only have code, sequential strings, or fixed arrays in large
4904 // object space, and only fixed arrays can possibly contain pointers to
4905 // the young generation.
4906 if (object->IsFixedArray()) {
4907 Object*** store_buffer_position = store_buffer()->Start();
4908 Object*** store_buffer_top = store_buffer()->Top();
4909 Object** current = reinterpret_cast<Object**>(object->address());
4910 Object** limit =
4911 reinterpret_cast<Object**>(object->address() + object->Size());
4912 CheckStoreBuffer(this,
4913 current,
4914 limit,
4915 &store_buffer_position,
4916 store_buffer_top,
4917 &EverythingsAPointer,
4918 NULL,
4919 NULL);
4920 }
4921 }
4922 }
4923 #endif
4907 4924
4908 4925
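The IsAMapPointerAddress filter used by MapSpaceCheckStoreBuffer above relies on modular arithmetic: only addresses that fall inside a Map's pointer-field range can hold heap pointers worth checking. A sketch of that idea with illustrative constants (V8's real Map::kSize and field offsets live in objects.h and differ from these values):

#include <cstdint>

// Illustrative constants only, not V8's real layout.
constexpr uintptr_t kMapSizeSketch = 88;
constexpr uintptr_t kPointerFieldsBeginSketch = 8;
constexpr uintptr_t kPointerFieldsEndSketch = 40;

// Maps are laid out back to back in map space, so (as the filter above
// assumes) an address's offset within its Map is its value modulo the map
// size; only offsets inside the pointer-field range are interesting.
bool IsAMapPointerFieldAddress(uintptr_t address) {
  uintptr_t offset_in_map = address % kMapSizeSketch;
  return offset_in_map >= kPointerFieldsBeginSketch &&
         offset_in_map < kPointerFieldsEndSketch;
}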
4909 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { 4926 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4910 IterateStrongRoots(v, mode); 4927 IterateStrongRoots(v, mode);
4911 IterateWeakRoots(v, mode); 4928 IterateWeakRoots(v, mode);
4912 } 4929 }
4913 4930
4914 4931
4915 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { 4932 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4916 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); 4933 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
(...skipping 31 matching lines...)
4948 isolate_->compilation_cache()->Iterate(v); 4965 isolate_->compilation_cache()->Iterate(v);
4949 v->Synchronize("compilationcache"); 4966 v->Synchronize("compilationcache");
4950 4967
4951 // Iterate over local handles in handle scopes. 4968 // Iterate over local handles in handle scopes.
4952 isolate_->handle_scope_implementer()->Iterate(v); 4969 isolate_->handle_scope_implementer()->Iterate(v);
4953 v->Synchronize("handlescope"); 4970 v->Synchronize("handlescope");
4954 4971
4955 // Iterate over the builtin code objects and code stubs in the 4972 // Iterate over the builtin code objects and code stubs in the
4956 // heap. Note that it is not necessary to iterate over code objects 4973 // heap. Note that it is not necessary to iterate over code objects
4957 // on scavenge collections. 4974 // on scavenge collections.
4958 if (mode != VISIT_ALL_IN_SCAVENGE && 4975 if (mode != VISIT_ALL_IN_SCAVENGE) {
4959 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4960 isolate_->builtins()->IterateBuiltins(v); 4976 isolate_->builtins()->IterateBuiltins(v);
4961 } 4977 }
4962 v->Synchronize("builtins"); 4978 v->Synchronize("builtins");
4963 4979
4964 // Iterate over global handles. 4980 // Iterate over global handles.
4965 switch (mode) { 4981 switch (mode) {
4966 case VISIT_ONLY_STRONG: 4982 case VISIT_ONLY_STRONG:
4967 isolate_->global_handles()->IterateStrongRoots(v); 4983 isolate_->global_handles()->IterateStrongRoots(v);
4968 break; 4984 break;
4969 case VISIT_ALL_IN_SCAVENGE: 4985 case VISIT_ALL_IN_SCAVENGE:
(...skipping 23 matching lines...)
4993 // output a flag to the snapshot. However at this point the serializer and 5009 // output a flag to the snapshot. However at this point the serializer and
4994 // deserializer are deliberately a little unsynchronized (see above) so the 5010 // deserializer are deliberately a little unsynchronized (see above) so the
4995 // checking of the sync flag in the snapshot would fail. 5011 // checking of the sync flag in the snapshot would fail.
4996 } 5012 }
4997 5013
4998 5014
4999 // TODO(1236194): Since the heap size is configurable on the command line 5015 // TODO(1236194): Since the heap size is configurable on the command line
5000 // and through the API, we should gracefully handle the case that the heap 5016 // and through the API, we should gracefully handle the case that the heap
5001 // size is not big enough to fit all the initial objects. 5017 // size is not big enough to fit all the initial objects.
5002 bool Heap::ConfigureHeap(int max_semispace_size, 5018 bool Heap::ConfigureHeap(int max_semispace_size,
5003 int max_old_gen_size, 5019 intptr_t max_old_gen_size,
5004 int max_executable_size) { 5020 intptr_t max_executable_size) {
5005 if (HasBeenSetup()) return false; 5021 if (HasBeenSetup()) return false;
5006 5022
5007 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size; 5023 if (max_semispace_size > 0) {
5024 if (max_semispace_size < Page::kPageSize) {
5025 max_semispace_size = Page::kPageSize;
5026 if (FLAG_trace_gc) {
5027 PrintF("Max semispace size cannot be less than %dkbytes",
5028 Page::kPageSize >> 10);
5029 }
5030 }
5031 max_semispace_size_ = max_semispace_size;
5032 }
5008 5033
5009 if (Snapshot::IsEnabled()) { 5034 if (Snapshot::IsEnabled()) {
5010 // If we are using a snapshot we always reserve the default amount 5035 // If we are using a snapshot we always reserve the default amount
5011 // of memory for each semispace because code in the snapshot has 5036 // of memory for each semispace because code in the snapshot has
5012 // write-barrier code that relies on the size and alignment of new 5037 // write-barrier code that relies on the size and alignment of new
5013 // space. We therefore cannot use a larger max semispace size 5038 // space. We therefore cannot use a larger max semispace size
5014 // than the default reserved semispace size. 5039 // than the default reserved semispace size.
5015 if (max_semispace_size_ > reserved_semispace_size_) { 5040 if (max_semispace_size_ > reserved_semispace_size_) {
5016 max_semispace_size_ = reserved_semispace_size_; 5041 max_semispace_size_ = reserved_semispace_size_;
5042 if (FLAG_trace_gc) {
5043 PrintF("Max semispace size cannot be more than %dkbytes",
5044 reserved_semispace_size_ >> 10);
5045 }
5017 } 5046 }
5018 } else { 5047 } else {
5019 // If we are not using snapshots we reserve space for the actual 5048 // If we are not using snapshots we reserve space for the actual
5020 // max semispace size. 5049 // max semispace size.
5021 reserved_semispace_size_ = max_semispace_size_; 5050 reserved_semispace_size_ = max_semispace_size_;
5022 } 5051 }
5023 5052
5024 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size; 5053 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
5025 if (max_executable_size > 0) { 5054 if (max_executable_size > 0) {
5026 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize); 5055 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5027 } 5056 }
5028 5057
5029 // The max executable size must be less than or equal to the max old 5058 // The max executable size must be less than or equal to the max old
5030 // generation size. 5059 // generation size.
5031 if (max_executable_size_ > max_old_generation_size_) { 5060 if (max_executable_size_ > max_old_generation_size_) {
5032 max_executable_size_ = max_old_generation_size_; 5061 max_executable_size_ = max_old_generation_size_;
5033 } 5062 }
5034 5063
5035 // The new space size must be a power of two to support single-bit testing 5064 // The new space size must be a power of two to support single-bit testing
5036 // for containment. 5065 // for containment.
5037 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_); 5066 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
5038 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_); 5067 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
5039 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_); 5068 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
5040 external_allocation_limit_ = 10 * max_semispace_size_; 5069 external_allocation_limit_ = 10 * max_semispace_size_;
5041 5070
5042 // The old generation is paged. 5071 // The old generation is paged and needs at least one page for each space.
5043 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize); 5072 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5073 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
5074 Page::kPageSize),
5075 RoundUp(max_old_generation_size_,
5076 Page::kPageSize));
5044 5077
5045 configured_ = true; 5078 configured_ = true;
5046 return true; 5079 return true;
5047 } 5080 }
5048 5081
5049 5082
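ConfigureHeap above rounds the semispace sizes up to powers of two because new-space containment is tested with a single mask and compare. A minimal sketch of both pieces, under the assumption that new space starts at an address aligned to its (power-of-two) size; these helpers are illustrative, not the ones V8 uses:

#include <cstdint>

// Smear the highest set bit downwards, then add one: a standard way to round
// a 32-bit value up to the next power of two (returns 1 for 0).
uint32_t RoundUpToPowerOf2Sketch(uint32_t x) {
  if (x <= 1) return 1;
  --x;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

// With a power-of-two size and a start address aligned to that size,
// "is this address inside new space" reduces to one mask and compare.
bool InNewSpaceSketch(uintptr_t address,
                      uintptr_t new_space_start,   // aligned to new_space_size
                      uintptr_t new_space_size) {  // power of two
  return (address & ~(new_space_size - 1)) == new_space_start;
}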
5050 bool Heap::ConfigureHeapDefault() { 5083 bool Heap::ConfigureHeapDefault() {
5051 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB, 5084 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
5052 FLAG_max_old_space_size * MB, 5085 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
5053 FLAG_max_executable_size * MB); 5086 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
5054 } 5087 }
5055 5088
5056 5089
5057 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) { 5090 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5058 *stats->start_marker = HeapStats::kStartMarker; 5091 *stats->start_marker = HeapStats::kStartMarker;
5059 *stats->end_marker = HeapStats::kEndMarker; 5092 *stats->end_marker = HeapStats::kEndMarker;
5060 *stats->new_space_size = new_space_.SizeAsInt(); 5093 *stats->new_space_size = new_space_.SizeAsInt();
5061 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity()); 5094 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5062 *stats->old_pointer_space_size = old_pointer_space_->Size(); 5095 *stats->old_pointer_space_size = old_pointer_space_->Size();
5063 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity(); 5096 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5064 *stats->old_data_space_size = old_data_space_->Size(); 5097 *stats->old_data_space_size = old_data_space_->Size();
5065 *stats->old_data_space_capacity = old_data_space_->Capacity(); 5098 *stats->old_data_space_capacity = old_data_space_->Capacity();
5066 *stats->code_space_size = code_space_->Size(); 5099 *stats->code_space_size = code_space_->Size();
5067 *stats->code_space_capacity = code_space_->Capacity(); 5100 *stats->code_space_capacity = code_space_->Capacity();
5068 *stats->map_space_size = map_space_->Size(); 5101 *stats->map_space_size = map_space_->Size();
5069 *stats->map_space_capacity = map_space_->Capacity(); 5102 *stats->map_space_capacity = map_space_->Capacity();
5070 *stats->cell_space_size = cell_space_->Size(); 5103 *stats->cell_space_size = cell_space_->Size();
5071 *stats->cell_space_capacity = cell_space_->Capacity(); 5104 *stats->cell_space_capacity = cell_space_->Capacity();
5072 *stats->lo_space_size = lo_space_->Size(); 5105 *stats->lo_space_size = lo_space_->Size();
5073 isolate_->global_handles()->RecordStats(stats); 5106 isolate_->global_handles()->RecordStats(stats);
5074 *stats->memory_allocator_size = isolate()->memory_allocator()->Size(); 5107 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5075 *stats->memory_allocator_capacity = 5108 *stats->memory_allocator_capacity =
5076 isolate()->memory_allocator()->Size() + 5109 isolate()->memory_allocator()->Size() +
5077 isolate()->memory_allocator()->Available(); 5110 isolate()->memory_allocator()->Available();
5078 *stats->os_error = OS::GetLastError(); 5111 *stats->os_error = OS::GetLastError();
5079 isolate()->memory_allocator()->Available(); 5112 isolate()->memory_allocator()->Available();
5080 if (take_snapshot) { 5113 if (take_snapshot) {
5081 HeapIterator iterator(HeapIterator::kFilterFreeListNodes); 5114 HeapIterator iterator;
5082 for (HeapObject* obj = iterator.next(); 5115 for (HeapObject* obj = iterator.next();
5083 obj != NULL; 5116 obj != NULL;
5084 obj = iterator.next()) { 5117 obj = iterator.next()) {
5085 InstanceType type = obj->map()->instance_type(); 5118 InstanceType type = obj->map()->instance_type();
5086 ASSERT(0 <= type && type <= LAST_TYPE); 5119 ASSERT(0 <= type && type <= LAST_TYPE);
5087 stats->objects_per_type[type]++; 5120 stats->objects_per_type[type]++;
5088 stats->size_per_type[type] += obj->Size(); 5121 stats->size_per_type[type] += obj->Size();
5089 } 5122 }
5090 } 5123 }
5091 } 5124 }
(...skipping 195 matching lines...)
5287 // Configuration is based on the flags new-space-size (really the semispace 5320 // Configuration is based on the flags new-space-size (really the semispace
5288 // size) and old-space-size if set or the initial values of semispace_size_ 5321 // size) and old-space-size if set or the initial values of semispace_size_
5289 // and old_generation_size_ otherwise. 5322 // and old_generation_size_ otherwise.
5290 if (!configured_) { 5323 if (!configured_) {
5291 if (!ConfigureHeapDefault()) return false; 5324 if (!ConfigureHeapDefault()) return false;
5292 } 5325 }
5293 5326
5294 gc_initializer_mutex->Lock(); 5327 gc_initializer_mutex->Lock();
5295 static bool initialized_gc = false; 5328 static bool initialized_gc = false;
5296 if (!initialized_gc) { 5329 if (!initialized_gc) {
5297 initialized_gc = true; 5330 initialized_gc = true;
5298 InitializeScavengingVisitorsTables(); 5331 InitializeScavengingVisitorsTables();
5299 NewSpaceScavenger::Initialize(); 5332 NewSpaceScavenger::Initialize();
5300 MarkCompactCollector::Initialize(); 5333 MarkCompactCollector::Initialize();
5301 } 5334 }
5302 gc_initializer_mutex->Unlock(); 5335 gc_initializer_mutex->Unlock();
5303 5336
5304 MarkMapPointersAsEncoded(false); 5337 MarkMapPointersAsEncoded(false);
5305 5338
5306 // Setup memory allocator and reserve a chunk of memory for new 5339 // Setup memory allocator.
5307 // space. The chunk is double the size of the requested reserved
5308 // new space size to ensure that we can find a pair of semispaces that
5309 // are contiguous and aligned to their size.
5310 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize())) 5340 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
5311 return false; 5341 return false;
5312 void* chunk =
5313 isolate_->memory_allocator()->ReserveInitialChunk(
5314 4 * reserved_semispace_size_);
5315 if (chunk == NULL) return false;
5316 5342
5317 // Align the pair of semispaces to their size, which must be a power 5343 // Setup new space.
5318 // of 2. 5344 if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
5319 Address new_space_start =
5320 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
5321 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
5322 return false; 5345 return false;
5323 } 5346 }
5324 5347
5325 // Initialize old pointer space. 5348 // Initialize old pointer space.
5326 old_pointer_space_ = 5349 old_pointer_space_ =
5327 new OldSpace(this, 5350 new OldSpace(this,
5328 max_old_generation_size_, 5351 max_old_generation_size_,
5329 OLD_POINTER_SPACE, 5352 OLD_POINTER_SPACE,
5330 NOT_EXECUTABLE); 5353 NOT_EXECUTABLE);
5331 if (old_pointer_space_ == NULL) return false; 5354 if (old_pointer_space_ == NULL) return false;
5332 if (!old_pointer_space_->Setup(NULL, 0)) return false; 5355 if (!old_pointer_space_->Setup()) return false;
5333 5356
5334 // Initialize old data space. 5357 // Initialize old data space.
5335 old_data_space_ = 5358 old_data_space_ =
5336 new OldSpace(this, 5359 new OldSpace(this,
5337 max_old_generation_size_, 5360 max_old_generation_size_,
5338 OLD_DATA_SPACE, 5361 OLD_DATA_SPACE,
5339 NOT_EXECUTABLE); 5362 NOT_EXECUTABLE);
5340 if (old_data_space_ == NULL) return false; 5363 if (old_data_space_ == NULL) return false;
5341 if (!old_data_space_->Setup(NULL, 0)) return false; 5364 if (!old_data_space_->Setup()) return false;
5342 5365
5343 // Initialize the code space, set its maximum capacity to the old 5366 // Initialize the code space, set its maximum capacity to the old
5344 // generation size. It needs executable memory. 5367 // generation size. It needs executable memory.
5345 // On 64-bit platform(s), we put all code objects in a 2 GB range of 5368 // On 64-bit platform(s), we put all code objects in a 2 GB range of
5346 // virtual address space, so that they can call each other with near calls. 5369 // virtual address space, so that they can call each other with near calls.
5347 if (code_range_size_ > 0) { 5370 if (code_range_size_ > 0) {
5348 if (!isolate_->code_range()->Setup(code_range_size_)) { 5371 if (!isolate_->code_range()->Setup(code_range_size_)) {
5349 return false; 5372 return false;
5350 } 5373 }
5351 } 5374 }
5352 5375
5353 code_space_ = 5376 code_space_ =
5354 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); 5377 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5355 if (code_space_ == NULL) return false; 5378 if (code_space_ == NULL) return false;
5356 if (!code_space_->Setup(NULL, 0)) return false; 5379 if (!code_space_->Setup()) return false;
5357 5380
5358 // Initialize map space. 5381 // Initialize map space.
5359 map_space_ = new MapSpace(this, FLAG_use_big_map_space 5382 map_space_ = new MapSpace(this,
5360 ? max_old_generation_size_ 5383 max_old_generation_size_,
5361 : MapSpace::kMaxMapPageIndex * Page::kPageSize, 5384 FLAG_max_map_space_pages,
5362 FLAG_max_map_space_pages, 5385 MAP_SPACE);
5363 MAP_SPACE);
5364 if (map_space_ == NULL) return false; 5386 if (map_space_ == NULL) return false;
5365 if (!map_space_->Setup(NULL, 0)) return false; 5387 if (!map_space_->Setup()) return false;
5366 5388
5367 // Initialize global property cell space. 5389 // Initialize global property cell space.
5368 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE); 5390 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5369 if (cell_space_ == NULL) return false; 5391 if (cell_space_ == NULL) return false;
5370 if (!cell_space_->Setup(NULL, 0)) return false; 5392 if (!cell_space_->Setup()) return false;
5371 5393
5372 // The large object code space may contain code or data. We set the memory 5394 // The large object code space may contain code or data. We set the memory
5373 // to be non-executable here for safety, but this means we need to enable it 5395 // to be non-executable here for safety, but this means we need to enable it
5374 // explicitly when allocating large code objects. 5396 // explicitly when allocating large code objects.
5375 lo_space_ = new LargeObjectSpace(this, LO_SPACE); 5397 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
5376 if (lo_space_ == NULL) return false; 5398 if (lo_space_ == NULL) return false;
5377 if (!lo_space_->Setup()) return false; 5399 if (!lo_space_->Setup()) return false;
5378
5379 if (create_heap_objects) { 5400 if (create_heap_objects) {
5380 // Create initial maps. 5401 // Create initial maps.
5381 if (!CreateInitialMaps()) return false; 5402 if (!CreateInitialMaps()) return false;
5382 if (!CreateApiObjects()) return false; 5403 if (!CreateApiObjects()) return false;
5383 5404
5384 // Create initial objects 5405 // Create initial objects
5385 if (!CreateInitialObjects()) return false; 5406 if (!CreateInitialObjects()) return false;
5386 5407
5387 global_contexts_list_ = undefined_value(); 5408 global_contexts_list_ = undefined_value();
5388 } 5409 }
5389 5410
5390 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); 5411 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5391 LOG(isolate_, IntPtrTEvent("heap-available", Available())); 5412 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5392 5413
5414 store_buffer()->Setup();
5415
5393 return true; 5416 return true;
5394 } 5417 }
5395 5418
5396 5419
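Setup above optionally reserves a dedicated code range of at most 2 GB so that code objects can call each other with near calls. The reason is that a near call encodes a signed 32-bit displacement; the sketch below illustrates the reachability check under the assumption of the 5-byte x86-64 E8 call encoding, which is an architectural detail not spelled out in this file:

#include <cstdint>

// A near call's rel32 displacement is measured from the end of the 5-byte
// instruction, so any two code addresses within a 2 GB range reach each
// other. Purely illustrative, not V8 code.
bool IsNearCallReachable(uintptr_t call_site, uintptr_t target) {
  int64_t displacement = static_cast<int64_t>(target) -
                         static_cast<int64_t>(call_site + 5);
  return displacement >= INT32_MIN && displacement <= INT32_MAX;
}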
5397 void Heap::SetStackLimits() { 5420 void Heap::SetStackLimits() {
5398 ASSERT(isolate_ != NULL); 5421 ASSERT(isolate_ != NULL);
5399 ASSERT(isolate_ == isolate()); 5422 ASSERT(isolate_ == isolate());
5400 // On 64 bit machines, pointers are generally out of range of Smis. We write 5423 // On 64 bit machines, pointers are generally out of range of Smis. We write
5401 // something that looks like an out of range Smi to the GC. 5424 // something that looks like an out of range Smi to the GC.
5402 5425
5403 // Set up the special root array entries containing the stack limits. 5426 // Set up the special root array entries containing the stack limits.
5404 // These are actually addresses, but the tag makes the GC ignore it. 5427 // These are actually addresses, but the tag makes the GC ignore it.
5405 roots_[kStackLimitRootIndex] = 5428 roots_[kStackLimitRootIndex] =
5406 reinterpret_cast<Object*>( 5429 reinterpret_cast<Object*>(
5407 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag); 5430 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5408 roots_[kRealStackLimitRootIndex] = 5431 roots_[kRealStackLimitRootIndex] =
5409 reinterpret_cast<Object*>( 5432 reinterpret_cast<Object*>(
5410 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag); 5433 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5411 } 5434 }
5412 5435
5413 5436
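SetStackLimits above stores raw stack-limit addresses in the root array disguised as Smis so the GC treats them as data rather than pointers. A sketch of that disguise with illustrative tag values (a one-bit smi tag of 0, as on 32-bit V8; the real constants come from v8globals.h):

#include <cstdint>

// Illustrative tag values only.
constexpr uintptr_t kSmiTagSketch = 0;
constexpr uintptr_t kSmiTagMaskSketch = 1;

// Clearing the tag bits of an arbitrary address makes it look like a small
// integer, so the GC ignores the stored stack limit instead of following it.
uintptr_t DisguiseAddressAsSmi(uintptr_t address) {
  return (address & ~kSmiTagMaskSketch) | kSmiTagSketch;
}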
5414 void Heap::TearDown() { 5437 void Heap::TearDown() {
5415 if (FLAG_print_cumulative_gc_stat) { 5438 if (FLAG_print_cumulative_gc_stat) {
5416 PrintF("\n\n"); 5439 PrintF("\n\n");
5417 PrintF("gc_count=%d ", gc_count_); 5440 PrintF("gc_count=%d ", gc_count_);
5418 PrintF("mark_sweep_count=%d ", ms_count_); 5441 PrintF("mark_sweep_count=%d ", ms_count_);
5419 PrintF("mark_compact_count=%d ", mc_count_);
5420 PrintF("max_gc_pause=%d ", get_max_gc_pause()); 5442 PrintF("max_gc_pause=%d ", get_max_gc_pause());
5421 PrintF("min_in_mutator=%d ", get_min_in_mutator()); 5443 PrintF("min_in_mutator=%d ", get_min_in_mutator());
5422 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", 5444 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
5423 get_max_alive_after_gc()); 5445 get_max_alive_after_gc());
5424 PrintF("\n\n"); 5446 PrintF("\n\n");
5425 } 5447 }
5426 5448
5427 isolate_->global_handles()->TearDown(); 5449 isolate_->global_handles()->TearDown();
5428 5450
5429 external_string_table_.TearDown(); 5451 external_string_table_.TearDown();
(...skipping 29 matching lines...)
5459 delete cell_space_; 5481 delete cell_space_;
5460 cell_space_ = NULL; 5482 cell_space_ = NULL;
5461 } 5483 }
5462 5484
5463 if (lo_space_ != NULL) { 5485 if (lo_space_ != NULL) {
5464 lo_space_->TearDown(); 5486 lo_space_->TearDown();
5465 delete lo_space_; 5487 delete lo_space_;
5466 lo_space_ = NULL; 5488 lo_space_ = NULL;
5467 } 5489 }
5468 5490
5491 store_buffer()->TearDown();
5492 incremental_marking()->TearDown();
5493
5469 isolate_->memory_allocator()->TearDown(); 5494 isolate_->memory_allocator()->TearDown();
5470 5495
5471 #ifdef DEBUG 5496 #ifdef DEBUG
5472 delete debug_utils_; 5497 delete debug_utils_;
5473 debug_utils_ = NULL; 5498 debug_utils_ = NULL;
5474 #endif 5499 #endif
5475 } 5500 }
5476 5501
5477 5502
5478 void Heap::Shrink() { 5503 void Heap::Shrink() {
(...skipping 196 matching lines...)
5675 } 5700 }
5676 5701
5677 5702
5678 class HeapObjectsFilter { 5703 class HeapObjectsFilter {
5679 public: 5704 public:
5680 virtual ~HeapObjectsFilter() {} 5705 virtual ~HeapObjectsFilter() {}
5681 virtual bool SkipObject(HeapObject* object) = 0; 5706 virtual bool SkipObject(HeapObject* object) = 0;
5682 }; 5707 };
5683 5708
5684 5709
5685 class FreeListNodesFilter : public HeapObjectsFilter {
5686 public:
5687 FreeListNodesFilter() {
5688 MarkFreeListNodes();
5689 }
5690
5691 bool SkipObject(HeapObject* object) {
5692 if (object->IsMarked()) {
5693 object->ClearMark();
5694 return true;
5695 } else {
5696 return false;
5697 }
5698 }
5699
5700 private:
5701 void MarkFreeListNodes() {
5702 Heap* heap = HEAP;
5703 heap->old_pointer_space()->MarkFreeListNodes();
5704 heap->old_data_space()->MarkFreeListNodes();
5705 MarkCodeSpaceFreeListNodes(heap);
5706 heap->map_space()->MarkFreeListNodes();
5707 heap->cell_space()->MarkFreeListNodes();
5708 }
5709
5710 void MarkCodeSpaceFreeListNodes(Heap* heap) {
5711 // For code space, using FreeListNode::IsFreeListNode is OK.
5712 HeapObjectIterator iter(heap->code_space());
5713 for (HeapObject* obj = iter.next_object();
5714 obj != NULL;
5715 obj = iter.next_object()) {
5716 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
5717 }
5718 }
5719
5720 AssertNoAllocation no_alloc;
5721 };
5722
5723
5724 class UnreachableObjectsFilter : public HeapObjectsFilter { 5710 class UnreachableObjectsFilter : public HeapObjectsFilter {
5725 public: 5711 public:
5726 UnreachableObjectsFilter() { 5712 UnreachableObjectsFilter() {
5727 MarkUnreachableObjects(); 5713 MarkUnreachableObjects();
5728 } 5714 }
5729 5715
5730 bool SkipObject(HeapObject* object) { 5716 bool SkipObject(HeapObject* object) {
5731 if (object->IsMarked()) { 5717 if (IntrusiveMarking::IsMarked(object)) {
5732 object->ClearMark(); 5718 IntrusiveMarking::ClearMark(object);
5733 return true; 5719 return true;
5734 } else { 5720 } else {
5735 return false; 5721 return false;
5736 } 5722 }
5737 } 5723 }
5738 5724
5739 private: 5725 private:
5740 class UnmarkingVisitor : public ObjectVisitor { 5726 class UnmarkingVisitor : public ObjectVisitor {
5741 public: 5727 public:
5742 UnmarkingVisitor() : list_(10) {} 5728 UnmarkingVisitor() : list_(10) {}
5743 5729
5744 void VisitPointers(Object** start, Object** end) { 5730 void VisitPointers(Object** start, Object** end) {
5745 for (Object** p = start; p < end; p++) { 5731 for (Object** p = start; p < end; p++) {
5746 if (!(*p)->IsHeapObject()) continue; 5732 if (!(*p)->IsHeapObject()) continue;
5747 HeapObject* obj = HeapObject::cast(*p); 5733 HeapObject* obj = HeapObject::cast(*p);
5748 if (obj->IsMarked()) { 5734 if (IntrusiveMarking::IsMarked(obj)) {
5749 obj->ClearMark(); 5735 IntrusiveMarking::ClearMark(obj);
5750 list_.Add(obj); 5736 list_.Add(obj);
5751 } 5737 }
5752 } 5738 }
5753 } 5739 }
5754 5740
5755 bool can_process() { return !list_.is_empty(); } 5741 bool can_process() { return !list_.is_empty(); }
5756 5742
5757 void ProcessNext() { 5743 void ProcessNext() {
5758 HeapObject* obj = list_.RemoveLast(); 5744 HeapObject* obj = list_.RemoveLast();
5759 obj->Iterate(this); 5745 obj->Iterate(this);
5760 } 5746 }
5761 5747
5762 private: 5748 private:
5763 List<HeapObject*> list_; 5749 List<HeapObject*> list_;
5764 }; 5750 };
5765 5751
5766 void MarkUnreachableObjects() { 5752 void MarkUnreachableObjects() {
5767 HeapIterator iterator; 5753 HeapIterator iterator;
5768 for (HeapObject* obj = iterator.next(); 5754 for (HeapObject* obj = iterator.next();
5769 obj != NULL; 5755 obj != NULL;
5770 obj = iterator.next()) { 5756 obj = iterator.next()) {
5771 obj->SetMark(); 5757 IntrusiveMarking::SetMark(obj);
5772 } 5758 }
5773 UnmarkingVisitor visitor; 5759 UnmarkingVisitor visitor;
5774 HEAP->IterateRoots(&visitor, VISIT_ALL); 5760 HEAP->IterateRoots(&visitor, VISIT_ALL);
5775 while (visitor.can_process()) 5761 while (visitor.can_process())
5776 visitor.ProcessNext(); 5762 visitor.ProcessNext();
5777 } 5763 }
5778 5764
5779 AssertNoAllocation no_alloc; 5765 AssertNoAllocation no_alloc;
5780 }; 5766 };
5781 5767
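UnreachableObjectsFilter above inverts the usual marking direction: it marks every heap object up front, then clears the mark on everything reachable from the roots, so whatever is still marked afterwards is unreachable and gets skipped. A graph-only sketch of that inversion, assuming the reference map has an entry for every object:

#include <unordered_map>
#include <unordered_set>
#include <vector>

using ObjectId = int;
using Edges = std::unordered_map<ObjectId, std::vector<ObjectId>>;

std::unordered_set<ObjectId> FindUnreachable(const Edges& references,
                                             const std::vector<ObjectId>& roots) {
  std::unordered_set<ObjectId> marked;             // start with everything marked
  for (const auto& entry : references) marked.insert(entry.first);

  std::vector<ObjectId> worklist(roots.begin(), roots.end());
  while (!worklist.empty()) {                      // clear marks transitively
    ObjectId current = worklist.back();
    worklist.pop_back();
    if (marked.erase(current) == 0) continue;      // already unmarked (visited)
    auto it = references.find(current);
    if (it == references.end()) continue;
    for (ObjectId target : it->second) worklist.push_back(target);
  }
  return marked;                                   // still marked == unreachable
}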
(...skipping 13 matching lines...)
5795 5781
5796 5782
5797 HeapIterator::~HeapIterator() { 5783 HeapIterator::~HeapIterator() {
5798 Shutdown(); 5784 Shutdown();
5799 } 5785 }
5800 5786
5801 5787
5802 void HeapIterator::Init() { 5788 void HeapIterator::Init() {
5803 // Start the iteration. 5789 // Start the iteration.
5804 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator : 5790 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5805 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject); 5791 new SpaceIterator(Isolate::Current()->heap()->
5792 GcSafeSizeOfOldObjectFunction());
5806 switch (filtering_) { 5793 switch (filtering_) {
5807 case kFilterFreeListNodes: 5794 case kFilterFreeListNodes:
5808 filter_ = new FreeListNodesFilter; 5795 // TODO(gc): Not handled.
5809 break; 5796 break;
5810 case kFilterUnreachable: 5797 case kFilterUnreachable:
5811 filter_ = new UnreachableObjectsFilter; 5798 filter_ = new UnreachableObjectsFilter;
5812 break; 5799 break;
5813 default: 5800 default:
5814 break; 5801 break;
5815 } 5802 }
5816 object_iterator_ = space_iterator_->next(); 5803 object_iterator_ = space_iterator_->next();
5817 } 5804 }
5818 5805
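HeapIterator::Init above wires an optional HeapObjectsFilter into the iteration, and next() then skips anything the filter flags. A container-based sketch of that skip loop; the template names are illustrative, and any type with bool SkipObject(Object*) works as the filter:

#include <vector>

template <typename Object, typename Filter>
std::vector<Object*> FilteredObjects(const std::vector<Object*>& all_objects,
                                     Filter* filter) {  // filter may be null
  std::vector<Object*> visible;
  for (Object* object : all_objects) {
    if (filter != nullptr && filter->SkipObject(object)) continue;
    visible.push_back(object);
  }
  return visible;
}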
(...skipping 116 matching lines...)
5935 MarkVisitor mark_visitor(this); 5922 MarkVisitor mark_visitor(this);
5936 MarkRecursively(root, &mark_visitor); 5923 MarkRecursively(root, &mark_visitor);
5937 5924
5938 UnmarkVisitor unmark_visitor(this); 5925 UnmarkVisitor unmark_visitor(this);
5939 UnmarkRecursively(root, &unmark_visitor); 5926 UnmarkRecursively(root, &unmark_visitor);
5940 5927
5941 ProcessResults(); 5928 ProcessResults();
5942 } 5929 }
5943 5930
5944 5931
5932 static bool SafeIsGlobalContext(HeapObject* obj) {
5933 return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
5934 }
5935
5936
5945 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) { 5937 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5946 if (!(*p)->IsHeapObject()) return; 5938 if (!(*p)->IsHeapObject()) return;
5947 5939
5948 HeapObject* obj = HeapObject::cast(*p); 5940 HeapObject* obj = HeapObject::cast(*p);
5949 5941
5950 Object* map = obj->map(); 5942 Object* map = obj->map();
5951 5943
5952 if (!map->IsHeapObject()) return; // visited before 5944 if (!map->IsHeapObject()) return; // visited before
5953 5945
5954 if (found_target_in_trace_) return; // stop if target found 5946 if (found_target_in_trace_) return; // stop if target found
5955 object_stack_.Add(obj); 5947 object_stack_.Add(obj);
5956 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) || 5948 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5957 (obj == search_target_)) { 5949 (obj == search_target_)) {
5958 found_target_in_trace_ = true; 5950 found_target_in_trace_ = true;
5959 found_target_ = true; 5951 found_target_ = true;
5960 return; 5952 return;
5961 } 5953 }
5962 5954
5963 bool is_global_context = obj->IsGlobalContext(); 5955 bool is_global_context = SafeIsGlobalContext(obj);
5964 5956
5965 // not visited yet 5957 // not visited yet
5966 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map)); 5958 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5967 5959
5968 Address map_addr = map_p->address(); 5960 Address map_addr = map_p->address();
5969 5961
5970 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag)); 5962 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5971 5963
5972 // Scan the object body. 5964 // Scan the object body.
5973 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) { 5965 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
(...skipping 87 matching lines...)
6061 } 6053 }
6062 #endif 6054 #endif
6063 6055
6064 6056
6065 static intptr_t CountTotalHolesSize() { 6057 static intptr_t CountTotalHolesSize() {
6066 intptr_t holes_size = 0; 6058 intptr_t holes_size = 0;
6067 OldSpaces spaces; 6059 OldSpaces spaces;
6068 for (OldSpace* space = spaces.next(); 6060 for (OldSpace* space = spaces.next();
6069 space != NULL; 6061 space != NULL;
6070 space = spaces.next()) { 6062 space = spaces.next()) {
6071 holes_size += space->Waste() + space->AvailableFree(); 6063 holes_size += space->Waste() + space->Available();
6072 } 6064 }
6073 return holes_size; 6065 return holes_size;
6074 } 6066 }
6075 6067
6076 6068
6077 GCTracer::GCTracer(Heap* heap) 6069 GCTracer::GCTracer(Heap* heap)
6078 : start_time_(0.0), 6070 : start_time_(0.0),
6079 start_size_(0), 6071 start_size_(0),
6080 gc_count_(0), 6072 gc_count_(0),
6081 full_gc_count_(0), 6073 full_gc_count_(0),
6082 is_compacting_(false),
6083 marked_count_(0),
6084 allocated_since_last_gc_(0), 6074 allocated_since_last_gc_(0),
6085 spent_in_mutator_(0), 6075 spent_in_mutator_(0),
6086 promoted_objects_size_(0), 6076 promoted_objects_size_(0),
6087 heap_(heap) { 6077 heap_(heap) {
6088 // These two fields reflect the state of the previous full collection.
6089 // Set them before they are changed by the collector.
6090 previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
6091 previous_marked_count_ =
6092 heap_->mark_compact_collector_.previous_marked_count();
6093 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; 6078 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6094 start_time_ = OS::TimeCurrentMillis(); 6079 start_time_ = OS::TimeCurrentMillis();
6095 start_size_ = heap_->SizeOfObjects(); 6080 start_size_ = heap_->SizeOfObjects();
6096 6081
6097 for (int i = 0; i < Scope::kNumberOfScopes; i++) { 6082 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6098 scopes_[i] = 0; 6083 scopes_[i] = 0;
6099 } 6084 }
6100 6085
6101 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(); 6086 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
6102 6087
6103 allocated_since_last_gc_ = 6088 allocated_since_last_gc_ =
6104 heap_->SizeOfObjects() - heap_->alive_after_last_gc_; 6089 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
6105 6090
6106 if (heap_->last_gc_end_timestamp_ > 0) { 6091 if (heap_->last_gc_end_timestamp_ > 0) {
6107 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0); 6092 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
6108 } 6093 }
6094
6095 steps_count_ = heap_->incremental_marking()->steps_count();
6096 steps_took_ = heap_->incremental_marking()->steps_took();
6097 longest_step_ = heap_->incremental_marking()->longest_step();
6098 steps_count_since_last_gc_ =
6099 heap_->incremental_marking()->steps_count_since_last_gc();
6100 steps_took_since_last_gc_ =
6101 heap_->incremental_marking()->steps_took_since_last_gc();
6109 } 6102 }
6110 6103
6111 6104
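The GCTracer constructor above derives the time spent in the mutator from the gap between the previous GC's end timestamp and the current GC's start time. A tiny sketch of that calculation (milliseconds, as in the tracer; the function name is illustrative):

#include <algorithm>

// A zero last-end timestamp means no GC has finished yet; negative gaps from
// clock adjustments are clamped to zero, as in the tracer above.
double MutatorTimeSinceLastGC(double current_gc_start_ms,
                              double last_gc_end_ms) {
  if (last_gc_end_ms <= 0) return 0.0;
  return std::max(current_gc_start_ms - last_gc_end_ms, 0.0);
}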
6112 GCTracer::~GCTracer() { 6105 GCTracer::~GCTracer() {
6113 // Printf ONE line iff flag is set. 6106 // Printf ONE line iff flag is set.
6114 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; 6107 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6115 6108
6116 bool first_gc = (heap_->last_gc_end_timestamp_ == 0); 6109 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6117 6110
6118 heap_->alive_after_last_gc_ = heap_->SizeOfObjects(); 6111 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
(...skipping 14 matching lines...)
6133 6126
6134 if (!FLAG_trace_gc_nvp) { 6127 if (!FLAG_trace_gc_nvp) {
6135 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]); 6128 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6136 6129
6137 PrintF("%s %.1f -> %.1f MB, ", 6130 PrintF("%s %.1f -> %.1f MB, ",
6138 CollectorString(), 6131 CollectorString(),
6139 static_cast<double>(start_size_) / MB, 6132 static_cast<double>(start_size_) / MB,
6140 SizeOfHeapObjects()); 6133 SizeOfHeapObjects());
6141 6134
6142 if (external_time > 0) PrintF("%d / ", external_time); 6135 if (external_time > 0) PrintF("%d / ", external_time);
6143 PrintF("%d ms.\n", time); 6136 PrintF("%d ms", time);
6137 if (steps_count_ > 0) {
6138 if (collector_ == SCAVENGER) {
6139 PrintF(" (+ %d ms in %d steps since last GC)",
6140 static_cast<int>(steps_took_since_last_gc_),
6141 steps_count_since_last_gc_);
6142 } else {
6143 PrintF(" (+ %d ms in %d steps since start of marking, "
6144 "biggest step %f ms)",
6145 static_cast<int>(steps_took_),
6146 steps_count_,
6147 longest_step_);
6148 }
6149 }
6150 PrintF(".\n");
6144 } else { 6151 } else {
6145 PrintF("pause=%d ", time); 6152 PrintF("pause=%d ", time);
6146 PrintF("mutator=%d ", 6153 PrintF("mutator=%d ",
6147 static_cast<int>(spent_in_mutator_)); 6154 static_cast<int>(spent_in_mutator_));
6148 6155
6149 PrintF("gc="); 6156 PrintF("gc=");
6150 switch (collector_) { 6157 switch (collector_) {
6151 case SCAVENGER: 6158 case SCAVENGER:
6152 PrintF("s"); 6159 PrintF("s");
6153 break; 6160 break;
6154 case MARK_COMPACTOR: 6161 case MARK_COMPACTOR:
6155 PrintF("%s", 6162 PrintF("ms");
6156 heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
6157 break; 6163 break;
6158 default: 6164 default:
6159 UNREACHABLE(); 6165 UNREACHABLE();
6160 } 6166 }
6161 PrintF(" "); 6167 PrintF(" ");
6162 6168
6163 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL])); 6169 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
6164 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK])); 6170 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
6165 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP])); 6171 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
6166 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE])); 6172 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
6167 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT])); 6173 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
6168 6174
6169 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_); 6175 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
6170 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects()); 6176 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
6171 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", 6177 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
6172 in_free_list_or_wasted_before_gc_); 6178 in_free_list_or_wasted_before_gc_);
6173 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize()); 6179 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
6174 6180
6175 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_); 6181 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
6176 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_); 6182 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
6177 6183
6184 if (collector_ == SCAVENGER) {
6185 PrintF("stepscount=%d ", steps_count_since_last_gc_);
6186 PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
6187 } else {
6188 PrintF("stepscount=%d ", steps_count_);
6189 PrintF("stepstook=%d ", static_cast<int>(steps_took_));
6190 }
6191
6178 PrintF("\n"); 6192 PrintF("\n");
6179 } 6193 }
6180 6194
6181 heap_->PrintShortHeapStatistics(); 6195 heap_->PrintShortHeapStatistics();
6182 } 6196 }
6183 6197
6184 6198
6185 const char* GCTracer::CollectorString() { 6199 const char* GCTracer::CollectorString() {
6186 switch (collector_) { 6200 switch (collector_) {
6187 case SCAVENGER: 6201 case SCAVENGER:
6188 return "Scavenge"; 6202 return "Scavenge";
6189 case MARK_COMPACTOR: 6203 case MARK_COMPACTOR:
6190 return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact" 6204 return "Mark-sweep";
6191 : "Mark-sweep";
6192 } 6205 }
6193 return "Unknown GC"; 6206 return "Unknown GC";
6194 } 6207 }
6195 6208
6196 6209
6197 int KeyedLookupCache::Hash(Map* map, String* name) { 6210 int KeyedLookupCache::Hash(Map* map, String* name) {
6198 // Uses only lower 32 bits if pointers are larger. 6211 // Uses only lower 32 bits if pointers are larger.
6199 uintptr_t addr_hash = 6212 uintptr_t addr_hash =
6200 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift; 6213 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
6201 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask); 6214 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
(...skipping 86 matching lines...)
6288 Verify(); 6301 Verify();
6289 } 6302 }
6290 6303
6291 6304
6292 void ExternalStringTable::TearDown() { 6305 void ExternalStringTable::TearDown() {
6293 new_space_strings_.Free(); 6306 new_space_strings_.Free();
6294 old_space_strings_.Free(); 6307 old_space_strings_.Free();
6295 } 6308 }
6296 6309
6297 6310
6311 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6312 chunk->set_next_chunk(chunks_queued_for_free_);
6313 chunks_queued_for_free_ = chunk;
6314 }
6315
6316
6317 void Heap::FreeQueuedChunks() {
6318 if (chunks_queued_for_free_ == NULL) return;
6319 MemoryChunk* next;
6320 MemoryChunk* chunk;
6321 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6322 next = chunk->next_chunk();
6323 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6324
6325 if (chunk->owner()->identity() == LO_SPACE) {
6326 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6327 // If FromAnyPointerAddress encounters a slot that belongs to a large
6328 // chunk queued for deletion it will fail to find the chunk because
6329 // it tries to perform a search in the list of pages owned by the large
6330 // object space, and queued chunks were detached from that list.
6331 // To work around this we split the large chunk into normal kPageSize aligned
6332 // pieces and initialize the owner field and flags of every piece.
6333 // If FromAnyPointerAddress encounters a slot that belongs to one of
6334 // these smaller pieces it will treat it as a slot on a normal Page.
6335 MemoryChunk* inner = MemoryChunk::FromAddress(
6336 chunk->address() + Page::kPageSize);
6337 MemoryChunk* inner_last = MemoryChunk::FromAddress(
6338 chunk->address() + chunk->size() - 1);
6339 while (inner <= inner_last) {
6340 // Size of a large chunk is always a multiple of
6341 // OS::AllocationAlignment() so there is always
6342 // enough space for a fake MemoryChunk header.
6343 inner->set_owner(lo_space());
6344 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6345 inner = MemoryChunk::FromAddress(
6346 inner->address() + Page::kPageSize);
6347 }
6348 }
6349 }
6350 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6351 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6352 next = chunk->next_chunk();
6353 isolate_->memory_allocator()->Free(chunk);
6354 }
6355 chunks_queued_for_free_ = NULL;
6356 }
6357
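FreeQueuedChunks above defers releasing memory chunks in three phases: queued chunks are first flagged, the store buffer then drops any slots that live on flagged chunks, and only afterwards is the memory returned. A simplified model of that ordering with illustrative types, assuming the chunks were heap-allocated with new (the real code goes through the memory allocator and additionally fakes headers inside large chunks):

#include <vector>

struct ChunkSketch {
  bool about_to_be_freed = false;
  ChunkSketch* next = nullptr;
};

struct StoreBufferSketch {
  std::vector<ChunkSketch*> slot_chunks;  // chunk owning each recorded slot

  void FilterFlagged() {
    std::vector<ChunkSketch*> kept;
    for (ChunkSketch* chunk : slot_chunks) {
      if (!chunk->about_to_be_freed) kept.push_back(chunk);
    }
    slot_chunks.swap(kept);
  }
};

void FreeQueuedChunksSketch(ChunkSketch*& queue, StoreBufferSketch& store_buffer) {
  for (ChunkSketch* chunk = queue; chunk != nullptr; chunk = chunk->next) {
    chunk->about_to_be_freed = true;  // 1. flag everything in the queue
  }
  store_buffer.FilterFlagged();       // 2. drop slots on flagged chunks
  ChunkSketch* chunk = queue;
  while (chunk != nullptr) {          // 3. only now is it safe to free
    ChunkSketch* next = chunk->next;
    delete chunk;
    chunk = next;
  }
  queue = nullptr;
}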
6298 } } // namespace v8::internal 6358 } } // namespace v8::internal