| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
| (...skipping 3742 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3753 if (!p->IsEvacuationCandidate()) continue; | 3753 if (!p->IsEvacuationCandidate()) continue; |
| 3754 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3754 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3755 space->Free(p->area_start(), p->area_size()); | 3755 space->Free(p->area_start(), p->area_size()); |
| 3756 p->set_scan_on_scavenge(false); | 3756 p->set_scan_on_scavenge(false); |
| 3757 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); | 3757 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); |
| 3758 p->ResetLiveBytes(); | 3758 p->ResetLiveBytes(); |
| 3759 space->ReleasePage(p); | 3759 space->ReleasePage(p); |
| 3760 } | 3760 } |
| 3761 evacuation_candidates_.Rewind(0); | 3761 evacuation_candidates_.Rewind(0); |
| 3762 compacting_ = false; | 3762 compacting_ = false; |
| 3763 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages(); |
| 3763 heap()->FreeQueuedChunks(); | 3764 heap()->FreeQueuedChunks(); |
| 3764 } | 3765 } |
| 3765 | 3766 |
| 3766 | 3767 |
| 3767 static const int kStartTableEntriesPerLine = 5; | 3768 static const int kStartTableEntriesPerLine = 5; |
| 3768 static const int kStartTableLines = 171; | 3769 static const int kStartTableLines = 171; |
| 3769 static const int kStartTableInvalidLine = 127; | 3770 static const int kStartTableInvalidLine = 127; |
| 3770 static const int kStartTableUnusedEntry = 126; | 3771 static const int kStartTableUnusedEntry = 126; |
| 3771 | 3772 |
| 3772 #define _ kStartTableUnusedEntry | 3773 #define _ kStartTableUnusedEntry |
| (...skipping 532 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4305 break; | 4306 break; |
| 4306 } | 4307 } |
| 4307 default: { UNREACHABLE(); } | 4308 default: { UNREACHABLE(); } |
| 4308 } | 4309 } |
| 4309 } | 4310 } |
| 4310 | 4311 |
| 4311 if (FLAG_gc_verbose) { | 4312 if (FLAG_gc_verbose) { |
| 4312 PrintF("SweepSpace: %s (%d pages swept)\n", | 4313 PrintF("SweepSpace: %s (%d pages swept)\n", |
| 4313 AllocationSpaceName(space->identity()), pages_swept); | 4314 AllocationSpaceName(space->identity()), pages_swept); |
| 4314 } | 4315 } |
| 4315 | |
| 4316 // Give pages that are queued to be freed back to the OS. | |
| 4317 heap()->FreeQueuedChunks(); | |
| 4318 } | 4316 } |
| 4319 | 4317 |
| 4320 | 4318 |
| 4321 void MarkCompactCollector::SweepSpaces() { | 4319 void MarkCompactCollector::SweepSpaces() { |
| 4322 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP); | 4320 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP); |
| 4323 double start_time = 0.0; | 4321 double start_time = 0.0; |
| 4324 if (FLAG_print_cumulative_gc_stat) { | 4322 if (FLAG_print_cumulative_gc_stat) { |
| 4325 start_time = base::OS::TimeCurrentMillis(); | 4323 start_time = base::OS::TimeCurrentMillis(); |
| 4326 } | 4324 } |
| 4327 | 4325 |
| 4328 #ifdef DEBUG | 4326 #ifdef DEBUG |
| 4329 state_ = SWEEP_SPACES; | 4327 state_ = SWEEP_SPACES; |
| 4330 #endif | 4328 #endif |
| 4331 | 4329 |
| 4332 MoveEvacuationCandidatesToEndOfPagesList(); | 4330 MoveEvacuationCandidatesToEndOfPagesList(); |
| 4333 | 4331 |
| 4334 // Noncompacting collections simply sweep the spaces to clear the mark | |
| 4335 // bits and free the nonlive blocks (for old and map spaces). We sweep | |
| 4336 // the map space last because freeing non-live maps overwrites them and | |
| 4337 // the other spaces rely on possibly non-live maps to get the sizes for | |
| 4338 // non-live objects. | |
| 4339 { | 4332 { |
| 4340 { | 4333 { |
| 4341 GCTracer::Scope sweep_scope(heap()->tracer(), | 4334 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 4342 GCTracer::Scope::MC_SWEEP_OLDSPACE); | 4335 GCTracer::Scope::MC_SWEEP_OLDSPACE); |
| 4343 SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING); | 4336 SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING); |
| 4344 } | 4337 } |
| 4345 { | 4338 { |
| 4346 GCTracer::Scope sweep_scope(heap()->tracer(), | 4339 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 4347 GCTracer::Scope::MC_SWEEP_CODE); | 4340 GCTracer::Scope::MC_SWEEP_CODE); |
| 4348 SweepSpace(heap()->code_space(), CONCURRENT_SWEEPING); | 4341 SweepSpace(heap()->code_space(), CONCURRENT_SWEEPING); |
| 4349 } | 4342 } |
| 4350 { | 4343 { |
| 4351 GCTracer::Scope sweep_scope(heap()->tracer(), | 4344 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 4352 GCTracer::Scope::MC_SWEEP_MAP); | 4345 GCTracer::Scope::MC_SWEEP_MAP); |
| 4353 SweepSpace(heap()->map_space(), CONCURRENT_SWEEPING); | 4346 SweepSpace(heap()->map_space(), CONCURRENT_SWEEPING); |
| 4354 } | 4347 } |
| 4355 sweeping_in_progress_ = true; | 4348 sweeping_in_progress_ = true; |
| 4356 if (heap()->concurrent_sweeping_enabled()) { | 4349 if (heap()->concurrent_sweeping_enabled()) { |
| 4357 StartSweeperThreads(); | 4350 StartSweeperThreads(); |
| 4358 } | 4351 } |
| 4359 } | 4352 } |
| 4360 | 4353 |
| 4361 EvacuateNewSpaceAndCandidates(); | 4354 // Deallocate unmarked large objects. |
| 4355 heap_->lo_space()->FreeUnmarkedObjects(); |
| 4356 |
| 4357 // Give pages that are queued to be freed back to the OS. Invalid store |
| 4358 // buffer entries are already filtered out. We can just release the memory. |
| 4359 heap()->FreeQueuedChunks(); |
| 4362 | 4360 |
| 4363 heap()->FreeDeadArrayBuffers(false); | 4361 heap()->FreeDeadArrayBuffers(false); |
| 4364 | 4362 |
| 4365 // Deallocate unmarked objects and clear marked bits for marked objects. | 4363 EvacuateNewSpaceAndCandidates(); |
| 4366 heap_->lo_space()->FreeUnmarkedObjects(); | 4364 |
| 4365 // Clear the marking state of live large objects. |
| 4366 heap_->lo_space()->ClearMarkingStateOfLiveObjects(); |
| 4367 | 4367 |
| 4368 // Deallocate evacuated candidate pages. | 4368 // Deallocate evacuated candidate pages. |
| 4369 ReleaseEvacuationCandidates(); | 4369 ReleaseEvacuationCandidates(); |
| 4370 CodeRange* code_range = heap()->isolate()->code_range(); | 4370 CodeRange* code_range = heap()->isolate()->code_range(); |
| 4371 if (code_range != NULL && code_range->valid()) { | 4371 if (code_range != NULL && code_range->valid()) { |
| 4372 code_range->ReserveEmergencyBlock(); | 4372 code_range->ReserveEmergencyBlock(); |
| 4373 } | 4373 } |
| 4374 | 4374 |
| 4375 if (FLAG_print_cumulative_gc_stat) { | 4375 if (FLAG_print_cumulative_gc_stat) { |
| 4376 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - | 4376 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - |
| (...skipping 336 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4713 SlotsBuffer* buffer = *buffer_address; | 4713 SlotsBuffer* buffer = *buffer_address; |
| 4714 while (buffer != NULL) { | 4714 while (buffer != NULL) { |
| 4715 SlotsBuffer* next_buffer = buffer->next(); | 4715 SlotsBuffer* next_buffer = buffer->next(); |
| 4716 DeallocateBuffer(buffer); | 4716 DeallocateBuffer(buffer); |
| 4717 buffer = next_buffer; | 4717 buffer = next_buffer; |
| 4718 } | 4718 } |
| 4719 *buffer_address = NULL; | 4719 *buffer_address = NULL; |
| 4720 } | 4720 } |
| 4721 } // namespace internal | 4721 } // namespace internal |
| 4722 } // namespace v8 | 4722 } // namespace v8 |
| OLD | NEW |