Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 517 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 528 | 528 |
| 529 | 529 |
| 530 void MarkCompactCollector::StartSweeperThreads() { | 530 void MarkCompactCollector::StartSweeperThreads() { |
| 531 sweeping_pending_ = true; | 531 sweeping_pending_ = true; |
| 532 for (int i = 0; i < FLAG_sweeper_threads; i++) { | 532 for (int i = 0; i < FLAG_sweeper_threads; i++) { |
| 533 heap()->isolate()->sweeper_threads()[i]->StartSweeping(); | 533 heap()->isolate()->sweeper_threads()[i]->StartSweeping(); |
| 534 } | 534 } |
| 535 } | 535 } |
| 536 | 536 |
| 537 | 537 |
| 538 void MarkCompactCollector::WaitUntilSweepingCompleted() { | 538 bool MarkCompactCollector::WaitUntilSweepingCompleted() { |
|
Michael Starzinger
2013/02/21 12:22:09
Why do you return a boolean here? IMHO this makes
Hannes Payer (out of office)
2013/02/21 12:52:31
Done.
| |
| 539 if (sweeping_pending_) { | 539 if (sweeping_pending_) { |
| 540 for (int i = 0; i < FLAG_sweeper_threads; i++) { | 540 for (int i = 0; i < FLAG_sweeper_threads; i++) { |
| 541 heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread(); | 541 heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread(); |
| 542 } | 542 } |
| 543 sweeping_pending_ = false; | 543 sweeping_pending_ = false; |
| 544 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE)); | 544 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE)); |
| 545 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE)); | 545 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE)); |
| 546 heap()->FreeQueuedChunks(); | 546 return true; |
| 547 } | 547 } |
| 548 return false; | |
| 548 } | 549 } |
| 549 | 550 |
| 550 | 551 |
| 551 intptr_t MarkCompactCollector:: | 552 intptr_t MarkCompactCollector:: |
| 552 StealMemoryFromSweeperThreads(PagedSpace* space) { | 553 StealMemoryFromSweeperThreads(PagedSpace* space) { |
| 553 intptr_t freed_bytes = 0; | 554 intptr_t freed_bytes = 0; |
| 554 for (int i = 0; i < FLAG_sweeper_threads; i++) { | 555 for (int i = 0; i < FLAG_sweeper_threads; i++) { |
| 555 freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space); | 556 freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space); |
| 556 } | 557 } |
| 557 return freed_bytes; | 558 return freed_bytes; |
| 558 } | 559 } |
| 559 | 560 |
| 560 | 561 |
| 561 bool MarkCompactCollector::AreSweeperThreadsActivated() { | 562 bool MarkCompactCollector::AreSweeperThreadsActivated() { |
| 562 return heap()->isolate()->sweeper_threads() != NULL; | 563 return heap()->isolate()->sweeper_threads() != NULL; |
| 563 } | 564 } |
| 564 | 565 |
| 565 | 566 |
| 566 bool MarkCompactCollector::IsConcurrentSweepingInProgress() { | 567 bool MarkCompactCollector::IsConcurrentSweepingInProgress() { |
| 567 return sweeping_pending_; | 568 return sweeping_pending_; |
| 568 } | 569 } |
| 569 | 570 |
| 570 | 571 |
| 572 void MarkCompactCollector::FinalizeSweeping() { | |
| 573 ASSERT(sweeping_pending_ == false); | |
| 574 ReleaseEvacuationCandidates(); | |
| 575 heap()->FreeQueuedChunks(); | |
| 576 } | |
| 577 | |
| 578 | |
| 571 void MarkCompactCollector::MarkInParallel() { | 579 void MarkCompactCollector::MarkInParallel() { |
| 572 for (int i = 0; i < FLAG_marking_threads; i++) { | 580 for (int i = 0; i < FLAG_marking_threads; i++) { |
| 573 heap()->isolate()->marking_threads()[i]->StartMarking(); | 581 heap()->isolate()->marking_threads()[i]->StartMarking(); |
| 574 } | 582 } |
| 575 } | 583 } |
| 576 | 584 |
| 577 | 585 |
| 578 void MarkCompactCollector::WaitUntilMarkingCompleted() { | 586 void MarkCompactCollector::WaitUntilMarkingCompleted() { |
| 579 for (int i = 0; i < FLAG_marking_threads; i++) { | 587 for (int i = 0; i < FLAG_marking_threads; i++) { |
| 580 heap()->isolate()->marking_threads()[i]->WaitForMarkingThread(); | 588 heap()->isolate()->marking_threads()[i]->WaitForMarkingThread(); |
| (...skipping 298 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 879 | 887 |
| 880 #ifdef DEBUG | 888 #ifdef DEBUG |
| 881 ASSERT(state_ == IDLE); | 889 ASSERT(state_ == IDLE); |
| 882 state_ = PREPARE_GC; | 890 state_ = PREPARE_GC; |
| 883 #endif | 891 #endif |
| 884 | 892 |
| 885 ASSERT(!FLAG_never_compact || !FLAG_always_compact); | 893 ASSERT(!FLAG_never_compact || !FLAG_always_compact); |
| 886 | 894 |
| 887 if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) { | 895 if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) { |
| 888 // Instead of waiting we could also abort the sweeper threads here. | 896 // Instead of waiting we could also abort the sweeper threads here. |
| 889 WaitUntilSweepingCompleted(); | 897 if (WaitUntilSweepingCompleted()) { |
| 898 FinalizeSweeping(); | |
| 899 } | |
| 890 } | 900 } |
| 891 | 901 |
| 892 // Clear marking bits if incremental marking is aborted. | 902 // Clear marking bits if incremental marking is aborted. |
| 893 if (was_marked_incrementally_ && abort_incremental_marking_) { | 903 if (was_marked_incrementally_ && abort_incremental_marking_) { |
| 894 heap()->incremental_marking()->Abort(); | 904 heap()->incremental_marking()->Abort(); |
| 895 ClearMarkbits(); | 905 ClearMarkbits(); |
| 896 AbortCompaction(); | 906 AbortCompaction(); |
| 897 was_marked_incrementally_ = false; | 907 was_marked_incrementally_ = false; |
| 898 } | 908 } |
| 899 | 909 |
| (...skipping 2373 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3273 heap_->isolate()->inner_pointer_to_code_cache()->Flush(); | 3283 heap_->isolate()->inner_pointer_to_code_cache()->Flush(); |
| 3274 | 3284 |
| 3275 #ifdef VERIFY_HEAP | 3285 #ifdef VERIFY_HEAP |
| 3276 if (FLAG_verify_heap) { | 3286 if (FLAG_verify_heap) { |
| 3277 VerifyEvacuation(heap_); | 3287 VerifyEvacuation(heap_); |
| 3278 } | 3288 } |
| 3279 #endif | 3289 #endif |
| 3280 | 3290 |
| 3281 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); | 3291 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); |
| 3282 ASSERT(migration_slots_buffer_ == NULL); | 3292 ASSERT(migration_slots_buffer_ == NULL); |
| 3293 } | |
| 3294 | |
| 3295 | |
| 3296 void MarkCompactCollector::ReleaseEvacuationCandidates() { | |
| 3297 int npages = evacuation_candidates_.length(); | |
| 3283 for (int i = 0; i < npages; i++) { | 3298 for (int i = 0; i < npages; i++) { |
| 3284 Page* p = evacuation_candidates_[i]; | 3299 Page* p = evacuation_candidates_[i]; |
| 3285 if (!p->IsEvacuationCandidate()) continue; | 3300 if (!p->IsEvacuationCandidate()) continue; |
| 3286 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3301 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3287 space->Free(p->area_start(), p->area_size()); | 3302 space->Free(p->area_start(), p->area_size()); |
| 3288 p->set_scan_on_scavenge(false); | 3303 p->set_scan_on_scavenge(false); |
| 3289 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); | 3304 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); |
| 3290 p->ResetLiveBytes(); | 3305 p->ResetLiveBytes(); |
| 3291 space->ReleasePage(p); | 3306 space->ReleasePage(p); |
| 3292 } | 3307 } |
| (...skipping 558 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3851 // the map space last because freeing non-live maps overwrites them and | 3866 // the map space last because freeing non-live maps overwrites them and |
| 3852 // the other spaces rely on possibly non-live maps to get the sizes for | 3867 // the other spaces rely on possibly non-live maps to get the sizes for |
| 3853 // non-live objects. | 3868 // non-live objects. |
| 3854 | 3869 |
| 3855 SweepSpace(heap()->old_pointer_space(), how_to_sweep); | 3870 SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
| 3856 SweepSpace(heap()->old_data_space(), how_to_sweep); | 3871 SweepSpace(heap()->old_data_space(), how_to_sweep); |
| 3857 | 3872 |
| 3858 if (how_to_sweep == PARALLEL_CONSERVATIVE) { | 3873 if (how_to_sweep == PARALLEL_CONSERVATIVE) { |
| 3859 // TODO(hpayer): fix race with concurrent sweeper | 3874 // TODO(hpayer): fix race with concurrent sweeper |
| 3860 StartSweeperThreads(); | 3875 StartSweeperThreads(); |
| 3861 if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) { | 3876 if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) { |
|
Michael Starzinger
2013/02/21 12:22:09
How about moving all of this magic into the how_to [reviewer comment truncated in the exported review page]
Hannes Payer (out of office)
2013/02/21 12:52:31
Done.
| |
| 3862 WaitUntilSweepingCompleted(); | 3877 WaitUntilSweepingCompleted(); |
| 3863 } | 3878 } |
| 3864 } | 3879 } |
| 3865 | 3880 |
| 3866 RemoveDeadInvalidatedCode(); | 3881 RemoveDeadInvalidatedCode(); |
| 3867 SweepSpace(heap()->code_space(), PRECISE); | 3882 SweepSpace(heap()->code_space(), PRECISE); |
| 3868 | 3883 |
| 3869 SweepSpace(heap()->cell_space(), PRECISE); | 3884 SweepSpace(heap()->cell_space(), PRECISE); |
| 3870 | 3885 |
| 3871 EvacuateNewSpaceAndCandidates(); | 3886 EvacuateNewSpaceAndCandidates(); |
| 3872 | 3887 |
| 3873 // ClearNonLiveTransitions depends on precise sweeping of map space to | 3888 // ClearNonLiveTransitions depends on precise sweeping of map space to |
| 3874 // detect whether unmarked map became dead in this collection or in one | 3889 // detect whether unmarked map became dead in this collection or in one |
| 3875 // of the previous ones. | 3890 // of the previous ones. |
| 3876 SweepSpace(heap()->map_space(), PRECISE); | 3891 SweepSpace(heap()->map_space(), PRECISE); |
| 3877 | 3892 |
| 3878 // Deallocate unmarked objects and clear marked bits for marked objects. | 3893 // Deallocate unmarked objects and clear marked bits for marked objects. |
| 3879 heap_->lo_space()->FreeUnmarkedObjects(); | 3894 heap_->lo_space()->FreeUnmarkedObjects(); |
| 3895 | |
| 3896 if (!FLAG_concurrent_sweeping) { | |
|
Michael Starzinger
2013/02/21 12:22:09
This is still not entirely correct, it _has_ to de [reviewer comment truncated in the exported review page]
Hannes Payer (out of office)
2013/02/21 12:52:31
Done.
| |
| 3897 FinalizeSweeping(); | |
| 3898 } | |
| 3880 } | 3899 } |
| 3881 | 3900 |
| 3882 | 3901 |
| 3883 void MarkCompactCollector::EnableCodeFlushing(bool enable) { | 3902 void MarkCompactCollector::EnableCodeFlushing(bool enable) { |
| 3884 #ifdef ENABLE_DEBUGGER_SUPPORT | 3903 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 3885 if (heap()->isolate()->debug()->IsLoaded() || | 3904 if (heap()->isolate()->debug()->IsLoaded() || |
| 3886 heap()->isolate()->debug()->has_break_points()) { | 3905 heap()->isolate()->debug()->has_break_points()) { |
| 3887 enable = false; | 3906 enable = false; |
| 3888 } | 3907 } |
| 3889 #endif | 3908 #endif |
| (...skipping 180 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 4070 while (buffer != NULL) { | 4089 while (buffer != NULL) { |
| 4071 SlotsBuffer* next_buffer = buffer->next(); | 4090 SlotsBuffer* next_buffer = buffer->next(); |
| 4072 DeallocateBuffer(buffer); | 4091 DeallocateBuffer(buffer); |
| 4073 buffer = next_buffer; | 4092 buffer = next_buffer; |
| 4074 } | 4093 } |
| 4075 *buffer_address = NULL; | 4094 *buffer_address = NULL; |
| 4076 } | 4095 } |
| 4077 | 4096 |
| 4078 | 4097 |
| 4079 } } // namespace v8::internal | 4098 } } // namespace v8::internal |
| OLD | NEW |