Chromium Code Reviews

Unified Diff: src/mark-compact.cc

Issue 12499004: Unlink evacuation candidates from list of pages before starting sweeper threads. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 9 months ago
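
In brief: before this patch, evacuation candidate pages stayed linked into their space's page list while the concurrent sweeper threads were running, and a separate FinalizeSweeping() step released them afterwards. The patch unlinks the candidates up front, so a sweeper thread walking the page list can never race against the main thread evacuating those pages; pages that are abandoned mid-evacuation are re-linked after the space's anchor (page->InsertAfter(...anchor())) so they are not lost from the list. The standalone C++ sketch below is not V8 code; every name in it is illustrative. It only shows the general pattern the patch applies: prune a shared list before publishing it to a worker thread.

// Standalone illustration (not V8 code) of the pattern this patch applies:
// unlink the pages that are about to be evacuated from the shared page
// list BEFORE the sweeper worker starts walking that list, so the worker
// can never observe a page whose contents the main thread is still moving.
#include <algorithm>
#include <functional>
#include <iterator>
#include <thread>
#include <vector>

struct Page {  // hypothetical stand-in for v8::internal::Page
  int id;
  bool evacuation_candidate;
};

// Stand-in for a sweeper thread's loop over a space's page list.
void SweepPages(const std::vector<Page*>& pages) {
  for (Page* p : pages) {
    (void)p;  // sweep p; safe only because candidates were removed up front
  }
}

int main() {
  std::vector<Page> storage = {{0, false}, {1, true}, {2, false}};
  std::vector<Page*> page_list;
  std::vector<Page*> candidates;
  for (Page& p : storage) page_list.push_back(&p);

  // 1. Unlink evacuation candidates from the shared list first...
  auto is_candidate = [](Page* p) { return p->evacuation_candidate; };
  std::copy_if(page_list.begin(), page_list.end(),
               std::back_inserter(candidates), is_candidate);
  page_list.erase(
      std::remove_if(page_list.begin(), page_list.end(), is_candidate),
      page_list.end());

  // 2. ...then start the concurrent sweeper: it only ever sees the
  //    remaining, non-candidate pages.
  std::thread sweeper(SweepPages, std::cref(page_list));

  // The main thread can now evacuate `candidates` without racing the
  // sweeper, and release them once both phases are finished.
  sweeper.join();
  return 0;
}

The same ordering shows up twice in the diff below: UnlinkEvacuationCandidates() runs before StartSweeperThreads(), and ReleaseEvacuationCandidates() replaces the old FinalizeSweeping() step at the end of sweeping.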
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 542 matching lines...)
void MarkCompactCollector::WaitUntilSweepingCompleted() {
  ASSERT(sweeping_pending_ == true);
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
  }
  sweeping_pending_ = false;
  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
-   heap()->FreeQueuedChunks();
Michael Starzinger 2013/03/08 12:52:06: There shouldn't be any queued chunks at this point...
Hannes Payer (out of office) 2013/03/08 14:02:30: Done.
}


intptr_t MarkCompactCollector::
    StealMemoryFromSweeperThreads(PagedSpace* space) {
  intptr_t freed_bytes = 0;
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
  }
  space->AddToAccountingStats(freed_bytes);
  space->DecrementUnsweptFreeBytes(freed_bytes);
  return freed_bytes;
}


bool MarkCompactCollector::AreSweeperThreadsActivated() {
  return heap()->isolate()->sweeper_threads() != NULL;
}


bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
  return sweeping_pending_;
}


- void MarkCompactCollector::FinalizeSweeping() {
-   ASSERT(sweeping_pending_ == false);
-   ReleaseEvacuationCandidates();
-   heap()->FreeQueuedChunks();
- }
-
-
void MarkCompactCollector::MarkInParallel() {
  for (int i = 0; i < FLAG_marking_threads; i++) {
    heap()->isolate()->marking_threads()[i]->StartMarking();
  }
}


void MarkCompactCollector::WaitUntilMarkingCompleted() {
  for (int i = 0; i < FLAG_marking_threads; i++) {
    heap()->isolate()->marking_threads()[i]->WaitForMarkingThread();
(...skipping 299 matching lines...)
#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  ASSERT(!FLAG_never_compact || !FLAG_always_compact);

  if (IsConcurrentSweepingInProgress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    WaitUntilSweepingCompleted();
-     FinalizeSweeping();
  }

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    ClearMarkbits();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

(...skipping 1917 matching lines...)
    if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
      EvacuateLiveObjectsFromPage(p);
    } else {
      // Without room for expansion evacuation is not guaranteed to succeed.
      // Pessimistically abandon unevacuated pages.
      for (int j = i; j < npages; j++) {
        Page* page = evacuation_candidates_[j];
        slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
        page->ClearEvacuationCandidate();
        page->SetFlag(Page::RESCAN_ON_EVACUATION);
+         page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
      }
      return;
    }
  }
}


class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 public:
(...skipping 457 matching lines...)
    p->set_scan_on_scavenge(false);
    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
    p->ResetLiveBytes();
    space->ReleasePage(p);
  }
  evacuation_candidates_.Rewind(0);
  compacting_ = false;
}


+ void MarkCompactCollector::UnlinkEvacuationCandidates() {
Michael Starzinger 2013/03/08 12:52:06: Can we move that to above the ReleaseEvacuationCandidates...
Hannes Payer (out of office) 2013/03/08 14:02:30: Done.
+   int npages = evacuation_candidates_.length();
+   for (int i = 0; i < npages; i++) {
+     Page* p = evacuation_candidates_[i];
+     if (!p->IsEvacuationCandidate()) continue;
+     p->Unlink();
+     p->ClearSweptPrecisely();
+     p->ClearSweptConservatively();
+   }
+ }
+
+
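Aside: Unlink() and InsertAfter() above operate on V8's intrusive, anchored page list. As a standalone illustration (again not V8 code; the PageNode type and its exact layout are assumptions for the example), the two splice operations look roughly like this:

// Standalone sketch of an intrusive, circular list with a sentinel
// "anchor" node, analogous to the page list that a PagedSpace owns.
#include <cassert>

struct PageNode {
  PageNode* prev = this;  // circular: a lone node points at itself
  PageNode* next = this;

  // Remove this node from whatever list it is currently in.
  void Unlink() {
    prev->next = next;
    next->prev = prev;
    prev = next = this;
  }

  // Splice this node into a list right after `other` (e.g. the anchor).
  void InsertAfter(PageNode* other) {
    Unlink();  // defensive: leave any current list first
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
};

int main() {
  PageNode anchor, a, b;
  a.InsertAfter(&anchor);  // list: anchor -> a
  b.InsertAfter(&a);       // list: anchor -> a -> b
  a.Unlink();              // list: anchor -> b
  assert(anchor.next == &b && b.next == &anchor);
  return 0;
}

Once Unlink() has run, an iterator that starts from the anchor simply never reaches the page, which is the property the sweeper threads rely on.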
static const int kStartTableEntriesPerLine = 5;
static const int kStartTableLines = 171;
static const int kStartTableInvalidLine = 127;
static const int kStartTableUnusedEntry = 126;

#define _ kStartTableUnusedEntry
#define X kStartTableInvalidLine
// Mark-bit to object start offset table.
//
// The line is indexed by the mark bits in a byte. The first number on
(...skipping 553 matching lines...)
  if (sweep_precisely_) how_to_sweep = PRECISE;
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces). We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SequentialSweepingScope scope(this);
  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
  SweepSpace(heap()->old_data_space(), how_to_sweep);

+   UnlinkEvacuationCandidates();
Michael Starzinger 2013/03/08 12:52:06: Add a short comment about why this needs to be done...
Hannes Payer (out of office) 2013/03/08 14:02:30: Done.
+
  if (how_to_sweep == PARALLEL_CONSERVATIVE ||
      how_to_sweep == CONCURRENT_CONSERVATIVE) {
    // TODO(hpayer): fix race with concurrent sweeper
    StartSweeperThreads();
  }

  if (how_to_sweep == PARALLEL_CONSERVATIVE) {
    WaitUntilSweepingCompleted();
  }

  RemoveDeadInvalidatedCode();
  SweepSpace(heap()->code_space(), PRECISE);

  SweepSpace(heap()->cell_space(), PRECISE);

  EvacuateNewSpaceAndCandidates();

  // ClearNonLiveTransitions depends on precise sweeping of map space to
  // detect whether unmarked map became dead in this collection or in one
  // of the previous ones.
  SweepSpace(heap()->map_space(), PRECISE);

  // Deallocate unmarked objects and clear marked bits for marked objects.
  heap_->lo_space()->FreeUnmarkedObjects();

-   if (how_to_sweep != CONCURRENT_CONSERVATIVE) {
-     FinalizeSweeping();
-   }
+   ReleaseEvacuationCandidates();
Michael Starzinger 2013/03/08 12:52:06: The right place to call FreeQueuedChunks would be...
Hannes Payer (out of office) 2013/03/08 14:02:30: Done.
}


void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
  if (heap()->isolate()->debug()->IsLoaded() ||
      heap()->isolate()->debug()->has_break_points()) {
    enable = false;
  }
#endif
(...skipping 180 matching lines...)
  while (buffer != NULL) {
    SlotsBuffer* next_buffer = buffer->next();
    DeallocateBuffer(buffer);
    buffer = next_buffer;
  }
  *buffer_address = NULL;
}


} }  // namespace v8::internal
