Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 10 matching lines...) | |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #include "compilation-cache.h" | 30 #include "compilation-cache.h" |
| 31 #include "deoptimizer.h" | |
| 31 #include "execution.h" | 32 #include "execution.h" |
| 32 #include "gdb-jit.h" | 33 #include "gdb-jit.h" |
| 33 #include "global-handles.h" | 34 #include "global-handles.h" |
| 34 #include "heap-profiler.h" | 35 #include "heap-profiler.h" |
| 35 #include "ic-inl.h" | 36 #include "ic-inl.h" |
| 36 #include "incremental-marking.h" | 37 #include "incremental-marking.h" |
| 37 #include "liveobjectlist-inl.h" | 38 #include "liveobjectlist-inl.h" |
| 38 #include "mark-compact.h" | 39 #include "mark-compact.h" |
| 39 #include "objects-visiting.h" | 40 #include "objects-visiting.h" |
| 40 #include "objects-visiting-inl.h" | 41 #include "objects-visiting-inl.h" |
| (...skipping 11 matching lines...) | |
| 52 | 53 |
| 53 // ------------------------------------------------------------------------- | 54 // ------------------------------------------------------------------------- |
| 54 // MarkCompactCollector | 55 // MarkCompactCollector |
| 55 | 56 |
| 56 MarkCompactCollector::MarkCompactCollector() : // NOLINT | 57 MarkCompactCollector::MarkCompactCollector() : // NOLINT |
| 57 #ifdef DEBUG | 58 #ifdef DEBUG |
| 58 state_(IDLE), | 59 state_(IDLE), |
| 59 #endif | 60 #endif |
| 60 sweep_precisely_(false), | 61 sweep_precisely_(false), |
| 61 compacting_(false), | 62 compacting_(false), |
| 63 was_marked_incrementally_(false), | |
| 62 collect_maps_(FLAG_collect_maps), | 64 collect_maps_(FLAG_collect_maps), |
| 63 tracer_(NULL), | 65 tracer_(NULL), |
| 64 migration_slots_buffer_(NULL), | 66 migration_slots_buffer_(NULL), |
| 65 #ifdef DEBUG | 67 #ifdef DEBUG |
| 66 live_young_objects_size_(0), | 68 live_young_objects_size_(0), |
| 67 live_old_pointer_objects_size_(0), | 69 live_old_pointer_objects_size_(0), |
| 68 live_old_data_objects_size_(0), | 70 live_old_data_objects_size_(0), |
| 69 live_code_objects_size_(0), | 71 live_code_objects_size_(0), |
| 70 live_map_objects_size_(0), | 72 live_map_objects_size_(0), |
| 71 live_cell_objects_size_(0), | 73 live_cell_objects_size_(0), |
| (...skipping 159 matching lines...) | |
| 231 #endif | 233 #endif |
| 232 | 234 |
| 233 | 235 |
| 234 void MarkCompactCollector::AddEvacuationCandidate(Page* p) { | 236 void MarkCompactCollector::AddEvacuationCandidate(Page* p) { |
| 235 p->MarkEvacuationCandidate(); | 237 p->MarkEvacuationCandidate(); |
| 236 evacuation_candidates_.Add(p); | 238 evacuation_candidates_.Add(p); |
| 237 } | 239 } |
| 238 | 240 |
| 239 | 241 |
| 240 bool MarkCompactCollector::StartCompaction() { | 242 bool MarkCompactCollector::StartCompaction() { |
| 241 // Don't start compaction if we are in the middle of incremental | 243 if (!compacting_) { |
| 242 // marking cycle. We did not collect any slots. | |
| 243 if (!compacting_ && !heap_->incremental_marking()->IsMarking()) { | |
| 244 ASSERT(evacuation_candidates_.length() == 0); | 244 ASSERT(evacuation_candidates_.length() == 0); |
| 245 | 245 |
| 246 CollectEvacuationCandidates(heap()->old_pointer_space()); | 246 CollectEvacuationCandidates(heap()->old_pointer_space()); |
| 247 CollectEvacuationCandidates(heap()->old_data_space()); | 247 CollectEvacuationCandidates(heap()->old_data_space()); |
| 248 CollectEvacuationCandidates(heap()->code_space()); | 248 CollectEvacuationCandidates(heap()->code_space()); |
| 249 | 249 |
| 250 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists(); | 250 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists(); |
| 251 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); | 251 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); |
| 252 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); | 252 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); |
| 253 | 253 |
| 254 compacting_ = evacuation_candidates_.length() > 0; | 254 compacting_ = evacuation_candidates_.length() > 0; |
| 255 } | 255 } |
| 256 | 256 |
| 257 return compacting_; | 257 return compacting_; |
| 258 } | 258 } |
| 259 | 259 |
| 260 | 260 |
| 261 void MarkCompactCollector::AbortCompaction() { | |
| 262 if (compacting_) { | |
| 263 int npages = evacuation_candidates_.length(); | |
| 264 for (int i = 0; i < npages; i++) { | |
| 265 Page* p = evacuation_candidates_[i]; | |
| 266 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); | |
| 267 p->ClearEvacuationCandidate(); | |
| 268 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | |
| 269 } | |
| 270 compacting_ = false; | |
| 271 evacuation_candidates_.Rewind(0); | |
| 272 } | |
| 273 ASSERT_EQ(0, evacuation_candidates_.length()); | |
| 274 } | |
| 275 | |
| 276 | |
| 277 void MarkCompactCollector::CollectGarbage() { | 261 void MarkCompactCollector::CollectGarbage() { |
| 278 // Make sure that Prepare() has been called. The individual steps below will | 262 // Make sure that Prepare() has been called. The individual steps below will |
| 279 // update the state as they proceed. | 263 // update the state as they proceed. |
| 280 ASSERT(state_ == PREPARE_GC); | 264 ASSERT(state_ == PREPARE_GC); |
| 281 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); | 265 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); |
| 282 | 266 |
| 283 MarkLiveObjects(); | 267 MarkLiveObjects(); |
| 284 ASSERT(heap_->incremental_marking()->IsStopped()); | 268 ASSERT(heap_->incremental_marking()->IsStopped()); |
| 285 | 269 |
| 286 if (collect_maps_) ClearNonLiveTransitions(); | 270 if (collect_maps_) ClearNonLiveTransitions(); |
| (...skipping 169 matching lines...) | |
| 456 } | 440 } |
| 457 | 441 |
| 458 if (count > 0 && FLAG_trace_fragmentation) { | 442 if (count > 0 && FLAG_trace_fragmentation) { |
| 459 PrintF("Collected %d evacuation candidates for space %s\n", | 443 PrintF("Collected %d evacuation candidates for space %s\n", |
| 460 count, | 444 count, |
| 461 AllocationSpaceName(space->identity())); | 445 AllocationSpaceName(space->identity())); |
| 462 } | 446 } |
| 463 } | 447 } |
| 464 | 448 |
| 465 | 449 |
| 450 void MarkCompactCollector::AbortCompaction() { | |
| 451 if (compacting_) { | |
| 452 int npages = evacuation_candidates_.length(); | |
| 453 for (int i = 0; i < npages; i++) { | |
| 454 Page* p = evacuation_candidates_[i]; | |
| 455 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); | |
| 456 p->ClearEvacuationCandidate(); | |
| 457 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | |
| 458 } | |
| 459 compacting_ = false; | |
| 460 evacuation_candidates_.Rewind(0); | |
| 461 invalidated_code_.Rewind(0); | |
| 462 } | |
| 463 ASSERT_EQ(0, evacuation_candidates_.length()); | |
| 464 } | |
| 465 | |
| 466 | |
| 466 void MarkCompactCollector::Prepare(GCTracer* tracer) { | 467 void MarkCompactCollector::Prepare(GCTracer* tracer) { |
| 467 FLAG_flush_code = false; | 468 FLAG_flush_code = false; |
| 468 | 469 |
| 470 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking(); | |
| 471 | |
| 469 // Disable collection of maps if incremental marking is enabled. | 472 // Disable collection of maps if incremental marking is enabled. |
| 470 // Map collection algorithm relies on a special map transition tree traversal | 473 // Map collection algorithm relies on a special map transition tree traversal |
| 471 // order which is not implemented for incremental marking. | 474 // order which is not implemented for incremental marking. |
| 472 collect_maps_ = FLAG_collect_maps && | 475 collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_; |
| 473 !heap()->incremental_marking()->IsMarking(); | |
| 474 | 476 |
| 475 // Rather than passing the tracer around we stash it in a static member | 477 // Rather than passing the tracer around we stash it in a static member |
| 476 // variable. | 478 // variable. |
| 477 tracer_ = tracer; | 479 tracer_ = tracer; |
| 478 | 480 |
| 479 #ifdef DEBUG | 481 #ifdef DEBUG |
| 480 ASSERT(state_ == IDLE); | 482 ASSERT(state_ == IDLE); |
| 481 state_ = PREPARE_GC; | 483 state_ = PREPARE_GC; |
| 482 #endif | 484 #endif |
| 483 ASSERT(!FLAG_always_compact || !FLAG_never_compact); | 485 ASSERT(!FLAG_always_compact || !FLAG_never_compact); |
| 484 | 486 |
| 485 if (collect_maps_) CreateBackPointers(); | 487 if (collect_maps_) CreateBackPointers(); |
| 486 #ifdef ENABLE_GDB_JIT_INTERFACE | 488 #ifdef ENABLE_GDB_JIT_INTERFACE |
| 487 if (FLAG_gdbjit) { | 489 if (FLAG_gdbjit) { |
| 488 // If GDBJIT interface is active disable compaction. | 490 // If GDBJIT interface is active disable compaction. |
| 489 compacting_collection_ = false; | 491 compacting_collection_ = false; |
| 490 } | 492 } |
| 491 #endif | 493 #endif |
| 492 | 494 |
| 493 // Clear marking bits for precise sweeping to collect all garbage. | 495 // Clear marking bits for precise sweeping to collect all garbage. |
| 494 if (heap()->incremental_marking()->IsMarking() && PreciseSweepingRequired()) { | 496 if (was_marked_incrementally_ && PreciseSweepingRequired()) { |
| 495 heap()->incremental_marking()->Abort(); | 497 heap()->incremental_marking()->Abort(); |
| 496 ClearMarkbits(heap_); | 498 ClearMarkbits(heap_); |
| 497 AbortCompaction(); | 499 AbortCompaction(); |
| 500 was_marked_incrementally_ = false; | |
| 498 } | 501 } |
| 499 | 502 |
| 500 if (!FLAG_never_compact) StartCompaction(); | 503 // Don't start compaction if we are in the middle of incremental |
| 504 // marking cycle. We did not collect any slots. | |
| 505 if (!FLAG_never_compact && !was_marked_incrementally_) { | |
| 506 StartCompaction(); | |
| 507 } | |
| 501 | 508 |
| 502 PagedSpaces spaces; | 509 PagedSpaces spaces; |
| 503 for (PagedSpace* space = spaces.next(); | 510 for (PagedSpace* space = spaces.next(); |
| 504 space != NULL; | 511 space != NULL; |
| 505 space = spaces.next()) { | 512 space = spaces.next()) { |
| 506 space->PrepareForMarkCompact(); | 513 space->PrepareForMarkCompact(); |
| 507 } | 514 } |
| 508 | 515 |
| 509 #ifdef DEBUG | 516 #ifdef DEBUG |
| 510 if (!heap()->incremental_marking()->IsMarking()) { | 517 if (!was_marked_incrementally_) { |
| 511 VerifyMarkbitsAreClean(); | 518 VerifyMarkbitsAreClean(); |
| 512 } | 519 } |
| 513 #endif | 520 #endif |
| 514 | 521 |
| 515 #ifdef DEBUG | 522 #ifdef DEBUG |
| 516 live_bytes_ = 0; | 523 live_bytes_ = 0; |
| 517 live_young_objects_size_ = 0; | 524 live_young_objects_size_ = 0; |
| 518 live_old_pointer_objects_size_ = 0; | 525 live_old_pointer_objects_size_ = 0; |
| 519 live_old_data_objects_size_ = 0; | 526 live_old_data_objects_size_ = 0; |
| 520 live_code_objects_size_ = 0; | 527 live_code_objects_size_ = 0; |
| (...skipping 1445 matching lines...) | |
| 1966 | 1973 |
| 1967 void MarkCompactCollector::MarkLiveObjects() { | 1974 void MarkCompactCollector::MarkLiveObjects() { |
| 1968 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); | 1975 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); |
| 1969 // The recursive GC marker detects when it is nearing stack overflow, | 1976 // The recursive GC marker detects when it is nearing stack overflow, |
| 1970 // and switches to a different marking system. JS interrupts interfere | 1977 // and switches to a different marking system. JS interrupts interfere |
| 1971 // with the C stack limit check. | 1978 // with the C stack limit check. |
| 1972 PostponeInterruptsScope postpone(heap()->isolate()); | 1979 PostponeInterruptsScope postpone(heap()->isolate()); |
| 1973 | 1980 |
| 1974 bool incremental_marking_overflowed = false; | 1981 bool incremental_marking_overflowed = false; |
| 1975 IncrementalMarking* incremental_marking = heap_->incremental_marking(); | 1982 IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
| 1976 if (incremental_marking->IsMarking()) { | 1983 if (was_marked_incrementally_) { |
| 1977 // Finalize the incremental marking and check whether we had an overflow. | 1984 // Finalize the incremental marking and check whether we had an overflow. |
| 1978 // Both markers use grey color to mark overflowed objects so | 1985 // Both markers use grey color to mark overflowed objects so |
| 1979 // non-incremental marker can deal with them as if overflow | 1986 // non-incremental marker can deal with them as if overflow |
| 1980 // occurred during normal marking. | 1987 // occurred during normal marking. |
| 1981 // But incremental marker uses a separate marking deque | 1988 // But incremental marker uses a separate marking deque |
| 1982 // so we have to explicitly copy its overflow state. | 1989 // so we have to explicitly copy its overflow state. |
| 1983 incremental_marking->Finalize(); | 1990 incremental_marking->Finalize(); |
| 1984 incremental_marking_overflowed = | 1991 incremental_marking_overflowed = |
| 1985 incremental_marking->marking_deque()->overflowed(); | 1992 incremental_marking->marking_deque()->overflowed(); |
| 1986 incremental_marking->marking_deque()->ClearOverflowed(); | 1993 incremental_marking->marking_deque()->ClearOverflowed(); |
| (...skipping 850 matching lines...) | |
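The `was_marked_incrementally_` flag added in this patch is captured once in `Prepare()` and read again long after marking has finished, for example to decide in `EvacuateNewSpaceAndCandidates()` whether large-object slots need re-visiting. A minimal standalone sketch of why the live `IsMarking()` query cannot simply be reused later, using stand-in types rather than V8's:

```cpp
#include <cassert>

// Stand-in for the incremental marker; not a V8 class.
struct StubIncrementalMarking {
  bool marking = true;
  bool IsMarking() const { return marking; }
  void Finalize() { marking = false; }  // marking stops once it is finalized
};

int main() {
  StubIncrementalMarking im;

  // Prepare(): remember whether this collection starts from an incremental cycle.
  bool was_marked_incrementally = im.IsMarking();

  // MarkLiveObjects(): the incremental marker is finalized here.
  if (was_marked_incrementally) im.Finalize();

  // EvacuateNewSpaceAndCandidates(): the live query is stale by now ...
  assert(!im.IsMarking());
  // ... but the snapshot still says whether recorded slots may need filtering.
  assert(was_marked_incrementally);
  return 0;
}
```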
| 2837 // Clear marking bits for current cell. | 2844 // Clear marking bits for current cell. |
| 2838 cells[cell_index] = 0; | 2845 cells[cell_index] = 0; |
| 2839 } | 2846 } |
| 2840 if (free_start != p->ObjectAreaEnd()) { | 2847 if (free_start != p->ObjectAreaEnd()) { |
| 2841 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); | 2848 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); |
| 2842 } | 2849 } |
| 2843 p->ResetLiveBytes(); | 2850 p->ResetLiveBytes(); |
| 2844 } | 2851 } |
| 2845 | 2852 |
| 2846 | 2853 |
| 2854 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { | |
| 2855 Page* p = Page::FromAddress(code->address()); | |
| 2856 | |
| 2857 if (p->IsEvacuationCandidate() || | |
| 2858 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | |
| 2859 return false; | |
| 2860 } | |
| 2861 | |
| 2862 Address code_start = code->address(); | |
| 2863 Address code_end = code_start + code->Size(); | |
| 2864 | |
| 2865 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start); | |
| 2866 uint32_t end_index = | |
| 2867 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize); | |
| 2868 | |
| 2869 Bitmap* b = p->markbits(); | |
| 2870 | |
| 2871 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index); | |
| 2872 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index); | |
| 2873 | |
| 2874 MarkBit::CellType* start_cell = start_mark_bit.cell(); | |
| 2875 MarkBit::CellType* end_cell = end_mark_bit.cell(); | |
| 2876 | |
| 2877 if (value) { | |
| 2878 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1); | |
| 2879 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1; | |
| 2880 | |
| 2881 if (start_cell == end_cell) { | |
| 2882 *start_cell |= start_mask & end_mask; | |
| 2883 } else { | |
| 2884 *start_cell |= start_mask; | |
| 2885 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) { | |
| 2886 *cell = ~0; | |
| 2887 } | |
| 2888 *end_cell |= end_mask; | |
| 2889 } | |
| 2890 } else { | |
| 2891 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) { | |
| 2892 *cell = 0; | |
| 2893 } | |
| 2894 } | |
| 2895 | |
| 2896 return true; | |
| 2897 } | |
| 2898 | |
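The mask arithmetic in `SetMarkBitsUnderInvalidatedCode` above is terse: `~(start_mark_bit.mask() - 1)` selects every bit at or above the start position within a cell, `(end_mark_bit.mask() << 1) - 1` selects every bit at or below the end position, and their intersection covers exactly the range spanned by the code object when both ends fall in the same cell. A small standalone illustration with plain `uint32_t` values (the bit positions are made up for the example):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical bit positions inside one 32-bit mark-bit cell.
  const uint32_t start_bit = 5;
  const uint32_t end_bit = 12;

  uint32_t start_bit_mask = 1u << start_bit;    // analogue of start_mark_bit.mask()
  uint32_t end_bit_mask = 1u << end_bit;        // analogue of end_mark_bit.mask()

  uint32_t start_mask = ~(start_bit_mask - 1);  // bits start_bit..31 set
  uint32_t end_mask = (end_bit_mask << 1) - 1;  // bits 0..end_bit set

  // Same-cell case: the intersection marks exactly bits start_bit..end_bit.
  assert((start_mask & end_mask) == 0x1FE0u);   // bits 5..12
  return 0;
}
```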
| 2899 | |
| 2900 static bool IsOnInvalidatedCodeObject(Address addr) { | |
| 2901 Page* p = Page::FromAddress(addr); | |
| 2902 | |
| 2903 // First check owner's identity because old pointer and old data spaces | |
| 2904 // are swept lazily and might still have non-zero mark-bits on some | |
| 2905 // pages. | |
| 2906 if (p->owner()->identity() != CODE_SPACE) return false; | |
| Erik Corry (2011/09/22 11:43:16): This will return immediately for large code pages. | |
| 2907 | |
| 2908 // In code space only bits on evacuation candidates (but we don't record | |
| 2909 // any slots on them) and under invalidated code objects are non-zero. | |
| 2910 MarkBit mark_bit = | |
| 2911 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr)); | |
| 2912 | |
| 2913 return mark_bit.Get(); | |
| 2914 } | |
| 2915 | |
| 2916 | |
| 2917 void MarkCompactCollector::InvalidateCode(Code* code) { | |
| 2918 if (heap_->incremental_marking()->IsCompacting() && | |
| 2919 !ShouldSkipEvacuationSlotRecording(code)) { | |
| 2920 ASSERT(compacting_); | |
| 2921 | |
| 2922 // If the object is white then no slots were recorded on it yet. | |
| 2923 MarkBit mark_bit = Marking::MarkBitFrom(code); | |
| 2924 if (Marking::IsWhite(mark_bit)) return; | |
| 2925 | |
| 2926 invalidated_code_.Add(code); | |
| 2927 } | |
| 2928 } | |
| 2929 | |
| 2930 | |
| 2931 bool MarkCompactCollector::MarkInvalidatedCode() { | |
| 2932 bool code_marked = false; | |
| 2933 | |
| 2934 int length = invalidated_code_.length(); | |
| 2935 for (int i = 0; i < length; i++) { | |
| 2936 Code* code = invalidated_code_[i]; | |
| 2937 | |
| 2938 if (SetMarkBitsUnderInvalidatedCode(code, true)) { | |
| 2939 code_marked = true; | |
| 2940 } | |
| 2941 } | |
| 2942 | |
| 2943 return code_marked; | |
| 2944 } | |
| 2945 | |
| 2946 | |
| 2947 void MarkCompactCollector::RemoveDeadInvalidatedCode() { | |
| 2948 int length = invalidated_code_.length(); | |
| 2949 for (int i = 0; i < length; i++) { | |
| 2950 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL; | |
| 2951 } | |
| 2952 } | |
| 2953 | |
| 2954 | |
| 2955 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) { | |
| 2956 int length = invalidated_code_.length(); | |
| 2957 for (int i = 0; i < length; i++) { | |
| 2958 Code* code = invalidated_code_[i]; | |
| 2959 if (code != NULL) { | |
| 2960 code->Iterate(visitor); | |
| 2961 SetMarkBitsUnderInvalidatedCode(code, false); | |
| 2962 } | |
| 2963 } | |
| 2964 invalidated_code_.Rewind(0); | |
| 2965 } | |
| 2966 | |
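Taken together, the invalidated-code bookkeeping added here follows one lifecycle per collection: `InvalidateCode()` records a code object whose recorded slots are no longer trustworthy, `RemoveDeadInvalidatedCode()` (called from `SweepSpaces()` below) drops entries that did not survive marking, `MarkInvalidatedCode()` sets mark bits under the survivors so slot updating can filter them out, and `ProcessInvalidatedCode()` revisits each survivor once and clears the bits again. A toy standalone model of that flow, with invented types that track only what the model needs:

```cpp
#include <cassert>
#include <vector>

// Invented stand-in for a code object (not a V8 type).
struct ToyCode {
  bool survives_marking;
  bool bits_set = false;
  bool revisited = false;
};

int main() {
  std::vector<ToyCode*> invalidated;        // analogue of invalidated_code_
  ToyCode live{true}, dead{false};

  // InvalidateCode(): record objects whose slots can no longer be trusted.
  invalidated.push_back(&live);
  invalidated.push_back(&dead);

  // RemoveDeadInvalidatedCode(): null out entries that did not survive marking.
  for (ToyCode*& c : invalidated) {
    if (!c->survives_marking) c = nullptr;
  }

  // MarkInvalidatedCode(): flag surviving ranges so slot updates can skip them.
  bool filtering_required = false;
  for (ToyCode* c : invalidated) {
    if (c != nullptr) { c->bits_set = true; filtering_required = true; }
  }

  // ProcessInvalidatedCode(): revisit survivors once, then clear the flags.
  for (ToyCode* c : invalidated) {
    if (c != nullptr) { c->revisited = true; c->bits_set = false; }
  }
  invalidated.clear();                      // Rewind(0)

  assert(filtering_required && live.revisited && !live.bits_set);
  assert(!dead.revisited);
  return 0;
}
```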
| 2967 | |
| 2847 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 2968 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
| 2969 bool code_slots_filtering_required = MarkInvalidatedCode(); | |
| 2970 | |
| 2848 EvacuateNewSpace(); | 2971 EvacuateNewSpace(); |
| 2849 EvacuatePages(); | 2972 EvacuatePages(); |
| 2850 | 2973 |
| 2851 // Second pass: find pointers to new space and update them. | 2974 // Second pass: find pointers to new space and update them. |
| 2852 PointersUpdatingVisitor updating_visitor(heap()); | 2975 PointersUpdatingVisitor updating_visitor(heap()); |
| 2853 | 2976 |
| 2854 // Update pointers in to space. | 2977 // Update pointers in to space. |
| 2855 SemiSpaceIterator to_it(heap()->new_space()->bottom(), | 2978 SemiSpaceIterator to_it(heap()->new_space()->bottom(), |
| 2856 heap()->new_space()->top()); | 2979 heap()->new_space()->top()); |
| 2857 for (HeapObject* object = to_it.Next(); | 2980 for (HeapObject* object = to_it.Next(); |
| 2858 object != NULL; | 2981 object != NULL; |
| 2859 object = to_it.Next()) { | 2982 object = to_it.Next()) { |
| 2860 Map* map = object->map(); | 2983 Map* map = object->map(); |
| 2861 object->IterateBody(map->instance_type(), | 2984 object->IterateBody(map->instance_type(), |
| 2862 object->SizeFromMap(map), | 2985 object->SizeFromMap(map), |
| 2863 &updating_visitor); | 2986 &updating_visitor); |
| 2864 } | 2987 } |
| 2865 | 2988 |
| 2866 // Update roots. | 2989 // Update roots. |
| 2867 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 2990 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
| 2868 LiveObjectList::IterateElements(&updating_visitor); | 2991 LiveObjectList::IterateElements(&updating_visitor); |
| 2869 | 2992 |
| 2870 { | 2993 { |
| 2871 StoreBufferRebuildScope scope(heap_, | 2994 StoreBufferRebuildScope scope(heap_, |
| 2872 heap_->store_buffer(), | 2995 heap_->store_buffer(), |
| 2873 &Heap::ScavengeStoreBufferCallback); | 2996 &Heap::ScavengeStoreBufferCallback); |
| 2874 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); | 2997 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); |
| 2875 } | 2998 } |
| 2876 | 2999 |
| 2877 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_); | 3000 SlotsBuffer::UpdateSlotsRecordedIn(heap_, |
| 3001 migration_slots_buffer_, | |
| 3002 code_slots_filtering_required); | |
| 2878 if (FLAG_trace_fragmentation) { | 3003 if (FLAG_trace_fragmentation) { |
| 2879 PrintF(" migration slots buffer: %d\n", | 3004 PrintF(" migration slots buffer: %d\n", |
| 2880 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); | 3005 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); |
| 2881 } | 3006 } |
| 2882 | 3007 |
| 3008 if (compacting_ && was_marked_incrementally_) { | |
| 3009 // It's difficult to filter out slots recorded for large objects. | |
| 3010 LargeObjectIterator it(heap_->lo_space()); | |
| 3011 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | |
| 3012 if (obj->IsFixedArray() || obj->IsCode()) { | |
| 3013 obj->Iterate(&updating_visitor); | |
| 3014 } | |
| 3015 } | |
| 3016 } | |
| 3017 | |
| 2883 int npages = evacuation_candidates_.length(); | 3018 int npages = evacuation_candidates_.length(); |
| 2884 for (int i = 0; i < npages; i++) { | 3019 for (int i = 0; i < npages; i++) { |
| 2885 Page* p = evacuation_candidates_[i]; | 3020 Page* p = evacuation_candidates_[i]; |
| 2886 ASSERT(p->IsEvacuationCandidate() || | 3021 ASSERT(p->IsEvacuationCandidate() || |
| 2887 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3022 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 2888 | 3023 |
| 2889 if (p->IsEvacuationCandidate()) { | 3024 if (p->IsEvacuationCandidate()) { |
| 2890 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer()); | 3025 SlotsBuffer::UpdateSlotsRecordedIn(heap_, |
| 3026 p->slots_buffer(), | |
| 3027 code_slots_filtering_required); | |
| 2891 if (FLAG_trace_fragmentation) { | 3028 if (FLAG_trace_fragmentation) { |
| 2892 PrintF(" page %p slots buffer: %d\n", | 3029 PrintF(" page %p slots buffer: %d\n", |
| 2893 reinterpret_cast<void*>(p), | 3030 reinterpret_cast<void*>(p), |
| 2894 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 3031 SlotsBuffer::SizeOfChain(p->slots_buffer())); |
| 2895 } | 3032 } |
| 2896 | 3033 |
| 2897 // Important: skip list should be cleared only after roots were updated | 3034 // Important: skip list should be cleared only after roots were updated |
| 2898 // because root iteration traverses the stack and might have to find code | 3035 // because root iteration traverses the stack and might have to find code |
| 2899 // objects from non-updated pc pointing into evacuation candidate. | 3036 // objects from non-updated pc pointing into evacuation candidate. |
| 2900 SkipList* list = p->skip_list(); | 3037 SkipList* list = p->skip_list(); |
| (...skipping 51 matching lines...) | |
| 2952 heap_->UpdateReferencesInExternalStringTable( | 3089 heap_->UpdateReferencesInExternalStringTable( |
| 2953 &UpdateReferenceInExternalStringTableEntry); | 3090 &UpdateReferenceInExternalStringTableEntry); |
| 2954 | 3091 |
| 2955 // Update JSFunction pointers from the runtime profiler. | 3092 // Update JSFunction pointers from the runtime profiler. |
| 2956 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( | 3093 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( |
| 2957 &updating_visitor); | 3094 &updating_visitor); |
| 2958 | 3095 |
| 2959 EvacuationWeakObjectRetainer evacuation_object_retainer; | 3096 EvacuationWeakObjectRetainer evacuation_object_retainer; |
| 2960 heap()->ProcessWeakReferences(&evacuation_object_retainer); | 3097 heap()->ProcessWeakReferences(&evacuation_object_retainer); |
| 2961 | 3098 |
| 3099 // Visit invalidated code (we ignored all slots on it) and clear mark-bits | |
| 3100 // under it. | |
| 3101 ProcessInvalidatedCode(&updating_visitor); | |
| 3102 | |
| 2962 #ifdef DEBUG | 3103 #ifdef DEBUG |
| 2963 if (FLAG_verify_heap) { | 3104 if (FLAG_verify_heap) { |
| 2964 VerifyEvacuation(heap_); | 3105 VerifyEvacuation(heap_); |
| 2965 } | 3106 } |
| 2966 #endif | 3107 #endif |
| 2967 | 3108 |
| 2968 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); | 3109 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); |
| 2969 ASSERT(migration_slots_buffer_ == NULL); | 3110 ASSERT(migration_slots_buffer_ == NULL); |
| 2970 for (int i = 0; i < npages; i++) { | 3111 for (int i = 0; i < npages; i++) { |
| 2971 Page* p = evacuation_candidates_[i]; | 3112 Page* p = evacuation_candidates_[i]; |
| (...skipping 501 matching lines...) | |
| 3473 SweeperType how_to_sweep = | 3614 SweeperType how_to_sweep = |
| 3474 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; | 3615 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; |
| 3475 if (sweep_precisely_) how_to_sweep = PRECISE; | 3616 if (sweep_precisely_) how_to_sweep = PRECISE; |
| 3476 // Noncompacting collections simply sweep the spaces to clear the mark | 3617 // Noncompacting collections simply sweep the spaces to clear the mark |
| 3477 // bits and free the nonlive blocks (for old and map spaces). We sweep | 3618 // bits and free the nonlive blocks (for old and map spaces). We sweep |
| 3478 // the map space last because freeing non-live maps overwrites them and | 3619 // the map space last because freeing non-live maps overwrites them and |
| 3479 // the other spaces rely on possibly non-live maps to get the sizes for | 3620 // the other spaces rely on possibly non-live maps to get the sizes for |
| 3480 // non-live objects. | 3621 // non-live objects. |
| 3481 SweepSpace(heap()->old_pointer_space(), how_to_sweep); | 3622 SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
| 3482 SweepSpace(heap()->old_data_space(), how_to_sweep); | 3623 SweepSpace(heap()->old_data_space(), how_to_sweep); |
| 3624 | |
| 3625 RemoveDeadInvalidatedCode(); | |
| 3483 SweepSpace(heap()->code_space(), PRECISE); | 3626 SweepSpace(heap()->code_space(), PRECISE); |
| 3627 | |
| 3484 SweepSpace(heap()->cell_space(), PRECISE); | 3628 SweepSpace(heap()->cell_space(), PRECISE); |
| 3629 | |
| 3485 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); | 3630 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); |
| 3486 EvacuateNewSpaceAndCandidates(); | 3631 EvacuateNewSpaceAndCandidates(); |
| 3487 } | 3632 } |
| 3633 | |
| 3488 // ClearNonLiveTransitions depends on precise sweeping of map space to | 3634 // ClearNonLiveTransitions depends on precise sweeping of map space to |
| 3489 // detect whether unmarked map became dead in this collection or in one | 3635 // detect whether unmarked map became dead in this collection or in one |
| 3490 // of the previous ones. | 3636 // of the previous ones. |
| 3491 SweepSpace(heap()->map_space(), PRECISE); | 3637 SweepSpace(heap()->map_space(), PRECISE); |
| 3492 | 3638 |
| 3493 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size()); | 3639 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size()); |
| 3494 | 3640 |
| 3495 // Deallocate unmarked objects and clear marked bits for marked objects. | 3641 // Deallocate unmarked objects and clear marked bits for marked objects. |
| 3496 heap_->lo_space()->FreeUnmarkedObjects(); | 3642 heap_->lo_space()->FreeUnmarkedObjects(); |
| 3497 } | 3643 } |
| (...skipping 121 matching lines...) | |
| 3619 ++slot_idx; | 3765 ++slot_idx; |
| 3620 ASSERT(slot_idx < idx_); | 3766 ASSERT(slot_idx < idx_); |
| 3621 UpdateSlot(&v, | 3767 UpdateSlot(&v, |
| 3622 DecodeSlotType(slot), | 3768 DecodeSlotType(slot), |
| 3623 reinterpret_cast<Address>(slots_[slot_idx])); | 3769 reinterpret_cast<Address>(slots_[slot_idx])); |
| 3624 } | 3770 } |
| 3625 } | 3771 } |
| 3626 } | 3772 } |
| 3627 | 3773 |
| 3628 | 3774 |
| 3775 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) { | |
| 3776 PointersUpdatingVisitor v(heap); | |
| 3777 | |
| 3778 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { | |
| 3779 ObjectSlot slot = slots_[slot_idx]; | |
| 3780 if (!IsTypedSlot(slot)) { | |
| 3781 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) { | |
| 3782 UpdateSlot(slot); | |
| 3783 } | |
| 3784 } else { | |
| 3785 ++slot_idx; | |
| 3786 ASSERT(slot_idx < idx_); | |
| 3787 Address pc = reinterpret_cast<Address>(slots_[slot_idx]); | |
| 3788 if (!IsOnInvalidatedCodeObject(pc)) { | |
| 3789 UpdateSlot(&v, | |
| 3790 DecodeSlotType(slot), | |
| 3791 reinterpret_cast<Address>(slots_[slot_idx])); | |
| 3792 } | |
| 3793 } | |
| 3794 } | |
| 3795 } | |
| 3796 | |
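`UpdateSlotsWithFilter` is the slow path behind the new `code_slots_filtering_required` argument threaded through `EvacuateNewSpaceAndCandidates` above. A hedged sketch of the kind of dispatcher that argument implies, using stub types since the real `UpdateSlotsRecordedIn` is not shown in this diff:

```cpp
#include <cstddef>

// Stub types standing in for v8::internal::Heap and SlotsBuffer.
struct StubHeap {};

struct StubSlotsBuffer {
  StubSlotsBuffer* next = nullptr;
  void UpdateSlots(StubHeap*) { /* update every recorded slot */ }
  void UpdateSlotsWithFilter(StubHeap*) { /* skip slots on invalidated code */ }
};

// Assumed shape of the three-argument dispatcher: filter only when some
// invalidated code object actually had its mark bits set, because the
// filtered walk has to check every slot address against those bits.
static void UpdateSlotsRecordedIn(StubHeap* heap,
                                  StubSlotsBuffer* buffer,
                                  bool code_slots_filtering_required) {
  while (buffer != nullptr) {
    if (code_slots_filtering_required) {
      buffer->UpdateSlotsWithFilter(heap);
    } else {
      buffer->UpdateSlots(heap);
    }
    buffer = buffer->next;
  }
}

int main() {
  StubHeap heap;
  StubSlotsBuffer tail, head;
  head.next = &tail;
  UpdateSlotsRecordedIn(&heap, &head, /*code_slots_filtering_required=*/true);
  return 0;
}
```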
| 3797 | |
| 3629 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { | 3798 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { |
| 3630 return new SlotsBuffer(next_buffer); | 3799 return new SlotsBuffer(next_buffer); |
| 3631 } | 3800 } |
| 3632 | 3801 |
| 3633 | 3802 |
| 3634 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { | 3803 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { |
| 3635 delete buffer; | 3804 delete buffer; |
| 3636 } | 3805 } |
| 3637 | 3806 |
| 3638 | 3807 |
| 3639 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { | 3808 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { |
| 3640 SlotsBuffer* buffer = *buffer_address; | 3809 SlotsBuffer* buffer = *buffer_address; |
| 3641 while (buffer != NULL) { | 3810 while (buffer != NULL) { |
| 3642 SlotsBuffer* next_buffer = buffer->next(); | 3811 SlotsBuffer* next_buffer = buffer->next(); |
| 3643 DeallocateBuffer(buffer); | 3812 DeallocateBuffer(buffer); |
| 3644 buffer = next_buffer; | 3813 buffer = next_buffer; |
| 3645 } | 3814 } |
| 3646 *buffer_address = NULL; | 3815 *buffer_address = NULL; |
| 3647 } | 3816 } |
| 3648 | 3817 |
| 3649 | 3818 |
| 3650 } } // namespace v8::internal | 3819 } } // namespace v8::internal |