Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 7983045: Notify collector about lazily deoptimized code objects. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: clear lo from rescan flag, avoid ShouldSweepLazily predicate in preparation (created 9 years, 3 months ago)
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 10 matching lines...)
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #include "compilation-cache.h" 30 #include "compilation-cache.h"
31 #include "deoptimizer.h"
31 #include "execution.h" 32 #include "execution.h"
32 #include "gdb-jit.h" 33 #include "gdb-jit.h"
33 #include "global-handles.h" 34 #include "global-handles.h"
34 #include "heap-profiler.h" 35 #include "heap-profiler.h"
35 #include "ic-inl.h" 36 #include "ic-inl.h"
36 #include "incremental-marking.h" 37 #include "incremental-marking.h"
37 #include "liveobjectlist-inl.h" 38 #include "liveobjectlist-inl.h"
38 #include "mark-compact.h" 39 #include "mark-compact.h"
39 #include "objects-visiting.h" 40 #include "objects-visiting.h"
40 #include "objects-visiting-inl.h" 41 #include "objects-visiting-inl.h"
(...skipping 11 matching lines...)
52 53
53 // ------------------------------------------------------------------------- 54 // -------------------------------------------------------------------------
54 // MarkCompactCollector 55 // MarkCompactCollector
55 56
56 MarkCompactCollector::MarkCompactCollector() : // NOLINT 57 MarkCompactCollector::MarkCompactCollector() : // NOLINT
57 #ifdef DEBUG 58 #ifdef DEBUG
58 state_(IDLE), 59 state_(IDLE),
59 #endif 60 #endif
60 sweep_precisely_(false), 61 sweep_precisely_(false),
61 compacting_(false), 62 compacting_(false),
63 was_marked_incrementally_(false),
62 collect_maps_(FLAG_collect_maps), 64 collect_maps_(FLAG_collect_maps),
63 tracer_(NULL), 65 tracer_(NULL),
64 migration_slots_buffer_(NULL), 66 migration_slots_buffer_(NULL),
65 #ifdef DEBUG 67 #ifdef DEBUG
66 live_young_objects_size_(0), 68 live_young_objects_size_(0),
67 live_old_pointer_objects_size_(0), 69 live_old_pointer_objects_size_(0),
68 live_old_data_objects_size_(0), 70 live_old_data_objects_size_(0),
69 live_code_objects_size_(0), 71 live_code_objects_size_(0),
70 live_map_objects_size_(0), 72 live_map_objects_size_(0),
71 live_cell_objects_size_(0), 73 live_cell_objects_size_(0),
(...skipping 159 matching lines...)
231 #endif 233 #endif
232 234
233 235
234 void MarkCompactCollector::AddEvacuationCandidate(Page* p) { 236 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
235 p->MarkEvacuationCandidate(); 237 p->MarkEvacuationCandidate();
236 evacuation_candidates_.Add(p); 238 evacuation_candidates_.Add(p);
237 } 239 }
238 240
239 241
240 bool MarkCompactCollector::StartCompaction() { 242 bool MarkCompactCollector::StartCompaction() {
241 // Don't start compaction if we are in the middle of incremental 243 if (!compacting_) {
242 // marking cycle. We did not collect any slots.
243 if (!compacting_ && !heap_->incremental_marking()->IsMarking()) {
244 ASSERT(evacuation_candidates_.length() == 0); 244 ASSERT(evacuation_candidates_.length() == 0);
245 245
246 CollectEvacuationCandidates(heap()->old_pointer_space()); 246 CollectEvacuationCandidates(heap()->old_pointer_space());
247 CollectEvacuationCandidates(heap()->old_data_space()); 247 CollectEvacuationCandidates(heap()->old_data_space());
248 CollectEvacuationCandidates(heap()->code_space()); 248 CollectEvacuationCandidates(heap()->code_space());
249 249
250 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists(); 250 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
251 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); 251 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
252 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); 252 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
253 253
254 compacting_ = evacuation_candidates_.length() > 0; 254 compacting_ = evacuation_candidates_.length() > 0;
255 } 255 }
256 256
257 return compacting_; 257 return compacting_;
258 } 258 }
259 259
260 260
261 void MarkCompactCollector::AbortCompaction() {
262 if (compacting_) {
263 int npages = evacuation_candidates_.length();
264 for (int i = 0; i < npages; i++) {
265 Page* p = evacuation_candidates_[i];
266 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
267 p->ClearEvacuationCandidate();
268 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
269 }
270 compacting_ = false;
271 evacuation_candidates_.Rewind(0);
272 }
273 ASSERT_EQ(0, evacuation_candidates_.length());
274 }
275
276
277 void MarkCompactCollector::CollectGarbage() { 261 void MarkCompactCollector::CollectGarbage() {
278 // Make sure that Prepare() has been called. The individual steps below will 262 // Make sure that Prepare() has been called. The individual steps below will
279 // update the state as they proceed. 263 // update the state as they proceed.
280 ASSERT(state_ == PREPARE_GC); 264 ASSERT(state_ == PREPARE_GC);
281 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); 265 ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
282 266
283 MarkLiveObjects(); 267 MarkLiveObjects();
284 ASSERT(heap_->incremental_marking()->IsStopped()); 268 ASSERT(heap_->incremental_marking()->IsStopped());
285 269
286 if (collect_maps_) ClearNonLiveTransitions(); 270 if (collect_maps_) ClearNonLiveTransitions();
(...skipping 169 matching lines...)
456 } 440 }
457 441
458 if (count > 0 && FLAG_trace_fragmentation) { 442 if (count > 0 && FLAG_trace_fragmentation) {
459 PrintF("Collected %d evacuation candidates for space %s\n", 443 PrintF("Collected %d evacuation candidates for space %s\n",
460 count, 444 count,
461 AllocationSpaceName(space->identity())); 445 AllocationSpaceName(space->identity()));
462 } 446 }
463 } 447 }
464 448
465 449
450 void MarkCompactCollector::AbortCompaction() {
451 if (compacting_) {
452 int npages = evacuation_candidates_.length();
453 for (int i = 0; i < npages; i++) {
454 Page* p = evacuation_candidates_[i];
455 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
456 p->ClearEvacuationCandidate();
457 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
458 }
459 compacting_ = false;
460 evacuation_candidates_.Rewind(0);
461 invalidated_code_.Rewind(0);
462 }
463 ASSERT_EQ(0, evacuation_candidates_.length());
464 }
465
466
466 void MarkCompactCollector::Prepare(GCTracer* tracer) { 467 void MarkCompactCollector::Prepare(GCTracer* tracer) {
467 FLAG_flush_code = false; 468 FLAG_flush_code = false;
468 469
470 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
471
469 // Disable collection of maps if incremental marking is enabled. 472 // Disable collection of maps if incremental marking is enabled.
470 // Map collection algorithm relies on a special map transition tree traversal 473 // Map collection algorithm relies on a special map transition tree traversal
471 // order which is not implemented for incremental marking. 474 // order which is not implemented for incremental marking.
472 collect_maps_ = FLAG_collect_maps && 475 collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
473 !heap()->incremental_marking()->IsMarking();
474 476
475 // Rather than passing the tracer around we stash it in a static member 477 // Rather than passing the tracer around we stash it in a static member
476 // variable. 478 // variable.
477 tracer_ = tracer; 479 tracer_ = tracer;
478 480
479 #ifdef DEBUG 481 #ifdef DEBUG
480 ASSERT(state_ == IDLE); 482 ASSERT(state_ == IDLE);
481 state_ = PREPARE_GC; 483 state_ = PREPARE_GC;
482 #endif 484 #endif
483 ASSERT(!FLAG_always_compact || !FLAG_never_compact); 485 ASSERT(!FLAG_always_compact || !FLAG_never_compact);
484 486
485 if (collect_maps_) CreateBackPointers(); 487 if (collect_maps_) CreateBackPointers();
486 #ifdef ENABLE_GDB_JIT_INTERFACE 488 #ifdef ENABLE_GDB_JIT_INTERFACE
487 if (FLAG_gdbjit) { 489 if (FLAG_gdbjit) {
488 // If GDBJIT interface is active disable compaction. 490 // If GDBJIT interface is active disable compaction.
489 compacting_collection_ = false; 491 compacting_collection_ = false;
490 } 492 }
491 #endif 493 #endif
492 494
493 // Clear marking bits for precise sweeping to collect all garbage. 495 // Clear marking bits for precise sweeping to collect all garbage.
494 if (heap()->incremental_marking()->IsMarking() && PreciseSweepingRequired()) { 496 if (was_marked_incrementally_ && PreciseSweepingRequired()) {
495 heap()->incremental_marking()->Abort(); 497 heap()->incremental_marking()->Abort();
496 ClearMarkbits(heap_); 498 ClearMarkbits(heap_);
497 AbortCompaction(); 499 AbortCompaction();
500 was_marked_incrementally_ = false;
498 } 501 }
499 502
500 if (!FLAG_never_compact) StartCompaction(); 503 // Don't start compaction if we are in the middle of incremental
504 // marking cycle. We did not collect any slots.
505 if (!FLAG_never_compact && !was_marked_incrementally_) {
506 StartCompaction();
507 }
501 508
502 PagedSpaces spaces; 509 PagedSpaces spaces;
503 for (PagedSpace* space = spaces.next(); 510 for (PagedSpace* space = spaces.next();
504 space != NULL; 511 space != NULL;
505 space = spaces.next()) { 512 space = spaces.next()) {
506 space->PrepareForMarkCompact(); 513 space->PrepareForMarkCompact();
507 } 514 }
508 515
509 #ifdef DEBUG 516 #ifdef DEBUG
510 if (!heap()->incremental_marking()->IsMarking()) { 517 if (!was_marked_incrementally_) {
511 VerifyMarkbitsAreClean(); 518 VerifyMarkbitsAreClean();
512 } 519 }
513 #endif 520 #endif
514 521
515 #ifdef DEBUG 522 #ifdef DEBUG
516 live_bytes_ = 0; 523 live_bytes_ = 0;
517 live_young_objects_size_ = 0; 524 live_young_objects_size_ = 0;
518 live_old_pointer_objects_size_ = 0; 525 live_old_pointer_objects_size_ = 0;
519 live_old_data_objects_size_ = 0; 526 live_old_data_objects_size_ = 0;
520 live_code_objects_size_ = 0; 527 live_code_objects_size_ = 0;
(...skipping 1445 matching lines...)
1966 1973
1967 void MarkCompactCollector::MarkLiveObjects() { 1974 void MarkCompactCollector::MarkLiveObjects() {
1968 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); 1975 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
1969 // The recursive GC marker detects when it is nearing stack overflow, 1976 // The recursive GC marker detects when it is nearing stack overflow,
1970 // and switches to a different marking system. JS interrupts interfere 1977 // and switches to a different marking system. JS interrupts interfere
1971 // with the C stack limit check. 1978 // with the C stack limit check.
1972 PostponeInterruptsScope postpone(heap()->isolate()); 1979 PostponeInterruptsScope postpone(heap()->isolate());
1973 1980
1974 bool incremental_marking_overflowed = false; 1981 bool incremental_marking_overflowed = false;
1975 IncrementalMarking* incremental_marking = heap_->incremental_marking(); 1982 IncrementalMarking* incremental_marking = heap_->incremental_marking();
1976 if (incremental_marking->IsMarking()) { 1983 if (was_marked_incrementally_) {
1977 // Finalize the incremental marking and check whether we had an overflow. 1984 // Finalize the incremental marking and check whether we had an overflow.
1978 // Both markers use grey color to mark overflowed objects so 1985 // Both markers use grey color to mark overflowed objects so
1979 // non-incremental marker can deal with them as if overflow 1986 // non-incremental marker can deal with them as if overflow
1980 // occurred during normal marking. 1987 // occurred during normal marking.
1981 // But incremental marker uses a separate marking deque 1988 // But incremental marker uses a separate marking deque
1982 // so we have to explicitly copy its overflow state. 1989 // so we have to explicitly copy its overflow state.
1983 incremental_marking->Finalize(); 1990 incremental_marking->Finalize();
1984 incremental_marking_overflowed = 1991 incremental_marking_overflowed =
1985 incremental_marking->marking_deque()->overflowed(); 1992 incremental_marking->marking_deque()->overflowed();
1986 incremental_marking->marking_deque()->ClearOverflowed(); 1993 incremental_marking->marking_deque()->ClearOverflowed();
(...skipping 850 matching lines...)
2837 // Clear marking bits for current cell. 2844 // Clear marking bits for current cell.
2838 cells[cell_index] = 0; 2845 cells[cell_index] = 0;
2839 } 2846 }
2840 if (free_start != p->ObjectAreaEnd()) { 2847 if (free_start != p->ObjectAreaEnd()) {
2841 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); 2848 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
2842 } 2849 }
2843 p->ResetLiveBytes(); 2850 p->ResetLiveBytes();
2844 } 2851 }
2845 2852
2846 2853
2854 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
2855 Page* p = Page::FromAddress(code->address());
2856
2857 if (p->IsEvacuationCandidate() ||
2858 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
2859 return false;
2860 }
2861
2862 Address code_start = code->address();
2863 Address code_end = code_start + code->Size();
2864
2865 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
2866 uint32_t end_index =
2867 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
2868
2869 Bitmap* b = p->markbits();
2870
2871 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
2872 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
2873
2874 MarkBit::CellType* start_cell = start_mark_bit.cell();
2875 MarkBit::CellType* end_cell = end_mark_bit.cell();
2876
2877 if (value) {
2878 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
2879 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
2880
2881 if (start_cell == end_cell) {
2882 *start_cell |= start_mask & end_mask;
2883 } else {
2884 *start_cell |= start_mask;
2885 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
2886 *cell = ~0;
2887 }
2888 *end_cell |= end_mask;
2889 }
2890 } else {
2891 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
2892 *cell = 0;
2893 }
2894 }
2895
2896 return true;
2897 }
2898
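
A side note for reviewers, not part of the patch: the start_mask / end_mask arithmetic above is easiest to verify in isolation. Below is a minimal standalone C++ sketch (not V8 code) that sets every bit in an inclusive index range of a flat bitmap of 32-bit cells using the same trick; the cell width, the SetBitRange name, and the sample indices are illustrative assumptions.

#include <cstdint>
#include <cstdio>
#include <vector>

// Set every bit in the inclusive range [start_index, end_index], mirroring
// the mask computation in SetMarkBitsUnderInvalidatedCode.
void SetBitRange(std::vector<uint32_t>* bitmap,
                 uint32_t start_index,
                 uint32_t end_index) {
  uint32_t* start_cell = &(*bitmap)[start_index >> 5];
  uint32_t* end_cell = &(*bitmap)[end_index >> 5];

  uint32_t start_bit = 1u << (start_index & 31);
  uint32_t end_bit = 1u << (end_index & 31);

  uint32_t start_mask = ~(start_bit - 1);  // the start bit and every bit above it
  uint32_t end_mask = (end_bit << 1) - 1;  // the end bit and every bit below it
                                           // (unsigned wrap-around yields ~0u when
                                           //  the end bit is the top bit of the cell)
  if (start_cell == end_cell) {
    *start_cell |= start_mask & end_mask;
  } else {
    *start_cell |= start_mask;
    for (uint32_t* cell = start_cell + 1; cell < end_cell; cell++) *cell = ~0u;
    *end_cell |= end_mask;
  }
}

int main() {
  std::vector<uint32_t> bitmap(4, 0);
  SetBitRange(&bitmap, 5, 70);  // spans three cells: ffffffe0 ffffffff 0000007f 00000000
  for (uint32_t cell : bitmap) std::printf("%08x\n", static_cast<unsigned>(cell));
  return 0;
}

The clearing path (the value == false branch above) needs no masks at all: it simply zeroes every cell from start_cell through end_cell.
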
2899
2900 static bool IsOnInvalidatedCodeObject(Address addr) {
2901 // We did not record any slots in large objects thus
2902 // we can safely go to the page from the slot address.
2903 Page* p = Page::FromAddress(addr);
2904
2905 // First check owner's identity because old pointer and old data spaces
2906 // are swept lazily and might still have non-zero mark-bits on some
2907 // pages.
2908 if (p->owner()->identity() != CODE_SPACE) return false;
2909
2910 // In code space only bits on evacuation candidates (but we don't record
2911 // any slots on them) and under invalidated code objects are non-zero.
2912 MarkBit mark_bit =
2913 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
2914
2915 return mark_bit.Get();
2916 }
2917
2918
2919 void MarkCompactCollector::InvalidateCode(Code* code) {
2920 if (heap_->incremental_marking()->IsCompacting() &&
2921 !ShouldSkipEvacuationSlotRecording(code)) {
2922 ASSERT(compacting_);
2923
2924 // If the object is white, then no slots were recorded on it yet.
2925 MarkBit mark_bit = Marking::MarkBitFrom(code);
2926 if (Marking::IsWhite(mark_bit)) return;
2927
2928 invalidated_code_.Add(code);
2929 }
2930 }
2931
2932
2933 bool MarkCompactCollector::MarkInvalidatedCode() {
2934 bool code_marked = false;
2935
2936 int length = invalidated_code_.length();
2937 for (int i = 0; i < length; i++) {
2938 Code* code = invalidated_code_[i];
2939
2940 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
2941 code_marked = true;
2942 }
2943 }
2944
2945 return code_marked;
2946 }
2947
2948
2949 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
2950 int length = invalidated_code_.length();
2951 for (int i = 0; i < length; i++) {
2952 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
2953 }
2954 }
2955
2956
2957 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
2958 int length = invalidated_code_.length();
2959 for (int i = 0; i < length; i++) {
2960 Code* code = invalidated_code_[i];
2961 if (code != NULL) {
2962 code->Iterate(visitor);
2963 SetMarkBitsUnderInvalidatedCode(code, false);
2964 }
2965 }
2966 invalidated_code_.Rewind(0);
2967 }
2968
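
A side note for reviewers, not part of the patch: RemoveDeadInvalidatedCode and ProcessInvalidatedCode above cooperate through a simple null-then-revisit pattern on invalidated_code_. The standalone C++ sketch below (not V8 code) mimics that pattern; FakeCode and its marked flag are illustrative stand-ins for real Code objects and their mark bits.

#include <cstdio>
#include <vector>

struct FakeCode {
  bool marked;  // stand-in for "survived marking"
  int id;
};

int main() {
  FakeCode a = {true, 1}, b = {false, 2}, c = {true, 3};
  std::vector<FakeCode*> invalidated = {&a, &b, &c};

  // RemoveDeadInvalidatedCode: null out entries whose code object died,
  // keeping the list indices stable.
  for (FakeCode*& code : invalidated) {
    if (!code->marked) code = nullptr;
  }

  // ProcessInvalidatedCode: revisit the survivors once, then empty the list.
  for (FakeCode* code : invalidated) {
    if (code != nullptr) std::printf("revisit code %d\n", code->id);
  }
  invalidated.clear();
  return 0;
}
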
2969
2847 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { 2970 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
2971 bool code_slots_filtering_required = MarkInvalidatedCode();
2972
2848 EvacuateNewSpace(); 2973 EvacuateNewSpace();
2849 EvacuatePages(); 2974 EvacuatePages();
2850 2975
2851 // Second pass: find pointers to new space and update them. 2976 // Second pass: find pointers to new space and update them.
2852 PointersUpdatingVisitor updating_visitor(heap()); 2977 PointersUpdatingVisitor updating_visitor(heap());
2853 2978
2854 // Update pointers in to space. 2979 // Update pointers in to space.
2855 SemiSpaceIterator to_it(heap()->new_space()->bottom(), 2980 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
2856 heap()->new_space()->top()); 2981 heap()->new_space()->top());
2857 for (HeapObject* object = to_it.Next(); 2982 for (HeapObject* object = to_it.Next();
2858 object != NULL; 2983 object != NULL;
2859 object = to_it.Next()) { 2984 object = to_it.Next()) {
2860 Map* map = object->map(); 2985 Map* map = object->map();
2861 object->IterateBody(map->instance_type(), 2986 object->IterateBody(map->instance_type(),
2862 object->SizeFromMap(map), 2987 object->SizeFromMap(map),
2863 &updating_visitor); 2988 &updating_visitor);
2864 } 2989 }
2865 2990
2866 // Update roots. 2991 // Update roots.
2867 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 2992 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
2868 LiveObjectList::IterateElements(&updating_visitor); 2993 LiveObjectList::IterateElements(&updating_visitor);
2869 2994
2870 { 2995 {
2871 StoreBufferRebuildScope scope(heap_, 2996 StoreBufferRebuildScope scope(heap_,
2872 heap_->store_buffer(), 2997 heap_->store_buffer(),
2873 &Heap::ScavengeStoreBufferCallback); 2998 &Heap::ScavengeStoreBufferCallback);
2874 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); 2999 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
2875 } 3000 }
2876 3001
2877 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_); 3002 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3003 migration_slots_buffer_,
3004 code_slots_filtering_required);
2878 if (FLAG_trace_fragmentation) { 3005 if (FLAG_trace_fragmentation) {
2879 PrintF(" migration slots buffer: %d\n", 3006 PrintF(" migration slots buffer: %d\n",
2880 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); 3007 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
2881 } 3008 }
2882 3009
3010 if (compacting_ && was_marked_incrementally_) {
3011 // It's difficult to filter out slots recorded for large objects.
3012 LargeObjectIterator it(heap_->lo_space());
3013 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3014 Page* p = Page::FromAddress(obj->address());
3015 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3016 obj->Iterate(&updating_visitor);
3017 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3018 }
3019 }
3020 }
3021
2883 int npages = evacuation_candidates_.length(); 3022 int npages = evacuation_candidates_.length();
2884 for (int i = 0; i < npages; i++) { 3023 for (int i = 0; i < npages; i++) {
2885 Page* p = evacuation_candidates_[i]; 3024 Page* p = evacuation_candidates_[i];
2886 ASSERT(p->IsEvacuationCandidate() || 3025 ASSERT(p->IsEvacuationCandidate() ||
2887 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3026 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
2888 3027
2889 if (p->IsEvacuationCandidate()) { 3028 if (p->IsEvacuationCandidate()) {
2890 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer()); 3029 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3030 p->slots_buffer(),
3031 code_slots_filtering_required);
2891 if (FLAG_trace_fragmentation) { 3032 if (FLAG_trace_fragmentation) {
2892 PrintF(" page %p slots buffer: %d\n", 3033 PrintF(" page %p slots buffer: %d\n",
2893 reinterpret_cast<void*>(p), 3034 reinterpret_cast<void*>(p),
2894 SlotsBuffer::SizeOfChain(p->slots_buffer())); 3035 SlotsBuffer::SizeOfChain(p->slots_buffer()));
2895 } 3036 }
2896 3037
2897 // Important: skip list should be cleared only after roots were updated 3038 // Important: skip list should be cleared only after roots were updated
2898 // because root iteration traverses the stack and might have to find code 3039 // because root iteration traverses the stack and might have to find code
2899 // objects from non-updated pc pointing into evacuation candidate. 3040 // objects from non-updated pc pointing into evacuation candidate.
2900 SkipList* list = p->skip_list(); 3041 SkipList* list = p->skip_list();
(...skipping 51 matching lines...)
2952 heap_->UpdateReferencesInExternalStringTable( 3093 heap_->UpdateReferencesInExternalStringTable(
2953 &UpdateReferenceInExternalStringTableEntry); 3094 &UpdateReferenceInExternalStringTableEntry);
2954 3095
2955 // Update JSFunction pointers from the runtime profiler. 3096 // Update JSFunction pointers from the runtime profiler.
2956 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( 3097 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
2957 &updating_visitor); 3098 &updating_visitor);
2958 3099
2959 EvacuationWeakObjectRetainer evacuation_object_retainer; 3100 EvacuationWeakObjectRetainer evacuation_object_retainer;
2960 heap()->ProcessWeakReferences(&evacuation_object_retainer); 3101 heap()->ProcessWeakReferences(&evacuation_object_retainer);
2961 3102
3103 // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3104 // under it.
3105 ProcessInvalidatedCode(&updating_visitor);
3106
2962 #ifdef DEBUG 3107 #ifdef DEBUG
2963 if (FLAG_verify_heap) { 3108 if (FLAG_verify_heap) {
2964 VerifyEvacuation(heap_); 3109 VerifyEvacuation(heap_);
2965 } 3110 }
2966 #endif 3111 #endif
2967 3112
2968 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); 3113 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
2969 ASSERT(migration_slots_buffer_ == NULL); 3114 ASSERT(migration_slots_buffer_ == NULL);
2970 for (int i = 0; i < npages; i++) { 3115 for (int i = 0; i < npages; i++) {
2971 Page* p = evacuation_candidates_[i]; 3116 Page* p = evacuation_candidates_[i];
(...skipping 501 matching lines...)
3473 SweeperType how_to_sweep = 3618 SweeperType how_to_sweep =
3474 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; 3619 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
3475 if (sweep_precisely_) how_to_sweep = PRECISE; 3620 if (sweep_precisely_) how_to_sweep = PRECISE;
3476 // Noncompacting collections simply sweep the spaces to clear the mark 3621 // Noncompacting collections simply sweep the spaces to clear the mark
3477 // bits and free the nonlive blocks (for old and map spaces). We sweep 3622 // bits and free the nonlive blocks (for old and map spaces). We sweep
3478 // the map space last because freeing non-live maps overwrites them and 3623 // the map space last because freeing non-live maps overwrites them and
3479 // the other spaces rely on possibly non-live maps to get the sizes for 3624 // the other spaces rely on possibly non-live maps to get the sizes for
3480 // non-live objects. 3625 // non-live objects.
3481 SweepSpace(heap()->old_pointer_space(), how_to_sweep); 3626 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
3482 SweepSpace(heap()->old_data_space(), how_to_sweep); 3627 SweepSpace(heap()->old_data_space(), how_to_sweep);
3628
3629 RemoveDeadInvalidatedCode();
3483 SweepSpace(heap()->code_space(), PRECISE); 3630 SweepSpace(heap()->code_space(), PRECISE);
3631
3484 SweepSpace(heap()->cell_space(), PRECISE); 3632 SweepSpace(heap()->cell_space(), PRECISE);
3633
3485 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); 3634 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3486 EvacuateNewSpaceAndCandidates(); 3635 EvacuateNewSpaceAndCandidates();
3487 } 3636 }
3637
3488 // ClearNonLiveTransitions depends on precise sweeping of map space to 3638 // ClearNonLiveTransitions depends on precise sweeping of map space to
3489 // detect whether unmarked map became dead in this collection or in one 3639 // detect whether unmarked map became dead in this collection or in one
3490 // of the previous ones. 3640 // of the previous ones.
3491 SweepSpace(heap()->map_space(), PRECISE); 3641 SweepSpace(heap()->map_space(), PRECISE);
3492 3642
3493 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size()); 3643 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size());
3494 3644
3495 // Deallocate unmarked objects and clear marked bits for marked objects. 3645 // Deallocate unmarked objects and clear marked bits for marked objects.
3496 heap_->lo_space()->FreeUnmarkedObjects(); 3646 heap_->lo_space()->FreeUnmarkedObjects();
3497 } 3647 }
(...skipping 121 matching lines...)
3619 ++slot_idx; 3769 ++slot_idx;
3620 ASSERT(slot_idx < idx_); 3770 ASSERT(slot_idx < idx_);
3621 UpdateSlot(&v, 3771 UpdateSlot(&v,
3622 DecodeSlotType(slot), 3772 DecodeSlotType(slot),
3623 reinterpret_cast<Address>(slots_[slot_idx])); 3773 reinterpret_cast<Address>(slots_[slot_idx]));
3624 } 3774 }
3625 } 3775 }
3626 } 3776 }
3627 3777
3628 3778
3779 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
3780 PointersUpdatingVisitor v(heap);
3781
3782 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
3783 ObjectSlot slot = slots_[slot_idx];
3784 if (!IsTypedSlot(slot)) {
3785 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
3786 UpdateSlot(slot);
3787 }
3788 } else {
3789 ++slot_idx;
3790 ASSERT(slot_idx < idx_);
3791 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
3792 if (!IsOnInvalidatedCodeObject(pc)) {
3793 UpdateSlot(&v,
3794 DecodeSlotType(slot),
3795 reinterpret_cast<Address>(slots_[slot_idx]));
3796 }
3797 }
3798 }
3799 }
3800
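
A side note for reviewers, not part of the patch: UpdateSlotsWithFilter above differs from the unfiltered update loop only in that every recorded slot is first checked against IsOnInvalidatedCodeObject, so slots lying inside invalidated code are skipped and later re-derived by ProcessInvalidatedCode. The standalone C++ sketch below (not V8 code) shows that filtering shape; the address-range predicate and the sample addresses are illustrative assumptions (the real check consults the page's mark bitmap).

#include <cstdint>
#include <cstdio>
#include <vector>

using Address = std::uintptr_t;

// Stand-in for IsOnInvalidatedCodeObject: here just an address-range test.
static bool IsOnInvalidatedCode(Address addr, Address lo, Address hi) {
  return addr >= lo && addr < hi;
}

int main() {
  std::vector<Address> slots = {0x1000, 0x2008, 0x3010};
  const Address invalidated_start = 0x2000;
  const Address invalidated_end = 0x3000;

  for (Address slot : slots) {
    if (IsOnInvalidatedCode(slot, invalidated_start, invalidated_end)) {
      // Skipped: the slot sits inside an invalidated code object.
      std::printf("skip   %#lx\n", static_cast<unsigned long>(slot));
    } else {
      // Updated: in the real collector this would rewrite the pointer.
      std::printf("update %#lx\n", static_cast<unsigned long>(slot));
    }
  }
  return 0;
}
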
3801
3629 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { 3802 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
3630 return new SlotsBuffer(next_buffer); 3803 return new SlotsBuffer(next_buffer);
3631 } 3804 }
3632 3805
3633 3806
3634 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { 3807 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
3635 delete buffer; 3808 delete buffer;
3636 } 3809 }
3637 3810
3638 3811
3639 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { 3812 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
3640 SlotsBuffer* buffer = *buffer_address; 3813 SlotsBuffer* buffer = *buffer_address;
3641 while (buffer != NULL) { 3814 while (buffer != NULL) {
3642 SlotsBuffer* next_buffer = buffer->next(); 3815 SlotsBuffer* next_buffer = buffer->next();
3643 DeallocateBuffer(buffer); 3816 DeallocateBuffer(buffer);
3644 buffer = next_buffer; 3817 buffer = next_buffer;
3645 } 3818 }
3646 *buffer_address = NULL; 3819 *buffer_address = NULL;
3647 } 3820 }
3648 3821
3649 3822
3650 } } // namespace v8::internal 3823 } } // namespace v8::internal