| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 3362 matching lines...) |
| 3373 HeapObject* heap_object = HeapObject::cast(object); | 3373 HeapObject* heap_object = HeapObject::cast(object); |
| 3374 MapWord map_word = heap_object->map_word(); | 3374 MapWord map_word = heap_object->map_word(); |
| 3375 if (map_word.IsForwardingAddress()) { | 3375 if (map_word.IsForwardingAddress()) { |
| 3376 return map_word.ToForwardingAddress(); | 3376 return map_word.ToForwardingAddress(); |
| 3377 } | 3377 } |
| 3378 } | 3378 } |
| 3379 return object; | 3379 return object; |
| 3380 } | 3380 } |
| 3381 }; | 3381 }; |
| 3382 | 3382 |
| 3383 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, | 3383 int MarkCompactCollector::Sweeper::RawSweep( |
| 3384 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, | 3384 Page* p, FreeListRebuildingMode free_list_mode, |
| 3385 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, | 3385 FreeSpaceTreatmentMode free_space_mode) { |
| 3386 MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode, | 3386 Space* space = p->owner(); |
| 3387 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> | 3387 DCHECK_NOT_NULL(space); |
| 3388 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, | 3388 DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE || |
| 3389 ObjectVisitor* v) { | 3389 space->identity() == CODE_SPACE || space->identity() == MAP_SPACE); |
| 3390 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); | 3390 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); |
| 3391 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); | 3391 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); |
| 3392 DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) || | |
| 3393 (skip_list_mode == REBUILD_SKIP_LIST)); | |
| 3394 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | |
| 3395 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); | |
| 3396 | 3392 |
| 3397 // Before we sweep objects on the page, we free dead array buffers which | 3393 // Before we sweep objects on the page, we free dead array buffers which |
| 3398 // requires valid mark bits. | 3394 // requires valid mark bits. |
| 3399 ArrayBufferTracker::FreeDead(p); | 3395 ArrayBufferTracker::FreeDead(p); |
| 3400 | 3396 |
| 3401 Address free_start = p->area_start(); | 3397 Address free_start = p->area_start(); |
| 3402 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 3398 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| 3403 | 3399 |
| 3404 // If we use the skip list for code space pages, we have to lock the skip | 3400 // If we use the skip list for code space pages, we have to lock the skip |
| 3405 // list because it could be accessed concurrently by the runtime or the | 3401 // list because it could be accessed concurrently by the runtime or the |
| 3406 // deoptimizer. | 3402 // deoptimizer. |
| | 3403 const bool rebuild_skip_list = |
| | 3404 space->identity() == CODE_SPACE && p->skip_list() != nullptr; |
| 3407 SkipList* skip_list = p->skip_list(); | 3405 SkipList* skip_list = p->skip_list(); |
| 3408 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { | 3406 if (rebuild_skip_list) { |
| 3409 skip_list->Clear(); | 3407 skip_list->Clear(); |
| 3410 } | 3408 } |
| 3411 | 3409 |
| 3412 intptr_t freed_bytes = 0; | 3410 intptr_t freed_bytes = 0; |
| 3413 intptr_t max_freed_bytes = 0; | 3411 intptr_t max_freed_bytes = 0; |
| 3414 int curr_region = -1; | 3412 int curr_region = -1; |
| 3415 | 3413 |
| 3416 LiveObjectIterator<kBlackObjects> it(p); | 3414 LiveObjectIterator<kBlackObjects> it(p); |
| 3417 HeapObject* object = NULL; | 3415 HeapObject* object = NULL; |
| 3418 while ((object = it.Next()) != NULL) { | 3416 while ((object = it.Next()) != NULL) { |
| 3419 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3417 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 3420 Address free_end = object->address(); | 3418 Address free_end = object->address(); |
| 3421 if (free_end != free_start) { | 3419 if (free_end != free_start) { |
| 3422 int size = static_cast<int>(free_end - free_start); | 3420 int size = static_cast<int>(free_end - free_start); |
| 3423 if (free_space_mode == ZAP_FREE_SPACE) { | 3421 if (free_space_mode == ZAP_FREE_SPACE) { |
| 3424 memset(free_start, 0xcc, size); | 3422 memset(free_start, 0xcc, size); |
| 3425 } | 3423 } |
| 3426 if (free_list_mode == REBUILD_FREE_LIST) { | 3424 if (free_list_mode == REBUILD_FREE_LIST) { |
| 3427 freed_bytes = space->UnaccountedFree(free_start, size); | 3425 freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree( |
| | 3426 free_start, size); |
| 3428 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3427 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| 3429 } else { | 3428 } else { |
| 3430 p->heap()->CreateFillerObjectAt(free_start, size, | 3429 p->heap()->CreateFillerObjectAt(free_start, size, |
| 3431 ClearRecordedSlots::kNo); | 3430 ClearRecordedSlots::kNo); |
| 3432 } | 3431 } |
| 3433 } | 3432 } |
| 3434 Map* map = object->synchronized_map(); | 3433 Map* map = object->synchronized_map(); |
| 3435 int size = object->SizeFromMap(map); | 3434 int size = object->SizeFromMap(map); |
| 3436 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { | 3435 if (rebuild_skip_list) { |
| 3437 object->IterateBody(map->instance_type(), size, v); | |
| 3438 } | |
| 3439 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { | |
| 3440 int new_region_start = SkipList::RegionNumber(free_end); | 3436 int new_region_start = SkipList::RegionNumber(free_end); |
| 3441 int new_region_end = | 3437 int new_region_end = |
| 3442 SkipList::RegionNumber(free_end + size - kPointerSize); | 3438 SkipList::RegionNumber(free_end + size - kPointerSize); |
| 3443 if (new_region_start != curr_region || new_region_end != curr_region) { | 3439 if (new_region_start != curr_region || new_region_end != curr_region) { |
| 3444 skip_list->AddObject(free_end, size); | 3440 skip_list->AddObject(free_end, size); |
| 3445 curr_region = new_region_end; | 3441 curr_region = new_region_end; |
| 3446 } | 3442 } |
| 3447 } | 3443 } |
| 3448 free_start = free_end + size; | 3444 free_start = free_end + size; |
| 3449 } | 3445 } |
| 3450 | 3446 |
| 3451 // Clear the mark bits of that page and reset live bytes count. | 3447 // Clear the mark bits of that page and reset live bytes count. |
| 3452 Bitmap::Clear(p); | 3448 Bitmap::Clear(p); |
| 3453 | 3449 |
| 3454 if (free_start != p->area_end()) { | 3450 if (free_start != p->area_end()) { |
| 3455 int size = static_cast<int>(p->area_end() - free_start); | 3451 int size = static_cast<int>(p->area_end() - free_start); |
| 3456 if (free_space_mode == ZAP_FREE_SPACE) { | 3452 if (free_space_mode == ZAP_FREE_SPACE) { |
| 3457 memset(free_start, 0xcc, size); | 3453 memset(free_start, 0xcc, size); |
| 3458 } | 3454 } |
| 3459 if (free_list_mode == REBUILD_FREE_LIST) { | 3455 if (free_list_mode == REBUILD_FREE_LIST) { |
| 3460 freed_bytes = space->UnaccountedFree(free_start, size); | 3456 freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree( |
| | 3457 free_start, size); |
| 3461 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3458 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| 3462 } else { | 3459 } else { |
| 3463 p->heap()->CreateFillerObjectAt(free_start, size, | 3460 p->heap()->CreateFillerObjectAt(free_start, size, |
| 3464 ClearRecordedSlots::kNo); | 3461 ClearRecordedSlots::kNo); |
| 3465 } | 3462 } |
| 3466 } | 3463 } |
| 3467 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3464 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3468 if (free_list_mode == IGNORE_FREE_LIST) return 0; | 3465 if (free_list_mode == IGNORE_FREE_LIST) return 0; |
| 3469 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3466 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
| 3470 } | 3467 } |
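Illustrative sketch, not part of the patch: with the template parameters gone, a caller of the new RawSweep selects the sweeping behavior at runtime. The sketch below uses only identifiers that appear in this patch (RawSweep, the mode enums, Heap::ShouldZapGarbage, NEW_SPACE); the wrapper function itself is hypothetical.

    // Hypothetical wrapper, shown only to illustrate the new runtime dispatch.
    int SweepOnePage(MarkCompactCollector::Sweeper* sweeper, Page* page,
                     AllocationSpace identity) {
      typedef MarkCompactCollector::Sweeper Sweeper;
      // Zap freed memory only when the heap is configured to do so.
      const Sweeper::FreeSpaceTreatmentMode free_space_mode =
          Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE
                                   : Sweeper::IGNORE_FREE_SPACE;
      // New-space pages do not feed a free list; the paged spaces rebuild theirs.
      const Sweeper::FreeListRebuildingMode free_list_mode =
          (identity == NEW_SPACE) ? Sweeper::IGNORE_FREE_LIST
                                  : Sweeper::REBUILD_FREE_LIST;
      return sweeper->RawSweep(page, free_list_mode, free_space_mode);
    }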
| (...skipping 405 matching lines...) |
| 3876 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, | 3873 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, |
| 3877 AllocationSpace identity) { | 3874 AllocationSpace identity) { |
| 3878 int max_freed = 0; | 3875 int max_freed = 0; |
| 3879 if (page->mutex()->TryLock()) { | 3876 if (page->mutex()->TryLock()) { |
| 3880 // If this page was already swept in the meantime, we can return here. | 3877 // If this page was already swept in the meantime, we can return here. |
| 3881 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { | 3878 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { |
| 3882 page->mutex()->Unlock(); | 3879 page->mutex()->Unlock(); |
| 3883 return 0; | 3880 return 0; |
| 3884 } | 3881 } |
| 3885 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3882 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
| | 3883 const Sweeper::FreeSpaceTreatmentMode free_space_mode = |
| | 3884 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE; |
| 3886 if (identity == NEW_SPACE) { | 3885 if (identity == NEW_SPACE) { |
| 3887 RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3886 RawSweep(page, IGNORE_FREE_LIST, free_space_mode); |
| 3888 IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr); | |
| 3889 } else if (identity == OLD_SPACE) { | 3887 } else if (identity == OLD_SPACE) { |
| 3890 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3888 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); |
| 3891 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( | |
| 3892 heap_->paged_space(identity), page, nullptr); | |
| 3893 } else if (identity == CODE_SPACE) { | 3889 } else if (identity == CODE_SPACE) { |
| 3894 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, | 3890 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); |
| 3895 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( | |
| 3896 heap_->paged_space(identity), page, nullptr); | |
| 3897 } else { | 3891 } else { |
| 3898 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3892 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode); |
| 3899 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>( | |
| 3900 heap_->paged_space(identity), page, nullptr); | |
| 3901 } | 3893 } |
| 3902 { | 3894 { |
| 3903 base::LockGuard<base::Mutex> guard(&mutex_); | 3895 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3904 swept_list_[identity].Add(page); | 3896 swept_list_[identity].Add(page); |
| 3905 } | 3897 } |
| 3906 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3898 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3907 page->mutex()->Unlock(); | 3899 page->mutex()->Unlock(); |
| 3908 } | 3900 } |
| 3909 return max_freed; | 3901 return max_freed; |
| 3910 } | 3902 } |
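Illustrative sketch, not part of the patch: ParallelSweepPage above serializes concurrent sweepers with page->mutex()->TryLock() plus the per-page concurrent_sweeping_state(). Condensed, the claiming step looks roughly like the helper below; every identifier is taken from this patch, while the helper itself is hypothetical.

    // Hypothetical helper condensing the claim-or-skip logic of ParallelSweepPage.
    bool TryClaimPageForSweeping(Page* page) {
      if (!page->mutex()->TryLock()) return false;  // another sweeper owns the page
      if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
        // Already swept (or being swept) in the meantime: release and bail out.
        page->mutex()->Unlock();
        return false;
      }
      page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      return true;  // caller sweeps, sets kSweepingDone, then unlocks
    }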
| (...skipping 72 matching lines...) |
| 3983 space->Free(free_start, size); | 3975 space->Free(free_start, size); |
| 3984 continue; | 3976 continue; |
| 3985 } | 3977 } |
| 3986 | 3978 |
| 3987 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 3979 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
| 3988 // We need to sweep the page to get it into an iterable state again. Note | 3980 // We need to sweep the page to get it into an iterable state again. Note |
| 3989 // that this adds unusable memory into the free list that is later on | 3981 // that this adds unusable memory into the free list that is later on |
| 3990 // (in the free list) dropped again. Since we only use the flag for | 3982 // (in the free list) dropped again. Since we only use the flag for |
| 3991 // testing this is fine. | 3983 // testing this is fine. |
| 3992 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3984 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
| 3993 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, | 3985 Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST, |
| 3994 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST, | 3986 Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE |
| 3995 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr); | 3987 : Sweeper::IGNORE_FREE_SPACE); |
| 3996 continue; | 3988 continue; |
| 3997 } | 3989 } |
| 3998 | 3990 |
| 3999 // One unused page is kept, all further are released before sweeping them. | 3991 // One unused page is kept, all further are released before sweeping them. |
| 4000 if (p->LiveBytes() == 0) { | 3992 if (p->LiveBytes() == 0) { |
| 4001 if (unused_page_present) { | 3993 if (unused_page_present) { |
| 4002 if (FLAG_gc_verbose) { | 3994 if (FLAG_gc_verbose) { |
| 4003 PrintIsolate(isolate(), "sweeping: released page: %p", | 3995 PrintIsolate(isolate(), "sweeping: released page: %p", |
| 4004 static_cast<void*>(p)); | 3996 static_cast<void*>(p)); |
| 4005 } | 3997 } |
| (...skipping 88 matching lines...) |
| 4094 // The target is always in old space, we don't have to record the slot in | 4086 // The target is always in old space, we don't have to record the slot in |
| 4095 // the old-to-new remembered set. | 4087 // the old-to-new remembered set. |
| 4096 DCHECK(!heap()->InNewSpace(target)); | 4088 DCHECK(!heap()->InNewSpace(target)); |
| 4097 RecordRelocSlot(host, &rinfo, target); | 4089 RecordRelocSlot(host, &rinfo, target); |
| 4098 } | 4090 } |
| 4099 } | 4091 } |
| 4100 } | 4092 } |
| 4101 | 4093 |
| 4102 } // namespace internal | 4094 } // namespace internal |
| 4103 } // namespace v8 | 4095 } // namespace v8 |