Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(524)

Side by Side Diff: src/mark-compact.cc

Issue 8692002: Only sweep one page eagerly unless we are running out of space. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/heap.cc ('k') | src/spaces.h » ('j') | src/spaces.h » ('J')
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 400 matching lines...) Expand 10 before | Expand all | Expand 10 after
411 411
412 return NULL; 412 return NULL;
413 } 413 }
414 414
415 415
416 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { 416 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
417 ASSERT(space->identity() == OLD_POINTER_SPACE || 417 ASSERT(space->identity() == OLD_POINTER_SPACE ||
418 space->identity() == OLD_DATA_SPACE || 418 space->identity() == OLD_DATA_SPACE ||
419 space->identity() == CODE_SPACE); 419 space->identity() == CODE_SPACE);
420 420
421 int number_of_pages = 0;
Michael Starzinger 2011/11/24 14:43:49 Can we instead use PagedSpace::CountTotalPages() a
Erik Corry 2011/11/24 17:02:30 Done.
422 {
423 PageIterator counter(space);
424 while (counter.has_next()) {
425 number_of_pages++;
426 counter.next();
427 }
428 }
429
421 PageIterator it(space); 430 PageIterator it(space);
431
432 const int kMaxMaxEvacuationCandidates = 1000;
Michael Starzinger 2011/11/24 14:43:49 Why two 'Max' in that constant?
Erik Corry 2011/11/24 17:02:30 You can't make the max any higher than this. Fixed.
433 int max_evacuation_candidates =
434 static_cast<int>(sqrt(number_of_pages / 2) + 1);
435
436 if (FLAG_stress_compaction || FLAG_always_compact) {
437 max_evacuation_candidates = kMaxMaxEvacuationCandidates;
438 }
439
440 class Candidate {
441 public:
442 Candidate() : fragmentation_(0), page_(NULL) { }
443 Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
444
445 int fragmentation() { return fragmentation_; }
446 Page* page() { return page_; }
447
448 private:
449 int fragmentation_;
450 Page* page_;
451 };
452
453 Candidate candidates[kMaxMaxEvacuationCandidates];
454
422 int count = 0; 455 int count = 0;
423 if (it.has_next()) it.next(); // Never compact the first page. 456 if (it.has_next()) it.next(); // Never compact the first page.
457 int fragmentation = 0;
458 Candidate* least = NULL;
424 while (it.has_next()) { 459 while (it.has_next()) {
425 Page* p = it.next(); 460 Page* p = it.next();
426 bool evacuate = false; 461 p->ClearEvacuationCandidate();
427 if (FLAG_stress_compaction) { 462 if (FLAG_stress_compaction) {
428 int counter = space->heap()->ms_count(); 463 int counter = space->heap()->ms_count();
429 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; 464 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
430 if ((counter & 1) == (page_number & 1)) evacuate = true; 465 if ((counter & 1) == (page_number & 1)) fragmentation = 1;
431 } else { 466 } else {
432 if (space->IsFragmented(p)) evacuate = true; 467 fragmentation = space->Fragmentation(p);
433 } 468 }
434 if (evacuate) { 469 if (fragmentation != 0) {
435 AddEvacuationCandidate(p); 470 if (count < max_evacuation_candidates) {
436 count++; 471 candidates[count++] = Candidate(fragmentation, p);
437 } else { 472 } else {
438 p->ClearEvacuationCandidate(); 473 if (least == NULL) {
474 for (int i = 0; i < max_evacuation_candidates; i++) {
475 if (least == NULL ||
476 candidates[i].fragmentation() < least->fragmentation()) {
477 least = candidates + i;
478 }
479 }
480 }
481 if (least->fragmentation() < fragmentation) {
482 *least = Candidate(fragmentation, p);
483 least = NULL;
484 }
485 }
439 } 486 }
440 } 487 }
488 for (int i = 0; i < count; i++) {
489 AddEvacuationCandidate(candidates[i].page());
490 }
441 491
442 if (count > 0 && FLAG_trace_fragmentation) { 492 if (count > 0 && FLAG_trace_fragmentation) {
443 PrintF("Collected %d evacuation candidates for space %s\n", 493 PrintF("Collected %d evacuation candidates for space %s\n",
444 count, 494 count,
445 AllocationSpaceName(space->identity())); 495 AllocationSpaceName(space->identity()));
446 } 496 }
447 } 497 }
448 498
449 499
450 void MarkCompactCollector::AbortCompaction() { 500 void MarkCompactCollector::AbortCompaction() {
(...skipping 2496 matching lines...) Expand 10 before | Expand all | Expand 10 after
2947 if (code != NULL) { 2997 if (code != NULL) {
2948 code->Iterate(visitor); 2998 code->Iterate(visitor);
2949 SetMarkBitsUnderInvalidatedCode(code, false); 2999 SetMarkBitsUnderInvalidatedCode(code, false);
2950 } 3000 }
2951 } 3001 }
2952 invalidated_code_.Rewind(0); 3002 invalidated_code_.Rewind(0);
2953 } 3003 }
2954 3004
2955 3005
2956 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { 3006 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
2957 bool code_slots_filtering_required = MarkInvalidatedCode(); 3007 bool code_slots_filtering_required;
3008 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3009 code_slots_filtering_required = MarkInvalidatedCode();
2958 3010
2959 EvacuateNewSpace(); 3011 EvacuateNewSpace();
2960 EvacuatePages(); 3012 }
3013
3014
3015 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3016 EvacuatePages();
3017 }
2961 3018
2962 // Second pass: find pointers to new space and update them. 3019 // Second pass: find pointers to new space and update them.
2963 PointersUpdatingVisitor updating_visitor(heap()); 3020 PointersUpdatingVisitor updating_visitor(heap());
2964 3021
2965 // Update pointers in to space. 3022 { GCTracer::Scope gc_scope(tracer_,
2966 SemiSpaceIterator to_it(heap()->new_space()->bottom(), 3023 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
2967 heap()->new_space()->top()); 3024 // Update pointers in to space.
2968 for (HeapObject* object = to_it.Next(); 3025 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
2969 object != NULL; 3026 heap()->new_space()->top());
2970 object = to_it.Next()) { 3027 for (HeapObject* object = to_it.Next();
2971 Map* map = object->map(); 3028 object != NULL;
2972 object->IterateBody(map->instance_type(), 3029 object = to_it.Next()) {
2973 object->SizeFromMap(map), 3030 Map* map = object->map();
2974 &updating_visitor); 3031 object->IterateBody(map->instance_type(),
3032 object->SizeFromMap(map),
3033 &updating_visitor);
3034 }
2975 } 3035 }
2976 3036
2977 // Update roots. 3037 { GCTracer::Scope gc_scope(tracer_,
2978 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 3038 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
2979 LiveObjectList::IterateElements(&updating_visitor); 3039 // Update roots.
3040 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3041 LiveObjectList::IterateElements(&updating_visitor);
3042 }
2980 3043
2981 { 3044 { GCTracer::Scope gc_scope(tracer_,
3045 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
2982 StoreBufferRebuildScope scope(heap_, 3046 StoreBufferRebuildScope scope(heap_,
2983 heap_->store_buffer(), 3047 heap_->store_buffer(),
2984 &Heap::ScavengeStoreBufferCallback); 3048 &Heap::ScavengeStoreBufferCallback);
2985 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); 3049 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
2986 } 3050 }
2987 3051
2988 SlotsBuffer::UpdateSlotsRecordedIn(heap_, 3052 { GCTracer::Scope gc_scope(tracer_,
2989 migration_slots_buffer_, 3053 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
2990 code_slots_filtering_required); 3054 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
2991 if (FLAG_trace_fragmentation) { 3055 migration_slots_buffer_,
2992 PrintF(" migration slots buffer: %d\n", 3056 code_slots_filtering_required);
2993 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); 3057 if (FLAG_trace_fragmentation) {
2994 } 3058 PrintF(" migration slots buffer: %d\n",
3059 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3060 }
2995 3061
2996 if (compacting_ && was_marked_incrementally_) { 3062 if (compacting_ && was_marked_incrementally_) {
2997 // It's difficult to filter out slots recorded for large objects. 3063 // It's difficult to filter out slots recorded for large objects.
2998 LargeObjectIterator it(heap_->lo_space()); 3064 LargeObjectIterator it(heap_->lo_space());
2999 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 3065 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3000 // LargeObjectSpace is not swept yet thus we have to skip 3066 // LargeObjectSpace is not swept yet thus we have to skip
3001 // dead objects explicitly. 3067 // dead objects explicitly.
3002 if (!IsMarked(obj)) continue; 3068 if (!IsMarked(obj)) continue;
3003 3069
3004 Page* p = Page::FromAddress(obj->address()); 3070 Page* p = Page::FromAddress(obj->address());
3005 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { 3071 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3006 obj->Iterate(&updating_visitor); 3072 obj->Iterate(&updating_visitor);
3007 p->ClearFlag(Page::RESCAN_ON_EVACUATION); 3073 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3074 }
3008 } 3075 }
3009 } 3076 }
3010 } 3077 }
3011 3078
3012 int npages = evacuation_candidates_.length(); 3079 int npages = evacuation_candidates_.length();
3013 for (int i = 0; i < npages; i++) { 3080 { GCTracer::Scope gc_scope(
3014 Page* p = evacuation_candidates_[i]; 3081 tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3015 ASSERT(p->IsEvacuationCandidate() || 3082 for (int i = 0; i < npages; i++) {
3016 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3083 Page* p = evacuation_candidates_[i];
3084 ASSERT(p->IsEvacuationCandidate() ||
3085 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3017 3086
3018 if (p->IsEvacuationCandidate()) { 3087 if (p->IsEvacuationCandidate()) {
3019 SlotsBuffer::UpdateSlotsRecordedIn(heap_, 3088 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3020 p->slots_buffer(), 3089 p->slots_buffer(),
3021 code_slots_filtering_required); 3090 code_slots_filtering_required);
3022 if (FLAG_trace_fragmentation) { 3091 if (FLAG_trace_fragmentation) {
3023 PrintF(" page %p slots buffer: %d\n", 3092 PrintF(" page %p slots buffer: %d\n",
3024 reinterpret_cast<void*>(p), 3093 reinterpret_cast<void*>(p),
3025 SlotsBuffer::SizeOfChain(p->slots_buffer())); 3094 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3026 } 3095 }
3027 3096
3028 // Important: skip list should be cleared only after roots were updated 3097 // Important: skip list should be cleared only after roots were updated
3029 // because root iteration traverses the stack and might have to find code 3098 // because root iteration traverses the stack and might have to find
3030 // objects from non-updated pc pointing into evacuation candidate. 3099 // code objects from non-updated pc pointing into evacuation candidate.
3031 SkipList* list = p->skip_list(); 3100 SkipList* list = p->skip_list();
3032 if (list != NULL) list->Clear(); 3101 if (list != NULL) list->Clear();
3033 } else { 3102 } else {
3034 if (FLAG_gc_verbose) { 3103 if (FLAG_gc_verbose) {
3035 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", 3104 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3036 reinterpret_cast<intptr_t>(p)); 3105 reinterpret_cast<intptr_t>(p));
3037 } 3106 }
3038 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3107 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3039 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 3108 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3040 3109
3041 switch (space->identity()) { 3110 switch (space->identity()) {
3042 case OLD_DATA_SPACE: 3111 case OLD_DATA_SPACE:
3043 SweepConservatively(space, p); 3112 SweepConservatively(space, p);
3044 break; 3113 break;
3045 case OLD_POINTER_SPACE: 3114 case OLD_POINTER_SPACE:
3046 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( 3115 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3047 space, p, &updating_visitor); 3116 space, p, &updating_visitor);
3048 break; 3117 break;
3049 case CODE_SPACE: 3118 case CODE_SPACE:
3050 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>( 3119 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3051 space, p, &updating_visitor); 3120 space, p, &updating_visitor);
3052 break; 3121 break;
3053 default: 3122 default:
3054 UNREACHABLE(); 3123 UNREACHABLE();
3055 break; 3124 break;
3125 }
3056 } 3126 }
3057 } 3127 }
3058 } 3128 }
3059 3129
3130 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3131
3060 // Update pointers from cells. 3132 // Update pointers from cells.
3061 HeapObjectIterator cell_iterator(heap_->cell_space()); 3133 HeapObjectIterator cell_iterator(heap_->cell_space());
3062 for (HeapObject* cell = cell_iterator.Next(); 3134 for (HeapObject* cell = cell_iterator.Next();
3063 cell != NULL; 3135 cell != NULL;
3064 cell = cell_iterator.Next()) { 3136 cell = cell_iterator.Next()) {
3065 if (cell->IsJSGlobalPropertyCell()) { 3137 if (cell->IsJSGlobalPropertyCell()) {
3066 Address value_address = 3138 Address value_address =
3067 reinterpret_cast<Address>(cell) + 3139 reinterpret_cast<Address>(cell) +
3068 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 3140 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
3069 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 3141 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
(...skipping 406 matching lines...) Expand 10 before | Expand all | Expand 10 after
3476 free_start = DigestFreeStart(free_start, free_start_cell); 3548 free_start = DigestFreeStart(free_start, free_start_cell);
3477 freed_bytes += space->Free(free_start, 3549 freed_bytes += space->Free(free_start,
3478 static_cast<int>(block_address - free_start)); 3550 static_cast<int>(block_address - free_start));
3479 } 3551 }
3480 3552
3481 p->ResetLiveBytes(); 3553 p->ResetLiveBytes();
3482 return freed_bytes; 3554 return freed_bytes;
3483 } 3555 }
3484 3556
3485 3557
3486 void MarkCompactCollector::SweepSpace(PagedSpace* space, 3558 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
3487 SweeperType sweeper) {
3488 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || 3559 space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3489 sweeper == LAZY_CONSERVATIVE); 3560 sweeper == LAZY_CONSERVATIVE);
3490 3561
3491 space->ClearStats(); 3562 space->ClearStats();
3492 3563
3493 PageIterator it(space); 3564 PageIterator it(space);
3494 3565
3495 intptr_t freed_bytes = 0; 3566 intptr_t freed_bytes = 0;
3567 int pages_swept = 0;
3496 intptr_t newspace_size = space->heap()->new_space()->Size(); 3568 intptr_t newspace_size = space->heap()->new_space()->Size();
3497 bool lazy_sweeping_active = false; 3569 bool lazy_sweeping_active = false;
3498 bool unused_page_present = false; 3570 bool unused_page_present = false;
3499 3571
3572 intptr_t old_space_size = heap()->PromotedSpaceSize();
3573 intptr_t space_left =
3574 Min(heap()->OldGenPromotionLimit(old_space_size),
3575 heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
3576
3500 while (it.has_next()) { 3577 while (it.has_next()) {
3501 Page* p = it.next(); 3578 Page* p = it.next();
3502 3579
3503 // Clear sweeping flags indicating that marking bits are still intact. 3580 // Clear sweeping flags indicating that marking bits are still intact.
3504 p->ClearSweptPrecisely(); 3581 p->ClearSweptPrecisely();
3505 p->ClearSweptConservatively(); 3582 p->ClearSweptConservatively();
3506 3583
3507 if (p->IsEvacuationCandidate()) { 3584 if (p->IsEvacuationCandidate()) {
3508 ASSERT(evacuation_candidates_.length() > 0); 3585 ASSERT(evacuation_candidates_.length() > 0);
3509 continue; 3586 continue;
(...skipping 18 matching lines...) Expand all
3528 if (FLAG_gc_verbose) { 3605 if (FLAG_gc_verbose) {
3529 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", 3606 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
3530 reinterpret_cast<intptr_t>(p)); 3607 reinterpret_cast<intptr_t>(p));
3531 } 3608 }
3532 space->ReleasePage(p); 3609 space->ReleasePage(p);
3533 continue; 3610 continue;
3534 } 3611 }
3535 unused_page_present = true; 3612 unused_page_present = true;
3536 } 3613 }
3537 3614
3538 if (FLAG_gc_verbose) {
3539 PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n",
3540 reinterpret_cast<intptr_t>(p),
3541 sweeper);
3542 }
3543
3544 switch (sweeper) { 3615 switch (sweeper) {
3545 case CONSERVATIVE: { 3616 case CONSERVATIVE: {
3617 if (FLAG_gc_verbose) {
3618 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
3619 reinterpret_cast<intptr_t>(p));
3620 }
3546 SweepConservatively(space, p); 3621 SweepConservatively(space, p);
3622 pages_swept++;
3547 break; 3623 break;
3548 } 3624 }
3549 case LAZY_CONSERVATIVE: { 3625 case LAZY_CONSERVATIVE: {
3626 if (FLAG_gc_verbose) {
3627 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
3628 reinterpret_cast<intptr_t>(p));
3629 }
3550 freed_bytes += SweepConservatively(space, p); 3630 freed_bytes += SweepConservatively(space, p);
3551 if (freed_bytes >= newspace_size && p != space->LastPage()) { 3631 pages_swept++;
3632 if (space_left + freed_bytes > newspace_size) {
3552 space->SetPagesToSweep(p->next_page()); 3633 space->SetPagesToSweep(p->next_page());
3553 lazy_sweeping_active = true; 3634 lazy_sweeping_active = true;
3635 } else {
3636 if (FLAG_gc_verbose) {
3637 PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n",
3638 freed_bytes);
3639 }
3554 } 3640 }
3555 break; 3641 break;
3556 } 3642 }
3557 case PRECISE: { 3643 case PRECISE: {
3644 if (FLAG_gc_verbose) {
3645 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
3646 reinterpret_cast<intptr_t>(p));
3647 }
3558 if (space->identity() == CODE_SPACE) { 3648 if (space->identity() == CODE_SPACE) {
3559 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL); 3649 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
3560 } else { 3650 } else {
3561 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL); 3651 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
3562 } 3652 }
3653 pages_swept++;
3563 break; 3654 break;
3564 } 3655 }
3565 default: { 3656 default: {
3566 UNREACHABLE(); 3657 UNREACHABLE();
3567 } 3658 }
3568 } 3659 }
3569 } 3660 }
3570 3661
3662 if (FLAG_gc_verbose) {
3663 PrintF("SweepSpace: %s (%d pages swept)\n",
3664 AllocationSpaceName(space->identity()),
3665 pages_swept);
3666 }
3667
3571 // Give pages that are queued to be freed back to the OS. 3668 // Give pages that are queued to be freed back to the OS.
3572 heap()->FreeQueuedChunks(); 3669 heap()->FreeQueuedChunks();
3573 } 3670 }
3574 3671
3575 3672
3576 void MarkCompactCollector::SweepSpaces() { 3673 void MarkCompactCollector::SweepSpaces() {
3577 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 3674 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
3578 #ifdef DEBUG 3675 #ifdef DEBUG
3579 state_ = SWEEP_SPACES; 3676 state_ = SWEEP_SPACES;
3580 #endif 3677 #endif
3581 SweeperType how_to_sweep = 3678 SweeperType how_to_sweep =
3582 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; 3679 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
3583 if (sweep_precisely_) how_to_sweep = PRECISE; 3680 if (sweep_precisely_) how_to_sweep = PRECISE;
3584 // Noncompacting collections simply sweep the spaces to clear the mark 3681 // Noncompacting collections simply sweep the spaces to clear the mark
3585 // bits and free the nonlive blocks (for old and map spaces). We sweep 3682 // bits and free the nonlive blocks (for old and map spaces). We sweep
3586 // the map space last because freeing non-live maps overwrites them and 3683 // the map space last because freeing non-live maps overwrites them and
3587 // the other spaces rely on possibly non-live maps to get the sizes for 3684 // the other spaces rely on possibly non-live maps to get the sizes for
3588 // non-live objects. 3685 // non-live objects.
3589 SweepSpace(heap()->old_pointer_space(), how_to_sweep); 3686 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
3590 SweepSpace(heap()->old_data_space(), how_to_sweep); 3687 SweepSpace(heap()->old_data_space(), how_to_sweep);
3591 3688
3592 RemoveDeadInvalidatedCode(); 3689 RemoveDeadInvalidatedCode();
3593 SweepSpace(heap()->code_space(), PRECISE); 3690 SweepSpace(heap()->code_space(), PRECISE);
3594 3691
3595 SweepSpace(heap()->cell_space(), PRECISE); 3692 SweepSpace(heap()->cell_space(), PRECISE);
3596 3693
3597 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); 3694 EvacuateNewSpaceAndCandidates();
3598 EvacuateNewSpaceAndCandidates();
3599 }
3600 3695
3601 // ClearNonLiveTransitions depends on precise sweeping of map space to 3696 // ClearNonLiveTransitions depends on precise sweeping of map space to
3602 // detect whether unmarked map became dead in this collection or in one 3697 // detect whether unmarked map became dead in this collection or in one
3603 // of the previous ones. 3698 // of the previous ones.
3604 SweepSpace(heap()->map_space(), PRECISE); 3699 SweepSpace(heap()->map_space(), PRECISE);
3605 3700
3606 // Deallocate unmarked objects and clear marked bits for marked objects. 3701 // Deallocate unmarked objects and clear marked bits for marked objects.
3607 heap_->lo_space()->FreeUnmarkedObjects(); 3702 heap_->lo_space()->FreeUnmarkedObjects();
3608 } 3703 }
3609 3704
(...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after
3775 while (buffer != NULL) { 3870 while (buffer != NULL) {
3776 SlotsBuffer* next_buffer = buffer->next(); 3871 SlotsBuffer* next_buffer = buffer->next();
3777 DeallocateBuffer(buffer); 3872 DeallocateBuffer(buffer);
3778 buffer = next_buffer; 3873 buffer = next_buffer;
3779 } 3874 }
3780 *buffer_address = NULL; 3875 *buffer_address = NULL;
3781 } 3876 }
3782 3877
3783 3878
3784 } } // namespace v8::internal 3879 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/heap.cc ('k') | src/spaces.h » ('j') | src/spaces.h » ('J')

Powered by Google App Engine
This is Rietveld 408576698