Chromium Code Reviews
Side by Side Diff: src/mark-compact.cc

Issue 8692002: Only sweep one page eagerly unless we are running out of space. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 400 matching lines...)
411 411
412 return NULL; 412 return NULL;
413 } 413 }
414 414
415 415
416 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { 416 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
417 ASSERT(space->identity() == OLD_POINTER_SPACE || 417 ASSERT(space->identity() == OLD_POINTER_SPACE ||
418 space->identity() == OLD_DATA_SPACE || 418 space->identity() == OLD_DATA_SPACE ||
419 space->identity() == CODE_SPACE); 419 space->identity() == CODE_SPACE);
420 420
421 int number_of_pages = space->CountTotalPages();
422
421 PageIterator it(space); 423 PageIterator it(space);
424 const int kMaxMaxEvacuationCandidates = 1000;
425 int max_evacuation_candidates = Min(
426 kMaxMaxEvacuationCandidates,
427 static_cast<int>(sqrt(number_of_pages / 2) + 1));
428
429 if (FLAG_stress_compaction || FLAG_always_compact) {
430 max_evacuation_candidates = kMaxMaxEvacuationCandidates;
431 }
432
433 class Candidate {
434 public:
435 Candidate() : fragmentation_(0), page_(NULL) { }
436 Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
437
438 int fragmentation() { return fragmentation_; }
439 Page* page() { return page_; }
440
441 private:
442 int fragmentation_;
443 Page* page_;
444 };
445
446 Candidate candidates[kMaxMaxEvacuationCandidates];
447
422 int count = 0; 448 int count = 0;
423 if (it.has_next()) it.next(); // Never compact the first page. 449 if (it.has_next()) it.next(); // Never compact the first page.
450 int fragmentation = 0;
451 Candidate* least = NULL;
424 while (it.has_next()) { 452 while (it.has_next()) {
425 Page* p = it.next(); 453 Page* p = it.next();
426 bool evacuate = false; 454 p->ClearEvacuationCandidate();
427 if (FLAG_stress_compaction) { 455 if (FLAG_stress_compaction) {
428 int counter = space->heap()->ms_count(); 456 int counter = space->heap()->ms_count();
429 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; 457 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
430 if ((counter & 1) == (page_number & 1)) evacuate = true; 458 if ((counter & 1) == (page_number & 1)) fragmentation = 1;
431 } else { 459 } else {
432 if (space->IsFragmented(p)) evacuate = true; 460 fragmentation = space->Fragmentation(p);
433 } 461 }
434 if (evacuate) { 462 if (fragmentation != 0) {
435 AddEvacuationCandidate(p); 463 if (count < max_evacuation_candidates) {
436 count++; 464 candidates[count++] = Candidate(fragmentation, p);
437 } else { 465 } else {
438 p->ClearEvacuationCandidate(); 466 if (least == NULL) {
467 for (int i = 0; i < max_evacuation_candidates; i++) {
468 if (least == NULL ||
469 candidates[i].fragmentation() < least->fragmentation()) {
470 least = candidates + i;
471 }
472 }
473 }
474 if (least->fragmentation() < fragmentation) {
475 *least = Candidate(fragmentation, p);
476 least = NULL;
477 }
478 }
439 } 479 }
440 } 480 }
481 for (int i = 0; i < count; i++) {
482 AddEvacuationCandidate(candidates[i].page());
483 }
441 484
442 if (count > 0 && FLAG_trace_fragmentation) { 485 if (count > 0 && FLAG_trace_fragmentation) {
443 PrintF("Collected %d evacuation candidates for space %s\n", 486 PrintF("Collected %d evacuation candidates for space %s\n",
444 count, 487 count,
445 AllocationSpaceName(space->identity())); 488 AllocationSpaceName(space->identity()));
446 } 489 }
447 } 490 }
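
Note: the hunk above replaces the old one-pass "evacuate every fragmented page" rule with a bounded candidate list: at most Min(1000, sqrt(number_of_pages / 2) + 1) pages are kept, and once the list is full a new page only displaces the least fragmented entry. Below is a minimal standalone sketch of that selection policy, with pages reduced to their fragmentation scores and purely illustrative names (not the V8 API):

#include <algorithm>
#include <cmath>
#include <vector>

// Keep at most min(1000, sqrt(pages / 2) + 1) of the most fragmented pages.
std::vector<int> SelectEvacuationCandidates(const std::vector<int>& fragmentation) {
  const int kMaxMaxCandidates = 1000;
  int number_of_pages = static_cast<int>(fragmentation.size());
  int max_candidates = std::min(
      kMaxMaxCandidates,
      static_cast<int>(std::sqrt(number_of_pages / 2) + 1));

  std::vector<int> candidates;
  for (int f : fragmentation) {
    if (f == 0) continue;                       // Page not worth evacuating.
    if (static_cast<int>(candidates.size()) < max_candidates) {
      candidates.push_back(f);                  // Still room in the list.
    } else {
      // List is full: displace the least fragmented candidate if this page
      // is more fragmented than it.
      auto least = std::min_element(candidates.begin(), candidates.end());
      if (*least < f) *least = f;
    }
  }
  return candidates;
}

The patch itself avoids rescanning the array on every page by caching a pointer to the least fragmented entry and only invalidating it after a replacement.
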
448 491
449 492
450 void MarkCompactCollector::AbortCompaction() { 493 void MarkCompactCollector::AbortCompaction() {
(...skipping 2496 matching lines...)
2947 if (code != NULL) { 2990 if (code != NULL) {
2948 code->Iterate(visitor); 2991 code->Iterate(visitor);
2949 SetMarkBitsUnderInvalidatedCode(code, false); 2992 SetMarkBitsUnderInvalidatedCode(code, false);
2950 } 2993 }
2951 } 2994 }
2952 invalidated_code_.Rewind(0); 2995 invalidated_code_.Rewind(0);
2953 } 2996 }
2954 2997
2955 2998
2956 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { 2999 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
2957 bool code_slots_filtering_required = MarkInvalidatedCode(); 3000 bool code_slots_filtering_required;
3001 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3002 code_slots_filtering_required = MarkInvalidatedCode();
2958 3003
2959 EvacuateNewSpace(); 3004 EvacuateNewSpace();
2960 EvacuatePages(); 3005 }
3006
3007
3008 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3009 EvacuatePages();
3010 }
2961 3011
2962 // Second pass: find pointers to new space and update them. 3012 // Second pass: find pointers to new space and update them.
2963 PointersUpdatingVisitor updating_visitor(heap()); 3013 PointersUpdatingVisitor updating_visitor(heap());
2964 3014
2965 // Update pointers in to space. 3015 { GCTracer::Scope gc_scope(tracer_,
2966 SemiSpaceIterator to_it(heap()->new_space()->bottom(), 3016 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
2967 heap()->new_space()->top()); 3017 // Update pointers in to space.
2968 for (HeapObject* object = to_it.Next(); 3018 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
2969 object != NULL; 3019 heap()->new_space()->top());
2970 object = to_it.Next()) { 3020 for (HeapObject* object = to_it.Next();
2971 Map* map = object->map(); 3021 object != NULL;
2972 object->IterateBody(map->instance_type(), 3022 object = to_it.Next()) {
2973 object->SizeFromMap(map), 3023 Map* map = object->map();
2974 &updating_visitor); 3024 object->IterateBody(map->instance_type(),
3025 object->SizeFromMap(map),
3026 &updating_visitor);
3027 }
2975 } 3028 }
2976 3029
2977 // Update roots. 3030 { GCTracer::Scope gc_scope(tracer_,
2978 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 3031 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
2979 LiveObjectList::IterateElements(&updating_visitor); 3032 // Update roots.
3033 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3034 LiveObjectList::IterateElements(&updating_visitor);
3035 }
2980 3036
2981 { 3037 { GCTracer::Scope gc_scope(tracer_,
3038 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
2982 StoreBufferRebuildScope scope(heap_, 3039 StoreBufferRebuildScope scope(heap_,
2983 heap_->store_buffer(), 3040 heap_->store_buffer(),
2984 &Heap::ScavengeStoreBufferCallback); 3041 &Heap::ScavengeStoreBufferCallback);
2985 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); 3042 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
2986 } 3043 }
2987 3044
2988 SlotsBuffer::UpdateSlotsRecordedIn(heap_, 3045 { GCTracer::Scope gc_scope(tracer_,
2989 migration_slots_buffer_, 3046 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
2990 code_slots_filtering_required); 3047 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
2991 if (FLAG_trace_fragmentation) { 3048 migration_slots_buffer_,
2992 PrintF(" migration slots buffer: %d\n", 3049 code_slots_filtering_required);
2993 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); 3050 if (FLAG_trace_fragmentation) {
2994 } 3051 PrintF(" migration slots buffer: %d\n",
3052 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3053 }
2995 3054
2996 if (compacting_ && was_marked_incrementally_) { 3055 if (compacting_ && was_marked_incrementally_) {
2997 // It's difficult to filter out slots recorded for large objects. 3056 // It's difficult to filter out slots recorded for large objects.
2998 LargeObjectIterator it(heap_->lo_space()); 3057 LargeObjectIterator it(heap_->lo_space());
2999 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 3058 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3000 // LargeObjectSpace is not swept yet thus we have to skip 3059 // LargeObjectSpace is not swept yet thus we have to skip
3001 // dead objects explicitly. 3060 // dead objects explicitly.
3002 if (!IsMarked(obj)) continue; 3061 if (!IsMarked(obj)) continue;
3003 3062
3004 Page* p = Page::FromAddress(obj->address()); 3063 Page* p = Page::FromAddress(obj->address());
3005 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { 3064 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3006 obj->Iterate(&updating_visitor); 3065 obj->Iterate(&updating_visitor);
3007 p->ClearFlag(Page::RESCAN_ON_EVACUATION); 3066 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3067 }
3008 } 3068 }
3009 } 3069 }
3010 } 3070 }
3011 3071
3012 int npages = evacuation_candidates_.length(); 3072 int npages = evacuation_candidates_.length();
3013 for (int i = 0; i < npages; i++) { 3073 { GCTracer::Scope gc_scope(
3014 Page* p = evacuation_candidates_[i]; 3074 tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3015 ASSERT(p->IsEvacuationCandidate() || 3075 for (int i = 0; i < npages; i++) {
3016 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3076 Page* p = evacuation_candidates_[i];
3077 ASSERT(p->IsEvacuationCandidate() ||
3078 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3017 3079
3018 if (p->IsEvacuationCandidate()) { 3080 if (p->IsEvacuationCandidate()) {
3019 SlotsBuffer::UpdateSlotsRecordedIn(heap_, 3081 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3020 p->slots_buffer(), 3082 p->slots_buffer(),
3021 code_slots_filtering_required); 3083 code_slots_filtering_required);
3022 if (FLAG_trace_fragmentation) { 3084 if (FLAG_trace_fragmentation) {
3023 PrintF(" page %p slots buffer: %d\n", 3085 PrintF(" page %p slots buffer: %d\n",
3024 reinterpret_cast<void*>(p), 3086 reinterpret_cast<void*>(p),
3025 SlotsBuffer::SizeOfChain(p->slots_buffer())); 3087 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3026 } 3088 }
3027 3089
3028 // Important: skip list should be cleared only after roots were updated 3090 // Important: skip list should be cleared only after roots were updated
3029 // because root iteration traverses the stack and might have to find code 3091 // because root iteration traverses the stack and might have to find
3030 // objects from non-updated pc pointing into evacuation candidate. 3092 // code objects from non-updated pc pointing into evacuation candidate.
3031 SkipList* list = p->skip_list(); 3093 SkipList* list = p->skip_list();
3032 if (list != NULL) list->Clear(); 3094 if (list != NULL) list->Clear();
3033 } else { 3095 } else {
3034 if (FLAG_gc_verbose) { 3096 if (FLAG_gc_verbose) {
3035 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", 3097 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3036 reinterpret_cast<intptr_t>(p)); 3098 reinterpret_cast<intptr_t>(p));
3037 } 3099 }
3038 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3100 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3039 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 3101 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3040 3102
3041 switch (space->identity()) { 3103 switch (space->identity()) {
3042 case OLD_DATA_SPACE: 3104 case OLD_DATA_SPACE:
3043 SweepConservatively(space, p); 3105 SweepConservatively(space, p);
3044 break; 3106 break;
3045 case OLD_POINTER_SPACE: 3107 case OLD_POINTER_SPACE:
3046 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( 3108 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3047 space, p, &updating_visitor); 3109 space, p, &updating_visitor);
3048 break; 3110 break;
3049 case CODE_SPACE: 3111 case CODE_SPACE:
3050 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>( 3112 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3051 space, p, &updating_visitor); 3113 space, p, &updating_visitor);
3052 break; 3114 break;
3053 default: 3115 default:
3054 UNREACHABLE(); 3116 UNREACHABLE();
3055 break; 3117 break;
3118 }
3056 } 3119 }
3057 } 3120 }
3058 } 3121 }
3059 3122
3123 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3124
3060 // Update pointers from cells. 3125 // Update pointers from cells.
3061 HeapObjectIterator cell_iterator(heap_->cell_space()); 3126 HeapObjectIterator cell_iterator(heap_->cell_space());
3062 for (HeapObject* cell = cell_iterator.Next(); 3127 for (HeapObject* cell = cell_iterator.Next();
3063 cell != NULL; 3128 cell != NULL;
3064 cell = cell_iterator.Next()) { 3129 cell = cell_iterator.Next()) {
3065 if (cell->IsJSGlobalPropertyCell()) { 3130 if (cell->IsJSGlobalPropertyCell()) {
3066 Address value_address = 3131 Address value_address =
3067 reinterpret_cast<Address>(cell) + 3132 reinterpret_cast<Address>(cell) +
3068 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 3133 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
3069 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 3134 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
(...skipping 406 matching lines...)
3476 free_start = DigestFreeStart(free_start, free_start_cell); 3541 free_start = DigestFreeStart(free_start, free_start_cell);
3477 freed_bytes += space->Free(free_start, 3542 freed_bytes += space->Free(free_start,
3478 static_cast<int>(block_address - free_start)); 3543 static_cast<int>(block_address - free_start));
3479 } 3544 }
3480 3545
3481 p->ResetLiveBytes(); 3546 p->ResetLiveBytes();
3482 return freed_bytes; 3547 return freed_bytes;
3483 } 3548 }
3484 3549
3485 3550
3486 void MarkCompactCollector::SweepSpace(PagedSpace* space, 3551 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
3487 SweeperType sweeper) {
3488 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || 3552 space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3489 sweeper == LAZY_CONSERVATIVE); 3553 sweeper == LAZY_CONSERVATIVE);
3490 3554
3491 space->ClearStats(); 3555 space->ClearStats();
3492 3556
3493 PageIterator it(space); 3557 PageIterator it(space);
3494 3558
3495 intptr_t freed_bytes = 0; 3559 intptr_t freed_bytes = 0;
3560 int pages_swept = 0;
3496 intptr_t newspace_size = space->heap()->new_space()->Size(); 3561 intptr_t newspace_size = space->heap()->new_space()->Size();
3497 bool lazy_sweeping_active = false; 3562 bool lazy_sweeping_active = false;
3498 bool unused_page_present = false; 3563 bool unused_page_present = false;
3499 3564
3565 intptr_t old_space_size = heap()->PromotedSpaceSize();
3566 intptr_t space_left =
3567 Min(heap()->OldGenPromotionLimit(old_space_size),
3568 heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
3569
3500 while (it.has_next()) { 3570 while (it.has_next()) {
3501 Page* p = it.next(); 3571 Page* p = it.next();
3502 3572
3503 // Clear sweeping flags indicating that marking bits are still intact. 3573 // Clear sweeping flags indicating that marking bits are still intact.
3504 p->ClearSweptPrecisely(); 3574 p->ClearSweptPrecisely();
3505 p->ClearSweptConservatively(); 3575 p->ClearSweptConservatively();
3506 3576
3507 if (p->IsEvacuationCandidate()) { 3577 if (p->IsEvacuationCandidate()) {
3508 ASSERT(evacuation_candidates_.length() > 0); 3578 ASSERT(evacuation_candidates_.length() > 0);
3509 continue; 3579 continue;
(...skipping 18 matching lines...)
3528 if (FLAG_gc_verbose) { 3598 if (FLAG_gc_verbose) {
3529 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", 3599 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
3530 reinterpret_cast<intptr_t>(p)); 3600 reinterpret_cast<intptr_t>(p));
3531 } 3601 }
3532 space->ReleasePage(p); 3602 space->ReleasePage(p);
3533 continue; 3603 continue;
3534 } 3604 }
3535 unused_page_present = true; 3605 unused_page_present = true;
3536 } 3606 }
3537 3607
3538 if (FLAG_gc_verbose) {
3539 PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n",
3540 reinterpret_cast<intptr_t>(p),
3541 sweeper);
3542 }
3543
3544 switch (sweeper) { 3608 switch (sweeper) {
3545 case CONSERVATIVE: { 3609 case CONSERVATIVE: {
3610 if (FLAG_gc_verbose) {
3611 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
3612 reinterpret_cast<intptr_t>(p));
3613 }
3546 SweepConservatively(space, p); 3614 SweepConservatively(space, p);
3615 pages_swept++;
3547 break; 3616 break;
3548 } 3617 }
3549 case LAZY_CONSERVATIVE: { 3618 case LAZY_CONSERVATIVE: {
3619 if (FLAG_gc_verbose) {
3620 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
3621 reinterpret_cast<intptr_t>(p));
3622 }
3550 freed_bytes += SweepConservatively(space, p); 3623 freed_bytes += SweepConservatively(space, p);
3551 if (freed_bytes >= newspace_size && p != space->LastPage()) { 3624 pages_swept++;
3625 if (space_left + freed_bytes > newspace_size) {
3552 space->SetPagesToSweep(p->next_page()); 3626 space->SetPagesToSweep(p->next_page());
3553 lazy_sweeping_active = true; 3627 lazy_sweeping_active = true;
3628 } else {
3629 if (FLAG_gc_verbose) {
3630 PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n",
3631 freed_bytes);
3632 }
3554 } 3633 }
3555 break; 3634 break;
3556 } 3635 }
3557 case PRECISE: { 3636 case PRECISE: {
3637 if (FLAG_gc_verbose) {
3638 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
3639 reinterpret_cast<intptr_t>(p));
3640 }
3558 if (space->identity() == CODE_SPACE) { 3641 if (space->identity() == CODE_SPACE) {
3559 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL); 3642 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
3560 } else { 3643 } else {
3561 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL); 3644 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
3562 } 3645 }
3646 pages_swept++;
3563 break; 3647 break;
3564 } 3648 }
3565 default: { 3649 default: {
3566 UNREACHABLE(); 3650 UNREACHABLE();
3567 } 3651 }
3568 } 3652 }
3569 } 3653 }
3570 3654
3655 if (FLAG_gc_verbose) {
3656 PrintF("SweepSpace: %s (%d pages swept)\n",
3657 AllocationSpaceName(space->identity()),
3658 pages_swept);
3659 }
3660
3571 // Give pages that are queued to be freed back to the OS. 3661 // Give pages that are queued to be freed back to the OS.
3572 heap()->FreeQueuedChunks(); 3662 heap()->FreeQueuedChunks();
3573 } 3663 }
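
Note: the LAZY_CONSERVATIVE branch above implements the change named in the issue title. Instead of sweeping eagerly until freed_bytes alone covers the size of new space, the collector now also counts the old-generation headroom, i.e. the distance from the promoted size to the nearer of the promotion and allocation limits. Unless the heap is close to those limits, that headroom already exceeds new-space size after the first swept page, so only one page is swept eagerly and the remaining pages are queued for lazy sweeping. A minimal sketch of the stop condition with illustrative names; only the inequality mirrors the patch:

#include <algorithm>
#include <cstdint>

struct HeapLimits {
  int64_t promoted_size;     // heap->PromotedSpaceSize()
  int64_t promotion_limit;   // heap->OldGenPromotionLimit(promoted_size)
  int64_t allocation_limit;  // heap->OldGenAllocationLimit(promoted_size)
  int64_t new_space_size;    // heap->new_space()->Size()
};

// True once the old-generation headroom plus the bytes already swept free can
// absorb a full new space worth of promotions; the remaining pages can then
// be swept lazily, on demand.
bool EnoughSpaceToStopEagerSweeping(const HeapLimits& h, int64_t freed_bytes) {
  int64_t space_left =
      std::min(h.promotion_limit, h.allocation_limit) - h.promoted_size;
  return space_left + freed_bytes > h.new_space_size;
}

For example, with 8 MB of headroom and a 1 MB new space the condition already holds after the first swept page; when the heap is within 1 MB of its limits, sweeping keeps going until enough bytes have actually been freed.
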
3574 3664
3575 3665
3576 void MarkCompactCollector::SweepSpaces() { 3666 void MarkCompactCollector::SweepSpaces() {
3577 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 3667 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
3578 #ifdef DEBUG 3668 #ifdef DEBUG
3579 state_ = SWEEP_SPACES; 3669 state_ = SWEEP_SPACES;
3580 #endif 3670 #endif
3581 SweeperType how_to_sweep = 3671 SweeperType how_to_sweep =
3582 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; 3672 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
3583 if (sweep_precisely_) how_to_sweep = PRECISE; 3673 if (sweep_precisely_) how_to_sweep = PRECISE;
3584 // Noncompacting collections simply sweep the spaces to clear the mark 3674 // Noncompacting collections simply sweep the spaces to clear the mark
3585 // bits and free the nonlive blocks (for old and map spaces). We sweep 3675 // bits and free the nonlive blocks (for old and map spaces). We sweep
3586 // the map space last because freeing non-live maps overwrites them and 3676 // the map space last because freeing non-live maps overwrites them and
3587 // the other spaces rely on possibly non-live maps to get the sizes for 3677 // the other spaces rely on possibly non-live maps to get the sizes for
3588 // non-live objects. 3678 // non-live objects.
3589 SweepSpace(heap()->old_pointer_space(), how_to_sweep); 3679 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
3590 SweepSpace(heap()->old_data_space(), how_to_sweep); 3680 SweepSpace(heap()->old_data_space(), how_to_sweep);
3591 3681
3592 RemoveDeadInvalidatedCode(); 3682 RemoveDeadInvalidatedCode();
3593 SweepSpace(heap()->code_space(), PRECISE); 3683 SweepSpace(heap()->code_space(), PRECISE);
3594 3684
3595 SweepSpace(heap()->cell_space(), PRECISE); 3685 SweepSpace(heap()->cell_space(), PRECISE);
3596 3686
3597 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); 3687 EvacuateNewSpaceAndCandidates();
3598 EvacuateNewSpaceAndCandidates();
3599 }
3600 3688
3601 // ClearNonLiveTransitions depends on precise sweeping of map space to 3689 // ClearNonLiveTransitions depends on precise sweeping of map space to
3602 // detect whether unmarked map became dead in this collection or in one 3690 // detect whether unmarked map became dead in this collection or in one
3603 // of the previous ones. 3691 // of the previous ones.
3604 SweepSpace(heap()->map_space(), PRECISE); 3692 SweepSpace(heap()->map_space(), PRECISE);
3605 3693
3606 // Deallocate unmarked objects and clear marked bits for marked objects. 3694 // Deallocate unmarked objects and clear marked bits for marked objects.
3607 heap_->lo_space()->FreeUnmarkedObjects(); 3695 heap_->lo_space()->FreeUnmarkedObjects();
3608 } 3696 }
3609 3697
(...skipping 165 matching lines...)
3775 while (buffer != NULL) { 3863 while (buffer != NULL) {
3776 SlotsBuffer* next_buffer = buffer->next(); 3864 SlotsBuffer* next_buffer = buffer->next();
3777 DeallocateBuffer(buffer); 3865 DeallocateBuffer(buffer);
3778 buffer = next_buffer; 3866 buffer = next_buffer;
3779 } 3867 }
3780 *buffer_address = NULL; 3868 *buffer_address = NULL;
3781 } 3869 }
3782 3870
3783 3871
3784 } } // namespace v8::internal 3872 } } // namespace v8::internal