| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 277 matching lines...) |
| 288 tracer_ = NULL; | 288 tracer_ = NULL; |
| 289 } | 289 } |
| 290 | 290 |
| 291 | 291 |
| 292 #ifdef DEBUG | 292 #ifdef DEBUG |
| 293 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { | 293 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { |
| 294 PageIterator it(space); | 294 PageIterator it(space); |
| 295 | 295 |
| 296 while (it.has_next()) { | 296 while (it.has_next()) { |
| 297 Page* p = it.next(); | 297 Page* p = it.next(); |
| 298 ASSERT(p->markbits()->IsClean()); | 298 CHECK(p->markbits()->IsClean()); |
| | 299 CHECK_EQ(0, p->LiveBytes()); |
| 299 } | 300 } |
| 300 } | 301 } |
| 301 | 302 |
| 302 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) { | 303 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) { |
| 303 NewSpacePageIterator it(space->bottom(), space->top()); | 304 NewSpacePageIterator it(space->bottom(), space->top()); |
| 304 | 305 |
| 305 while (it.has_next()) { | 306 while (it.has_next()) { |
| 306 NewSpacePage* p = it.next(); | 307 NewSpacePage* p = it.next(); |
| 307 ASSERT(p->markbits()->IsClean()); | 308 CHECK(p->markbits()->IsClean()); |
| | 309 CHECK_EQ(0, p->LiveBytes()); |
| 308 } | 310 } |
| 309 } | 311 } |
| 310 | 312 |
| 311 void MarkCompactCollector::VerifyMarkbitsAreClean() { | 313 void MarkCompactCollector::VerifyMarkbitsAreClean() { |
| 312 VerifyMarkbitsAreClean(heap_->old_pointer_space()); | 314 VerifyMarkbitsAreClean(heap_->old_pointer_space()); |
| 313 VerifyMarkbitsAreClean(heap_->old_data_space()); | 315 VerifyMarkbitsAreClean(heap_->old_data_space()); |
| 314 VerifyMarkbitsAreClean(heap_->code_space()); | 316 VerifyMarkbitsAreClean(heap_->code_space()); |
| 315 VerifyMarkbitsAreClean(heap_->cell_space()); | 317 VerifyMarkbitsAreClean(heap_->cell_space()); |
| 316 VerifyMarkbitsAreClean(heap_->map_space()); | 318 VerifyMarkbitsAreClean(heap_->map_space()); |
| 317 VerifyMarkbitsAreClean(heap_->new_space()); | 319 VerifyMarkbitsAreClean(heap_->new_space()); |
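The verifier above encodes the invariant this patch is after: between collections every page's mark bitmap must be completely clear, and with the new CHECK_EQ the per-page live-bytes counter must agree with the bitmap (a clean bitmap implies zero live bytes). A minimal standalone sketch of that invariant check, using simplified stand-in types rather than the real V8 Page and bitmap classes:

```cpp
// Standalone sketch of the invariant the verifier enforces: a page whose mark
// bitmap is completely clear must also report zero live bytes. Page and
// MarkBitmap are simplified stand-ins, not the real V8 classes.
#include <array>
#include <cassert>
#include <cstdint>
#include <vector>

struct MarkBitmap {
  std::array<uint32_t, 32> cells{};  // one mark bit per pointer-sized word
  bool IsClean() const {
    for (uint32_t cell : cells) {
      if (cell != 0) return false;
    }
    return true;
  }
};

struct Page {
  MarkBitmap markbits;
  int live_bytes = 0;  // bumped whenever an object on this page is marked
  int LiveBytes() const { return live_bytes; }
};

void VerifyMarkbitsAreClean(const std::vector<Page>& space) {
  for (const Page& p : space) {
    assert(p.markbits.IsClean());  // no stale mark bits between collections
    assert(p.LiveBytes() == 0);    // counter must agree with the clean bitmap
  }
}
```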
| (...skipping 77 matching lines...) |
| 395 } | 397 } |
| 396 MarkBit old_mark_bit = MarkBitFrom(old_start); | 398 MarkBit old_mark_bit = MarkBitFrom(old_start); |
| 397 if (!old_mark_bit.Get()) { | 399 if (!old_mark_bit.Get()) { |
| 398 return false; | 400 return false; |
| 399 } | 401 } |
| 400 new_mark_bit.Set(); | 402 new_mark_bit.Set(); |
| 401 return true; | 403 return true; |
| 402 } | 404 } |
| 403 | 405 |
| 404 | 406 |
| 405 static const char* AllocationSpaceName(AllocationSpace space) { | 407 const char* AllocationSpaceName(AllocationSpace space) { |
| 406 switch (space) { | 408 switch (space) { |
| 407 case NEW_SPACE: return "NEW_SPACE"; | 409 case NEW_SPACE: return "NEW_SPACE"; |
| 408 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE"; | 410 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE"; |
| 409 case OLD_DATA_SPACE: return "OLD_DATA_SPACE"; | 411 case OLD_DATA_SPACE: return "OLD_DATA_SPACE"; |
| 410 case CODE_SPACE: return "CODE_SPACE"; | 412 case CODE_SPACE: return "CODE_SPACE"; |
| 411 case MAP_SPACE: return "MAP_SPACE"; | 413 case MAP_SPACE: return "MAP_SPACE"; |
| 412 case CELL_SPACE: return "CELL_SPACE"; | 414 case CELL_SPACE: return "CELL_SPACE"; |
| 413 case LO_SPACE: return "LO_SPACE"; | 415 case LO_SPACE: return "LO_SPACE"; |
| 414 default: | 416 default: |
| 415 UNREACHABLE(); | 417 UNREACHABLE(); |
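Dropping `static` from AllocationSpaceName gives the helper external linkage, so other translation units can declare and call it (the caller is not visible in this hunk). A compile-only usage sketch with a stand-in enum; the real declaration would live in a V8 header:

```cpp
// Illustrative only: stand-in enum and declaration; the definition stays in
// mark-compact.cc, so this snippet compiles but needs that object file to link.
#include <cstdio>

enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE,
                       CODE_SPACE, MAP_SPACE, CELL_SPACE, LO_SPACE };

const char* AllocationSpaceName(AllocationSpace space);  // now externally visible

void LogSweep(AllocationSpace space, int freed_bytes) {
  std::printf("swept %s, freed %d bytes\n",
              AllocationSpaceName(space), freed_bytes);
}
```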
| (...skipping 2105 matching lines...) |
| 2521 // migrate live objects and write forwarding addresses. This stage puts | 2523 // migrate live objects and write forwarding addresses. This stage puts |
| 2522 // new entries in the store buffer and may cause some pages to be marked | 2524 // new entries in the store buffer and may cause some pages to be marked |
| 2523 // scan-on-scavenge. | 2525 // scan-on-scavenge. |
| 2524 SemiSpaceIterator from_it(from_bottom, from_top); | 2526 SemiSpaceIterator from_it(from_bottom, from_top); |
| 2525 for (HeapObject* object = from_it.Next(); | 2527 for (HeapObject* object = from_it.Next(); |
| 2526 object != NULL; | 2528 object != NULL; |
| 2527 object = from_it.Next()) { | 2529 object = from_it.Next()) { |
| 2528 MarkBit mark_bit = Marking::MarkBitFrom(object); | 2530 MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 2529 if (mark_bit.Get()) { | 2531 if (mark_bit.Get()) { |
| 2530 mark_bit.Clear(); | 2532 mark_bit.Clear(); |
| | 2533 // Don't bother decrementing live bytes count. We'll discard the |
| | 2534 // entire page at the end. |
| 2531 int size = object->Size(); | 2535 int size = object->Size(); |
| 2532 survivors_size += size; | 2536 survivors_size += size; |
| 2533 MemoryChunk::IncrementLiveBytes(object->address(), -size); | |
| 2534 | 2537 |
| 2535 // Aggressively promote young survivors to the old space. | 2538 // Aggressively promote young survivors to the old space. |
| 2536 if (TryPromoteObject(object, size)) { | 2539 if (TryPromoteObject(object, size)) { |
| 2537 continue; | 2540 continue; |
| 2538 } | 2541 } |
| 2539 | 2542 |
| 2540 // Promotion failed. Just migrate object to another semispace. | 2543 // Promotion failed. Just migrate object to another semispace. |
| 2541 MaybeObject* allocation = new_space->AllocateRaw(size); | 2544 MaybeObject* allocation = new_space->AllocateRaw(size); |
| 2542 if (allocation->IsFailure()) { | 2545 if (allocation->IsFailure()) { |
| 2543 if (!new_space->AddFreshPage()) { | 2546 if (!new_space->AddFreshPage()) { |
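The deleted IncrementLiveBytes(-size) call and the new comment describe the accounting scheme this change adopts throughout the file: instead of decrementing the per-page live-bytes counter for every evacuated object, the counter is left untouched and wiped in one step (the ResetLiveBytes() calls in the later hunks) once the page has been fully evacuated or swept. A standalone sketch of the idea, with simplified stand-in types:

```cpp
// Standalone sketch of page-level live-bytes accounting during evacuation;
// HeapObject and Page are simplified stand-ins, not the real V8 classes.
#include <vector>

struct HeapObject { int size; bool marked; };

struct Page {
  std::vector<HeapObject> objects;
  int live_bytes = 0;               // sum of sizes of marked objects
  void ResetLiveBytes() { live_bytes = 0; }
};

// Evacuate every marked object off `from` and return the survivor byte count.
// There is deliberately no per-object `from.live_bytes -= obj.size`; the
// counter is reset once, after the page has been emptied.
int EvacuatePage(Page& from, Page& to) {
  int survivors_size = 0;
  for (HeapObject& obj : from.objects) {
    if (!obj.marked) continue;
    obj.marked = false;             // clear the mark, as in the loop above
    survivors_size += obj.size;
    to.objects.push_back(obj);      // stand-in for promotion/migration
    to.live_bytes += obj.size;
  }
  from.objects.clear();
  from.ResetLiveBytes();            // one wholesale reset instead of N decrements
  return survivors_size;
}
```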
| (...skipping 70 matching lines...) |
| 2614 MigrateObject(HeapObject::cast(target_object)->address(), | 2617 MigrateObject(HeapObject::cast(target_object)->address(), |
| 2615 object_addr, | 2618 object_addr, |
| 2616 size, | 2619 size, |
| 2617 space->identity()); | 2620 space->identity()); |
| 2618 ASSERT(object->map_word().IsForwardingAddress()); | 2621 ASSERT(object->map_word().IsForwardingAddress()); |
| 2619 } | 2622 } |
| 2620 | 2623 |
| 2621 // Clear marking bits for current cell. | 2624 // Clear marking bits for current cell. |
| 2622 cells[cell_index] = 0; | 2625 cells[cell_index] = 0; |
| 2623 } | 2626 } |
| | 2627 p->ResetLiveBytes(); |
| 2624 } | 2628 } |
| 2625 | 2629 |
| 2626 | 2630 |
| 2627 void MarkCompactCollector::EvacuatePages() { | 2631 void MarkCompactCollector::EvacuatePages() { |
| 2628 int npages = evacuation_candidates_.length(); | 2632 int npages = evacuation_candidates_.length(); |
| 2629 for (int i = 0; i < npages; i++) { | 2633 for (int i = 0; i < npages; i++) { |
| 2630 Page* p = evacuation_candidates_[i]; | 2634 Page* p = evacuation_candidates_[i]; |
| 2631 ASSERT(p->IsEvacuationCandidate() || | 2635 ASSERT(p->IsEvacuationCandidate() || |
| 2632 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 2636 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 2633 if (p->IsEvacuationCandidate()) { | 2637 if (p->IsEvacuationCandidate()) { |
| (...skipping 178 matching lines...) |
| 2812 } | 2816 } |
| 2813 } | 2817 } |
| 2814 free_start = free_end + size; | 2818 free_start = free_end + size; |
| 2815 } | 2819 } |
| 2816 // Clear marking bits for current cell. | 2820 // Clear marking bits for current cell. |
| 2817 cells[cell_index] = 0; | 2821 cells[cell_index] = 0; |
| 2818 } | 2822 } |
| 2819 if (free_start != p->ObjectAreaEnd()) { | 2823 if (free_start != p->ObjectAreaEnd()) { |
| 2820 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); | 2824 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); |
| 2821 } | 2825 } |
| | 2826 p->ResetLiveBytes(); |
| 2822 } | 2827 } |
| 2823 | 2828 |
| 2824 | 2829 |
| 2825 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 2830 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
| 2826 EvacuateNewSpace(); | 2831 EvacuateNewSpace(); |
| 2827 EvacuatePages(); | 2832 EvacuatePages(); |
| 2828 | 2833 |
| 2829 // Second pass: find pointers to new space and update them. | 2834 // Second pass: find pointers to new space and update them. |
| 2830 PointersUpdatingVisitor updating_visitor(heap()); | 2835 PointersUpdatingVisitor updating_visitor(heap()); |
| 2831 | 2836 |
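The evacuation hunks above follow the usual two-phase scheme: phase one copies each live object and stores a forwarding address in the old copy (hence the ASSERT on map_word().IsForwardingAddress()), and phase two, the "second pass", walks slots and rewrites any pointer that still targets an evacuated object. A standalone sketch of that pattern, with a plain pointer field standing in for V8's map-word encoding:

```cpp
// Standalone sketch of evacuate-then-update; Obj and its `forwarding` field
// are simplified stand-ins for V8's heap objects and map-word encoding.
#include <vector>

struct Obj {
  Obj* forwarding = nullptr;  // set once the object has been migrated
  int payload = 0;
};

// Phase 1: copy every live object off an evacuation candidate and leave a
// forwarding pointer behind in the old location.
std::vector<Obj*> EvacuateAll(const std::vector<Obj*>& candidates) {
  std::vector<Obj*> copies;
  for (Obj* old_obj : candidates) {
    Obj* copy = new Obj();
    copy->payload = old_obj->payload;
    old_obj->forwarding = copy;
    copies.push_back(copy);
  }
  return copies;
}

// Phase 2 ("second pass"): rewrite a slot that may still point at an
// evacuated object so it targets the new copy.
void UpdateSlot(Obj** slot) {
  if (*slot != nullptr && (*slot)->forwarding != nullptr) {
    *slot = (*slot)->forwarding;
  }
}
```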
| (...skipping 469 matching lines...) |
| 3301 // Skip over all the dead objects at the start of the page and mark them free. | 3306 // Skip over all the dead objects at the start of the page and mark them free. |
| 3302 for (cell_index = Page::kFirstUsedCell; | 3307 for (cell_index = Page::kFirstUsedCell; |
| 3303 cell_index < last_cell_index; | 3308 cell_index < last_cell_index; |
| 3304 cell_index++, block_address += 32 * kPointerSize) { | 3309 cell_index++, block_address += 32 * kPointerSize) { |
| 3305 if (cells[cell_index] != 0) break; | 3310 if (cells[cell_index] != 0) break; |
| 3306 } | 3311 } |
| 3307 size_t size = block_address - p->ObjectAreaStart(); | 3312 size_t size = block_address - p->ObjectAreaStart(); |
| 3308 if (cell_index == last_cell_index) { | 3313 if (cell_index == last_cell_index) { |
| 3309 freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(), | 3314 freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(), |
| 3310 static_cast<int>(size))); | 3315 static_cast<int>(size))); |
| | 3316 ASSERT_EQ(0, p->LiveBytes()); |
| 3311 return freed_bytes; | 3317 return freed_bytes; |
| 3312 } | 3318 } |
| 3313 // Grow the size of the start-of-page free space a little to get up to the | 3319 // Grow the size of the start-of-page free space a little to get up to the |
| 3314 // first live object. | 3320 // first live object. |
| 3315 Address free_end = StartOfLiveObject(block_address, cells[cell_index]); | 3321 Address free_end = StartOfLiveObject(block_address, cells[cell_index]); |
| 3316 // Free the first free space. | 3322 // Free the first free space. |
| 3317 size = free_end - p->ObjectAreaStart(); | 3323 size = free_end - p->ObjectAreaStart(); |
| 3318 freed_bytes += space->Free(p->ObjectAreaStart(), | 3324 freed_bytes += space->Free(p->ObjectAreaStart(), |
| 3319 static_cast<int>(size)); | 3325 static_cast<int>(size)); |
| 3320 // The start of the current free area is represented in undigested form by | 3326 // The start of the current free area is represented in undigested form by |
| (...skipping 36 matching lines...) |
| 3357 } | 3363 } |
| 3358 } | 3364 } |
| 3359 | 3365 |
| 3360 // Handle the free space at the end of the page. | 3366 // Handle the free space at the end of the page. |
| 3361 if (block_address - free_start > 32 * kPointerSize) { | 3367 if (block_address - free_start > 32 * kPointerSize) { |
| 3362 free_start = DigestFreeStart(free_start, free_start_cell); | 3368 free_start = DigestFreeStart(free_start, free_start_cell); |
| 3363 freed_bytes += space->Free(free_start, | 3369 freed_bytes += space->Free(free_start, |
| 3364 static_cast<int>(block_address - free_start)); | 3370 static_cast<int>(block_address - free_start)); |
| 3365 } | 3371 } |
| 3366 | 3372 |
| | 3373 p->ResetLiveBytes(); |
| 3367 return freed_bytes; | 3374 return freed_bytes; |
| 3368 } | 3375 } |
| 3369 | 3376 |
| 3370 | 3377 |
| 3371 void MarkCompactCollector::SweepSpace(PagedSpace* space, | 3378 void MarkCompactCollector::SweepSpace(PagedSpace* space, |
| 3372 SweeperType sweeper) { | 3379 SweeperType sweeper) { |
| 3373 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || | 3380 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || |
| 3374 sweeper == LAZY_CONSERVATIVE); | 3381 sweeper == LAZY_CONSERVATIVE); |
| 3375 | 3382 |
| 3376 space->ClearStats(); | 3383 space->ClearStats(); |
| (...skipping 240 matching lines...) |
| 3617 while (buffer != NULL) { | 3624 while (buffer != NULL) { |
| 3618 SlotsBuffer* next_buffer = buffer->next(); | 3625 SlotsBuffer* next_buffer = buffer->next(); |
| 3619 DeallocateBuffer(buffer); | 3626 DeallocateBuffer(buffer); |
| 3620 buffer = next_buffer; | 3627 buffer = next_buffer; |
| 3621 } | 3628 } |
| 3622 *buffer_address = NULL; | 3629 *buffer_address = NULL; |
| 3623 } | 3630 } |
| 3624 | 3631 |
| 3625 | 3632 |
| 3626 } } // namespace v8::internal | 3633 } } // namespace v8::internal |
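The teardown at the end is the standard walk-and-free for a singly linked buffer chain: capture next() before deallocating the current node, then clear the owner's head pointer. A standalone sketch with a minimal stand-in for SlotsBuffer:

```cpp
// Standalone sketch of the slots-buffer chain teardown; SlotsBuffer here is a
// minimal stand-in, not the real V8 class, and `delete` stands in for
// DeallocateBuffer().
struct SlotsBuffer {
  SlotsBuffer* next = nullptr;
};

void DeallocateChain(SlotsBuffer** buffer_address) {
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != nullptr) {
    SlotsBuffer* next_buffer = buffer->next;  // read the link before freeing
    delete buffer;
    buffer = next_buffer;
  }
  *buffer_address = nullptr;  // leave the owner with an empty chain
}
```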