Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2592 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2603 MapWord map_word = HeapObject::cast(obj)->map_word(); | 2603 MapWord map_word = HeapObject::cast(obj)->map_word(); |
| 2604 if (map_word.IsForwardingAddress()) { | 2604 if (map_word.IsForwardingAddress()) { |
| 2605 *slot = map_word.ToForwardingAddress(); | 2605 *slot = map_word.ToForwardingAddress(); |
| 2606 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot)); | 2606 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot)); |
| 2607 } | 2607 } |
| 2608 } | 2608 } |
| 2609 } | 2609 } |
| 2610 } | 2610 } |
| 2611 | 2611 |
| 2612 | 2612 |
| 2613 static void UpdateSlotsOnPage(Page* p, ObjectVisitor* visitor) { | 2613 // Sweep a space precisely. After this has been done the space can |
| 2614 // TODO(gc) this is basically clone of SweepPrecisely | 2614 // be iterated precisely, hitting only the live objects. Code space |
| 2615 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 2615 // is always swept precisely because we want to be able to iterate |
|
Vyacheslav Egorov (Chromium)
2011/08/22 17:00:37
I prefer enums to booleans; they make callsites clearer.
Michael Starzinger
2011/08/23 07:48:48
Done.
| |
| 2616 // over it. Map space is swept precisely, because it is not compacted. | |
| 2617 // Slots in live objects pointing into evacuation candidates are updated | |
| 2618 // if requested. | |
| 2619 static void SweepPrecisely(PagedSpace* space, Page* p, bool update_slots) { | |
| 2620 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); | |
| 2616 MarkBit::CellType* cells = p->markbits()->cells(); | 2621 MarkBit::CellType* cells = p->markbits()->cells(); |
| 2617 | 2622 |
| 2618 p->ClearFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY); | 2623 p->ClearFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY); |
| 2619 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | |
| 2620 p->MarkSwept(); | 2624 p->MarkSwept(); |
| 2621 | 2625 |
| 2622 int last_cell_index = | 2626 int last_cell_index = |
| 2623 Bitmap::IndexToCell( | 2627 Bitmap::IndexToCell( |
| 2624 Bitmap::CellAlignIndex( | 2628 Bitmap::CellAlignIndex( |
| 2625 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); | 2629 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
| 2626 | 2630 |
| 2627 int cell_index = Page::kFirstUsedCell; | 2631 int cell_index = Page::kFirstUsedCell; |
| 2628 Address free_start = p->ObjectAreaStart(); | 2632 Address free_start = p->ObjectAreaStart(); |
| 2629 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 2633 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 2640 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); | 2644 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); |
| 2641 int live_index = 0; | 2645 int live_index = 0; |
| 2642 for ( ; live_objects != 0; live_objects--) { | 2646 for ( ; live_objects != 0; live_objects--) { |
| 2643 Address free_end = object_address + offsets[live_index++] * kPointerSize; | 2647 Address free_end = object_address + offsets[live_index++] * kPointerSize; |
| 2644 if (free_end != free_start) { | 2648 if (free_end != free_start) { |
| 2645 space->Free(free_start, static_cast<int>(free_end - free_start)); | 2649 space->Free(free_start, static_cast<int>(free_end - free_start)); |
| 2646 } | 2650 } |
| 2647 HeapObject* live_object = HeapObject::FromAddress(free_end); | 2651 HeapObject* live_object = HeapObject::FromAddress(free_end); |
| 2648 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); | 2652 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); |
| 2649 int size = live_object->Size(); | 2653 int size = live_object->Size(); |
| 2650 UpdateSlotsInRange(HeapObject::RawField(live_object, kPointerSize), | 2654 if (update_slots) { |
| 2651 HeapObject::RawField(live_object, size)); | 2655 UpdateSlotsInRange(HeapObject::RawField(live_object, kPointerSize), |
| 2656 HeapObject::RawField(live_object, size)); | |
| 2657 } | |
| 2652 free_start = free_end + size; | 2658 free_start = free_end + size; |
| 2653 } | 2659 } |
| 2654 } | 2660 } |
| 2655 if (free_start != p->ObjectAreaEnd()) { | 2661 if (free_start != p->ObjectAreaEnd()) { |
| 2656 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); | 2662 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); |
| 2657 } | 2663 } |
| 2658 } | 2664 } |
| 2659 | 2665 |
| 2660 | 2666 |
| 2661 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 2667 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2701 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 2707 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 2702 | 2708 |
| 2703 if (p->IsEvacuationCandidate()) { | 2709 if (p->IsEvacuationCandidate()) { |
| 2704 SlotsBuffer::UpdateSlotsRecordedIn(p->slots_buffer()); | 2710 SlotsBuffer::UpdateSlotsRecordedIn(p->slots_buffer()); |
| 2705 if (FLAG_trace_fragmentation) { | 2711 if (FLAG_trace_fragmentation) { |
| 2706 PrintF(" page %p slots buffer: %d\n", | 2712 PrintF(" page %p slots buffer: %d\n", |
| 2707 reinterpret_cast<void*>(p), | 2713 reinterpret_cast<void*>(p), |
| 2708 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 2714 SlotsBuffer::SizeOfChain(p->slots_buffer())); |
| 2709 } | 2715 } |
| 2710 } else { | 2716 } else { |
| 2711 UpdateSlotsOnPage(p, &updating_visitor); | 2717 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 2718 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | |
| 2719 SweepPrecisely(space, p, true); | |
| 2712 } | 2720 } |
| 2713 } | 2721 } |
| 2714 | 2722 |
| 2715 // Update pointers from cells. | 2723 // Update pointers from cells. |
| 2716 HeapObjectIterator cell_iterator(heap_->cell_space()); | 2724 HeapObjectIterator cell_iterator(heap_->cell_space()); |
| 2717 for (HeapObject* cell = cell_iterator.Next(); | 2725 for (HeapObject* cell = cell_iterator.Next(); |
| 2718 cell != NULL; | 2726 cell != NULL; |
| 2719 cell = cell_iterator.Next()) { | 2727 cell = cell_iterator.Next()) { |
| 2720 if (cell->IsJSGlobalPropertyCell()) { | 2728 if (cell->IsJSGlobalPropertyCell()) { |
| 2721 Address value_address = | 2729 Address value_address = |
| (...skipping 445 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3167 if (block_address - free_start > 32 * kPointerSize) { | 3175 if (block_address - free_start > 32 * kPointerSize) { |
| 3168 free_start = DigestFreeStart(free_start, free_start_cell); | 3176 free_start = DigestFreeStart(free_start, free_start_cell); |
| 3169 freed_bytes += space->Free(free_start, | 3177 freed_bytes += space->Free(free_start, |
| 3170 static_cast<int>(block_address - free_start)); | 3178 static_cast<int>(block_address - free_start)); |
| 3171 } | 3179 } |
| 3172 | 3180 |
| 3173 return freed_bytes; | 3181 return freed_bytes; |
| 3174 } | 3182 } |
| 3175 | 3183 |
| 3176 | 3184 |
| 3177 // Sweep a space precisely. After this has been done the space can | |
| 3178 // be iterated precisely, hitting only the live objects. Code space | |
| 3179 // is always swept precisely because we want to be able to iterate | |
| 3180 // over it. Map space is swept precisely, because it is not compacted. | |
| 3181 static void SweepPrecisely(PagedSpace* space, | |
| 3182 Page* p) { | |
| 3183 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); | |
| 3184 MarkBit::CellType* cells = p->markbits()->cells(); | |
| 3185 | |
| 3186 p->ClearFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY); | |
| 3187 | |
| 3188 int last_cell_index = | |
| 3189 Bitmap::IndexToCell( | |
| 3190 Bitmap::CellAlignIndex( | |
| 3191 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); | |
| 3192 | |
| 3193 int cell_index = Page::kFirstUsedCell; | |
| 3194 Address free_start = p->ObjectAreaStart(); | |
| 3195 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | |
| 3196 Address object_address = p->ObjectAreaStart(); | |
| 3197 int offsets[16]; | |
| 3198 | |
| 3199 for (cell_index = Page::kFirstUsedCell; | |
| 3200 cell_index < last_cell_index; | |
| 3201 cell_index++, object_address += 32 * kPointerSize) { | |
| 3202 ASSERT((unsigned)cell_index == | |
| 3203 Bitmap::IndexToCell( | |
| 3204 Bitmap::CellAlignIndex( | |
| 3205 p->AddressToMarkbitIndex(object_address)))); | |
| 3206 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); | |
| 3207 int live_index = 0; | |
| 3208 for ( ; live_objects != 0; live_objects--) { | |
| 3209 Address free_end = object_address + offsets[live_index++] * kPointerSize; | |
| 3210 if (free_end != free_start) { | |
| 3211 space->Free(free_start, static_cast<int>(free_end - free_start)); | |
| 3212 } | |
| 3213 HeapObject* live_object = HeapObject::FromAddress(free_end); | |
| 3214 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); | |
| 3215 free_start = free_end + live_object->Size(); | |
| 3216 } | |
| 3217 } | |
| 3218 if (free_start != p->ObjectAreaEnd()) { | |
| 3219 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); | |
| 3220 } | |
| 3221 } | |
| 3222 | |
| 3223 | |
| 3224 void MarkCompactCollector::SweepSpace(PagedSpace* space, | 3185 void MarkCompactCollector::SweepSpace(PagedSpace* space, |
| 3225 SweeperType sweeper) { | 3186 SweeperType sweeper) { |
| 3226 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || | 3187 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || |
| 3227 sweeper == LAZY_CONSERVATIVE); | 3188 sweeper == LAZY_CONSERVATIVE); |
| 3228 | 3189 |
| 3229 space->ClearStats(); | 3190 space->ClearStats(); |
| 3230 | 3191 |
| 3231 PageIterator it(space); | 3192 PageIterator it(space); |
| 3232 | 3193 |
| 3233 intptr_t freed_bytes = 0; | 3194 intptr_t freed_bytes = 0; |
| (...skipping 21 matching lines...) Expand all Loading... | |
| 3255 Page* next_page = p->next_page(); | 3216 Page* next_page = p->next_page(); |
| 3256 freed_bytes += SweepConservatively(space, p); | 3217 freed_bytes += SweepConservatively(space, p); |
| 3257 // TODO(gc): tweak the heuristic. | 3218 // TODO(gc): tweak the heuristic. |
| 3258 if (freed_bytes >= newspace_size && p != space->LastPage()) { | 3219 if (freed_bytes >= newspace_size && p != space->LastPage()) { |
| 3259 space->SetPagesToSweep(next_page, space->LastPage()); | 3220 space->SetPagesToSweep(next_page, space->LastPage()); |
| 3260 return; | 3221 return; |
| 3261 } | 3222 } |
| 3262 break; | 3223 break; |
| 3263 } | 3224 } |
| 3264 case PRECISE: { | 3225 case PRECISE: { |
| 3265 SweepPrecisely(space, p); | 3226 SweepPrecisely(space, p, false); |
| 3266 break; | 3227 break; |
| 3267 } | 3228 } |
| 3268 default: { | 3229 default: { |
| 3269 UNREACHABLE(); | 3230 UNREACHABLE(); |
| 3270 } | 3231 } |
| 3271 } | 3232 } |
| 3272 } | 3233 } |
| 3273 | 3234 |
| 3274 // TODO(gc): set up allocation top and limit using the free list. | 3235 // TODO(gc): set up allocation top and limit using the free list. |
| 3275 } | 3236 } |
| (...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3427 while (buffer != NULL) { | 3388 while (buffer != NULL) { |
| 3428 SlotsBuffer* next_buffer = buffer->next(); | 3389 SlotsBuffer* next_buffer = buffer->next(); |
| 3429 DeallocateBuffer(buffer); | 3390 DeallocateBuffer(buffer); |
| 3430 buffer = next_buffer; | 3391 buffer = next_buffer; |
| 3431 } | 3392 } |
| 3432 *buffer_address = NULL; | 3393 *buffer_address = NULL; |
| 3433 } | 3394 } |
| 3434 | 3395 |
| 3435 | 3396 |
| 3436 } } // namespace v8::internal | 3397 } } // namespace v8::internal |
| OLD | NEW |