Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
| (...skipping 182 matching lines...) | |
| 193 while (current < limit) { | 193 while (current < limit) { |
| 194 HeapObject* object = HeapObject::FromAddress(current); | 194 HeapObject* object = HeapObject::FromAddress(current); |
| 195 object->Iterate(&visitor); | 195 object->Iterate(&visitor); |
| 196 current += object->Size(); | 196 current += object->Size(); |
| 197 } | 197 } |
| 198 } | 198 } |
| 199 } | 199 } |
| 200 | 200 |
| 201 | 201 |
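The loop above is the canonical linear page walk: it only works if every address range on the page carries a valid `Size()`, which is exactly what this CL's move to always-precise sweeping guarantees, since free gaps become iterable free-space objects. A minimal standalone sketch of that invariant, using hypothetical `Record`/`WalkPage` names rather than V8's types:

```cpp
// A minimal sketch, not V8 code: a "page" of length-prefixed records in
// which free gaps are themselves records, so a bump walk never derails.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct Record {
  uint32_t size;  // total size of this record in bytes, header included
  bool live;      // false models a free-space filler object
};

void WalkPage(const uint8_t* start, const uint8_t* limit) {
  const uint8_t* current = start;
  while (current < limit) {
    const Record* r = reinterpret_cast<const Record*>(current);
    std::printf("record at +%td, size %u, %s\n", current - start, r->size,
                r->live ? "live" : "free");
    current += r->size;  // advance by object size, as in the loop above
  }
}

int main() {
  // Fake page layout: live(16) | free(32) | live(16).
  std::vector<uint8_t> page(64);
  auto put = [&](size_t off, uint32_t size, bool live) {
    *reinterpret_cast<Record*>(&page[off]) = Record{size, live};
  };
  put(0, 16, true);
  put(16, 32, false);
  put(48, 16, true);
  WalkPage(page.data(), page.data() + page.size());
  return 0;
}
```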
| 202 static void VerifyEvacuation(Heap* heap, PagedSpace* space) { | 202 static void VerifyEvacuation(Heap* heap, PagedSpace* space) { |
| 203 if (!space->swept_precisely()) return; | |
| 204 if (FLAG_use_allocation_folding && | 203 if (FLAG_use_allocation_folding && |
| 205 (space == heap->old_pointer_space() || space == heap->old_data_space())) { | 204 (space == heap->old_pointer_space() || space == heap->old_data_space())) { |
| 206 return; | 205 return; |
| 207 } | 206 } |
| 208 PageIterator it(space); | 207 PageIterator it(space); |
| 209 | 208 |
| 210 while (it.has_next()) { | 209 while (it.has_next()) { |
| 211 Page* p = it.next(); | 210 Page* p = it.next(); |
| 212 if (p->IsEvacuationCandidate()) continue; | 211 if (p->IsEvacuationCandidate()) continue; |
| 213 VerifyEvacuation(p); | 212 VerifyEvacuation(p); |
| (...skipping 2905 matching lines...) | |
| 3119 | 3118 |
| 3120 heap_->IncrementYoungSurvivorsCounter(survivors_size); | 3119 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
| 3121 new_space->set_age_mark(new_space->top()); | 3120 new_space->set_age_mark(new_space->top()); |
| 3122 } | 3121 } |
| 3123 | 3122 |
| 3124 | 3123 |
| 3125 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { | 3124 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { |
| 3126 AlwaysAllocateScope always_allocate(isolate()); | 3125 AlwaysAllocateScope always_allocate(isolate()); |
| 3127 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3126 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3128 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); | 3127 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3129 p->MarkSweptPrecisely(); | 3128 p->SetWasSwept(); |
| 3130 | 3129 |
| 3131 int offsets[16]; | 3130 int offsets[16]; |
| 3132 | 3131 |
| 3133 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { | 3132 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
| 3134 Address cell_base = it.CurrentCellBase(); | 3133 Address cell_base = it.CurrentCellBase(); |
| 3135 MarkBit::CellType* cell = it.CurrentCell(); | 3134 MarkBit::CellType* cell = it.CurrentCell(); |
| 3136 | 3135 |
| 3137 if (*cell == 0) continue; | 3136 if (*cell == 0) continue; |
| 3138 | 3137 |
| 3139 int live_objects = MarkWordToObjectStarts(*cell, offsets); | 3138 int live_objects = MarkWordToObjectStarts(*cell, offsets); |
| (...skipping 154 matching lines...) | |
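`EvacuateLiveObjectsFromPage` walks the marking bitmap one 32-bit cell at a time: each set bit marks an object start at `cell_base` plus the bit index times the word size, and `MarkWordToObjectStarts` expands a cell into those offsets. A toy decoder under that assumption (one mark bit per pointer-size word; the names are illustrative, not V8 API):

```cpp
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPointerSize = 8;  // stand-in for V8's kPointerSize

// A set bit i in a 32-bit mark cell means an object starts at
// cell_base + i * kPointerSize within the page.
int CellToObjectStarts(uintptr_t cell_base, uint32_t cell,
                       uintptr_t starts[32]) {
  int n = 0;
  for (int i = 0; i < 32; i++)
    if (cell & (1u << i)) starts[n++] = cell_base + i * kPointerSize;
  return n;
}

int main() {
  uintptr_t starts[32];
  // Bits 0, 3 and 10 set: three live objects in this 32-word span.
  int n = CellToObjectStarts(0x1000, (1u << 0) | (1u << 3) | (1u << 10),
                             starts);
  for (int i = 0; i < n; i++)
    std::printf("object at %#lx\n", static_cast<unsigned long>(starts[i]));
  return 0;
}
```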
| 3294 // be iterated precisely, hitting only the live objects. Code space | 3293 // be iterated precisely, hitting only the live objects. Code space |
| 3295 // is always swept precisely because we want to be able to iterate | 3294 // is always swept precisely because we want to be able to iterate |
| 3296 // over it. Map space is swept precisely, because it is not compacted. | 3295 // over it. Map space is swept precisely, because it is not compacted. |
| 3297 // Slots in live objects pointing into evacuation candidates are updated | 3296 // Slots in live objects pointing into evacuation candidates are updated |
| 3298 // if requested. | 3297 // if requested. |
| 3299 // Returns the size of the biggest continuous freed memory chunk in bytes. | 3298 // Returns the size of the biggest continuous freed memory chunk in bytes. |
| 3300 template <SweepingMode sweeping_mode, | 3299 template <SweepingMode sweeping_mode, |
| 3301 MarkCompactCollector::SweepingParallelism parallelism, | 3300 MarkCompactCollector::SweepingParallelism parallelism, |
| 3302 SkipListRebuildingMode skip_list_mode, | 3301 SkipListRebuildingMode skip_list_mode, |
| 3303 FreeSpaceTreatmentMode free_space_mode> | 3302 FreeSpaceTreatmentMode free_space_mode> |
| 3304 static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p, | 3303 static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p, |
| **Jarin** (2014/08/21 08:29:12): How about renaming to just "Sweep"? | |
| **Hannes Payer (out of office)** (2014/08/21 09:03:17): Done. | |
| 3305 ObjectVisitor* v) { | 3304 ObjectVisitor* v) { |
| 3306 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); | 3305 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3307 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, | 3306 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, |
| 3308 space->identity() == CODE_SPACE); | 3307 space->identity() == CODE_SPACE); |
| 3309 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | 3308 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
| 3310 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || | 3309 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || |
| 3311 sweeping_mode == SWEEP_ONLY); | 3310 sweeping_mode == SWEEP_ONLY); |
| 3312 | 3311 |
| 3313 Address free_start = p->area_start(); | 3312 Address free_start = p->area_start(); |
| 3314 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 3313 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| (...skipping 62 matching lines...) | |
| 3377 } | 3376 } |
| 3378 #endif | 3377 #endif |
| 3379 } | 3378 } |
| 3380 p->ResetLiveBytes(); | 3379 p->ResetLiveBytes(); |
| 3381 | 3380 |
| 3382 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) { | 3381 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) { |
| 3383 // When concurrent sweeping is active, the page will be marked after | 3382 // When concurrent sweeping is active, the page will be marked after |
| 3384 // sweeping by the main thread. | 3383 // sweeping by the main thread. |
| 3385 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); | 3384 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); |
| 3386 } else { | 3385 } else { |
| 3387 p->MarkSweptPrecisely(); | 3386 p->SetWasSwept(); |
| 3388 } | 3387 } |
| 3389 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3388 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
| 3390 } | 3389 } |
| 3391 | 3390 |
| 3392 | 3391 |
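Per the comment above, `SweepPrecisely`'s contract is to free every gap between live objects and report the largest chunk made allocatable. A simplified sketch of that bookkeeping, taking live objects as sorted (start, size) pairs instead of reading a marking bitmap; all names are stand-ins:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct Live { size_t start, size; };

// Frees every gap between live objects and returns the largest freed
// chunk, mirroring SweepPrecisely's return value.
size_t SweepPage(size_t area_start, size_t area_end,
                 const std::vector<Live>& live,
                 std::vector<std::pair<size_t, size_t>>* free_list) {
  size_t free_start = area_start;
  size_t max_freed = 0;
  for (const Live& obj : live) {
    if (obj.start > free_start) {       // gap before this object
      size_t freed = obj.start - free_start;
      free_list->push_back({free_start, freed});
      max_freed = std::max(max_freed, freed);
    }
    free_start = obj.start + obj.size;  // next gap starts after the object
  }
  if (area_end > free_start) {          // tail gap up to the area end
    size_t freed = area_end - free_start;
    free_list->push_back({free_start, freed});
    max_freed = std::max(max_freed, freed);
  }
  return max_freed;
}

int main() {
  std::vector<std::pair<size_t, size_t>> fl;
  size_t max_freed = SweepPage(0, 256, {{32, 16}, {64, 32}}, &fl);
  std::printf("max freed: %zu bytes in %zu chunks\n", max_freed, fl.size());
  return 0;
}
```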
| 3393 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { | 3392 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { |
| 3394 Page* p = Page::FromAddress(code->address()); | 3393 Page* p = Page::FromAddress(code->address()); |
| 3395 | 3394 |
| 3396 if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | 3395 if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| 3397 return false; | 3396 return false; |
| (...skipping 216 matching lines...) | |
| 3614 } else { | 3613 } else { |
| 3615 if (FLAG_gc_verbose) { | 3614 if (FLAG_gc_verbose) { |
| 3616 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", | 3615 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
| 3617 reinterpret_cast<intptr_t>(p)); | 3616 reinterpret_cast<intptr_t>(p)); |
| 3618 } | 3617 } |
| 3619 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3618 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3620 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 3619 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
| 3621 | 3620 |
| 3622 switch (space->identity()) { | 3621 switch (space->identity()) { |
| 3623 case OLD_DATA_SPACE: | 3622 case OLD_DATA_SPACE: |
| 3624 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); | 3623 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| | 3624 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( |
| | 3625 space, NULL, p, &updating_visitor); |
| 3625 break; | 3626 break; |
| 3626 case OLD_POINTER_SPACE: | 3627 case OLD_POINTER_SPACE: |
| 3627 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3628 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3628 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( | 3629 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( |
| 3629 space, NULL, p, &updating_visitor); | 3630 space, NULL, p, &updating_visitor); |
| 3630 break; | 3631 break; |
| 3631 case CODE_SPACE: | 3632 case CODE_SPACE: |
| 3632 if (FLAG_zap_code_space) { | 3633 if (FLAG_zap_code_space) { |
| 3633 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3634 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3634 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>( | 3635 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>( |
| (...skipping 477 matching lines...) | |
| 4112 for (int i = 0; i < objects_in_these_8_words; i++) { | 4113 for (int i = 0; i < objects_in_these_8_words; i++) { |
| 4113 starts[objects++] = offset + table[1 + i]; | 4114 starts[objects++] = offset + table[1 + i]; |
| 4114 } | 4115 } |
| 4115 } | 4116 } |
| 4116 offset += 8; | 4117 offset += 8; |
| 4117 } | 4118 } |
| 4118 return objects; | 4119 return objects; |
| 4119 } | 4120 } |
| 4120 | 4121 |
| 4121 | 4122 |
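The visible tail of `MarkWordToObjectStarts` is a table-driven decoder: the mark word is consumed 8 bits at a time, and a per-byte lookup table supplies the count of set bits and their positions. A self-contained equivalent that builds such a table at startup (the exact table layout is an assumption made for illustration):

```cpp
#include <cstdint>
#include <cstdio>

static uint8_t table[256][9];  // [b][0] = popcount, [b][1..8] = bit indices

static void BuildTable() {
  for (int b = 0; b < 256; b++) {
    int n = 0;
    for (int i = 0; i < 8; i++)
      if (b & (1 << i)) table[b][1 + n++] = static_cast<uint8_t>(i);
    table[b][0] = static_cast<uint8_t>(n);
  }
}

// Decodes a 32-bit mark word into bit positions, one byte per step,
// matching the structure of the loop shown in the diff above.
static int MarkWordToObjectStarts(uint32_t word, int starts[32]) {
  int objects = 0, offset = 0;
  while (word != 0) {
    int byte = word & 0xFF;
    for (int i = 0; i < table[byte][0]; i++)
      starts[objects++] = offset + table[byte][1 + i];
    word >>= 8;
    offset += 8;  // the next byte covers the next 8 bit positions
  }
  return objects;
}

int main() {
  BuildTable();
  int starts[32];
  int n = MarkWordToObjectStarts(0x80000101u, starts);  // bits 0, 8, 31
  for (int i = 0; i < n; i++) std::printf("start offset %d\n", starts[i]);
  return 0;
}
```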
| 4122 static inline Address DigestFreeStart(Address approximate_free_start, | |
| 4123 uint32_t free_start_cell) { | |
| 4124 DCHECK(free_start_cell != 0); | |
| 4125 | |
| 4126 // No consecutive 1 bits. | |
| 4127 DCHECK((free_start_cell & (free_start_cell << 1)) == 0); | |
| 4128 | |
| 4129 int offsets[16]; | |
| 4130 uint32_t cell = free_start_cell; | |
| 4131 int offset_of_last_live; | |
| 4132 if ((cell & 0x80000000u) != 0) { | |
| 4133 // This case would overflow below. | |
| 4134 offset_of_last_live = 31; | |
| 4135 } else { | |
| 4136 // Remove all but one bit, the most significant. This is an optimization | |
| 4137 // that may or may not be worthwhile. | |
| 4138 cell |= cell >> 16; | |
| 4139 cell |= cell >> 8; | |
| 4140 cell |= cell >> 4; | |
| 4141 cell |= cell >> 2; | |
| 4142 cell |= cell >> 1; | |
| 4143 cell = (cell + 1) >> 1; | |
| 4144 int live_objects = MarkWordToObjectStarts(cell, offsets); | |
| 4145 DCHECK(live_objects == 1); | |
| 4146 offset_of_last_live = offsets[live_objects - 1]; | |
| 4147 } | |
| 4148 Address last_live_start = | |
| 4149 approximate_free_start + offset_of_last_live * kPointerSize; | |
| 4150 HeapObject* last_live = HeapObject::FromAddress(last_live_start); | |
| 4151 Address free_start = last_live_start + last_live->Size(); | |
| 4152 return free_start; | |
| 4153 } | |
| 4154 | |
| 4155 | |
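`DigestFreeStart` reduces a cell to its most significant set bit with the smear-right trick, special-casing bit 31 because the `cell + 1` step would overflow a 32-bit value there. The trick in isolation, checked against a few expected values:

```cpp
#include <cassert>
#include <cstdint>

uint32_t HighestSetBit(uint32_t cell) {
  assert(cell != 0);
  if (cell & 0x80000000u) return 0x80000000u;  // would overflow below
  cell |= cell >> 16;  // smear the top set bit into every lower position...
  cell |= cell >> 8;
  cell |= cell >> 4;
  cell |= cell >> 2;
  cell |= cell >> 1;       // ...yielding a mask of all bits up to the MSB
  return (cell + 1) >> 1;  // mask + 1 is the next power of two; halve it
}

int main() {
  for (uint32_t v = 1; v != 0; v <<= 1) assert(HighestSetBit(v) == v);
  assert(HighestSetBit(0x0000F00Du) == 0x00008000u);
  assert(HighestSetBit(0x7FFFFFFFu) == 0x40000000u);
  return 0;
}
```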
| 4156 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { | |
| 4157 DCHECK(cell != 0); | |
| 4158 | |
| 4159 // No consecutive 1 bits. | |
| 4160 DCHECK((cell & (cell << 1)) == 0); | |
| 4161 | |
| 4162 int offsets[16]; | |
| 4163 if (cell == 0x80000000u) { // Avoid overflow below. | |
| 4164 return block_address + 31 * kPointerSize; | |
| 4165 } | |
| 4166 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; | |
| 4167 DCHECK((first_set_bit & cell) == first_set_bit); | |
| 4168 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); | |
| 4169 DCHECK(live_objects == 1); | |
| 4170 USE(live_objects); | |
| 4171 return block_address + offsets[0] * kPointerSize; | |
| 4172 } | |
| 4173 | |
| 4174 | |
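`StartOfLiveObject` needs the lowest set bit instead, and `((cell ^ (cell - 1)) + 1) >> 1` isolates it; the `0x80000000` guard again avoids the `+ 1` overflow. The more common spelling is `cell & (~cell + 1)`, and a quick standalone check confirms the two agree:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t i = 1; i <= 0xFFFF; i++) {
    uint32_t cell = static_cast<uint32_t>(i);
    // cell ^ (cell - 1) sets every bit up to and including the lowest set
    // bit; adding 1 and halving leaves exactly that bit.
    uint32_t a = ((cell ^ (cell - 1)) + 1) >> 1;
    uint32_t b = cell & (~cell + 1);  // two's-complement isolation
    assert(a == b);
  }
  return 0;
}
```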
| 4175 // Force instantiation of templatized SweepConservatively method for | |
| 4176 // SWEEP_ON_MAIN_THREAD mode. | |
| 4177 template int MarkCompactCollector::SweepConservatively< | |
| 4178 MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*); | |
| 4179 | |
| 4180 | |
| 4181 // Force instantiation of templatized SweepConservatively method for | |
| 4182 // SWEEP_IN_PARALLEL mode. | |
| 4183 template int MarkCompactCollector::SweepConservatively< | |
| 4184 MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*); | |
| 4185 | |
| 4186 | |
| 4187 // Sweeps a space conservatively. After this has been done the larger free | |
| 4188 // spaces have been put on the free list and the smaller ones have been | |
| 4189 // ignored and left untouched. A free space is always either ignored or put | |
| 4190 // on the free list, never split up into two parts. This is important | |
| 4191 // because it means that any FreeSpace maps left actually describe a region of | |
| 4192 // memory that can be ignored when scanning. Dead objects other than free | |
| 4193 // spaces will not contain the free space map. | |
| 4194 template <MarkCompactCollector::SweepingParallelism mode> | |
| 4195 int MarkCompactCollector::SweepConservatively(PagedSpace* space, | |
| 4196 FreeList* free_list, Page* p) { | |
| 4197 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); | |
| 4198 DCHECK( | |
| 4199 (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) || | |
| 4200 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD && | |
| 4201 free_list == NULL)); | |
| 4202 | |
| 4203 intptr_t freed_bytes = 0; | |
| 4204 intptr_t max_freed_bytes = 0; | |
| 4205 size_t size = 0; | |
| 4206 | |
| 4207 // Skip over all the dead objects at the start of the page and mark them free. | |
| 4208 Address cell_base = 0; | |
| 4209 MarkBit::CellType* cell = NULL; | |
| 4210 MarkBitCellIterator it(p); | |
| 4211 for (; !it.Done(); it.Advance()) { | |
| 4212 cell_base = it.CurrentCellBase(); | |
| 4213 cell = it.CurrentCell(); | |
| 4214 if (*cell != 0) break; | |
| 4215 } | |
| 4216 | |
| 4217 if (it.Done()) { | |
| 4218 size = p->area_end() - p->area_start(); | |
| 4219 freed_bytes = | |
| 4220 Free<mode>(space, free_list, p->area_start(), static_cast<int>(size)); | |
| 4221 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 4222 DCHECK_EQ(0, p->LiveBytes()); | |
| 4223 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { | |
| 4224 // When concurrent sweeping is active, the page will be marked after | |
| 4225 // sweeping by the main thread. | |
| 4226 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); | |
| 4227 } else { | |
| 4228 p->MarkSweptConservatively(); | |
| 4229 } | |
| 4230 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | |
| 4231 } | |
| 4232 | |
| 4233 // Grow the size of the start-of-page free space a little to get up to the | |
| 4234 // first live object. | |
| 4235 Address free_end = StartOfLiveObject(cell_base, *cell); | |
| 4236 // Free the first free space. | |
| 4237 size = free_end - p->area_start(); | |
| 4238 freed_bytes = | |
| 4239 Free<mode>(space, free_list, p->area_start(), static_cast<int>(size)); | |
| 4240 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 4241 | |
| 4242 // The start of the current free area is represented in undigested form by | |
| 4243 // the address of the last 32-word section that contained a live object and | |
| 4244 // the marking bitmap for that cell, which describes where the live object | |
| 4245 // started. Unless we find a large free space in the bitmap we will not | |
| 4246 // digest this pair into a real address. We start the iteration here at the | |
| 4247 // first word in the marking bit map that indicates a live object. | |
| 4248 Address free_start = cell_base; | |
| 4249 MarkBit::CellType free_start_cell = *cell; | |
| 4250 | |
| 4251 for (; !it.Done(); it.Advance()) { | |
| 4252 cell_base = it.CurrentCellBase(); | |
| 4253 cell = it.CurrentCell(); | |
| 4254 if (*cell != 0) { | |
| 4255 // We have a live object. Check approximately whether it is more than 32 | |
| 4256 // words since the last live object. | |
| 4257 if (cell_base - free_start > 32 * kPointerSize) { | |
| 4258 free_start = DigestFreeStart(free_start, free_start_cell); | |
| 4259 if (cell_base - free_start > 32 * kPointerSize) { | |
| 4260 // Now that we know the exact start of the free space it still looks | |
| 4261 // like we have a large enough free space to be worth bothering with. | |
| 4262 // So now we need to find the start of the first live object at the | |
| 4263 // end of the free space. | |
| 4264 free_end = StartOfLiveObject(cell_base, *cell); | |
| 4265 freed_bytes = Free<mode>(space, free_list, free_start, | |
| 4266 static_cast<int>(free_end - free_start)); | |
| 4267 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 4268 } | |
| 4269 } | |
| 4270 // Update our undigested record of where the current free area started. | |
| 4271 free_start = cell_base; | |
| 4272 free_start_cell = *cell; | |
| 4273 // Clear marking bits for current cell. | |
| 4274 *cell = 0; | |
| 4275 } | |
| 4276 } | |
| 4277 | |
| 4278 // Handle the free space at the end of the page. | |
| 4279 if (cell_base - free_start > 32 * kPointerSize) { | |
| 4280 free_start = DigestFreeStart(free_start, free_start_cell); | |
| 4281 freed_bytes = Free<mode>(space, free_list, free_start, | |
| 4282 static_cast<int>(p->area_end() - free_start)); | |
| 4283 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 4284 } | |
| 4285 | |
| 4286 p->ResetLiveBytes(); | |
| 4287 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { | |
| 4288 // When concurrent sweeping is active, the page will be marked after | |
| 4289 // sweeping by the main thread. | |
| 4290 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); | |
| 4291 } else { | |
| 4292 p->MarkSweptConservatively(); | |
| 4293 } | |
| 4294 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | |
| 4295 } | |
| 4296 | |
| 4297 | |
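The conservative sweeper deleted here only materializes a free block once the gap exceeds 32 words; smaller gaps are left untouched, so a free region always lands on the free list whole or not at all, as the comment block above stresses. A toy version of that thresholding, with hypothetical names and byte offsets instead of addresses:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr size_t kPointerSize = 8;
constexpr size_t kMinFreeSize = 32 * kPointerSize;  // digestion threshold

// Given sorted, non-overlapping live (start, end) offsets, put only gaps
// larger than the threshold on the free list, whole; smaller gaps are
// ignored and never split, mirroring the invariant documented above.
void SweepConservatively(size_t area_start, size_t area_end,
                         const std::vector<std::pair<size_t, size_t>>& live,
                         std::vector<std::pair<size_t, size_t>>* free_list) {
  size_t free_start = area_start;
  for (auto [start, end] : live) {
    if (start - free_start > kMinFreeSize)
      free_list->push_back({free_start, start - free_start});
    free_start = end;
  }
  if (area_end - free_start > kMinFreeSize)
    free_list->push_back({free_start, area_end - free_start});
}

int main() {
  std::vector<std::pair<size_t, size_t>> fl;
  // Two 64-byte gaps (ignored) and one 512-byte tail gap (freed).
  SweepConservatively(0, 1024, {{64, 128}, {192, 512}}, &fl);
  for (auto [off, len] : fl)
    std::printf("free block at %zu, %zu bytes\n", off, len);
  return 0;
}
```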
| 4298 int MarkCompactCollector::SweepInParallel(PagedSpace* space, | 4123 int MarkCompactCollector::SweepInParallel(PagedSpace* space, |
| 4299 int required_freed_bytes) { | 4124 int required_freed_bytes) { |
| 4300 int max_freed = 0; | 4125 int max_freed = 0; |
| 4301 int max_freed_overall = 0; | 4126 int max_freed_overall = 0; |
| 4302 PageIterator it(space); | 4127 PageIterator it(space); |
| 4303 while (it.has_next()) { | 4128 while (it.has_next()) { |
| 4304 Page* p = it.next(); | 4129 Page* p = it.next(); |
| 4305 max_freed = SweepInParallel(p, space); | 4130 max_freed = SweepInParallel(p, space); |
| 4306 DCHECK(max_freed >= 0); | 4131 DCHECK(max_freed >= 0); |
| 4307 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { | 4132 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { |
| 4308 return max_freed; | 4133 return max_freed; |
| 4309 } | 4134 } |
| 4310 max_freed_overall = Max(max_freed, max_freed_overall); | 4135 max_freed_overall = Max(max_freed, max_freed_overall); |
| 4311 if (p == space->end_of_unswept_pages()) break; | 4136 if (p == space->end_of_unswept_pages()) break; |
| 4312 } | 4137 } |
| 4313 return max_freed_overall; | 4138 return max_freed_overall; |
| 4314 } | 4139 } |
| 4315 | 4140 |
| 4316 | 4141 |
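`SweepInParallel(PagedSpace*, int)` sweeps pages only until one of them yields a block of at least `required_freed_bytes`, and otherwise reports the best block seen, so allocation-triggered sweeping does no more work than the failed allocation needs. Its control flow, reduced to a skeleton with assumed names:

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// Each element is the largest free block (in bytes) a swept page produced.
int SweepUntilEnoughFreed(const std::vector<int>& per_page_max_freed,
                          int required_freed_bytes) {
  int max_freed_overall = 0;
  for (int max_freed : per_page_max_freed) {
    // One page already satisfies the request: stop sweeping early.
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
      return max_freed;
    max_freed_overall = std::max(max_freed, max_freed_overall);
  }
  return max_freed_overall;  // best block found across all pages swept
}

int main() {
  // Stops at the third page (512 >= 300) without touching the fourth.
  assert(SweepUntilEnoughFreed({64, 128, 512, 4096}, 300) == 512);
  // No requirement: sweeps everything and reports the overall best.
  assert(SweepUntilEnoughFreed({64, 128, 512}, 0) == 512);
  return 0;
}
```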
| 4317 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { | 4142 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { |
| 4318 int max_freed = 0; | 4143 int max_freed = 0; |
| 4319 if (page->TryParallelSweeping()) { | 4144 if (page->TryParallelSweeping()) { |
| 4320 FreeList* free_list = space == heap()->old_pointer_space() | 4145 FreeList* free_list = space == heap()->old_pointer_space() |
| 4321 ? free_list_old_pointer_space_.get() | 4146 ? free_list_old_pointer_space_.get() |
| 4322 : free_list_old_data_space_.get(); | 4147 : free_list_old_data_space_.get(); |
| 4323 FreeList private_free_list(space); | 4148 FreeList private_free_list(space); |
| 4324 if (space->swept_precisely()) { | 4149 max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 4325 max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL, | 4150 IGNORE_FREE_SPACE>(space, &private_free_list, |
| 4326 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( | 4151 page, NULL); |
| 4327 space, &private_free_list, page, NULL); | |
| 4328 } else { | |
| 4329 max_freed = SweepConservatively<SWEEP_IN_PARALLEL>( | |
| 4330 space, &private_free_list, page); | |
| 4331 } | |
| 4332 free_list->Concatenate(&private_free_list); | 4152 free_list->Concatenate(&private_free_list); |
| 4333 } | 4153 } |
| 4334 return max_freed; | 4154 return max_freed; |
| 4335 } | 4155 } |
| 4336 | 4156 |
| 4337 | 4157 |
| 4338 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { | 4158 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| 4339 space->set_swept_precisely(sweeper == PRECISE || | |
| 4340 sweeper == CONCURRENT_PRECISE || | |
| 4341 sweeper == PARALLEL_PRECISE); | |
| 4342 space->ClearStats(); | 4159 space->ClearStats(); |
| 4343 | 4160 |
| 4344 // We defensively initialize end_of_unswept_pages_ here with the first page | 4161 // We defensively initialize end_of_unswept_pages_ here with the first page |
| 4345 // of the pages list. | 4162 // of the pages list. |
| 4346 space->set_end_of_unswept_pages(space->FirstPage()); | 4163 space->set_end_of_unswept_pages(space->FirstPage()); |
| 4347 | 4164 |
| 4348 PageIterator it(space); | 4165 PageIterator it(space); |
| 4349 | 4166 |
| 4350 int pages_swept = 0; | 4167 int pages_swept = 0; |
| 4351 bool unused_page_present = false; | 4168 bool unused_page_present = false; |
| 4352 bool parallel_sweeping_active = false; | 4169 bool parallel_sweeping_active = false; |
| 4353 | 4170 |
| 4354 while (it.has_next()) { | 4171 while (it.has_next()) { |
| 4355 Page* p = it.next(); | 4172 Page* p = it.next(); |
| 4356 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); | 4173 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); |
| 4357 | 4174 |
| 4358 // Clear sweeping flags indicating that marking bits are still intact. | 4175 // Clear sweeping flags indicating that marking bits are still intact. |
| 4359 p->ClearSweptPrecisely(); | 4176 p->ClearWasSwept(); |
| 4360 p->ClearSweptConservatively(); | |
| 4361 | 4177 |
| 4362 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || | 4178 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || |
| 4363 p->IsEvacuationCandidate()) { | 4179 p->IsEvacuationCandidate()) { |
| 4364 // Will be processed in EvacuateNewSpaceAndCandidates. | 4180 // Will be processed in EvacuateNewSpaceAndCandidates. |
| 4365 DCHECK(evacuation_candidates_.length() > 0); | 4181 DCHECK(evacuation_candidates_.length() > 0); |
| 4366 continue; | 4182 continue; |
| 4367 } | 4183 } |
| 4368 | 4184 |
| 4369 // One unused page is kept, all further are released before sweeping them. | 4185 // One unused page is kept, all further are released before sweeping them. |
| 4370 if (p->LiveBytes() == 0) { | 4186 if (p->LiveBytes() == 0) { |
| 4371 if (unused_page_present) { | 4187 if (unused_page_present) { |
| 4372 if (FLAG_gc_verbose) { | 4188 if (FLAG_gc_verbose) { |
| 4373 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", | 4189 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", |
| 4374 reinterpret_cast<intptr_t>(p)); | 4190 reinterpret_cast<intptr_t>(p)); |
| 4375 } | 4191 } |
| 4376 // Adjust unswept free bytes because releasing a page expects said | 4192 // Adjust unswept free bytes because releasing a page expects said |
| 4377 // counter to be accurate for unswept pages. | 4193 // counter to be accurate for unswept pages. |
| 4378 space->IncreaseUnsweptFreeBytes(p); | 4194 space->IncreaseUnsweptFreeBytes(p); |
| 4379 space->ReleasePage(p); | 4195 space->ReleasePage(p); |
| 4380 continue; | 4196 continue; |
| 4381 } | 4197 } |
| 4382 unused_page_present = true; | 4198 unused_page_present = true; |
| 4383 } | 4199 } |
| 4384 | 4200 |
| 4385 switch (sweeper) { | 4201 switch (sweeper) { |
| 4386 case CONCURRENT_CONSERVATIVE: | |
| 4387 case PARALLEL_CONSERVATIVE: { | |
| 4388 if (!parallel_sweeping_active) { | |
| 4389 if (FLAG_gc_verbose) { | |
| 4390 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", | |
| 4391 reinterpret_cast<intptr_t>(p)); | |
| 4392 } | |
| 4393 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); | |
| 4394 pages_swept++; | |
| 4395 parallel_sweeping_active = true; | |
| 4396 } else { | |
| 4397 if (FLAG_gc_verbose) { | |
| 4398 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", | |
| 4399 reinterpret_cast<intptr_t>(p)); | |
| 4400 } | |
| 4401 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); | |
| 4402 space->IncreaseUnsweptFreeBytes(p); | |
| 4403 } | |
| 4404 space->set_end_of_unswept_pages(p); | |
| 4405 break; | |
| 4406 } | |
| 4407 case CONCURRENT_PRECISE: | 4202 case CONCURRENT_PRECISE: |
| 4408 case PARALLEL_PRECISE: | 4203 case PARALLEL_PRECISE: |
| 4409 if (!parallel_sweeping_active) { | 4204 if (!parallel_sweeping_active) { |
| 4410 if (FLAG_gc_verbose) { | 4205 if (FLAG_gc_verbose) { |
| 4411 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", | 4206 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", |
| 4412 reinterpret_cast<intptr_t>(p)); | 4207 reinterpret_cast<intptr_t>(p)); |
| 4413 } | 4208 } |
| 4414 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | 4209 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, |
| 4415 IGNORE_FREE_SPACE>(space, NULL, p, NULL); | 4210 IGNORE_FREE_SPACE>(space, NULL, p, NULL); |
| 4416 pages_swept++; | 4211 pages_swept++; |
| 4417 parallel_sweeping_active = true; | 4212 parallel_sweeping_active = true; |
| 4418 } else { | 4213 } else { |
| 4419 if (FLAG_gc_verbose) { | 4214 if (FLAG_gc_verbose) { |
| 4420 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", | 4215 PrintF("Sweeping 0x%" V8PRIxPTR " precisely in parallel.\n", |
| 4421 reinterpret_cast<intptr_t>(p)); | 4216 reinterpret_cast<intptr_t>(p)); |
| 4422 } | 4217 } |
| 4423 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); | 4218 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); |
| 4424 space->IncreaseUnsweptFreeBytes(p); | 4219 space->IncreaseUnsweptFreeBytes(p); |
| 4425 } | 4220 } |
| 4426 space->set_end_of_unswept_pages(p); | 4221 space->set_end_of_unswept_pages(p); |
| 4427 break; | 4222 break; |
| 4428 case PRECISE: { | 4223 case PRECISE: { |
| 4429 if (FLAG_gc_verbose) { | 4224 if (FLAG_gc_verbose) { |
| 4430 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", | 4225 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", |
| (...skipping 20 matching lines...) | |
| 4451 PrintF("SweepSpace: %s (%d pages swept)\n", | 4246 PrintF("SweepSpace: %s (%d pages swept)\n", |
| 4452 AllocationSpaceName(space->identity()), pages_swept); | 4247 AllocationSpaceName(space->identity()), pages_swept); |
| 4453 } | 4248 } |
| 4454 | 4249 |
| 4455 // Give pages that are queued to be freed back to the OS. | 4250 // Give pages that are queued to be freed back to the OS. |
| 4456 heap()->FreeQueuedChunks(); | 4251 heap()->FreeQueuedChunks(); |
| 4457 } | 4252 } |
| 4458 | 4253 |
| 4459 | 4254 |
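In the `CONCURRENT_PRECISE`/`PARALLEL_PRECISE` arms above, exactly one page is swept on the main thread so the space immediately has allocatable memory, and every later page is tagged `SWEEPING_PENDING` for the sweeper threads. A stand-in sketch of that policy:

```cpp
#include <cstdio>
#include <vector>

enum class SweepState { kDone, kPending };
struct Page { SweepState state = SweepState::kDone; };

static void SweepOnMainThread(Page& p) {
  p.state = SweepState::kDone;  // stands in for an actual sweep
}

static void DistributeSweeping(std::vector<Page>& pages) {
  bool parallel_sweeping_active = false;
  for (Page& p : pages) {
    if (!parallel_sweeping_active) {
      SweepOnMainThread(p);  // first page: the space gets free memory now
      parallel_sweeping_active = true;
    } else {
      p.state = SweepState::kPending;  // queued for the sweeper threads
    }
  }
}

int main() {
  std::vector<Page> pages(4);
  DistributeSweeping(pages);
  int pending = 0;
  for (const Page& p : pages) pending += (p.state == SweepState::kPending);
  std::printf("%d of %zu pages queued for parallel sweeping\n", pending,
              pages.size());
  return 0;
}
```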
| 4460 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) { | 4255 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) { |
| 4461 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || | 4256 return type == MarkCompactCollector::PARALLEL_PRECISE || |
| 4462 type == MarkCompactCollector::CONCURRENT_CONSERVATIVE || | |
| 4463 type == MarkCompactCollector::PARALLEL_PRECISE || | |
| 4464 type == MarkCompactCollector::CONCURRENT_PRECISE; | 4257 type == MarkCompactCollector::CONCURRENT_PRECISE; |
| 4465 } | 4258 } |
| 4466 | 4259 |
| 4467 | 4260 |
| 4468 static bool ShouldWaitForSweeperThreads( | 4261 static bool ShouldWaitForSweeperThreads( |
| 4469 MarkCompactCollector::SweeperType type) { | 4262 MarkCompactCollector::SweeperType type) { |
| 4470 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || | 4263 return type == MarkCompactCollector::PARALLEL_PRECISE; |
| 4471 type == MarkCompactCollector::PARALLEL_PRECISE; | |
| 4472 } | 4264 } |
| 4473 | 4265 |
| 4474 | 4266 |
| 4475 void MarkCompactCollector::SweepSpaces() { | 4267 void MarkCompactCollector::SweepSpaces() { |
| 4476 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP); | 4268 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP); |
| 4477 double start_time = 0.0; | 4269 double start_time = 0.0; |
| 4478 if (FLAG_print_cumulative_gc_stat) { | 4270 if (FLAG_print_cumulative_gc_stat) { |
| 4479 start_time = base::OS::TimeCurrentMillis(); | 4271 start_time = base::OS::TimeCurrentMillis(); |
| 4480 } | 4272 } |
| 4481 | 4273 |
| 4482 #ifdef DEBUG | 4274 #ifdef DEBUG |
| 4483 state_ = SWEEP_SPACES; | 4275 state_ = SWEEP_SPACES; |
| 4484 #endif | 4276 #endif |
| 4485 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE; | 4277 SweeperType how_to_sweep = CONCURRENT_PRECISE; |
| 4486 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; | 4278 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_PRECISE; |
| 4487 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; | 4279 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_PRECISE; |
| 4488 if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) { | |
| 4489 how_to_sweep = PARALLEL_PRECISE; | |
| 4490 } | |
| 4491 if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) { | |
| 4492 how_to_sweep = CONCURRENT_PRECISE; | |
| 4493 } | |
| 4494 if (sweep_precisely_) how_to_sweep = PRECISE; | 4280 if (sweep_precisely_) how_to_sweep = PRECISE; |
| 4495 | 4281 |
| 4496 MoveEvacuationCandidatesToEndOfPagesList(); | 4282 MoveEvacuationCandidatesToEndOfPagesList(); |
| 4497 | 4283 |
| 4498 // Noncompacting collections simply sweep the spaces to clear the mark | 4284 // Noncompacting collections simply sweep the spaces to clear the mark |
| 4499 // bits and free the nonlive blocks (for old and map spaces). We sweep | 4285 // bits and free the nonlive blocks (for old and map spaces). We sweep |
| 4500 // the map space last because freeing non-live maps overwrites them and | 4286 // the map space last because freeing non-live maps overwrites them and |
| 4501 // the other spaces rely on possibly non-live maps to get the sizes for | 4287 // the other spaces rely on possibly non-live maps to get the sizes for |
| 4502 // non-live objects. | 4288 // non-live objects. |
| 4503 { | 4289 { |
| (...skipping 51 matching lines...) | |
| 4555 } | 4341 } |
| 4556 } | 4342 } |
| 4557 | 4343 |
| 4558 | 4344 |
| 4559 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { | 4345 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { |
| 4560 PageIterator it(space); | 4346 PageIterator it(space); |
| 4561 while (it.has_next()) { | 4347 while (it.has_next()) { |
| 4562 Page* p = it.next(); | 4348 Page* p = it.next(); |
| 4563 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) { | 4349 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) { |
| 4564 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE); | 4350 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE); |
| 4565 if (space->swept_precisely()) { | 4351 p->SetWasSwept(); |
| 4566 p->MarkSweptPrecisely(); | |
| 4567 } else { | |
| 4568 p->MarkSweptConservatively(); | |
| 4569 } | |
| 4570 } | 4352 } |
| 4571 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); | 4353 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); |
| 4572 } | 4354 } |
| 4573 } | 4355 } |
| 4574 | 4356 |
| 4575 | 4357 |
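Finalization is the main thread's half of the handshake: pages that the sweeper threads left in `SWEEPING_FINALIZE` are flipped to `SWEEPING_DONE` and marked swept, and only then may they be linearly iterated again. A sketch with one atomic state per page; the types are assumed, not V8's:

```cpp
#include <atomic>
#include <vector>

enum SweepingState { SWEEPING_DONE, SWEEPING_PENDING, SWEEPING_FINALIZE };

struct Page {
  std::atomic<SweepingState> parallel_sweeping{SWEEPING_DONE};
  bool was_swept = false;
};

// Main-thread pass: finalize pages the sweeper threads finished.
void ParallelSweepSpaceComplete(std::vector<Page>& pages) {
  for (Page& p : pages) {
    if (p.parallel_sweeping.load() == SWEEPING_FINALIZE) {
      p.parallel_sweeping.store(SWEEPING_DONE);
      p.was_swept = true;  // only now may the page be linearly iterated
    }
  }
}

int main() {
  std::vector<Page> pages(3);
  pages[1].parallel_sweeping.store(SWEEPING_FINALIZE);
  ParallelSweepSpaceComplete(pages);
  return pages[1].was_swept ? 0 : 1;
}
```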
| 4576 void MarkCompactCollector::ParallelSweepSpacesComplete() { | 4358 void MarkCompactCollector::ParallelSweepSpacesComplete() { |
| 4577 ParallelSweepSpaceComplete(heap()->old_pointer_space()); | 4359 ParallelSweepSpaceComplete(heap()->old_pointer_space()); |
| 4578 ParallelSweepSpaceComplete(heap()->old_data_space()); | 4360 ParallelSweepSpaceComplete(heap()->old_data_space()); |
| 4579 } | 4361 } |
| (...skipping 199 matching lines...) | |
| 4779 SlotsBuffer* buffer = *buffer_address; | 4561 SlotsBuffer* buffer = *buffer_address; |
| 4780 while (buffer != NULL) { | 4562 while (buffer != NULL) { |
| 4781 SlotsBuffer* next_buffer = buffer->next(); | 4563 SlotsBuffer* next_buffer = buffer->next(); |
| 4782 DeallocateBuffer(buffer); | 4564 DeallocateBuffer(buffer); |
| 4783 buffer = next_buffer; | 4565 buffer = next_buffer; |
| 4784 } | 4566 } |
| 4785 *buffer_address = NULL; | 4567 *buffer_address = NULL; |
| 4786 } | 4568 } |
| 4787 } | 4569 } |
| 4788 } // namespace v8::internal | 4570 } // namespace v8::internal |