
Unified Diff: src/mark-compact.cc

Issue 7302003: Support slots recording for compaction during incremental marking. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 5 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 39 matching lines...)
 const char* Marking::kImpossibleBitPattern = "01";


 // -------------------------------------------------------------------------
 // MarkCompactCollector

 MarkCompactCollector::MarkCompactCollector() :  // NOLINT
 #ifdef DEBUG
       state_(IDLE),
 #endif
+      sweep_precisely_(false),
+      compacting_(false),
       tracer_(NULL),
 #ifdef DEBUG
       live_young_objects_size_(0),
       live_old_pointer_objects_size_(0),
       live_old_data_objects_size_(0),
       live_code_objects_size_(0),
       live_map_objects_size_(0),
       live_cell_objects_size_(0),
       live_lo_objects_size_(0),
       live_bytes_(0),
(...skipping 77 matching lines...)
 }


 class VerifyEvacuationVisitor: public ObjectVisitor {
  public:
   void VisitPointers(Object** start, Object** end) {
     for (Object** current = start; current < end; current++) {
       if ((*current)->IsHeapObject()) {
         HeapObject* object = HeapObject::cast(*current);
         if (MarkCompactCollector::IsOnEvacuationCandidate(object)) {
-          HEAP->TracePathToObject(source_);
           CHECK(false);
         }
       }
     }
   }

   HeapObject* source_;
 };

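Context for the hunk above: VerifyEvacuationVisitor is the debug-only check that, after evacuation, no visited pointer still targets a page marked as an evacuation candidate. The patch removes the HEAP->TracePathToObject(source_) call, so a violation now fails with a bare CHECK(false) instead of first tracing a path to the offending object for debugging.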
(...skipping 50 matching lines...)
 }
 #endif


 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
   p->MarkEvacuationCandidate();
   evacuation_candidates_.Add(p);
 }

+bool MarkCompactCollector::StartCompaction() {
+  // Don't start compaction if we are in the middle of an incremental
+  // marking cycle: no slots have been recorded for it, so pointers into
+  // evacuated pages could not be updated.
+  if (!compacting_ && !heap_->incremental_marking()->IsMarking()) {
+    slots_buffer_.Clear();
+    evacuation_candidates_.Rewind(0);
+
+    CollectEvacuationCandidates(heap()->old_pointer_space());
+    CollectEvacuationCandidates(heap()->old_data_space());
+
+    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
+
+    compacting_ = evacuation_candidates_.length() > 0;
+  }
+
+  return compacting_;
+}
+
+
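A note on the new function above: StartCompaction() is idempotent within a collection cycle. It selects evacuation candidates at most once, and it refuses to begin while incremental marking is already running, since no slots pointing into candidate pages were recorded during that marking pass. A minimal self-contained sketch of this guard pattern (Collector, Page, and the fragmentation field are illustrative stand-ins, not V8's types):

    #include <vector>

    // Simplified stand-ins for the heap types; illustrative only.
    struct Page { bool fragmented; };

    class Collector {
     public:
      // Returns true if compaction is now in progress. Safe to call
      // repeatedly: candidates are selected at most once per cycle.
      bool StartCompaction(bool incremental_marking_active,
                           const std::vector<Page*>& pages) {
        if (!compacting_ && !incremental_marking_active) {
          candidates_.clear();  // the real code also clears its slots buffer
          for (Page* p : pages) {
            if (p->fragmented) candidates_.push_back(p);
          }
          compacting_ = !candidates_.empty();
        }
        return compacting_;
      }

     private:
      bool compacting_ = false;
      std::vector<Page*> candidates_;
    };

Compaction counts as started only if at least one candidate page was found, mirroring compacting_ = evacuation_candidates_.length() > 0 above.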
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
   ASSERT(state_ == PREPARE_GC);

   MarkLiveObjects();
   ASSERT(heap_->incremental_marking()->IsStopped());

   if (FLAG_collect_maps) ClearNonLiveTransitions();

(...skipping 125 matching lines...)
     Page* p = it.next();
     if (space->IsFragmented(p)) {
       AddEvacuationCandidate(p);
     } else {
       p->ClearEvacuationCandidate();
     }
   }
 }

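The selection loop above delegates the policy decision to space->IsFragmented(p), whose definition is outside this diff. A plausible shape for such a predicate — treat a page as worth evacuating when live data fills less than half of its object area — is sketched below; the 50% threshold and the PageStats fields are assumptions for illustration, not V8's actual heuristic:

    #include <cstddef>

    // Hypothetical fragmentation test: evacuate a page when less than
    // half of its usable area holds live objects.
    struct PageStats {
      size_t live_bytes;        // bytes occupied by marked objects
      size_t object_area_size;  // total usable bytes on the page
    };

    static bool IsFragmented(const PageStats& p) {
      return p.live_bytes * 2 < p.object_area_size;
    }

The lower the live fraction, the cheaper the page is to evacuate relative to the space it frees.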
+#if 0
 static void ClearEvacuationCandidates(PagedSpace* space) {
Erik Corry 2011/07/04 11:04:11 Commented code
Vyacheslav Egorov (Chromium) 2011/08/05 12:50:28 Done.
   ASSERT(space->identity() == OLD_POINTER_SPACE ||
          space->identity() == OLD_DATA_SPACE);

   PageIterator it(space);
   while (it.has_next()) {
     Page* p = it.next();
     p->ClearEvacuationCandidate();
   }
 }
+#endif

 void MarkCompactCollector::Prepare(GCTracer* tracer) {
   // TODO(gc) re-enable code flushing.
   FLAG_flush_code = false;
   FLAG_always_compact = false;

   // Disable collection of maps if incremental marking is enabled.
   // TODO(gc) improve maps collection algorithm to work with incremental
   // marking.
(...skipping 10 matching lines...)
   ASSERT(!FLAG_always_compact || !FLAG_never_compact);

   if (FLAG_collect_maps) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit) {
     // If the GDBJIT interface is active, disable compaction.
     compacting_collection_ = false;
   }
 #endif

-  if (!FLAG_never_compact) {
-    slots_buffer_.Clear();
-    evacuation_candidates_.Rewind(0);
-
-    if (!heap()->incremental_marking()->IsMarking()) {
-      CollectEvacuationCandidates(heap()->old_pointer_space());
-      CollectEvacuationCandidates(heap()->old_data_space());
-    } else {
-      ClearEvacuationCandidates(heap()->old_pointer_space());
-      ClearEvacuationCandidates(heap()->old_data_space());
-    }
-  }
+  if (!FLAG_never_compact) StartCompaction();

   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
     space->PrepareForMarkCompact();
   }

   if (!heap()->incremental_marking()->IsMarking()) {
     ClearMarkbits(heap_);
(...skipping 1872 matching lines...)
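In the Prepare() hunk above, the inline collect-or-clear logic is replaced by a single call to the new StartCompaction(). Because StartCompaction() does nothing while incremental marking is active, the else-branch that used to clear candidates becomes unnecessary (hence the now-#if 0'd ClearEvacuationCandidates), and compaction can now also be started from outside Prepare() — per the CL title, so that slot recording can happen during incremental marking.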

 void MarkCompactCollector::EvacuatePages() {
   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
     Page* p = evacuation_candidates_[i];
     EvacuateLiveObjectsFromPage(p);
   }
 }

+class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    if (object->IsHeapObject()) {
+      HeapObject* heap_object = HeapObject::cast(object);
+      MapWord map_word = heap_object->map_word();
+      if (map_word.IsForwardingAddress()) {
+        return map_word.ToForwardingAddress();
+      }
+    }
+    return object;
+  }
+};
+
+
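Context for the new class above: when an object is evacuated, the old copy's map word is overwritten with a forwarding address. A WeakObjectRetainer decides what a weak list entry should become after GC; this one re-points entries at the object's new location instead of leaving a stale pointer into an evacuated page. A toy model of the idea (ToyObject and its forwarding field are invented for illustration):

    // Toy model: an evacuated object leaves a forwarding pointer behind,
    // and weak references are retargeted rather than cleared.
    struct ToyObject {
      ToyObject* forwarding = nullptr;  // non-null once the object has moved
    };

    // Analogous to EvacuationWeakObjectRetainer::RetainAs().
    static ToyObject* RetainAs(ToyObject* object) {
      return (object->forwarding != nullptr) ? object->forwarding : object;
    }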
 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   EvacuateNewSpace();
   EvacuatePages();

   // Second pass: find pointers to new space and update them.
   PointersUpdatingVisitor updating_visitor(heap());

   // Update pointers in to space.
   SemiSpaceIterator to_it(heap()->new_space()->bottom(),
                           heap()->new_space()->top());
   for (HeapObject* object = to_it.Next();
        object != NULL;
        object = to_it.Next()) {
     Map* map = object->map();
     object->IterateBody(map->instance_type(),
                         object->SizeFromMap(map),
                         &updating_visitor);
   }

   // Update roots.
   heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
   LiveObjectList::IterateElements(&updating_visitor);

   {
     StoreBufferRebuildScope scope(heap_,
                                   heap_->store_buffer(),
                                   &Heap::ScavengeStoreBufferCallback);
     heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
   }
-  slots_buffer_.Iterate(&updating_visitor);
+  slots_buffer_.Update();

   // Update pointers from cells.
   HeapObjectIterator cell_iterator(heap_->cell_space());
   for (HeapObject* cell = cell_iterator.Next();
        cell != NULL;
        cell = cell_iterator.Next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
           (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
       updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
     }
   }

   // Update pointer from the global contexts list.
   updating_visitor.VisitPointer(heap_->global_contexts_list_address());

   heap_->symbol_table()->Iterate(&updating_visitor);

   // Update pointers from external string table.
   heap_->UpdateReferencesInExternalStringTable(
       &UpdateReferenceInExternalStringTableEntry);

   // Update JSFunction pointers from the runtime profiler.
   heap_->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

+  EvacuationWeakObjectRetainer evacuation_object_retainer;
+  heap()->ProcessWeakReferences(&evacuation_object_retainer);
+
 #ifdef DEBUG
   if (FLAG_verify_heap) {
     VerifyEvacuation(heap_);
   }
 #endif

   int npages = evacuation_candidates_.length();
+  ASSERT(compacting_ == (npages > 0));
   for (int i = 0; i < npages; i++) {
     Page* p = evacuation_candidates_[i];
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
     p->set_scan_on_scavenge(false);
-
-    // We are not clearing evacuation candidate flag here
-    // because it is required to notify lazy sweeper to skip
-    // these pages.
+    p->ClearEvacuationCandidate();
+    p->SetFlag(MemoryChunk::EVACUATED);
+    p->ClearFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY);
   }
+  compacting_ = false;
 }

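The comment deleted above explained why the candidate flag used to stay set: the lazy sweeper relied on it to skip evacuated pages. The new code clears that flag and instead sets a dedicated EVACUATED flag for the sweeper, while also invalidating any stale WAS_SWEPT_CONSERVATIVELY state. Expressed as a bit-flag transition (the flag names come from the diff; the bit positions are invented for illustration):

    #include <cstdint>

    enum PageFlag : uint32_t {
      EVACUATION_CANDIDATE     = 1u << 0,
      EVACUATED                = 1u << 1,
      WAS_SWEPT_CONSERVATIVELY = 1u << 2,
    };

    // Applied to each candidate page once its live objects have moved out.
    static uint32_t OnPageEvacuated(uint32_t flags) {
      flags &= ~EVACUATION_CANDIDATE;      // no longer a candidate
      flags |= EVACUATED;                  // tells the lazy sweeper to skip it
      flags &= ~WAS_SWEPT_CONSERVATIVELY;  // previous sweep state is stale
      return flags;
    }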
 INLINE(static uint32_t SweepFree(PagedSpace* space,
                                  Page* p,
                                  uint32_t free_start,
                                  uint32_t region_end,
                                  uint32_t* cells));

(...skipping 651 matching lines...)
     if (buffer_idx_ == buffers_.length()) {
       buffers_.Add(new ObjectSlot[kBufferSize]);
     }
     buffer_ = buffers_[buffer_idx_];
   }

   buffer_[idx_++] = slot;
 }

-void SlotsBuffer::Iterate(ObjectVisitor* visitor) {
+static inline void UpdateSlot(Object** slot) {
+  Object* obj = *slot;
+  if (!obj->IsHeapObject()) return;
+
+  HeapObject* heap_obj = HeapObject::cast(obj);
+
+  MapWord map_word = heap_obj->map_word();
+  if (map_word.IsForwardingAddress()) {
+    ASSERT(MarkCompactCollector::IsOnEvacuationCandidate(*slot));
+    *slot = map_word.ToForwardingAddress();
+    ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot));
+  }
+}
+
+
+void SlotsBuffer::Update() {
   if (buffer_idx_ < 0) return;

   for (int buffer_index = 0; buffer_index < buffer_idx_; ++buffer_index) {
     ObjectSlot* buffer = buffers_[buffer_index];
     for (int slot_idx = 0; slot_idx < kBufferSize; ++slot_idx) {
-      visitor->VisitPointer(buffer[slot_idx]);
+      UpdateSlot(buffer[slot_idx]);
     }
   }

   ObjectSlot* last_buffer = buffers_[buffer_idx_];
   for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
-    visitor->VisitPointer(last_buffer[slot_idx]);
+    UpdateSlot(last_buffer[slot_idx]);
   }
 }
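The hunk above replaces the generic SlotsBuffer::Iterate(ObjectVisitor*) with a dedicated Update() pass: each recorded slot is re-read, and if its target's map word holds a forwarding address the slot is rewritten to point at the new location; the ASSERTs check that only pointers into evacuation candidates get rewritten. The buffer itself is a grow-only list of fixed-size chunks, with buffer_idx_ and idx_ tracking the current chunk and the write position within it. A self-contained sketch of that layout, assuming 1024-slot chunks and a caller-supplied forwarding function (both assumptions, not V8's constants):

    #include <vector>

    using Slot = void**;  // address of a pointer field inside an object

    class SlotsBufferSketch {
     public:
      static const int kChunkSize = 1024;

      ~SlotsBufferSketch() {
        for (Slot* chunk : chunks_) delete[] chunk;
      }

      // Record the address of a slot that may point into a candidate page.
      void Add(Slot slot) {
        if (chunks_.empty() || used_in_last_ == kChunkSize) {
          chunks_.push_back(new Slot[kChunkSize]);
          used_in_last_ = 0;
        }
        chunks_.back()[used_in_last_++] = slot;
      }

      // Re-read every recorded slot and replace stale pointers.
      template <typename ForwardFn>
      void Update(ForwardFn forward) {
        for (size_t c = 0; c < chunks_.size(); ++c) {
          int limit = (c + 1 == chunks_.size()) ? used_in_last_ : kChunkSize;
          for (int i = 0; i < limit; ++i) {
            Slot s = chunks_[c][i];
            *s = forward(*s);  // no-op unless the target was evacuated
          }
        }
      }

     private:
      std::vector<Slot*> chunks_;  // fixed-size chunks, grown on demand
      int used_in_last_ = 0;       // write position in the last chunk
    };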


 void SlotsBuffer::Report() {
 }


 } }  // namespace v8::internal