| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 11 matching lines...) |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #include "compilation-cache.h" | 30 #include "compilation-cache.h" |
| 31 #include "execution.h" | 31 #include "execution.h" |
| 32 #include "heap-profiler.h" | |
| 33 #include "gdb-jit.h" | 32 #include "gdb-jit.h" |
| 34 #include "global-handles.h" | 33 #include "global-handles.h" |
| 34 #include "heap-profiler.h" |
| 35 #include "ic-inl.h" | 35 #include "ic-inl.h" |
| 36 #include "incremental-marking.h" |
| 36 #include "liveobjectlist-inl.h" | 37 #include "liveobjectlist-inl.h" |
| 37 #include "mark-compact.h" | 38 #include "mark-compact.h" |
| 38 #include "objects-visiting.h" | 39 #include "objects-visiting.h" |
| 40 #include "objects-visiting-inl.h" |
| 39 #include "stub-cache.h" | 41 #include "stub-cache.h" |
| 40 | 42 |
| 41 namespace v8 { | 43 namespace v8 { |
| 42 namespace internal { | 44 namespace internal { |
| 43 | 45 |
| 46 |
| 47 const char* Marking::kWhiteBitPattern = "00"; |
| 48 const char* Marking::kBlackBitPattern = "10"; |
| 49 const char* Marking::kGreyBitPattern = "11"; |
| 50 const char* Marking::kImpossibleBitPattern = "01"; |
| 51 |
| 52 |
| 44 // ------------------------------------------------------------------------- | 53 // ------------------------------------------------------------------------- |
| 45 // MarkCompactCollector | 54 // MarkCompactCollector |
| 46 | 55 |
| 47 MarkCompactCollector::MarkCompactCollector() : // NOLINT | 56 MarkCompactCollector::MarkCompactCollector() : // NOLINT |
| 48 #ifdef DEBUG | 57 #ifdef DEBUG |
| 49 state_(IDLE), | 58 state_(IDLE), |
| 50 #endif | 59 #endif |
| 51 force_compaction_(false), | 60 sweep_precisely_(false), |
| 52 compacting_collection_(false), | 61 compacting_(false), |
| 53 compact_on_next_gc_(false), | 62 collect_maps_(FLAG_collect_maps), |
| 54 previous_marked_count_(0), | |
| 55 tracer_(NULL), | 63 tracer_(NULL), |
| 64 migration_slots_buffer_(NULL), |
| 56 #ifdef DEBUG | 65 #ifdef DEBUG |
| 57 live_young_objects_size_(0), | 66 live_young_objects_size_(0), |
| 58 live_old_pointer_objects_size_(0), | 67 live_old_pointer_objects_size_(0), |
| 59 live_old_data_objects_size_(0), | 68 live_old_data_objects_size_(0), |
| 60 live_code_objects_size_(0), | 69 live_code_objects_size_(0), |
| 61 live_map_objects_size_(0), | 70 live_map_objects_size_(0), |
| 62 live_cell_objects_size_(0), | 71 live_cell_objects_size_(0), |
| 63 live_lo_objects_size_(0), | 72 live_lo_objects_size_(0), |
| 64 live_bytes_(0), | 73 live_bytes_(0), |
| 65 #endif | 74 #endif |
| 66 heap_(NULL), | 75 heap_(NULL), |
| 67 code_flusher_(NULL), | 76 code_flusher_(NULL), |
| 68 encountered_weak_maps_(NULL) { } | 77 encountered_weak_maps_(NULL) { } |
| 69 | 78 |
| 70 | 79 |
| 80 #ifdef DEBUG |
| 81 class VerifyMarkingVisitor: public ObjectVisitor { |
| 82 public: |
| 83 void VisitPointers(Object** start, Object** end) { |
| 84 for (Object** current = start; current < end; current++) { |
| 85 if ((*current)->IsHeapObject()) { |
| 86 HeapObject* object = HeapObject::cast(*current); |
| 87 ASSERT(HEAP->mark_compact_collector()->IsMarked(object)); |
| 88 } |
| 89 } |
| 90 } |
| 91 }; |
| 92 |
| 93 |
| 94 static void VerifyMarking(Address bottom, Address top) { |
| 95 VerifyMarkingVisitor visitor; |
| 96 HeapObject* object; |
| 97 Address next_object_must_be_here_or_later = bottom; |
| 98 |
| 99 for (Address current = bottom; |
| 100 current < top; |
| 101 current += kPointerSize) { |
| 102 object = HeapObject::FromAddress(current); |
| 103 if (MarkCompactCollector::IsMarked(object)) { |
| 104 ASSERT(current >= next_object_must_be_here_or_later); |
| 105 object->Iterate(&visitor); |
| 106 next_object_must_be_here_or_later = current + object->Size(); |
| 107 } |
| 108 } |
| 109 } |
| 110 |
| 111 |
| 112 static void VerifyMarking(Page* p) { |
| 113 VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd()); |
| 114 } |
| 115 |
| 116 |
| 117 static void VerifyMarking(NewSpace* space) { |
| 118 Address end = space->top(); |
| 119 NewSpacePageIterator it(space->bottom(), end); |
| 120 // The bottom position is at the start of its page. This allows us to use |
| 121 // page->body() as start of range on all pages. |
| 122 ASSERT_EQ(space->bottom(), |
| 123 NewSpacePage::FromAddress(space->bottom())->body()); |
| 124 while (it.has_next()) { |
| 125 NewSpacePage* page = it.next(); |
| 126 Address limit = it.has_next() ? page->body_limit() : end; |
| 127 ASSERT(limit == end || !page->Contains(end)); |
| 128 VerifyMarking(page->body(), limit); |
| 129 } |
| 130 } |
| 131 |
| 132 |
| 133 static void VerifyMarking(PagedSpace* space) { |
| 134 PageIterator it(space); |
| 135 |
| 136 while (it.has_next()) { |
| 137 VerifyMarking(it.next()); |
| 138 } |
| 139 } |
| 140 |
| 141 |
| 142 static void VerifyMarking(Heap* heap) { |
| 143 VerifyMarking(heap->old_pointer_space()); |
| 144 VerifyMarking(heap->old_data_space()); |
| 145 VerifyMarking(heap->code_space()); |
| 146 VerifyMarking(heap->cell_space()); |
| 147 VerifyMarking(heap->map_space()); |
| 148 VerifyMarking(heap->new_space()); |
| 149 |
| 150 VerifyMarkingVisitor visitor; |
| 151 |
| 152 LargeObjectIterator it(heap->lo_space()); |
| 153 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 154 if (MarkCompactCollector::IsMarked(obj)) { |
| 155 obj->Iterate(&visitor); |
| 156 } |
| 157 } |
| 158 |
| 159 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); |
| 160 } |
| 161 |
| 162 |
| 163 class VerifyEvacuationVisitor: public ObjectVisitor { |
| 164 public: |
| 165 void VisitPointers(Object** start, Object** end) { |
| 166 for (Object** current = start; current < end; current++) { |
| 167 if ((*current)->IsHeapObject()) { |
| 168 HeapObject* object = HeapObject::cast(*current); |
| 169 if (MarkCompactCollector::IsOnEvacuationCandidate(object)) { |
| 170 CHECK(false); |
| 171 } |
| 172 } |
| 173 } |
| 174 } |
| 175 |
| 176 HeapObject* source_; |
| 177 }; |
| 178 |
| 179 |
| 180 static void VerifyEvacuation(Address bottom, Address top) { |
| 181 VerifyEvacuationVisitor visitor; |
| 182 HeapObject* object; |
| 183 Address next_object_must_be_here_or_later = bottom; |
| 184 |
| 185 for (Address current = bottom; |
| 186 current < top; |
| 187 current += kPointerSize) { |
| 188 object = HeapObject::FromAddress(current); |
| 189 if (MarkCompactCollector::IsMarked(object)) { |
| 190 ASSERT(current >= next_object_must_be_here_or_later); |
| 191 visitor.source_ = object; |
| 192 object->Iterate(&visitor); |
| 193 next_object_must_be_here_or_later = current + object->Size(); |
| 194 } |
| 195 } |
| 196 } |
| 197 |
| 198 |
| 199 static void VerifyEvacuation(Page* p) { |
| 200 if (p->IsEvacuationCandidate()) return; |
| 201 |
| 202 VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd()); |
| 203 } |
| 204 |
| 205 |
| 206 static void VerifyEvacuation(NewSpace* space) { |
| 207 // TODO(gc) Verify evacuation for new space. |
| 208 } |
| 209 |
| 210 |
| 211 static void VerifyEvacuation(PagedSpace* space) { |
| 212 PageIterator it(space); |
| 213 |
| 214 while (it.has_next()) { |
| 215 VerifyEvacuation(it.next()); |
| 216 } |
| 217 } |
| 218 |
| 219 |
| 220 static void VerifyEvacuation(Heap* heap) { |
| 221 VerifyEvacuation(heap->old_pointer_space()); |
| 222 VerifyEvacuation(heap->old_data_space()); |
| 223 VerifyEvacuation(heap->code_space()); |
| 224 VerifyEvacuation(heap->cell_space()); |
| 225 VerifyEvacuation(heap->map_space()); |
| 226 VerifyEvacuation(heap->new_space()); |
| 227 |
| 228 VerifyEvacuationVisitor visitor; |
| 229 heap->IterateStrongRoots(&visitor, VISIT_ALL); |
| 230 } |
| 231 #endif |
| 232 |
| 233 |
| 234 void MarkCompactCollector::AddEvacuationCandidate(Page* p) { |
| 235 p->MarkEvacuationCandidate(); |
| 236 evacuation_candidates_.Add(p); |
| 237 } |
| 238 |
| 239 |
| 240 bool MarkCompactCollector::StartCompaction() { |
| 241 // Don't start compaction if we are in the middle of an incremental |
| 242 // marking cycle. We did not collect any slots. |
| 243 if (!compacting_ && !heap_->incremental_marking()->IsMarking()) { |
| 244 ASSERT(evacuation_candidates_.length() == 0); |
| 245 |
| 246 CollectEvacuationCandidates(heap()->old_pointer_space()); |
| 247 CollectEvacuationCandidates(heap()->old_data_space()); |
| 248 CollectEvacuationCandidates(heap()->code_space()); |
| 249 |
| 250 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists(); |
| 251 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); |
| 252 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); |
| 253 |
| 254 compacting_ = evacuation_candidates_.length() > 0; |
| 255 } |
| 256 |
| 257 return compacting_; |
| 258 } |
| 259 |
| 260 |
| 71 void MarkCompactCollector::CollectGarbage() { | 261 void MarkCompactCollector::CollectGarbage() { |
| 72 // Make sure that Prepare() has been called. The individual steps below will | 262 // Make sure that Prepare() has been called. The individual steps below will |
| 73 // update the state as they proceed. | 263 // update the state as they proceed. |
| 74 ASSERT(state_ == PREPARE_GC); | 264 ASSERT(state_ == PREPARE_GC); |
| 75 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); | 265 ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); |
| 76 | 266 |
| 77 // Prepare has selected whether to compact the old generation or not. | |
| 78 // Tell the tracer. | |
| 79 if (IsCompacting()) tracer_->set_is_compacting(); | |
| 80 | |
| 81 MarkLiveObjects(); | 267 MarkLiveObjects(); |
| 82 | 268 ASSERT(heap_->incremental_marking()->IsStopped()); |
| 83 if (FLAG_collect_maps) ClearNonLiveTransitions(); | 269 |
| 270 if (collect_maps_) ClearNonLiveTransitions(); |
| 84 | 271 |
| 85 ClearWeakMaps(); | 272 ClearWeakMaps(); |
| 86 | 273 |
| 87 SweepLargeObjectSpace(); | 274 #ifdef DEBUG |
| 88 | 275 if (FLAG_verify_heap) { |
| 89 if (IsCompacting()) { | 276 VerifyMarking(heap_); |
| 90 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT); | 277 } |
| 91 EncodeForwardingAddresses(); | 278 #endif |
| 92 | 279 |
| 93 heap()->MarkMapPointersAsEncoded(true); | 280 SweepSpaces(); |
| 94 UpdatePointers(); | 281 |
| 95 heap()->MarkMapPointersAsEncoded(false); | 282 if (!collect_maps_) ReattachInitialMaps(); |
| 96 heap()->isolate()->pc_to_code_cache()->Flush(); | 283 |
| 97 | 284 heap_->isolate()->pc_to_code_cache()->Flush(); |
| 98 RelocateObjects(); | |
| 99 } else { | |
| 100 SweepSpaces(); | |
| 101 heap()->isolate()->pc_to_code_cache()->Flush(); | |
| 102 } | |
| 103 | 285 |
| 104 Finish(); | 286 Finish(); |
| 105 | 287 |
| 106 // Save the count of marked objects remaining after the collection and | |
| 107 // null out the GC tracer. | |
| 108 previous_marked_count_ = tracer_->marked_count(); | |
| 109 ASSERT(previous_marked_count_ == 0); | |
| 110 tracer_ = NULL; | 288 tracer_ = NULL; |
| 111 } | 289 } |
| 112 | 290 |
| 113 | 291 |
| 292 #ifdef DEBUG |
| 293 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { |
| 294 PageIterator it(space); |
| 295 |
| 296 while (it.has_next()) { |
| 297 Page* p = it.next(); |
| 298 ASSERT(p->markbits()->IsClean()); |
| 299 } |
| 300 } |
| 301 |
| 302 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) { |
| 303 NewSpacePageIterator it(space->bottom(), space->top()); |
| 304 |
| 305 while (it.has_next()) { |
| 306 NewSpacePage* p = it.next(); |
| 307 ASSERT(p->markbits()->IsClean()); |
| 308 } |
| 309 } |
| 310 |
| 311 void MarkCompactCollector::VerifyMarkbitsAreClean() { |
| 312 VerifyMarkbitsAreClean(heap_->old_pointer_space()); |
| 313 VerifyMarkbitsAreClean(heap_->old_data_space()); |
| 314 VerifyMarkbitsAreClean(heap_->code_space()); |
| 315 VerifyMarkbitsAreClean(heap_->cell_space()); |
| 316 VerifyMarkbitsAreClean(heap_->map_space()); |
| 317 VerifyMarkbitsAreClean(heap_->new_space()); |
| 318 |
| 319 LargeObjectIterator it(heap_->lo_space()); |
| 320 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 321 MarkBit mark_bit = Marking::MarkBitFrom(obj); |
| 322 ASSERT(Marking::IsWhite(mark_bit)); |
| 323 } |
| 324 } |
| 325 #endif |
| 326 |
| 327 |
| 328 static void ClearMarkbits(PagedSpace* space) { |
| 329 PageIterator it(space); |
| 330 |
| 331 while (it.has_next()) { |
| 332 Bitmap::Clear(it.next()); |
| 333 } |
| 334 } |
| 335 |
| 336 |
| 337 static void ClearMarkbits(NewSpace* space) { |
| 338 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); |
| 339 |
| 340 while (it.has_next()) { |
| 341 Bitmap::Clear(it.next()); |
| 342 } |
| 343 } |
| 344 |
| 345 |
| 346 static void ClearMarkbits(Heap* heap) { |
| 347 ClearMarkbits(heap->code_space()); |
| 348 ClearMarkbits(heap->map_space()); |
| 349 ClearMarkbits(heap->old_pointer_space()); |
| 350 ClearMarkbits(heap->old_data_space()); |
| 351 ClearMarkbits(heap->cell_space()); |
| 352 ClearMarkbits(heap->new_space()); |
| 353 |
| 354 LargeObjectIterator it(heap->lo_space()); |
| 355 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 356 MarkBit mark_bit = Marking::MarkBitFrom(obj); |
| 357 mark_bit.Clear(); |
| 358 mark_bit.Next().Clear(); |
| 359 } |
| 360 } |
| 361 |
| 362 |
| 363 bool Marking::TransferMark(Address old_start, Address new_start) { |
| 364 // This is only used when resizing an object. |
| 365 ASSERT(MemoryChunk::FromAddress(old_start) == |
| 366 MemoryChunk::FromAddress(new_start)); |
| 367 // If the mark doesn't move, we don't check the color of the object. |
| 368 // It doesn't matter whether the object is black, since it hasn't changed |
| 369 // size, so the adjustment to the live data count will be zero anyway. |
| 370 if (old_start == new_start) return false; |
| 371 |
| 372 MarkBit new_mark_bit = MarkBitFrom(new_start); |
| 373 |
| 374 if (heap_->incremental_marking()->IsMarking()) { |
| 375 MarkBit old_mark_bit = MarkBitFrom(old_start); |
| 376 #ifdef DEBUG |
| 377 ObjectColor old_color = Color(old_mark_bit); |
| 378 #endif |
| 379 if (Marking::IsBlack(old_mark_bit)) { |
| 380 Marking::MarkBlack(new_mark_bit); |
| 381 old_mark_bit.Clear(); |
| 382 return true; |
| 383 } else if (Marking::IsGrey(old_mark_bit)) { |
| 384 old_mark_bit.Next().Clear(); |
| 385 heap_->incremental_marking()->WhiteToGreyAndPush( |
| 386 HeapObject::FromAddress(new_start), new_mark_bit); |
| 387 heap_->incremental_marking()->RestartIfNotMarking(); |
| 388 } |
| 389 |
| 390 #ifdef DEBUG |
| 391 ObjectColor new_color = Color(new_mark_bit); |
| 392 ASSERT(new_color == old_color); |
| 393 #endif |
| 394 return false; |
| 395 } |
| 396 MarkBit old_mark_bit = MarkBitFrom(old_start); |
| 397 if (!old_mark_bit.Get()) { |
| 398 return false; |
| 399 } |
| 400 new_mark_bit.Set(); |
| 401 return true; |
| 402 } |
| 403 |
| 404 |
| 405 static const char* AllocationSpaceName(AllocationSpace space) { |
| 406 switch (space) { |
| 407 case NEW_SPACE: return "NEW_SPACE"; |
| 408 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE"; |
| 409 case OLD_DATA_SPACE: return "OLD_DATA_SPACE"; |
| 410 case CODE_SPACE: return "CODE_SPACE"; |
| 411 case MAP_SPACE: return "MAP_SPACE"; |
| 412 case CELL_SPACE: return "CELL_SPACE"; |
| 413 case LO_SPACE: return "LO_SPACE"; |
| 414 default: |
| 415 UNREACHABLE(); |
| 416 } |
| 417 |
| 418 return NULL; |
| 419 } |
| 420 |
| 421 |
| 422 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { |
| 423 ASSERT(space->identity() == OLD_POINTER_SPACE || |
| 424 space->identity() == OLD_DATA_SPACE || |
| 425 space->identity() == CODE_SPACE); |
| 426 |
| 427 PageIterator it(space); |
| 428 int count = 0; |
| 429 if (it.has_next()) it.next(); // Never compact the first page. |
| 430 while (it.has_next()) { |
| 431 Page* p = it.next(); |
| 432 if (space->IsFragmented(p)) { |
| 433 AddEvacuationCandidate(p); |
| 434 count++; |
| 435 } else { |
| 436 p->ClearEvacuationCandidate(); |
| 437 } |
| 438 } |
| 439 |
| 440 if (count > 0 && FLAG_trace_fragmentation) { |
| 441 PrintF("Collected %d evacuation candidates for space %s\n", |
| 442 count, |
| 443 AllocationSpaceName(space->identity())); |
| 444 } |
| 445 } |
| 446 |
| 447 |
| 114 void MarkCompactCollector::Prepare(GCTracer* tracer) { | 448 void MarkCompactCollector::Prepare(GCTracer* tracer) { |
| 449 FLAG_flush_code = false; |
| 450 |
| 451 // Disable collection of maps if incremental marking is enabled. |
| 452 // Map collection algorithm relies on a special map transition tree traversal |
| 453 // order which is not implemented for incremental marking. |
| 454 collect_maps_ = FLAG_collect_maps && |
| 455 !heap()->incremental_marking()->IsMarking(); |
| 456 |
| 115 // Rather than passing the tracer around we stash it in a static member | 457 // Rather than passing the tracer around we stash it in a static member |
| 116 // variable. | 458 // variable. |
| 117 tracer_ = tracer; | 459 tracer_ = tracer; |
| 118 | 460 |
| 119 #ifdef DEBUG | 461 #ifdef DEBUG |
| 120 ASSERT(state_ == IDLE); | 462 ASSERT(state_ == IDLE); |
| 121 state_ = PREPARE_GC; | 463 state_ = PREPARE_GC; |
| 122 #endif | 464 #endif |
| 123 ASSERT(!FLAG_always_compact || !FLAG_never_compact); | 465 ASSERT(!FLAG_always_compact || !FLAG_never_compact); |
| 124 | 466 |
| 125 compacting_collection_ = | 467 if (collect_maps_) CreateBackPointers(); |
| 126 FLAG_always_compact || force_compaction_ || compact_on_next_gc_; | |
| 127 compact_on_next_gc_ = false; | |
| 128 | |
| 129 if (FLAG_never_compact) compacting_collection_ = false; | |
| 130 if (!heap()->map_space()->MapPointersEncodable()) | |
| 131 compacting_collection_ = false; | |
| 132 if (FLAG_collect_maps) CreateBackPointers(); | |
| 133 #ifdef ENABLE_GDB_JIT_INTERFACE | 468 #ifdef ENABLE_GDB_JIT_INTERFACE |
| 134 if (FLAG_gdbjit) { | 469 if (FLAG_gdbjit) { |
| 135 // If GDBJIT interface is active disable compaction. | 470 // If GDBJIT interface is active disable compaction. |
| 136 compacting_collection_ = false; | 471 compacting_collection_ = false; |
| 137 } | 472 } |
| 138 #endif | 473 #endif |
| 139 | 474 |
| 475 // Clear marking bits for precise sweeping to collect all garbage. |
| 476 if (heap()->incremental_marking()->IsMarking() && PreciseSweepingRequired()) { |
| 477 heap()->incremental_marking()->Abort(); |
| 478 ClearMarkbits(heap_); |
| 479 } |
| 480 |
| 481 if (!FLAG_never_compact) StartCompaction(); |
| 482 |
| 140 PagedSpaces spaces; | 483 PagedSpaces spaces; |
| 141 for (PagedSpace* space = spaces.next(); | 484 for (PagedSpace* space = spaces.next(); |
| 142 space != NULL; space = spaces.next()) { | 485 space != NULL; |
| 143 space->PrepareForMarkCompact(compacting_collection_); | 486 space = spaces.next()) { |
| 487 space->PrepareForMarkCompact(); |
| 144 } | 488 } |
| 145 | 489 |
| 146 #ifdef DEBUG | 490 #ifdef DEBUG |
| 491 if (!heap()->incremental_marking()->IsMarking()) { |
| 492 VerifyMarkbitsAreClean(); |
| 493 } |
| 494 #endif |
| 495 |
| 496 #ifdef DEBUG |
| 147 live_bytes_ = 0; | 497 live_bytes_ = 0; |
| 148 live_young_objects_size_ = 0; | 498 live_young_objects_size_ = 0; |
| 149 live_old_pointer_objects_size_ = 0; | 499 live_old_pointer_objects_size_ = 0; |
| 150 live_old_data_objects_size_ = 0; | 500 live_old_data_objects_size_ = 0; |
| 151 live_code_objects_size_ = 0; | 501 live_code_objects_size_ = 0; |
| 152 live_map_objects_size_ = 0; | 502 live_map_objects_size_ = 0; |
| 153 live_cell_objects_size_ = 0; | 503 live_cell_objects_size_ = 0; |
| 154 live_lo_objects_size_ = 0; | 504 live_lo_objects_size_ = 0; |
| 155 #endif | 505 #endif |
| 156 } | 506 } |
| 157 | 507 |
| 158 | 508 |
| 159 void MarkCompactCollector::Finish() { | 509 void MarkCompactCollector::Finish() { |
| 160 #ifdef DEBUG | 510 #ifdef DEBUG |
| 161 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); | 511 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); |
| 162 state_ = IDLE; | 512 state_ = IDLE; |
| 163 #endif | 513 #endif |
| 164 // The stub cache is not traversed during GC; clear the cache to | 514 // The stub cache is not traversed during GC; clear the cache to |
| 165 // force lazy re-initialization of it. This must be done after the | 515 // force lazy re-initialization of it. This must be done after the |
| 166 // GC, because it relies on the new address of certain old space | 516 // GC, because it relies on the new address of certain old space |
| 167 // objects (empty string, illegal builtin). | 517 // objects (empty string, illegal builtin). |
| 168 heap()->isolate()->stub_cache()->Clear(); | 518 heap()->isolate()->stub_cache()->Clear(); |
| 169 | 519 |
| 170 heap()->external_string_table_.CleanUp(); | 520 heap()->external_string_table_.CleanUp(); |
| 171 | |
| 172 // If we've just compacted old space there's no reason to check the | |
| 173 // fragmentation limit. Just return. | |
| 174 if (HasCompacted()) return; | |
| 175 | |
| 176 // We compact the old generation on the next GC if it has gotten too | |
| 177 // fragmented (ie, we could recover an expected amount of space by | |
| 178 // reclaiming the waste and free list blocks). | |
| 179 static const int kFragmentationLimit = 15; // Percent. | |
| 180 static const int kFragmentationAllowed = 1 * MB; // Absolute. | |
| 181 intptr_t old_gen_recoverable = 0; | |
| 182 intptr_t old_gen_used = 0; | |
| 183 | |
| 184 OldSpaces spaces; | |
| 185 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { | |
| 186 old_gen_recoverable += space->Waste() + space->AvailableFree(); | |
| 187 old_gen_used += space->Size(); | |
| 188 } | |
| 189 | |
| 190 int old_gen_fragmentation = | |
| 191 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); | |
| 192 if (old_gen_fragmentation > kFragmentationLimit && | |
| 193 old_gen_recoverable > kFragmentationAllowed) { | |
| 194 compact_on_next_gc_ = true; | |
| 195 } | |
| 196 } | 521 } |
| 197 | 522 |
| 198 | 523 |
| 199 // ------------------------------------------------------------------------- | 524 // ------------------------------------------------------------------------- |
| 200 // Phase 1: tracing and marking live objects. | 525 // Phase 1: tracing and marking live objects. |
| 201 // before: all objects are in normal state. | 526 // before: all objects are in normal state. |
| 202 // after: a live object's map pointer is marked as '00'. | 527 // after: a live object's map pointer is marked as '00'. |
| 203 | 528 |
| 204 // Marking all live objects in the heap as part of mark-sweep or mark-compact | 529 // Marking all live objects in the heap as part of mark-sweep or mark-compact |
| 205 // collection. Before marking, all objects are in their normal state. After | 530 // collection. Before marking, all objects are in their normal state. After |
| (...skipping 48 matching lines...) |
| 254 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile); | 579 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile); |
| 255 | 580 |
| 256 JSFunction* candidate = jsfunction_candidates_head_; | 581 JSFunction* candidate = jsfunction_candidates_head_; |
| 257 JSFunction* next_candidate; | 582 JSFunction* next_candidate; |
| 258 while (candidate != NULL) { | 583 while (candidate != NULL) { |
| 259 next_candidate = GetNextCandidate(candidate); | 584 next_candidate = GetNextCandidate(candidate); |
| 260 | 585 |
| 261 SharedFunctionInfo* shared = candidate->unchecked_shared(); | 586 SharedFunctionInfo* shared = candidate->unchecked_shared(); |
| 262 | 587 |
| 263 Code* code = shared->unchecked_code(); | 588 Code* code = shared->unchecked_code(); |
| 264 if (!code->IsMarked()) { | 589 MarkBit code_mark = Marking::MarkBitFrom(code); |
| 590 if (!code_mark.Get()) { |
| 265 shared->set_code(lazy_compile); | 591 shared->set_code(lazy_compile); |
| 266 candidate->set_code(lazy_compile); | 592 candidate->set_code(lazy_compile); |
| 267 } else { | 593 } else { |
| 268 candidate->set_code(shared->unchecked_code()); | 594 candidate->set_code(shared->unchecked_code()); |
| 269 } | 595 } |
| 270 | 596 |
| 597 // We are in the middle of a GC cycle so the write barrier in the code |
| 598 // setter did not record the slot update and we have to do that manually. |
| 599 Address slot = candidate->address() + JSFunction::kCodeEntryOffset; |
| 600 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot)); |
| 601 isolate_->heap()->mark_compact_collector()-> |
| 602 RecordCodeEntrySlot(slot, target); |
| 603 |
| 271 candidate = next_candidate; | 604 candidate = next_candidate; |
| 272 } | 605 } |
| 273 | 606 |
| 274 jsfunction_candidates_head_ = NULL; | 607 jsfunction_candidates_head_ = NULL; |
| 275 } | 608 } |
| 276 | 609 |
| 277 | 610 |
| 278 void ProcessSharedFunctionInfoCandidates() { | 611 void ProcessSharedFunctionInfoCandidates() { |
| 279 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile); | 612 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile); |
| 280 | 613 |
| 281 SharedFunctionInfo* candidate = shared_function_info_candidates_head_; | 614 SharedFunctionInfo* candidate = shared_function_info_candidates_head_; |
| 282 SharedFunctionInfo* next_candidate; | 615 SharedFunctionInfo* next_candidate; |
| 283 while (candidate != NULL) { | 616 while (candidate != NULL) { |
| 284 next_candidate = GetNextCandidate(candidate); | 617 next_candidate = GetNextCandidate(candidate); |
| 285 SetNextCandidate(candidate, NULL); | 618 SetNextCandidate(candidate, NULL); |
| 286 | 619 |
| 287 Code* code = candidate->unchecked_code(); | 620 Code* code = candidate->unchecked_code(); |
| 288 if (!code->IsMarked()) { | 621 MarkBit code_mark = Marking::MarkBitFrom(code); |
| 622 if (!code_mark.Get()) { |
| 289 candidate->set_code(lazy_compile); | 623 candidate->set_code(lazy_compile); |
| 290 } | 624 } |
| 291 | 625 |
| 292 candidate = next_candidate; | 626 candidate = next_candidate; |
| 293 } | 627 } |
| 294 | 628 |
| 295 shared_function_info_candidates_head_ = NULL; | 629 shared_function_info_candidates_head_ = NULL; |
| 296 } | 630 } |
| 297 | 631 |
| 298 static JSFunction** GetNextCandidateField(JSFunction* candidate) { | 632 static JSFunction** GetNextCandidateField(JSFunction* candidate) { |
| (...skipping 49 matching lines...) |
| 348 // | 682 // |
| 349 // Here we assume that if we change *p, we replace it with a heap object | 683 // Here we assume that if we change *p, we replace it with a heap object |
| 350 // (ie, the left substring of a cons string is always a heap object). | 684 // (ie, the left substring of a cons string is always a heap object). |
| 351 // | 685 // |
| 352 // The check performed is: | 686 // The check performed is: |
| 353 // object->IsConsString() && !object->IsSymbol() && | 687 // object->IsConsString() && !object->IsSymbol() && |
| 354 // (ConsString::cast(object)->second() == HEAP->empty_string()) | 688 // (ConsString::cast(object)->second() == HEAP->empty_string()) |
| 355 // except the maps for the object and its possible substrings might be | 689 // except the maps for the object and its possible substrings might be |
| 356 // marked. | 690 // marked. |
| 357 HeapObject* object = HeapObject::cast(*p); | 691 HeapObject* object = HeapObject::cast(*p); |
| 358 MapWord map_word = object->map_word(); | 692 Map* map = object->map(); |
| 359 map_word.ClearMark(); | 693 InstanceType type = map->instance_type(); |
| 360 InstanceType type = map_word.ToMap()->instance_type(); | |
| 361 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; | 694 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; |
| 362 | 695 |
| 363 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); | 696 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); |
| 364 Heap* heap = map_word.ToMap()->heap(); | 697 Heap* heap = map->GetHeap(); |
| 365 if (second != heap->raw_unchecked_empty_string()) { | 698 if (second != heap->empty_string()) { |
| 366 return object; | 699 return object; |
| 367 } | 700 } |
| 368 | 701 |
| 369 // Since we don't have the object's start, it is impossible to update the | 702 // Since we don't have the object's start, it is impossible to update the |
| 370 // page dirty marks. Therefore, we only replace the string with its left | 703 // page dirty marks. Therefore, we only replace the string with its left |
| 371 // substring when page dirty marks do not change. | 704 // substring when page dirty marks do not change. |
| 372 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); | 705 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); |
| 373 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object; | 706 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object; |
| 374 | 707 |
| 375 *p = first; | 708 *p = first; |
| (...skipping 21 matching lines...) |
| 397 table_.Register(kVisitSlicedString, | 730 table_.Register(kVisitSlicedString, |
| 398 &FixedBodyVisitor<StaticMarkingVisitor, | 731 &FixedBodyVisitor<StaticMarkingVisitor, |
| 399 SlicedString::BodyDescriptor, | 732 SlicedString::BodyDescriptor, |
| 400 void>::Visit); | 733 void>::Visit); |
| 401 | 734 |
| 402 table_.Register(kVisitFixedArray, | 735 table_.Register(kVisitFixedArray, |
| 403 &FlexibleBodyVisitor<StaticMarkingVisitor, | 736 &FlexibleBodyVisitor<StaticMarkingVisitor, |
| 404 FixedArray::BodyDescriptor, | 737 FixedArray::BodyDescriptor, |
| 405 void>::Visit); | 738 void>::Visit); |
| 406 | 739 |
| 740 table_.Register(kVisitGlobalContext, &VisitGlobalContext); |
| 741 |
| 407 table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit); | 742 table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit); |
| 408 | 743 |
| 409 table_.Register(kVisitGlobalContext, | |
| 410 &FixedBodyVisitor<StaticMarkingVisitor, | |
| 411 Context::MarkCompactBodyDescriptor, | |
| 412 void>::Visit); | |
| 413 | |
| 414 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); | 744 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); |
| 745 table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit); |
| 415 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); | 746 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); |
| 416 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); | 747 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); |
| 417 | 748 |
| 418 table_.Register(kVisitJSWeakMap, &VisitJSWeakMap); | 749 table_.Register(kVisitJSWeakMap, &VisitJSWeakMap); |
| 419 | 750 |
| 420 table_.Register(kVisitOddball, | 751 table_.Register(kVisitOddball, |
| 421 &FixedBodyVisitor<StaticMarkingVisitor, | 752 &FixedBodyVisitor<StaticMarkingVisitor, |
| 422 Oddball::BodyDescriptor, | 753 Oddball::BodyDescriptor, |
| 423 void>::Visit); | 754 void>::Visit); |
| 424 table_.Register(kVisitMap, | 755 table_.Register(kVisitMap, |
| (...skipping 24 matching lines...) |
| 449 table_.RegisterSpecializations<JSObjectVisitor, | 780 table_.RegisterSpecializations<JSObjectVisitor, |
| 450 kVisitJSObject, | 781 kVisitJSObject, |
| 451 kVisitJSObjectGeneric>(); | 782 kVisitJSObjectGeneric>(); |
| 452 | 783 |
| 453 table_.RegisterSpecializations<StructObjectVisitor, | 784 table_.RegisterSpecializations<StructObjectVisitor, |
| 454 kVisitStruct, | 785 kVisitStruct, |
| 455 kVisitStructGeneric>(); | 786 kVisitStructGeneric>(); |
| 456 } | 787 } |
| 457 | 788 |
| 458 INLINE(static void VisitPointer(Heap* heap, Object** p)) { | 789 INLINE(static void VisitPointer(Heap* heap, Object** p)) { |
| 459 MarkObjectByPointer(heap, p); | 790 MarkObjectByPointer(heap->mark_compact_collector(), p, p); |
| 460 } | 791 } |
| 461 | 792 |
| 462 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { | 793 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { |
| 463 // Mark all objects pointed to in [start, end). | 794 // Mark all objects pointed to in [start, end). |
| 464 const int kMinRangeForMarkingRecursion = 64; | 795 const int kMinRangeForMarkingRecursion = 64; |
| 465 if (end - start >= kMinRangeForMarkingRecursion) { | 796 if (end - start >= kMinRangeForMarkingRecursion) { |
| 466 if (VisitUnmarkedObjects(heap, start, end)) return; | 797 if (VisitUnmarkedObjects(heap, start, end)) return; |
| 467 // We are close to a stack overflow, so just mark the objects. | 798 // We are close to a stack overflow, so just mark the objects. |
| 468 } | 799 } |
| 469 for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p); | 800 MarkCompactCollector* collector = heap->mark_compact_collector(); |
| 470 } | 801 for (Object** p = start; p < end; p++) { |
| 471 | 802 MarkObjectByPointer(collector, start, p); |
| 472 static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) { | |
| 473 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); | |
| 474 Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address()); | |
| 475 if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) { | |
| 476 IC::Clear(rinfo->pc()); | |
| 477 // Please note targets for cleared inline caches do not have to be | |
| 478 // marked since they are contained in HEAP->non_monomorphic_cache(). | |
| 479 } else { | |
| 480 heap->mark_compact_collector()->MarkObject(code); | |
| 481 } | 803 } |
| 482 } | 804 } |
| 483 | 805 |
| 484 static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) { | 806 static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) { |
| 485 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); | 807 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); |
| 486 Object* cell = rinfo->target_cell(); | 808 JSGlobalPropertyCell* cell = |
| 487 Object* old_cell = cell; | 809 JSGlobalPropertyCell::cast(rinfo->target_cell()); |
| 488 VisitPointer(heap, &cell); | 810 MarkBit mark = Marking::MarkBitFrom(cell); |
| 489 if (cell != old_cell) { | 811 heap->mark_compact_collector()->MarkObject(cell, mark); |
| 490 rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell)); | 812 } |
| 813 |
| 814 static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) { |
| 815 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); |
| 816 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
| 817 if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) { |
| 818 IC::Clear(rinfo->pc()); |
| 819 // Please note targets for cleared inline caches do not have to be |
| 820 // marked since they are contained in HEAP->non_monomorphic_cache(). |
| 821 target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
| 822 } else { |
| 823 MarkBit code_mark = Marking::MarkBitFrom(target); |
| 824 heap->mark_compact_collector()->MarkObject(target, code_mark); |
| 491 } | 825 } |
| 826 heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); |
| 492 } | 827 } |
| 493 | 828 |
| 494 static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) { | 829 static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) { |
| 495 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && | 830 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && |
| 496 rinfo->IsPatchedReturnSequence()) || | 831 rinfo->IsPatchedReturnSequence()) || |
| 497 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && | 832 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && |
| 498 rinfo->IsPatchedDebugBreakSlotSequence())); | 833 rinfo->IsPatchedDebugBreakSlotSequence())); |
| 499 HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address()); | 834 Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); |
| 500 heap->mark_compact_collector()->MarkObject(code); | 835 MarkBit code_mark = Marking::MarkBitFrom(target); |
| 836 heap->mark_compact_collector()->MarkObject(target, code_mark); |
| 837 heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); |
| 501 } | 838 } |
| 502 | 839 |
| 503 // Mark object pointed to by p. | 840 // Mark object pointed to by p. |
| 504 INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) { | 841 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, |
| 842 Object** anchor_slot, |
| 843 Object** p)) { |
| 505 if (!(*p)->IsHeapObject()) return; | 844 if (!(*p)->IsHeapObject()) return; |
| 506 HeapObject* object = ShortCircuitConsString(p); | 845 HeapObject* object = ShortCircuitConsString(p); |
| 507 if (!object->IsMarked()) { | 846 collector->RecordSlot(anchor_slot, p, object); |
| 508 heap->mark_compact_collector()->MarkUnmarkedObject(object); | 847 MarkBit mark = Marking::MarkBitFrom(object); |
| 509 } | 848 collector->MarkObject(object, mark); |
| 510 } | 849 } |
| 511 | 850 |
| 512 | 851 |
| 513 // Visit an unmarked object. | 852 // Visit an unmarked object. |
| 514 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, | 853 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, |
| 515 HeapObject* obj)) { | 854 HeapObject* obj)) { |
| 516 #ifdef DEBUG | 855 #ifdef DEBUG |
| 517 ASSERT(Isolate::Current()->heap()->Contains(obj)); | 856 ASSERT(Isolate::Current()->heap()->Contains(obj)); |
| 518 ASSERT(!obj->IsMarked()); | 857 ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj)); |
| 519 #endif | 858 #endif |
| 520 Map* map = obj->map(); | 859 Map* map = obj->map(); |
| 521 collector->SetMark(obj); | 860 Heap* heap = obj->GetHeap(); |
| 861 MarkBit mark = Marking::MarkBitFrom(obj); |
| 862 heap->mark_compact_collector()->SetMark(obj, mark); |
| 522 // Mark the map pointer and the body. | 863 // Mark the map pointer and the body. |
| 523 if (!map->IsMarked()) collector->MarkUnmarkedObject(map); | 864 MarkBit map_mark = Marking::MarkBitFrom(map); |
| 865 heap->mark_compact_collector()->MarkObject(map, map_mark); |
| 524 IterateBody(map, obj); | 866 IterateBody(map, obj); |
| 525 } | 867 } |
| 526 | 868 |
| 527 // Visit all unmarked objects pointed to by [start, end). | 869 // Visit all unmarked objects pointed to by [start, end). |
| 528 // Returns false if the operation fails (lack of stack space). | 870 // Returns false if the operation fails (lack of stack space). |
| 529 static inline bool VisitUnmarkedObjects(Heap* heap, | 871 static inline bool VisitUnmarkedObjects(Heap* heap, |
| 530 Object** start, | 872 Object** start, |
| 531 Object** end) { | 873 Object** end) { |
| 532 // Return false if we are close to the stack limit. | 874 // Return false if we are close to the stack limit. |
| 533 StackLimitCheck check(heap->isolate()); | 875 StackLimitCheck check(heap->isolate()); |
| 534 if (check.HasOverflowed()) return false; | 876 if (check.HasOverflowed()) return false; |
| 535 | 877 |
| 536 MarkCompactCollector* collector = heap->mark_compact_collector(); | 878 MarkCompactCollector* collector = heap->mark_compact_collector(); |
| 537 // Visit the unmarked objects. | 879 // Visit the unmarked objects. |
| 538 for (Object** p = start; p < end; p++) { | 880 for (Object** p = start; p < end; p++) { |
| 539 if (!(*p)->IsHeapObject()) continue; | 881 Object* o = *p; |
| 540 HeapObject* obj = HeapObject::cast(*p); | 882 if (!o->IsHeapObject()) continue; |
| 541 if (obj->IsMarked()) continue; | 883 collector->RecordSlot(start, p, o); |
| 884 HeapObject* obj = HeapObject::cast(o); |
| 885 MarkBit mark = Marking::MarkBitFrom(obj); |
| 886 if (mark.Get()) continue; |
| 542 VisitUnmarkedObject(collector, obj); | 887 VisitUnmarkedObject(collector, obj); |
| 543 } | 888 } |
| 544 return true; | 889 return true; |
| 545 } | 890 } |
| 546 | 891 |
| 547 static inline void VisitExternalReference(Address* p) { } | 892 static inline void VisitExternalReference(Address* p) { } |
| 548 static inline void VisitRuntimeEntry(RelocInfo* rinfo) { } | 893 static inline void VisitRuntimeEntry(RelocInfo* rinfo) { } |
| 549 | 894 |
| 550 private: | 895 private: |
| 551 class DataObjectVisitor { | 896 class DataObjectVisitor { |
| 552 public: | 897 public: |
| 553 template<int size> | 898 template<int size> |
| 554 static void VisitSpecialized(Map* map, HeapObject* object) { | 899 static void VisitSpecialized(Map* map, HeapObject* object) { |
| 555 } | 900 } |
| 556 | 901 |
| 557 static void Visit(Map* map, HeapObject* object) { | 902 static void Visit(Map* map, HeapObject* object) { |
| 558 } | 903 } |
| 559 }; | 904 }; |
| 560 | 905 |
| 561 typedef FlexibleBodyVisitor<StaticMarkingVisitor, | 906 typedef FlexibleBodyVisitor<StaticMarkingVisitor, |
| 562 JSObject::BodyDescriptor, | 907 JSObject::BodyDescriptor, |
| 563 void> JSObjectVisitor; | 908 void> JSObjectVisitor; |
| 564 | 909 |
| 565 typedef FlexibleBodyVisitor<StaticMarkingVisitor, | 910 typedef FlexibleBodyVisitor<StaticMarkingVisitor, |
| 566 StructBodyDescriptor, | 911 StructBodyDescriptor, |
| 567 void> StructObjectVisitor; | 912 void> StructObjectVisitor; |
| 568 | 913 |
| 569 static void VisitJSWeakMap(Map* map, HeapObject* object) { | 914 static void VisitJSWeakMap(Map* map, HeapObject* object) { |
| 570 MarkCompactCollector* collector = map->heap()->mark_compact_collector(); | 915 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); |
| 571 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object); | 916 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object); |
| 572 | 917 |
| 573 // Enqueue weak map in linked list of encountered weak maps. | 918 // Enqueue weak map in linked list of encountered weak maps. |
| 574 ASSERT(weak_map->next() == Smi::FromInt(0)); | 919 ASSERT(weak_map->next() == Smi::FromInt(0)); |
| 575 weak_map->set_next(collector->encountered_weak_maps()); | 920 weak_map->set_next(collector->encountered_weak_maps()); |
| 576 collector->set_encountered_weak_maps(weak_map); | 921 collector->set_encountered_weak_maps(weak_map); |
| 577 | 922 |
| 578 // Skip visiting the backing hash table containing the mappings. | 923 // Skip visiting the backing hash table containing the mappings. |
| 579 int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object); | 924 int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object); |
| 580 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( | 925 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( |
| 581 map->heap(), | 926 map->GetHeap(), |
| 582 object, | 927 object, |
| 583 JSWeakMap::BodyDescriptor::kStartOffset, | 928 JSWeakMap::BodyDescriptor::kStartOffset, |
| 584 JSWeakMap::kTableOffset); | 929 JSWeakMap::kTableOffset); |
| 585 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( | 930 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( |
| 586 map->heap(), | 931 map->GetHeap(), |
| 587 object, | 932 object, |
| 588 JSWeakMap::kTableOffset + kPointerSize, | 933 JSWeakMap::kTableOffset + kPointerSize, |
| 589 object_size); | 934 object_size); |
| 590 | 935 |
| 591 // Mark the backing hash table without pushing it on the marking stack. | 936 // Mark the backing hash table without pushing it on the marking stack. |
| 592 ASSERT(!weak_map->unchecked_table()->IsMarked()); | 937 ASSERT(!MarkCompactCollector::IsMarked(weak_map->unchecked_table())); |
| 593 ASSERT(weak_map->unchecked_table()->map()->IsMarked()); | 938 ASSERT(MarkCompactCollector::IsMarked(weak_map->unchecked_table()->map())); |
| 594 collector->SetMark(weak_map->unchecked_table()); | 939 |
| 940 HeapObject* unchecked_table = weak_map->unchecked_table(); |
| 941 MarkBit mark_bit = Marking::MarkBitFrom(unchecked_table); |
| 942 collector->SetMark(unchecked_table, mark_bit); |
| 595 } | 943 } |
| 596 | 944 |
| 597 static void VisitCode(Map* map, HeapObject* object) { | 945 static void VisitCode(Map* map, HeapObject* object) { |
| 598 reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>( | 946 reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>( |
| 599 map->heap()); | 947 map->GetHeap()); |
| 600 } | 948 } |
| 601 | 949 |
| 602 // Code flushing support. | 950 // Code flushing support. |
| 603 | 951 |
| 604 // How many collections a newly compiled code object will survive before being | 952 // How many collections a newly compiled code object will survive before being |
| 605 // flushed. | 953 // flushed. |
| 606 static const int kCodeAgeThreshold = 5; | 954 static const int kCodeAgeThreshold = 5; |
| 607 | 955 |
| 608 static const int kRegExpCodeThreshold = 5; | 956 static const int kRegExpCodeThreshold = 5; |
| 609 | 957 |
| 610 inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) { | 958 inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) { |
| 611 Object* undefined = heap->raw_unchecked_undefined_value(); | 959 Object* undefined = heap->undefined_value(); |
| 612 return (info->script() != undefined) && | 960 return (info->script() != undefined) && |
| 613 (reinterpret_cast<Script*>(info->script())->source() != undefined); | 961 (reinterpret_cast<Script*>(info->script())->source() != undefined); |
| 614 } | 962 } |
| 615 | 963 |
| 616 | 964 |
| 617 inline static bool IsCompiled(JSFunction* function) { | 965 inline static bool IsCompiled(JSFunction* function) { |
| 618 return function->unchecked_code() != | 966 return function->unchecked_code() != |
| 619 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile); | 967 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile); |
| 620 } | 968 } |
| 621 | 969 |
| 622 inline static bool IsCompiled(SharedFunctionInfo* function) { | 970 inline static bool IsCompiled(SharedFunctionInfo* function) { |
| 623 return function->unchecked_code() != | 971 return function->unchecked_code() != |
| 624 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile); | 972 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile); |
| 625 } | 973 } |
| 626 | 974 |
| 627 inline static bool IsFlushable(Heap* heap, JSFunction* function) { | 975 inline static bool IsFlushable(Heap* heap, JSFunction* function) { |
| 628 SharedFunctionInfo* shared_info = function->unchecked_shared(); | 976 SharedFunctionInfo* shared_info = function->unchecked_shared(); |
| 629 | 977 |
| 630 // Code is either on stack, in compilation cache or referenced | 978 // Code is either on stack, in compilation cache or referenced |
| 631 // by optimized version of function. | 979 // by optimized version of function. |
| 632 if (function->unchecked_code()->IsMarked()) { | 980 MarkBit code_mark = |
| 981 Marking::MarkBitFrom(function->unchecked_code()); |
| 982 if (code_mark.Get()) { |
| 633 shared_info->set_code_age(0); | 983 shared_info->set_code_age(0); |
| 634 return false; | 984 return false; |
| 635 } | 985 } |
| 636 | 986 |
| 637 // We do not flush code for optimized functions. | 987 // We do not flush code for optimized functions. |
| 638 if (function->code() != shared_info->unchecked_code()) { | 988 if (function->code() != shared_info->unchecked_code()) { |
| 639 return false; | 989 return false; |
| 640 } | 990 } |
| 641 | 991 |
| 642 return IsFlushable(heap, shared_info); | 992 return IsFlushable(heap, shared_info); |
| 643 } | 993 } |
| 644 | 994 |
| 645 inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) { | 995 inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) { |
| 646 // Code is either on stack, in compilation cache or referenced | 996 // Code is either on stack, in compilation cache or referenced |
| 647 // by optimized version of function. | 997 // by optimized version of function. |
| 648 if (shared_info->unchecked_code()->IsMarked()) { | 998 MarkBit code_mark = |
| 999 Marking::MarkBitFrom(shared_info->unchecked_code()); |
| 1000 if (code_mark.Get()) { |
| 649 shared_info->set_code_age(0); | 1001 shared_info->set_code_age(0); |
| 650 return false; | 1002 return false; |
| 651 } | 1003 } |
| 652 | 1004 |
| 653 // The function must be compiled and have the source code available, | 1005 // The function must be compiled and have the source code available, |
| 654 // to be able to recompile it in case we need the function again. | 1006 // to be able to recompile it in case we need the function again. |
| 655 if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) { | 1007 if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) { |
| 656 return false; | 1008 return false; |
| 657 } | 1009 } |
| 658 | 1010 |
| 659 // We never flush code for Api functions. | 1011 // We never flush code for Api functions. |
| 660 Object* function_data = shared_info->function_data(); | 1012 Object* function_data = shared_info->function_data(); |
| 661 if (function_data->IsHeapObject() && | 1013 if (function_data->IsFunctionTemplateInfo()) return false; |
| 662 (SafeMap(function_data)->instance_type() == | |
| 663 FUNCTION_TEMPLATE_INFO_TYPE)) { | |
| 664 return false; | |
| 665 } | |
| 666 | 1014 |
| 667 // Only flush code for functions. | 1015 // Only flush code for functions. |
| 668 if (shared_info->code()->kind() != Code::FUNCTION) return false; | 1016 if (shared_info->code()->kind() != Code::FUNCTION) return false; |
| 669 | 1017 |
| 670 // Function must be lazy compilable. | 1018 // Function must be lazy compilable. |
| 671 if (!shared_info->allows_lazy_compilation()) return false; | 1019 if (!shared_info->allows_lazy_compilation()) return false; |
| 672 | 1020 |
| 673 // If this is a full script wrapped in a function we do not flush the code. | 1021 // If this is a full script wrapped in a function we do not flush the code. |
| 674 if (shared_info->is_toplevel()) return false; | 1022 if (shared_info->is_toplevel()) return false; |
| 675 | 1023 |
| (...skipping 12 matching lines...) |
| 688 | 1036 |
| 689 // This function's code looks flushable. But we have to postpone the | 1037 // This function's code looks flushable. But we have to postpone the |
| 690 // decision until we see all functions that point to the same | 1038 // decision until we see all functions that point to the same |
| 691 // SharedFunctionInfo because some of them might be optimized. | 1039 // SharedFunctionInfo because some of them might be optimized. |
| 692 // That would make the nonoptimized version of the code nonflushable, | 1040 // That would make the nonoptimized version of the code nonflushable, |
| 693 // because it is required for bailing out from optimized code. | 1041 // because it is required for bailing out from optimized code. |
| 694 heap->mark_compact_collector()->code_flusher()->AddCandidate(function); | 1042 heap->mark_compact_collector()->code_flusher()->AddCandidate(function); |
| 695 return true; | 1043 return true; |
| 696 } | 1044 } |
| 697 | 1045 |
| 698 | 1046 static inline bool IsValidNotBuiltinContext(Object* ctx) { |
| 699 static inline Map* SafeMap(Object* obj) { | 1047 return ctx->IsContext() && |
| 700 MapWord map_word = HeapObject::cast(obj)->map_word(); | 1048 !Context::cast(ctx)->global()->IsJSBuiltinsObject(); |
| 701 map_word.ClearMark(); | |
| 702 map_word.ClearOverflow(); | |
| 703 return map_word.ToMap(); | |
| 704 } | 1049 } |
| 705 | 1050 |
| 706 | 1051 |
| 707 static inline bool IsJSBuiltinsObject(Object* obj) { | |
| 708 return obj->IsHeapObject() && | |
| 709 (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE); | |
| 710 } | |
| 711 | |
| 712 | |
| 713 static inline bool IsValidNotBuiltinContext(Object* ctx) { | |
| 714 if (!ctx->IsHeapObject()) return false; | |
| 715 | |
| 716 Map* map = SafeMap(ctx); | |
| 717 Heap* heap = map->heap(); | |
| 718 if (!(map == heap->raw_unchecked_function_context_map() || | |
| 719 map == heap->raw_unchecked_catch_context_map() || | |
| 720 map == heap->raw_unchecked_with_context_map() || | |
| 721 map == heap->raw_unchecked_global_context_map())) { | |
| 722 return false; | |
| 723 } | |
| 724 | |
| 725 Context* context = reinterpret_cast<Context*>(ctx); | |
| 726 | |
| 727 if (IsJSBuiltinsObject(context->global())) { | |
| 728 return false; | |
| 729 } | |
| 730 | |
| 731 return true; | |
| 732 } | |
| 733 | |
| 734 | |
| 735 static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) { | 1052 static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) { |
| 736 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); | 1053 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); |
| 737 | 1054 |
| 738 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap(); | 1055 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap(); |
| 739 | 1056 |
| 740 FixedBodyVisitor<StaticMarkingVisitor, | 1057 FixedBodyVisitor<StaticMarkingVisitor, |
| 741 SharedFunctionInfo::BodyDescriptor, | 1058 SharedFunctionInfo::BodyDescriptor, |
| 742 void>::Visit(map, object); | 1059 void>::Visit(map, object); |
| 743 } | 1060 } |
| 744 | 1061 |
| 745 | 1062 |
| 746 static void UpdateRegExpCodeAgeAndFlush(Heap* heap, | 1063 static void UpdateRegExpCodeAgeAndFlush(Heap* heap, |
| 747 JSRegExp* re, | 1064 JSRegExp* re, |
| 748 bool is_ascii) { | 1065 bool is_ascii) { |
| 749 // Make sure that the fixed array is in fact initialized on the RegExp. | 1066 // Make sure that the fixed array is in fact initialized on the RegExp. |
| 750 // We could potentially trigger a GC when initializing the RegExp. | 1067 // We could potentially trigger a GC when initializing the RegExp. |
| 751 if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return; | 1068 if (HeapObject::cast(re->data())->map()->instance_type() != |
| 1069 FIXED_ARRAY_TYPE) return; |
| 752 | 1070 |
| 753 // Make sure this is a RegExp that actually contains code. | 1071 // Make sure this is a RegExp that actually contains code. |
| 754 if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return; | 1072 if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return; |
| 755 | 1073 |
| 756 Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii)); | 1074 Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii)); |
| 757 if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) { | 1075 if (!code->IsSmi() && |
| 1076 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) { |
| 758 // Save a copy that can be reinstated if we need the code again. | 1077 // Save a copy that can be reinstated if we need the code again. |
| 759 re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii), | 1078 re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii), |
| 760 code, | 1079 code, |
| 761 heap); | 1080 heap); |
| 762 // Set a number in the 0-255 range to guarantee no smi overflow. | 1081 // Set a number in the 0-255 range to guarantee no smi overflow. |
| 763 re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii), | 1082 re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii), |
| 764 Smi::FromInt(heap->sweep_generation() & 0xff), | 1083 Smi::FromInt(heap->sweep_generation() & 0xff), |
| 765 heap); | 1084 heap); |
| 766 } else if (code->IsSmi()) { | 1085 } else if (code->IsSmi()) { |
| 767 int value = Smi::cast(code)->value(); | 1086 int value = Smi::cast(code)->value(); |
| (...skipping 15 matching lines...) | |
| 783 } | 1102 } |
| 784 } | 1103 } |
| 785 | 1104 |
| 786 | 1105 |
| 787 // Works by setting the current sweep_generation (as a smi) in the | 1106 // Works by setting the current sweep_generation (as a smi) in the |
| 788 // code object place in the data array of the RegExp and keeps a copy | 1107 // code object place in the data array of the RegExp and keeps a copy |
| 789 // around that can be reinstated if we reuse the RegExp before flushing. | 1108 // around that can be reinstated if we reuse the RegExp before flushing. |
| 790 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs | 1109 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs |
| 791 // we flush the code. | 1110 // we flush the code. |
| 792 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) { | 1111 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) { |
| 793 Heap* heap = map->heap(); | 1112 Heap* heap = map->GetHeap(); |
| 794 MarkCompactCollector* collector = heap->mark_compact_collector(); | 1113 MarkCompactCollector* collector = heap->mark_compact_collector(); |
| 795 if (!collector->is_code_flushing_enabled()) { | 1114 if (!collector->is_code_flushing_enabled()) { |
| 796 VisitJSRegExpFields(map, object); | 1115 VisitJSRegExpFields(map, object); |
| 797 return; | 1116 return; |
| 798 } | 1117 } |
| 799 JSRegExp* re = reinterpret_cast<JSRegExp*>(object); | 1118 JSRegExp* re = reinterpret_cast<JSRegExp*>(object); |
| 800 // Flush code or set age on both ascii and two byte code. | 1119 // Flush code or set age on both ascii and two byte code. |
| 801 UpdateRegExpCodeAgeAndFlush(heap, re, true); | 1120 UpdateRegExpCodeAgeAndFlush(heap, re, true); |
| 802 UpdateRegExpCodeAgeAndFlush(heap, re, false); | 1121 UpdateRegExpCodeAgeAndFlush(heap, re, false); |
| 803 // Visit the fields of the RegExp, including the updated FixedArray. | 1122 // Visit the fields of the RegExp, including the updated FixedArray. |
| 804 VisitJSRegExpFields(map, object); | 1123 VisitJSRegExpFields(map, object); |
| 805 } | 1124 } |
| 806 | 1125 |
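The comment ahead of VisitRegExpAndFlushCode describes the aging protocol implemented by UpdateRegExpCodeAgeAndFlush: the code slot in the RegExp's data array either holds compiled code or a smi recording the sweep generation at which the code was aged, and a saved copy allows cheap reinstatement. Below is a stand-alone sketch of that protocol; RegExpSlot, its fields and the threshold value are illustrative stand-ins rather than V8 declarations, and the flushing branch only approximates the lines the diff elides above (wrap-around of the 8-bit generation is ignored).

```cpp
// Hypothetical model of the RegExp code aging scheme; not V8's types.
struct RegExpSlot {
  bool holds_code;      // the data slot still points at compiled code
  bool has_saved_copy;  // a copy was stashed so the code can be reinstated
  int age;              // sweep generation recorded when the code was aged
};

const int kRegExpCodeThreshold = 5;  // assumed value, for illustration only

// Called once per mark-sweep collection for each compiled RegExp.
void AgeOrFlush(RegExpSlot* slot, int sweep_generation) {
  if (slot->holds_code) {
    // First collection that sees live code: stash a copy and replace the
    // code slot with the current sweep generation (kept small so the real
    // data array can store it as a smi).
    slot->has_saved_copy = true;
    slot->holds_code = false;
    slot->age = sweep_generation;
  } else if (slot->has_saved_copy &&
             sweep_generation - slot->age >= kRegExpCodeThreshold) {
    // Unused for kRegExpCodeThreshold collections: drop the saved copy,
    // forcing recompilation if the RegExp is ever executed again.
    slot->has_saved_copy = false;
  }
  // Otherwise the code was aged recently; keep the saved copy so it can be
  // reinstated cheaply if the RegExp runs again before flushing.
}
```

If the RegExp is executed while only the smi sits in the code slot, the saved copy can be reinstated, which is why the scheme keeps both slots per encoding (ascii and two-byte).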
| 807 | 1126 |
| 808 static void VisitSharedFunctionInfoAndFlushCode(Map* map, | 1127 static void VisitSharedFunctionInfoAndFlushCode(Map* map, |
| 809 HeapObject* object) { | 1128 HeapObject* object) { |
| 810 MarkCompactCollector* collector = map->heap()->mark_compact_collector(); | 1129 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); |
| 811 if (!collector->is_code_flushing_enabled()) { | 1130 if (!collector->is_code_flushing_enabled()) { |
| 812 VisitSharedFunctionInfoGeneric(map, object); | 1131 VisitSharedFunctionInfoGeneric(map, object); |
| 813 return; | 1132 return; |
| 814 } | 1133 } |
| 815 VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false); | 1134 VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false); |
| 816 } | 1135 } |
| 817 | 1136 |
| 818 | 1137 |
| 819 static void VisitSharedFunctionInfoAndFlushCodeGeneric( | 1138 static void VisitSharedFunctionInfoAndFlushCodeGeneric( |
| 820 Map* map, HeapObject* object, bool known_flush_code_candidate) { | 1139 Map* map, HeapObject* object, bool known_flush_code_candidate) { |
| 821 Heap* heap = map->heap(); | 1140 Heap* heap = map->GetHeap(); |
| 822 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); | 1141 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); |
| 823 | 1142 |
| 824 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap(); | 1143 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap(); |
| 825 | 1144 |
| 826 if (!known_flush_code_candidate) { | 1145 if (!known_flush_code_candidate) { |
| 827 known_flush_code_candidate = IsFlushable(heap, shared); | 1146 known_flush_code_candidate = IsFlushable(heap, shared); |
| 828 if (known_flush_code_candidate) { | 1147 if (known_flush_code_candidate) { |
| 829 heap->mark_compact_collector()->code_flusher()->AddCandidate(shared); | 1148 heap->mark_compact_collector()->code_flusher()->AddCandidate(shared); |
| 830 } | 1149 } |
| 831 } | 1150 } |
| 832 | 1151 |
| 833 VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate); | 1152 VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate); |
| 834 } | 1153 } |
| 835 | 1154 |
| 836 | 1155 |
| 837 static void VisitCodeEntry(Heap* heap, Address entry_address) { | 1156 static void VisitCodeEntry(Heap* heap, Address entry_address) { |
| 838 Object* code = Code::GetObjectFromEntryAddress(entry_address); | 1157 Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); |
| 839 Object* old_code = code; | 1158 MarkBit mark = Marking::MarkBitFrom(code); |
| 840 VisitPointer(heap, &code); | 1159 heap->mark_compact_collector()->MarkObject(code, mark); |
| 841 if (code != old_code) { | 1160 heap->mark_compact_collector()-> |
| 842 Memory::Address_at(entry_address) = | 1161 RecordCodeEntrySlot(entry_address, code); |
| 843 reinterpret_cast<Code*>(code)->entry(); | 1162 } |
| 1163 |
| 1164 static void VisitGlobalContext(Map* map, HeapObject* object) { |
| 1165 FixedBodyVisitor<StaticMarkingVisitor, |
| 1166 Context::MarkCompactBodyDescriptor, |
| 1167 void>::Visit(map, object); |
| 1168 |
| 1169 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); |
| 1170 for (int idx = Context::FIRST_WEAK_SLOT; |
| 1171 idx < Context::GLOBAL_CONTEXT_SLOTS; |
| 1172 ++idx) { |
| 1173 Object** slot = |
| 1174 HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx)); |
| 1175 collector->RecordSlot(slot, slot, *slot); |
| 844 } | 1176 } |
| 845 } | 1177 } |
| 846 | 1178 |
| 847 | |
| 848 static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) { | 1179 static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) { |
| 849 Heap* heap = map->heap(); | 1180 Heap* heap = map->GetHeap(); |
| 850 MarkCompactCollector* collector = heap->mark_compact_collector(); | 1181 MarkCompactCollector* collector = heap->mark_compact_collector(); |
| 851 if (!collector->is_code_flushing_enabled()) { | 1182 if (!collector->is_code_flushing_enabled()) { |
| 852 VisitJSFunction(map, object); | 1183 VisitJSFunction(map, object); |
| 853 return; | 1184 return; |
| 854 } | 1185 } |
| 855 | 1186 |
| 856 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object); | 1187 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object); |
| 857 // The function must have a valid context and not be a builtin. | 1188 // The function must have a valid context and not be a builtin. |
| 858 bool flush_code_candidate = false; | 1189 bool flush_code_candidate = false; |
| 859 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) { | 1190 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) { |
| 860 flush_code_candidate = FlushCodeForFunction(heap, jsfunction); | 1191 flush_code_candidate = FlushCodeForFunction(heap, jsfunction); |
| 861 } | 1192 } |
| 862 | 1193 |
| 863 if (!flush_code_candidate) { | 1194 if (!flush_code_candidate) { |
| 864 collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code()); | 1195 Code* code = jsfunction->unchecked_shared()->unchecked_code(); |
| 1196 MarkBit code_mark = Marking::MarkBitFrom(code); |
| 1197 heap->mark_compact_collector()->MarkObject(code, code_mark); |
| 865 | 1198 |
| 866 if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) { | 1199 if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) { |
| 867 // For optimized functions we should retain both non-optimized version | 1200 // For optimized functions we should retain both non-optimized version |
| 868 // of its code and non-optimized version of all inlined functions. | 1201 // of its code and non-optimized version of all inlined functions. |
| 869 // This is required to support bailing out from inlined code. | 1202 // This is required to support bailing out from inlined code. |
| 870 DeoptimizationInputData* data = | 1203 DeoptimizationInputData* data = |
| 871 reinterpret_cast<DeoptimizationInputData*>( | 1204 reinterpret_cast<DeoptimizationInputData*>( |
| 872 jsfunction->unchecked_code()->unchecked_deoptimization_data()); | 1205 jsfunction->unchecked_code()->unchecked_deoptimization_data()); |
| 873 | 1206 |
| 874 FixedArray* literals = data->UncheckedLiteralArray(); | 1207 FixedArray* literals = data->UncheckedLiteralArray(); |
| 875 | 1208 |
| 876 for (int i = 0, count = data->InlinedFunctionCount()->value(); | 1209 for (int i = 0, count = data->InlinedFunctionCount()->value(); |
| 877 i < count; | 1210 i < count; |
| 878 i++) { | 1211 i++) { |
| 879 JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i)); | 1212 JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i)); |
| 880 collector->MarkObject(inlined->unchecked_shared()->unchecked_code()); | 1213 Code* inlined_code = inlined->unchecked_shared()->unchecked_code(); |
| 1214 MarkBit inlined_code_mark = |
| 1215 Marking::MarkBitFrom(inlined_code); |
| 1216 heap->mark_compact_collector()->MarkObject( |
| 1217 inlined_code, inlined_code_mark); |
| 881 } | 1218 } |
| 882 } | 1219 } |
| 883 } | 1220 } |
| 884 | 1221 |
| 885 VisitJSFunctionFields(map, | 1222 VisitJSFunctionFields(map, |
| 886 reinterpret_cast<JSFunction*>(object), | 1223 reinterpret_cast<JSFunction*>(object), |
| 887 flush_code_candidate); | 1224 flush_code_candidate); |
| 888 } | 1225 } |
| 889 | 1226 |
| 890 | 1227 |
| 891 static void VisitJSFunction(Map* map, HeapObject* object) { | 1228 static void VisitJSFunction(Map* map, HeapObject* object) { |
| 892 VisitJSFunctionFields(map, | 1229 VisitJSFunctionFields(map, |
| 893 reinterpret_cast<JSFunction*>(object), | 1230 reinterpret_cast<JSFunction*>(object), |
| 894 false); | 1231 false); |
| 895 } | 1232 } |
| 896 | 1233 |
| 897 | 1234 |
| 898 #define SLOT_ADDR(obj, offset) \ | 1235 #define SLOT_ADDR(obj, offset) \ |
| 899 reinterpret_cast<Object**>((obj)->address() + offset) | 1236 reinterpret_cast<Object**>((obj)->address() + offset) |
| 900 | 1237 |
| 901 | 1238 |
| 902 static inline void VisitJSFunctionFields(Map* map, | 1239 static inline void VisitJSFunctionFields(Map* map, |
| 903 JSFunction* object, | 1240 JSFunction* object, |
| 904 bool flush_code_candidate) { | 1241 bool flush_code_candidate) { |
| 905 Heap* heap = map->heap(); | 1242 Heap* heap = map->GetHeap(); |
| 906 MarkCompactCollector* collector = heap->mark_compact_collector(); | |
| 907 | 1243 |
| 908 VisitPointers(heap, | 1244 VisitPointers(heap, |
| 909 SLOT_ADDR(object, JSFunction::kPropertiesOffset), | 1245 HeapObject::RawField(object, JSFunction::kPropertiesOffset), |
| 910 SLOT_ADDR(object, JSFunction::kCodeEntryOffset)); | 1246 HeapObject::RawField(object, JSFunction::kCodeEntryOffset)); |
| 911 | 1247 |
| 912 if (!flush_code_candidate) { | 1248 if (!flush_code_candidate) { |
| 913 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); | 1249 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); |
| 914 } else { | 1250 } else { |
| 915 // Don't visit code object. | 1251 // Don't visit code object. |
| 916 | 1252 |
| 917 // Visit shared function info to avoid double checking of its | 1253 // Visit shared function info to avoid double checking of its |
| 918 // flushability. | 1254 // flushability. |
| 919 SharedFunctionInfo* shared_info = object->unchecked_shared(); | 1255 SharedFunctionInfo* shared_info = object->unchecked_shared(); |
| 920 if (!shared_info->IsMarked()) { | 1256 MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info); |
| 1257 if (!shared_info_mark.Get()) { |
| 921 Map* shared_info_map = shared_info->map(); | 1258 Map* shared_info_map = shared_info->map(); |
| 922 collector->SetMark(shared_info); | 1259 MarkBit shared_info_map_mark = |
| 923 collector->MarkObject(shared_info_map); | 1260 Marking::MarkBitFrom(shared_info_map); |
| 1261 heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark); |
| 1262 heap->mark_compact_collector()->MarkObject(shared_info_map, |
| 1263 shared_info_map_mark); |
| 924 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, | 1264 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, |
| 925 shared_info, | 1265 shared_info, |
| 926 true); | 1266 true); |
| 927 } | 1267 } |
| 928 } | 1268 } |
| 929 | 1269 |
| 930 VisitPointers(heap, | 1270 VisitPointers( |
| 931 SLOT_ADDR(object, | 1271 heap, |
| 932 JSFunction::kCodeEntryOffset + kPointerSize), | 1272 HeapObject::RawField(object, |
| 933 SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset)); | 1273 JSFunction::kCodeEntryOffset + kPointerSize), |
| 1274 HeapObject::RawField(object, |
| 1275 JSFunction::kNonWeakFieldsEndOffset)); |
| 934 | 1276 |
| 935 // Don't visit the next function list field as it is a weak reference. | 1277 // Don't visit the next function list field as it is a weak reference. |
| 1278 Object** next_function = |
| 1279 HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset); |
| 1280 heap->mark_compact_collector()->RecordSlot( |
| 1281 next_function, next_function, *next_function); |
| 936 } | 1282 } |
| 937 | 1283 |
| 938 static inline void VisitJSRegExpFields(Map* map, | 1284 static inline void VisitJSRegExpFields(Map* map, |
| 939 HeapObject* object) { | 1285 HeapObject* object) { |
| 940 int last_property_offset = | 1286 int last_property_offset = |
| 941 JSRegExp::kSize + kPointerSize * map->inobject_properties(); | 1287 JSRegExp::kSize + kPointerSize * map->inobject_properties(); |
| 942 VisitPointers(map->heap(), | 1288 VisitPointers(map->GetHeap(), |
| 943 SLOT_ADDR(object, JSRegExp::kPropertiesOffset), | 1289 SLOT_ADDR(object, JSRegExp::kPropertiesOffset), |
| 944 SLOT_ADDR(object, last_property_offset)); | 1290 SLOT_ADDR(object, last_property_offset)); |
| 945 } | 1291 } |
| 946 | 1292 |
| 947 | 1293 |
| 948 static void VisitSharedFunctionInfoFields(Heap* heap, | 1294 static void VisitSharedFunctionInfoFields(Heap* heap, |
| 949 HeapObject* object, | 1295 HeapObject* object, |
| 950 bool flush_code_candidate) { | 1296 bool flush_code_candidate) { |
| 951 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset)); | 1297 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset)); |
| 952 | 1298 |
| (...skipping 35 matching lines...) | |
| 988 }; | 1334 }; |
| 989 | 1335 |
| 990 | 1336 |
| 991 class CodeMarkingVisitor : public ThreadVisitor { | 1337 class CodeMarkingVisitor : public ThreadVisitor { |
| 992 public: | 1338 public: |
| 993 explicit CodeMarkingVisitor(MarkCompactCollector* collector) | 1339 explicit CodeMarkingVisitor(MarkCompactCollector* collector) |
| 994 : collector_(collector) {} | 1340 : collector_(collector) {} |
| 995 | 1341 |
| 996 void VisitThread(Isolate* isolate, ThreadLocalTop* top) { | 1342 void VisitThread(Isolate* isolate, ThreadLocalTop* top) { |
| 997 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { | 1343 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { |
| 998 collector_->MarkObject(it.frame()->unchecked_code()); | 1344 Code* code = it.frame()->unchecked_code(); |
| 1345 MarkBit code_bit = Marking::MarkBitFrom(code); |
| 1346 collector_->MarkObject(it.frame()->unchecked_code(), code_bit); |
| 999 } | 1347 } |
| 1000 } | 1348 } |
| 1001 | 1349 |
| 1002 private: | 1350 private: |
| 1003 MarkCompactCollector* collector_; | 1351 MarkCompactCollector* collector_; |
| 1004 }; | 1352 }; |
| 1005 | 1353 |
| 1006 | 1354 |
| 1007 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { | 1355 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { |
| 1008 public: | 1356 public: |
| 1009 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) | 1357 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) |
| 1010 : collector_(collector) {} | 1358 : collector_(collector) {} |
| 1011 | 1359 |
| 1012 void VisitPointers(Object** start, Object** end) { | 1360 void VisitPointers(Object** start, Object** end) { |
| 1013 for (Object** p = start; p < end; p++) VisitPointer(p); | 1361 for (Object** p = start; p < end; p++) VisitPointer(p); |
| 1014 } | 1362 } |
| 1015 | 1363 |
| 1016 void VisitPointer(Object** slot) { | 1364 void VisitPointer(Object** slot) { |
| 1017 Object* obj = *slot; | 1365 Object* obj = *slot; |
| 1018 if (obj->IsSharedFunctionInfo()) { | 1366 if (obj->IsSharedFunctionInfo()) { |
| 1019 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); | 1367 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); |
| 1020 collector_->MarkObject(shared->unchecked_code()); | 1368 MarkBit shared_mark = Marking::MarkBitFrom(shared); |
| 1021 collector_->MarkObject(shared); | 1369 MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code()); |
| 1370 collector_->MarkObject(shared->unchecked_code(), code_mark); |
| 1371 collector_->MarkObject(shared, shared_mark); |
| 1022 } | 1372 } |
| 1023 } | 1373 } |
| 1024 | 1374 |
| 1025 private: | 1375 private: |
| 1026 MarkCompactCollector* collector_; | 1376 MarkCompactCollector* collector_; |
| 1027 }; | 1377 }; |
| 1028 | 1378 |
| 1029 | 1379 |
| 1030 void MarkCompactCollector::PrepareForCodeFlushing() { | 1380 void MarkCompactCollector::PrepareForCodeFlushing() { |
| 1031 ASSERT(heap() == Isolate::Current()->heap()); | 1381 ASSERT(heap() == Isolate::Current()->heap()); |
| 1032 | 1382 |
| 1033 if (!FLAG_flush_code) { | 1383 if (!FLAG_flush_code) { |
| 1034 EnableCodeFlushing(false); | 1384 EnableCodeFlushing(false); |
| 1035 return; | 1385 return; |
| 1036 } | 1386 } |
| 1037 | 1387 |
| 1038 #ifdef ENABLE_DEBUGGER_SUPPORT | 1388 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 1039 if (heap()->isolate()->debug()->IsLoaded() || | 1389 if (heap()->isolate()->debug()->IsLoaded() || |
| 1040 heap()->isolate()->debug()->has_break_points()) { | 1390 heap()->isolate()->debug()->has_break_points()) { |
| 1041 EnableCodeFlushing(false); | 1391 EnableCodeFlushing(false); |
| 1042 return; | 1392 return; |
| 1043 } | 1393 } |
| 1044 #endif | 1394 #endif |
| 1045 EnableCodeFlushing(true); | 1395 EnableCodeFlushing(true); |
| 1046 | 1396 |
| 1047 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray | 1397 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray |
| 1048 // relies on it being marked before any other descriptor array. | 1398 // relies on it being marked before any other descriptor array. |
| 1049 MarkObject(heap()->raw_unchecked_empty_descriptor_array()); | 1399 HeapObject* descriptor_array = heap()->empty_descriptor_array(); |
| 1400 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); |
| 1401 MarkObject(descriptor_array, descriptor_array_mark); |
| 1050 | 1402 |
| 1051 // Make sure we are not referencing the code from the stack. | 1403 // Make sure we are not referencing the code from the stack. |
| 1052 ASSERT(this == heap()->mark_compact_collector()); | 1404 ASSERT(this == heap()->mark_compact_collector()); |
| 1053 for (StackFrameIterator it; !it.done(); it.Advance()) { | 1405 for (StackFrameIterator it; !it.done(); it.Advance()) { |
| 1054 MarkObject(it.frame()->unchecked_code()); | 1406 Code* code = it.frame()->unchecked_code(); |
| 1407 MarkBit code_mark = Marking::MarkBitFrom(code); |
| 1408 MarkObject(code, code_mark); |
| 1055 } | 1409 } |
| 1056 | 1410 |
| 1057 // Iterate the archived stacks in all threads to check if | 1411 // Iterate the archived stacks in all threads to check if |
| 1058 // the code is referenced. | 1412 // the code is referenced. |
| 1059 CodeMarkingVisitor code_marking_visitor(this); | 1413 CodeMarkingVisitor code_marking_visitor(this); |
| 1060 heap()->isolate()->thread_manager()->IterateArchivedThreads( | 1414 heap()->isolate()->thread_manager()->IterateArchivedThreads( |
| 1061 &code_marking_visitor); | 1415 &code_marking_visitor); |
| 1062 | 1416 |
| 1063 SharedFunctionInfoMarkingVisitor visitor(this); | 1417 SharedFunctionInfoMarkingVisitor visitor(this); |
| 1064 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); | 1418 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); |
| 1065 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); | 1419 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); |
| 1066 | 1420 |
| 1067 ProcessMarkingStack(); | 1421 ProcessMarkingDeque(); |
| 1068 } | 1422 } |
| 1069 | 1423 |
| 1070 | 1424 |
| 1071 // Visitor class for marking heap roots. | 1425 // Visitor class for marking heap roots. |
| 1072 class RootMarkingVisitor : public ObjectVisitor { | 1426 class RootMarkingVisitor : public ObjectVisitor { |
| 1073 public: | 1427 public: |
| 1074 explicit RootMarkingVisitor(Heap* heap) | 1428 explicit RootMarkingVisitor(Heap* heap) |
| 1075 : collector_(heap->mark_compact_collector()) { } | 1429 : collector_(heap->mark_compact_collector()) { } |
| 1076 | 1430 |
| 1077 void VisitPointer(Object** p) { | 1431 void VisitPointer(Object** p) { |
| 1078 MarkObjectByPointer(p); | 1432 MarkObjectByPointer(p); |
| 1079 } | 1433 } |
| 1080 | 1434 |
| 1081 void VisitPointers(Object** start, Object** end) { | 1435 void VisitPointers(Object** start, Object** end) { |
| 1082 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); | 1436 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); |
| 1083 } | 1437 } |
| 1084 | 1438 |
| 1085 private: | 1439 private: |
| 1086 void MarkObjectByPointer(Object** p) { | 1440 void MarkObjectByPointer(Object** p) { |
| 1087 if (!(*p)->IsHeapObject()) return; | 1441 if (!(*p)->IsHeapObject()) return; |
| 1088 | 1442 |
| 1089 // Replace flat cons strings in place. | 1443 // Replace flat cons strings in place. |
| 1090 HeapObject* object = ShortCircuitConsString(p); | 1444 HeapObject* object = ShortCircuitConsString(p); |
| 1091 if (object->IsMarked()) return; | 1445 MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 1446 if (mark_bit.Get()) return; |
| 1092 | 1447 |
| 1093 Map* map = object->map(); | 1448 Map* map = object->map(); |
| 1094 // Mark the object. | 1449 // Mark the object. |
| 1095 collector_->SetMark(object); | 1450 collector_->SetMark(object, mark_bit); |
| 1096 | 1451 |
| 1097 // Mark the map pointer and body, and push them on the marking stack. | 1452 // Mark the map pointer and body, and push them on the marking stack. |
| 1098 collector_->MarkObject(map); | 1453 MarkBit map_mark = Marking::MarkBitFrom(map); |
| 1454 collector_->MarkObject(map, map_mark); |
| 1099 StaticMarkingVisitor::IterateBody(map, object); | 1455 StaticMarkingVisitor::IterateBody(map, object); |
| 1100 | 1456 |
| 1101 // Mark all the objects reachable from the map and body. May leave | 1457 // Mark all the objects reachable from the map and body. May leave |
| 1102 // overflowed objects in the heap. | 1458 // overflowed objects in the heap. |
| 1103 collector_->EmptyMarkingStack(); | 1459 collector_->EmptyMarkingDeque(); |
| 1104 } | 1460 } |
| 1105 | 1461 |
| 1106 MarkCompactCollector* collector_; | 1462 MarkCompactCollector* collector_; |
| 1107 }; | 1463 }; |
| 1108 | 1464 |
| 1109 | 1465 |
| 1110 // Helper class for pruning the symbol table. | 1466 // Helper class for pruning the symbol table. |
| 1111 class SymbolTableCleaner : public ObjectVisitor { | 1467 class SymbolTableCleaner : public ObjectVisitor { |
| 1112 public: | 1468 public: |
| 1113 explicit SymbolTableCleaner(Heap* heap) | 1469 explicit SymbolTableCleaner(Heap* heap) |
| 1114 : heap_(heap), pointers_removed_(0) { } | 1470 : heap_(heap), pointers_removed_(0) { } |
| 1115 | 1471 |
| 1116 virtual void VisitPointers(Object** start, Object** end) { | 1472 virtual void VisitPointers(Object** start, Object** end) { |
| 1117 // Visit all HeapObject pointers in [start, end). | 1473 // Visit all HeapObject pointers in [start, end). |
| 1118 for (Object** p = start; p < end; p++) { | 1474 for (Object** p = start; p < end; p++) { |
| 1119 if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) { | 1475 Object* o = *p; |
| 1476 if (o->IsHeapObject() && |
| 1477 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { |
| 1120 // Check if the symbol being pruned is an external symbol. We need to | 1478 // Check if the symbol being pruned is an external symbol. We need to |
| 1121 // delete the associated external data as this symbol is going away. | 1479 // delete the associated external data as this symbol is going away. |
| 1122 | 1480 |
| 1123 // Since no objects have yet been moved we can safely access the map of | 1481 // Since no objects have yet been moved we can safely access the map of |
| 1124 // the object. | 1482 // the object. |
| 1125 if ((*p)->IsExternalString()) { | 1483 if (o->IsExternalString()) { |
| 1126 heap_->FinalizeExternalString(String::cast(*p)); | 1484 heap_->FinalizeExternalString(String::cast(*p)); |
| 1127 } | 1485 } |
| 1128 // Set the entry to null_value (as deleted). | 1486 // Set the entry to null_value (as deleted). |
| 1129 *p = heap_->raw_unchecked_null_value(); | 1487 *p = heap_->null_value(); |
| 1130 pointers_removed_++; | 1488 pointers_removed_++; |
| 1131 } | 1489 } |
| 1132 } | 1490 } |
| 1133 } | 1491 } |
| 1134 | 1492 |
| 1135 int PointersRemoved() { | 1493 int PointersRemoved() { |
| 1136 return pointers_removed_; | 1494 return pointers_removed_; |
| 1137 } | 1495 } |
| 1138 | 1496 |
| 1139 private: | 1497 private: |
| 1140 Heap* heap_; | 1498 Heap* heap_; |
| 1141 int pointers_removed_; | 1499 int pointers_removed_; |
| 1142 }; | 1500 }; |
| 1143 | 1501 |
| 1144 | 1502 |
| 1145 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects | 1503 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects |
| 1146 // are retained. | 1504 // are retained. |
| 1147 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { | 1505 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
| 1148 public: | 1506 public: |
| 1149 virtual Object* RetainAs(Object* object) { | 1507 virtual Object* RetainAs(Object* object) { |
| 1150 MapWord first_word = HeapObject::cast(object)->map_word(); | 1508 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) { |
| 1151 if (first_word.IsMarked()) { | |
| 1152 return object; | 1509 return object; |
| 1153 } else { | 1510 } else { |
| 1154 return NULL; | 1511 return NULL; |
| 1155 } | 1512 } |
| 1156 } | 1513 } |
| 1157 }; | 1514 }; |
| 1158 | 1515 |
| 1159 | 1516 |
| 1160 void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) { | 1517 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { |
| 1161 ASSERT(!object->IsMarked()); | 1518 ASSERT(IsMarked(object)); |
| 1162 ASSERT(HEAP->Contains(object)); | 1519 ASSERT(HEAP->Contains(object)); |
| 1163 if (object->IsMap()) { | 1520 if (object->IsMap()) { |
| 1164 Map* map = Map::cast(object); | 1521 Map* map = Map::cast(object); |
| 1165 if (FLAG_cleanup_code_caches_at_gc) { | 1522 if (FLAG_cleanup_code_caches_at_gc) { |
| 1166 map->ClearCodeCache(heap()); | 1523 map->ClearCodeCache(heap()); |
| 1167 } | 1524 } |
| 1168 SetMark(map); | |
| 1169 | 1525 |
| 1170 // When map collection is enabled we have to mark through map's transitions | 1526 // When map collection is enabled we have to mark through map's transitions |
| 1171 // in a special way to make transition links weak. | 1527 // in a special way to make transition links weak. |
| 1172 // Only maps for subclasses of JSReceiver can have transitions. | 1528 // Only maps for subclasses of JSReceiver can have transitions. |
| 1173 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); | 1529 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); |
| 1174 if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { | 1530 if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
| 1175 MarkMapContents(map); | 1531 MarkMapContents(map); |
| 1176 } else { | 1532 } else { |
| 1177 marking_stack_.Push(map); | 1533 marking_deque_.PushBlack(map); |
| 1178 } | 1534 } |
| 1179 } else { | 1535 } else { |
| 1180 SetMark(object); | 1536 marking_deque_.PushBlack(object); |
| 1181 marking_stack_.Push(object); | |
| 1182 } | 1537 } |
| 1183 } | 1538 } |
| 1184 | 1539 |
| 1185 | 1540 |
| 1186 void MarkCompactCollector::MarkMapContents(Map* map) { | 1541 void MarkCompactCollector::MarkMapContents(Map* map) { |
| 1187 // Mark prototype transitions array but don't push it into marking stack. | 1542 // Mark prototype transitions array but don't push it into marking stack. |
| 1188 // This will make references from it weak. We will clean dead prototype | 1543 // This will make references from it weak. We will clean dead prototype |
| 1189 // transitions in ClearNonLiveTransitions. | 1544 // transitions in ClearNonLiveTransitions. |
| 1190 FixedArray* prototype_transitions = map->unchecked_prototype_transitions(); | 1545 FixedArray* prototype_transitions = map->prototype_transitions(); |
| 1191 if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions); | 1546 MarkBit mark = Marking::MarkBitFrom(prototype_transitions); |
| 1547 if (!mark.Get()) { |
| 1548 mark.Set(); |
| 1549 MemoryChunk::IncrementLiveBytes(prototype_transitions->address(), |
| 1550 prototype_transitions->Size()); |
| 1551 } |
| 1192 | 1552 |
| 1193 Object* raw_descriptor_array = | 1553 Object** raw_descriptor_array_slot = |
| 1194 *HeapObject::RawField(map, | 1554 HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset); |
| 1195 Map::kInstanceDescriptorsOrBitField3Offset); | 1555 Object* raw_descriptor_array = *raw_descriptor_array_slot; |
| 1196 if (!raw_descriptor_array->IsSmi()) { | 1556 if (!raw_descriptor_array->IsSmi()) { |
| 1197 MarkDescriptorArray( | 1557 MarkDescriptorArray( |
| 1198 reinterpret_cast<DescriptorArray*>(raw_descriptor_array)); | 1558 reinterpret_cast<DescriptorArray*>(raw_descriptor_array)); |
| 1199 } | 1559 } |
| 1200 | 1560 |
| 1201 // Mark the Object* fields of the Map. | 1561 // Mark the Object* fields of the Map. |
| 1202 // Since the descriptor array has been marked already, it is fine | 1562 // Since the descriptor array has been marked already, it is fine |
| 1203 // that one of these fields contains a pointer to it. | 1563 // that one of these fields contains a pointer to it. |
| 1204 Object** start_slot = HeapObject::RawField(map, | 1564 Object** start_slot = HeapObject::RawField(map, |
| 1205 Map::kPointerFieldsBeginOffset); | 1565 Map::kPointerFieldsBeginOffset); |
| 1206 | 1566 |
| 1207 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); | 1567 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); |
| 1208 | 1568 |
| 1209 StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot); | 1569 StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot); |
| 1210 } | 1570 } |
| 1211 | 1571 |
| 1212 | 1572 |
| 1213 void MarkCompactCollector::MarkDescriptorArray( | 1573 void MarkCompactCollector::MarkDescriptorArray( |
| 1214 DescriptorArray* descriptors) { | 1574 DescriptorArray* descriptors) { |
| 1215 if (descriptors->IsMarked()) return; | 1575 MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors); |
| 1576 if (descriptors_mark.Get()) return; |
| 1216 // Empty descriptor array is marked as a root before any maps are marked. | 1577 // Empty descriptor array is marked as a root before any maps are marked. |
| 1217 ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array()); | 1578 ASSERT(descriptors != heap()->empty_descriptor_array()); |
| 1218 SetMark(descriptors); | 1579 SetMark(descriptors, descriptors_mark); |
| 1219 | 1580 |
| 1220 FixedArray* contents = reinterpret_cast<FixedArray*>( | 1581 FixedArray* contents = reinterpret_cast<FixedArray*>( |
| 1221 descriptors->get(DescriptorArray::kContentArrayIndex)); | 1582 descriptors->get(DescriptorArray::kContentArrayIndex)); |
| 1222 ASSERT(contents->IsHeapObject()); | 1583 ASSERT(contents->IsHeapObject()); |
| 1223 ASSERT(!contents->IsMarked()); | 1584 ASSERT(!IsMarked(contents)); |
| 1224 ASSERT(contents->IsFixedArray()); | 1585 ASSERT(contents->IsFixedArray()); |
| 1225 ASSERT(contents->length() >= 2); | 1586 ASSERT(contents->length() >= 2); |
| 1226 SetMark(contents); | 1587 MarkBit contents_mark = Marking::MarkBitFrom(contents); |
| 1588 SetMark(contents, contents_mark); |
| 1227 // Contents contains (value, details) pairs. If the details say that the type | 1589 // Contents contains (value, details) pairs. If the details say that the type |
| 1228 // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, | 1590 // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, |
| 1229 // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as | 1591 // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as |
| 1230 // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and | 1592 // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and |
| 1231 // CONSTANT_TRANSITION is the value an Object* (a Map*). | 1593 // CONSTANT_TRANSITION is the value an Object* (a Map*). |
| 1232 for (int i = 0; i < contents->length(); i += 2) { | 1594 for (int i = 0; i < contents->length(); i += 2) { |
| 1233 // If the pair (value, details) at index i, i+1 is not | 1595 // If the pair (value, details) at index i, i+1 is not |
| 1234 // a transition or null descriptor, mark the value. | 1596 // a transition or null descriptor, mark the value. |
| 1235 PropertyDetails details(Smi::cast(contents->get(i + 1))); | 1597 PropertyDetails details(Smi::cast(contents->get(i + 1))); |
| 1598 |
| 1599 Object** slot = contents->data_start() + i; |
| 1600 Object* value = *slot; |
| 1601 if (!value->IsHeapObject()) continue; |
| 1602 |
| 1603 RecordSlot(slot, slot, *slot); |
| 1604 |
| 1236 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) { | 1605 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) { |
| 1237 HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i)); | 1606 HeapObject* object = HeapObject::cast(value); |
| 1238 if (object->IsHeapObject() && !object->IsMarked()) { | 1607 MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object)); |
| 1239 SetMark(object); | 1608 if (!mark.Get()) { |
| 1240 marking_stack_.Push(object); | 1609 SetMark(HeapObject::cast(object), mark); |
| 1610 marking_deque_.PushBlack(object); |
| 1241 } | 1611 } |
| 1242 } | 1612 } |
| 1243 } | 1613 } |
| 1244 // The DescriptorArray descriptors contains a pointer to its contents array, | 1614 // The DescriptorArray descriptors contains a pointer to its contents array, |
| 1245 // but the contents array is already marked. | 1615 // but the contents array is already marked. |
| 1246 marking_stack_.Push(descriptors); | 1616 marking_deque_.PushBlack(descriptors); |
| 1247 } | 1617 } |
| 1248 | 1618 |
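MarkDescriptorArray marks the contents array itself but deliberately skips the values of transition-like descriptors, which is what makes map transitions weak (dead ones are cleaned up later, as the ClearNonLiveTransitions comment in MarkMapContents indicates). A small sketch of that selection rule follows; the enum ordering and types below are illustrative stand-ins, not V8's real PropertyType values.

```cpp
// Sketch of the "mark only non-phantom descriptor values" rule.
#include <vector>

enum PropertyType {
  NORMAL,                        // value is marked
  FIELD,                         // value is marked
  CONSTANT_FUNCTION,             // value is marked
  FIRST_PHANTOM_PROPERTY_TYPE,   // everything from here on is weak
  MAP_TRANSITION = FIRST_PHANTOM_PROPERTY_TYPE,
  NULL_DESCRIPTOR
};

struct Descriptor {
  int value;          // stand-in for the Object* value slot
  PropertyType type;  // stand-in for PropertyDetails::type()
};

// Returns the values that would be pushed on the marking deque; transition
// and null descriptors are skipped, so their targets are only weakly held.
std::vector<int> StronglyMarkedValues(const std::vector<Descriptor>& contents) {
  std::vector<int> strong;
  for (size_t i = 0; i < contents.size(); i++) {
    if (contents[i].type < FIRST_PHANTOM_PROPERTY_TYPE) {
      strong.push_back(contents[i].value);
    }
  }
  return strong;
}
```

Only the values returned here get marked through; a transition target survives the collection only if some other strong path reaches it.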
| 1249 | 1619 |
| 1250 void MarkCompactCollector::CreateBackPointers() { | 1620 void MarkCompactCollector::CreateBackPointers() { |
| 1251 HeapObjectIterator iterator(heap()->map_space()); | 1621 HeapObjectIterator iterator(heap()->map_space()); |
| 1252 for (HeapObject* next_object = iterator.next(); | 1622 for (HeapObject* next_object = iterator.Next(); |
| 1253 next_object != NULL; next_object = iterator.next()) { | 1623 next_object != NULL; next_object = iterator.Next()) { |
| 1254 if (next_object->IsMap()) { // Could also be ByteArray on free list. | 1624 if (next_object->IsMap()) { // Could also be FreeSpace object on free list. |
| 1255 Map* map = Map::cast(next_object); | 1625 Map* map = Map::cast(next_object); |
| 1256 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); | 1626 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); |
| 1257 if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { | 1627 if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
| 1258 map->CreateBackPointers(); | 1628 map->CreateBackPointers(); |
| 1259 } else { | 1629 } else { |
| 1260 ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array()); | 1630 ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array()); |
| 1261 } | 1631 } |
| 1262 } | 1632 } |
| 1263 } | 1633 } |
| 1264 } | 1634 } |
| 1265 | 1635 |
| 1266 | 1636 |
| 1267 static int OverflowObjectSize(HeapObject* obj) { | 1637 // Fill the marking stack with overflowed objects returned by the given |
| 1268 // Recover the normal map pointer, it might be marked as live and | 1638 // iterator. Stop when the marking stack is filled or the end of the space |
| 1269 // overflowed. | 1639 // is reached, whichever comes first. |
| 1270 MapWord map_word = obj->map_word(); | 1640 template<class T> |
| 1271 map_word.ClearMark(); | 1641 static void DiscoverGreyObjectsWithIterator(Heap* heap, |
| 1272 map_word.ClearOverflow(); | 1642 MarkingDeque* marking_deque, |
| 1273 return obj->SizeFromMap(map_word.ToMap()); | 1643 T* it) { |
| 1644 // The caller should ensure that the marking stack is initially not full, |
| 1645 // so that we don't waste effort pointlessly scanning for objects. |
| 1646 ASSERT(!marking_deque->IsFull()); |
| 1647 |
| 1648 Map* filler_map = heap->one_pointer_filler_map(); |
| 1649 for (HeapObject* object = it->Next(); |
| 1650 object != NULL; |
| 1651 object = it->Next()) { |
| 1652 MarkBit markbit = Marking::MarkBitFrom(object); |
| 1653 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) { |
| 1654 Marking::GreyToBlack(markbit); |
| 1655 marking_deque->PushBlack(object); |
| 1656 if (marking_deque->IsFull()) return; |
| 1657 } |
| 1658 } |
| 1274 } | 1659 } |
| 1275 | 1660 |
| 1276 | 1661 |
| 1277 class OverflowedObjectsScanner : public AllStatic { | 1662 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts); |
| 1278 public: | |
| 1279 // Fill the marking stack with overflowed objects returned by the given | |
| 1280 // iterator. Stop when the marking stack is filled or the end of the space | |
| 1281 // is reached, whichever comes first. | |
| 1282 template<class T> | |
| 1283 static inline void ScanOverflowedObjects(MarkCompactCollector* collector, | |
| 1284 T* it) { | |
| 1285 // The caller should ensure that the marking stack is initially not full, | |
| 1286 // so that we don't waste effort pointlessly scanning for objects. | |
| 1287 ASSERT(!collector->marking_stack_.is_full()); | |
| 1288 | 1663 |
| 1289 for (HeapObject* object = it->next(); object != NULL; object = it->next()) { | 1664 |
| 1290 if (object->IsOverflowed()) { | 1665 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) { |
| 1291 object->ClearOverflow(); | 1666 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
| 1292 ASSERT(object->IsMarked()); | 1667 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 1293 ASSERT(HEAP->Contains(object)); | 1668 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); |
| 1294 collector->marking_stack_.Push(object); | 1669 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
| 1295 if (collector->marking_stack_.is_full()) return; | 1670 |
| 1296 } | 1671 MarkBit::CellType* cells = p->markbits()->cells(); |
| 1672 |
| 1673 int last_cell_index = |
| 1674 Bitmap::IndexToCell( |
| 1675 Bitmap::CellAlignIndex( |
| 1676 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
| 1677 |
| 1678 int cell_index = Page::kFirstUsedCell; |
| 1679 Address cell_base = p->ObjectAreaStart(); |
| 1680 |
| 1681 for (cell_index = Page::kFirstUsedCell; |
| 1682 cell_index < last_cell_index; |
| 1683 cell_index++, cell_base += 32 * kPointerSize) { |
| 1684 ASSERT((unsigned)cell_index == |
| 1685 Bitmap::IndexToCell( |
| 1686 Bitmap::CellAlignIndex( |
| 1687 p->AddressToMarkbitIndex(cell_base)))); |
| 1688 |
| 1689 const MarkBit::CellType current_cell = cells[cell_index]; |
| 1690 if (current_cell == 0) continue; |
| 1691 |
| 1692 const MarkBit::CellType next_cell = cells[cell_index + 1]; |
| 1693 MarkBit::CellType grey_objects = current_cell & |
| 1694 ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1))); |
| 1695 |
| 1696 int offset = 0; |
| 1697 while (grey_objects != 0) { |
| 1698 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects); |
| 1699 grey_objects >>= trailing_zeros; |
| 1700 offset += trailing_zeros; |
| 1701 MarkBit markbit(&cells[cell_index], 1 << offset, false); |
| 1702 ASSERT(Marking::IsGrey(markbit)); |
| 1703 Marking::GreyToBlack(markbit); |
| 1704 Address addr = cell_base + offset * kPointerSize; |
| 1705 marking_deque->PushBlack(HeapObject::FromAddress(addr)); |
| 1706 if (marking_deque->IsFull()) return; |
| 1707 offset += 2; |
| 1708 grey_objects >>= 2; |
| 1709 } |
| 1710 |
| 1711 grey_objects >>= (Bitmap::kBitsPerCell - 1); |
| 1712 } |
| 1713 } |
| 1714 |
| 1715 |
| 1716 static void DiscoverGreyObjectsInSpace(Heap* heap, |
| 1717 MarkingDeque* marking_deque, |
| 1718 PagedSpace* space) { |
| 1719 if (!space->was_swept_conservatively()) { |
| 1720 HeapObjectIterator it(space); |
| 1721 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); |
| 1722 } else { |
| 1723 PageIterator it(space); |
| 1724 while (it.has_next()) { |
| 1725 Page* p = it.next(); |
| 1726 DiscoverGreyObjectsOnPage(marking_deque, p); |
| 1727 if (marking_deque->IsFull()) return; |
| 1297 } | 1728 } |
| 1298 } | 1729 } |
| 1299 }; | 1730 } |
| 1300 | 1731 |
| 1301 | 1732 |
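DiscoverGreyObjectsOnPage relies on the bit patterns asserted at its top: each object owns two consecutive mark bits (least-significant first), with white = 00, black = 10 and grey = 11, so AND-ing a bitmap cell with a shifted copy of itself exposes exactly the grey objects. The following self-contained sketch reproduces that extraction; the cell width and helper names are assumptions, and __builtin_ctz stands in for CompilerIntrinsics::CountTrailingZeros.

```cpp
// Sketch of grey-object extraction from two adjacent mark-bit cells.
#include <cstdint>
#include <vector>

const int kBitsPerCell = 32;

// Returns the bit offsets, within `current`, of objects whose two mark bits
// read "11" (grey). The second bit of the last object in a cell spills into
// `next`, which is why the neighbouring cell is consulted.
std::vector<int> GreyOffsets(uint32_t current, uint32_t next) {
  uint32_t grey =
      current & ((current >> 1) | (next << (kBitsPerCell - 1)));
  std::vector<int> offsets;
  int offset = 0;
  while (grey != 0) {
    int skip = __builtin_ctz(grey);  // distance to the next grey object
    grey >>= skip;
    offset += skip;
    offsets.push_back(offset);
    grey >>= 2;                      // step past both bits of this object
    offset += 2;
  }
  return offsets;
}
```

For example, GreyOffsets(0xC, 0) returns {2}: bits 2 and 3 are both set, so the object whose first mark bit is bit 2 is grey, whereas an object marked 10 (black) is filtered out by the AND.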
| 1302 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { | 1733 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { |
| 1303 return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked(); | 1734 Object* o = *p; |
| 1735 if (!o->IsHeapObject()) return false; |
| 1736 HeapObject* heap_object = HeapObject::cast(o); |
| 1737 MarkBit mark = Marking::MarkBitFrom(heap_object); |
| 1738 return !mark.Get(); |
| 1304 } | 1739 } |
| 1305 | 1740 |
| 1306 | 1741 |
| 1307 void MarkCompactCollector::MarkSymbolTable() { | 1742 void MarkCompactCollector::MarkSymbolTable() { |
| 1308 SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); | 1743 SymbolTable* symbol_table = heap()->symbol_table(); |
| 1309 // Mark the symbol table itself. | 1744 // Mark the symbol table itself. |
| 1310 SetMark(symbol_table); | 1745 MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table); |
| 1746 SetMark(symbol_table, symbol_table_mark); |
| 1311 // Explicitly mark the prefix. | 1747 // Explicitly mark the prefix. |
| 1312 MarkingVisitor marker(heap()); | 1748 MarkingVisitor marker(heap()); |
| 1313 symbol_table->IteratePrefix(&marker); | 1749 symbol_table->IteratePrefix(&marker); |
| 1314 ProcessMarkingStack(); | 1750 ProcessMarkingDeque(); |
| 1315 } | 1751 } |
| 1316 | 1752 |
| 1317 | 1753 |
| 1318 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { | 1754 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { |
| 1319 // Mark the heap roots including global variables, stack variables, | 1755 // Mark the heap roots including global variables, stack variables, |
| 1320 // etc., and all objects reachable from them. | 1756 // etc., and all objects reachable from them. |
| 1321 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); | 1757 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); |
| 1322 | 1758 |
| 1323 // Handle the symbol table specially. | 1759 // Handle the symbol table specially. |
| 1324 MarkSymbolTable(); | 1760 MarkSymbolTable(); |
| 1325 | 1761 |
| 1326 // There may be overflowed objects in the heap. Visit them now. | 1762 // There may be overflowed objects in the heap. Visit them now. |
| 1327 while (marking_stack_.overflowed()) { | 1763 while (marking_deque_.overflowed()) { |
| 1328 RefillMarkingStack(); | 1764 RefillMarkingDeque(); |
| 1329 EmptyMarkingStack(); | 1765 EmptyMarkingDeque(); |
| 1330 } | 1766 } |
| 1331 } | 1767 } |
| 1332 | 1768 |
| 1333 | 1769 |
| 1334 void MarkCompactCollector::MarkObjectGroups() { | 1770 void MarkCompactCollector::MarkObjectGroups() { |
| 1335 List<ObjectGroup*>* object_groups = | 1771 List<ObjectGroup*>* object_groups = |
| 1336 heap()->isolate()->global_handles()->object_groups(); | 1772 heap()->isolate()->global_handles()->object_groups(); |
| 1337 | 1773 |
| 1338 int last = 0; | 1774 int last = 0; |
| 1339 for (int i = 0; i < object_groups->length(); i++) { | 1775 for (int i = 0; i < object_groups->length(); i++) { |
| 1340 ObjectGroup* entry = object_groups->at(i); | 1776 ObjectGroup* entry = object_groups->at(i); |
| 1341 ASSERT(entry != NULL); | 1777 ASSERT(entry != NULL); |
| 1342 | 1778 |
| 1343 Object*** objects = entry->objects_; | 1779 Object*** objects = entry->objects_; |
| 1344 bool group_marked = false; | 1780 bool group_marked = false; |
| 1345 for (size_t j = 0; j < entry->length_; j++) { | 1781 for (size_t j = 0; j < entry->length_; j++) { |
| 1346 Object* object = *objects[j]; | 1782 Object* object = *objects[j]; |
| 1347 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) { | 1783 if (object->IsHeapObject()) { |
| 1348 group_marked = true; | 1784 HeapObject* heap_object = HeapObject::cast(object); |
| 1349 break; | 1785 MarkBit mark = Marking::MarkBitFrom(heap_object); |
| 1786 if (mark.Get()) { |
| 1787 group_marked = true; |
| 1788 break; |
| 1789 } |
| 1350 } | 1790 } |
| 1351 } | 1791 } |
| 1352 | 1792 |
| 1353 if (!group_marked) { | 1793 if (!group_marked) { |
| 1354 (*object_groups)[last++] = entry; | 1794 (*object_groups)[last++] = entry; |
| 1355 continue; | 1795 continue; |
| 1356 } | 1796 } |
| 1357 | 1797 |
| 1358 // An object in the group is marked, so mark all heap objects in | 1798 // An object in the group is marked, so mark as grey all white heap |
| 1359 // the group. | 1799 // objects in the group. |
| 1360 for (size_t j = 0; j < entry->length_; ++j) { | 1800 for (size_t j = 0; j < entry->length_; ++j) { |
| 1361 if ((*objects[j])->IsHeapObject()) { | 1801 Object* object = *objects[j]; |
| 1362 MarkObject(HeapObject::cast(*objects[j])); | 1802 if (object->IsHeapObject()) { |
| 1803 HeapObject* heap_object = HeapObject::cast(object); |
| 1804 MarkBit mark = Marking::MarkBitFrom(heap_object); |
| 1805 MarkObject(heap_object, mark); |
| 1363 } | 1806 } |
| 1364 } | 1807 } |
| 1365 | 1808 |
| 1366 // Once the entire group has been marked, dispose it because it's | 1809 // Once the entire group has been colored grey, set the object group |
| 1367 // not needed anymore. | 1810 // to NULL so it won't be processed again. |
| 1368 entry->Dispose(); | 1811 entry->Dispose(); |
| 1812 object_groups->at(i) = NULL; |
| 1369 } | 1813 } |
| 1370 object_groups->Rewind(last); | 1814 object_groups->Rewind(last); |
| 1371 } | 1815 } |
| 1372 | 1816 |
| 1373 | 1817 |
| 1374 void MarkCompactCollector::MarkImplicitRefGroups() { | 1818 void MarkCompactCollector::MarkImplicitRefGroups() { |
| 1375 List<ImplicitRefGroup*>* ref_groups = | 1819 List<ImplicitRefGroup*>* ref_groups = |
| 1376 heap()->isolate()->global_handles()->implicit_ref_groups(); | 1820 heap()->isolate()->global_handles()->implicit_ref_groups(); |
| 1377 | 1821 |
| 1378 int last = 0; | 1822 int last = 0; |
| 1379 for (int i = 0; i < ref_groups->length(); i++) { | 1823 for (int i = 0; i < ref_groups->length(); i++) { |
| 1380 ImplicitRefGroup* entry = ref_groups->at(i); | 1824 ImplicitRefGroup* entry = ref_groups->at(i); |
| 1381 ASSERT(entry != NULL); | 1825 ASSERT(entry != NULL); |
| 1382 | 1826 |
| 1383 if (!(*entry->parent_)->IsMarked()) { | 1827 if (!IsMarked(*entry->parent_)) { |
| 1384 (*ref_groups)[last++] = entry; | 1828 (*ref_groups)[last++] = entry; |
| 1385 continue; | 1829 continue; |
| 1386 } | 1830 } |
| 1387 | 1831 |
| 1388 Object*** children = entry->children_; | 1832 Object*** children = entry->children_; |
| 1389 // A parent object is marked, so mark all child heap objects. | 1833 // A parent object is marked, so mark all child heap objects. |
| 1390 for (size_t j = 0; j < entry->length_; ++j) { | 1834 for (size_t j = 0; j < entry->length_; ++j) { |
| 1391 if ((*children[j])->IsHeapObject()) { | 1835 if ((*children[j])->IsHeapObject()) { |
| 1392 MarkObject(HeapObject::cast(*children[j])); | 1836 HeapObject* child = HeapObject::cast(*children[j]); |
| 1837 MarkBit mark = Marking::MarkBitFrom(child); |
| 1838 MarkObject(child, mark); |
| 1393 } | 1839 } |
| 1394 } | 1840 } |
| 1395 | 1841 |
| 1396 // Once the entire group has been marked, dispose it because it's | 1842 // Once the entire group has been marked, dispose it because it's |
| 1397 // not needed anymore. | 1843 // not needed anymore. |
| 1398 entry->Dispose(); | 1844 entry->Dispose(); |
| 1399 } | 1845 } |
| 1400 ref_groups->Rewind(last); | 1846 ref_groups->Rewind(last); |
| 1401 } | 1847 } |
| 1402 | 1848 |
| 1403 | 1849 |
| 1404 // Mark all objects reachable from the objects on the marking stack. | 1850 // Mark all objects reachable from the objects on the marking stack. |
| 1405 // Before: the marking stack contains zero or more heap object pointers. | 1851 // Before: the marking stack contains zero or more heap object pointers. |
| 1406 // After: the marking stack is empty, and all objects reachable from the | 1852 // After: the marking stack is empty, and all objects reachable from the |
| 1407 // marking stack have been marked, or are overflowed in the heap. | 1853 // marking stack have been marked, or are overflowed in the heap. |
| 1408 void MarkCompactCollector::EmptyMarkingStack() { | 1854 void MarkCompactCollector::EmptyMarkingDeque() { |
| 1409 while (!marking_stack_.is_empty()) { | 1855 while (!marking_deque_.IsEmpty()) { |
| 1410 while (!marking_stack_.is_empty()) { | 1856 while (!marking_deque_.IsEmpty()) { |
| 1411 HeapObject* object = marking_stack_.Pop(); | 1857 HeapObject* object = marking_deque_.Pop(); |
| 1412 ASSERT(object->IsHeapObject()); | 1858 ASSERT(object->IsHeapObject()); |
| 1413 ASSERT(heap()->Contains(object)); | 1859 ASSERT(heap()->Contains(object)); |
| 1414 ASSERT(object->IsMarked()); | 1860 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 1415 ASSERT(!object->IsOverflowed()); | |
| 1416 | 1861 |
| 1417 // Because the object is marked, we have to recover the original map | 1862 Map* map = object->map(); |
| 1418 // pointer and use it to mark the object's body. | 1863 MarkBit map_mark = Marking::MarkBitFrom(map); |
| 1419 MapWord map_word = object->map_word(); | 1864 MarkObject(map, map_mark); |
| 1420 map_word.ClearMark(); | |
| 1421 Map* map = map_word.ToMap(); | |
| 1422 MarkObject(map); | |
| 1423 | 1865 |
| 1424 StaticMarkingVisitor::IterateBody(map, object); | 1866 StaticMarkingVisitor::IterateBody(map, object); |
| 1425 } | 1867 } |
| 1426 | 1868 |
| 1427 // Process encountered weak maps, mark objects only reachable by those | 1869 // Process encountered weak maps, mark objects only reachable by those |
| 1428 // weak maps and repeat until fix-point is reached. | 1870 // weak maps and repeat until fix-point is reached. |
| 1429 ProcessWeakMaps(); | 1871 ProcessWeakMaps(); |
| 1430 } | 1872 } |
| 1431 } | 1873 } |
| 1432 | 1874 |
| 1433 | 1875 |
| 1434 // Sweep the heap for overflowed objects, clear their overflow bits, and | 1876 // Sweep the heap for overflowed objects, clear their overflow bits, and |
| 1435 // push them on the marking stack. Stop early if the marking stack fills | 1877 // push them on the marking stack. Stop early if the marking stack fills |
| 1436 // before sweeping completes. If sweeping completes, there are no remaining | 1878 // before sweeping completes. If sweeping completes, there are no remaining |
| 1437 // overflowed objects in the heap so the overflow flag on the markings stack | 1879 // overflowed objects in the heap so the overflow flag on the markings stack |
| 1438 // is cleared. | 1880 // is cleared. |
| 1439 void MarkCompactCollector::RefillMarkingStack() { | 1881 void MarkCompactCollector::RefillMarkingDeque() { |
| 1440 ASSERT(marking_stack_.overflowed()); | 1882 ASSERT(marking_deque_.overflowed()); |
| 1441 | 1883 |
| 1442 SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize); | 1884 SemiSpaceIterator new_it(heap()->new_space()); |
| 1443 OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it); | 1885 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); |
| 1444 if (marking_stack_.is_full()) return; | 1886 if (marking_deque_.IsFull()) return; |
| 1445 | 1887 |
| 1446 HeapObjectIterator old_pointer_it(heap()->old_pointer_space(), | 1888 DiscoverGreyObjectsInSpace(heap(), |
| 1447 &OverflowObjectSize); | 1889 &marking_deque_, |
| 1448 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it); | 1890 heap()->old_pointer_space()); |
| 1449 if (marking_stack_.is_full()) return; | 1891 if (marking_deque_.IsFull()) return; |
| 1450 | 1892 |
| 1451 HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize); | 1893 DiscoverGreyObjectsInSpace(heap(), |
| 1452 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it); | 1894 &marking_deque_, |
| 1453 if (marking_stack_.is_full()) return; | 1895 heap()->old_data_space()); |
| 1896 if (marking_deque_.IsFull()) return; |
| 1454 | 1897 |
| 1455 HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize); | 1898 DiscoverGreyObjectsInSpace(heap(), |
| 1456 OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it); | 1899 &marking_deque_, |
| 1457 if (marking_stack_.is_full()) return; | 1900 heap()->code_space()); |
| 1901 if (marking_deque_.IsFull()) return; |
| 1458 | 1902 |
| 1459 HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize); | 1903 DiscoverGreyObjectsInSpace(heap(), |
| 1460 OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it); | 1904 &marking_deque_, |
| 1461 if (marking_stack_.is_full()) return; | 1905 heap()->map_space()); |
| 1906 if (marking_deque_.IsFull()) return; |
| 1462 | 1907 |
| 1463 HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize); | 1908 DiscoverGreyObjectsInSpace(heap(), |
| 1464 OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it); | 1909 &marking_deque_, |
| 1465 if (marking_stack_.is_full()) return; | 1910 heap()->cell_space()); |
| 1911 if (marking_deque_.IsFull()) return; |
| 1466 | 1912 |
| 1467 LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize); | 1913 LargeObjectIterator lo_it(heap()->lo_space()); |
| 1468 OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it); | 1914 DiscoverGreyObjectsWithIterator(heap(), |
| 1469 if (marking_stack_.is_full()) return; | 1915 &marking_deque_, |
| 1916 &lo_it); |
| 1917 if (marking_deque_.IsFull()) return; |
| 1470 | 1918 |
| 1471 marking_stack_.clear_overflowed(); | 1919 marking_deque_.ClearOverflowed(); |
| 1472 } | 1920 } |
| 1473 | 1921 |
| 1474 | 1922 |
| 1475 // Mark all objects reachable (transitively) from objects on the marking | 1923 // Mark all objects reachable (transitively) from objects on the marking |
| 1476 // stack. Before: the marking stack contains zero or more heap object | 1924 // stack. Before: the marking stack contains zero or more heap object |
| 1477 // pointers. After: the marking stack is empty and there are no overflowed | 1925 // pointers. After: the marking stack is empty and there are no overflowed |
| 1478 // objects in the heap. | 1926 // objects in the heap. |
| 1479 void MarkCompactCollector::ProcessMarkingStack() { | 1927 void MarkCompactCollector::ProcessMarkingDeque() { |
| 1480 EmptyMarkingStack(); | 1928 EmptyMarkingDeque(); |
| 1481 while (marking_stack_.overflowed()) { | 1929 while (marking_deque_.overflowed()) { |
| 1482 RefillMarkingStack(); | 1930 RefillMarkingDeque(); |
| 1483 EmptyMarkingStack(); | 1931 EmptyMarkingDeque(); |
| 1484 } | 1932 } |
| 1485 } | 1933 } |
| 1486 | 1934 |
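EmptyMarkingDeque, RefillMarkingDeque and ProcessMarkingDeque together implement a bounded worklist: when the deque is full, an object is left grey in the heap and the overflow flag is set, and a later scan over the spaces rediscovers those grey objects until a fixpoint is reached. Below is a minimal, self-contained model of that protocol, using plain vectors instead of V8's types; the tiny capacity exists only to exercise the overflow path.

```cpp
// Hypothetical model of the marking deque's overflow/refill protocol.
#include <cstddef>
#include <vector>

enum Color { WHITE, GREY, BLACK };

struct MarkingState {
  std::vector<Color> heap;     // one color per fake object
  std::vector<size_t> deque;   // worklist of objects already turned black
  size_t capacity;             // deliberately small to force overflow
  bool overflowed;

  explicit MarkingState(size_t n)
      : heap(n, WHITE), capacity(4), overflowed(false) {}

  void Mark(size_t obj) {                 // analogue of MarkObject/PushBlack
    if (heap[obj] != WHITE) return;
    if (deque.size() < capacity) {
      heap[obj] = BLACK;
      deque.push_back(obj);
    } else {
      heap[obj] = GREY;                   // left behind for a later refill
      overflowed = true;
    }
  }

  void Drain() {                          // analogue of EmptyMarkingDeque
    while (!deque.empty()) {
      size_t obj = deque.back();
      deque.pop_back();
      (void) obj;  // the real collector visits obj's fields via Mark() here
    }
  }

  void Refill() {                         // analogue of RefillMarkingDeque
    for (size_t i = 0; i < heap.size(); i++) {
      if (heap[i] != GREY) continue;
      heap[i] = BLACK;
      deque.push_back(i);
      if (deque.size() == capacity) return;   // still overflowed
    }
    overflowed = false;   // full scan finished: no grey objects remain
  }

  void Process() {                        // analogue of ProcessMarkingDeque
    Drain();
    while (overflowed) {
      Refill();
      Drain();
    }
  }
};
```

In the real collector, draining also iterates each popped object's body, which can re-overflow the deque and force another refill/drain round before the flag is finally cleared.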
| 1487 | 1935 |
| 1488 void MarkCompactCollector::ProcessExternalMarking() { | 1936 void MarkCompactCollector::ProcessExternalMarking() { |
| 1489 bool work_to_do = true; | 1937 bool work_to_do = true; |
| 1490 ASSERT(marking_stack_.is_empty()); | 1938 ASSERT(marking_deque_.IsEmpty()); |
| 1491 while (work_to_do) { | 1939 while (work_to_do) { |
| 1492 MarkObjectGroups(); | 1940 MarkObjectGroups(); |
| 1493 MarkImplicitRefGroups(); | 1941 MarkImplicitRefGroups(); |
| 1494 work_to_do = !marking_stack_.is_empty(); | 1942 work_to_do = !marking_deque_.IsEmpty(); |
| 1495 ProcessMarkingStack(); | 1943 ProcessMarkingDeque(); |
| 1496 } | 1944 } |
| 1497 } | 1945 } |
| 1498 | 1946 |
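ProcessExternalMarking repeats MarkObjectGroups and MarkImplicitRefGroups until the marking deque stays empty, because marking the members of one group can make a member of another group reachable. A minimal model of that fixpoint follows, with vectors of indices standing in for handles and a plain bool array standing in for mark bits; all names here are illustrative.

```cpp
// Hypothetical model of the object-group marking fixpoint.
#include <vector>

struct Group {
  std::vector<int> members;  // indices into a fake "heap" of mark flags
};

// Analogue of MarkObjectGroups(): a group whose members are all unmarked is
// kept for a later round; once any member is marked, every member is marked
// and the group is retired. Returns whether new objects were marked.
bool MarkGroups(std::vector<Group>* groups, std::vector<bool>* marked) {
  bool progress = false;
  std::vector<Group> remaining;
  for (size_t i = 0; i < groups->size(); i++) {
    const Group& g = (*groups)[i];
    bool any_marked = false;
    for (size_t j = 0; j < g.members.size(); j++) {
      if ((*marked)[g.members[j]]) { any_marked = true; break; }
    }
    if (!any_marked) {
      remaining.push_back(g);   // try again in the next round
      continue;
    }
    for (size_t j = 0; j < g.members.size(); j++) {
      if (!(*marked)[g.members[j]]) {
        (*marked)[g.members[j]] = true;
        progress = true;
      }
    }
  }
  groups->swap(remaining);
  return progress;
}

// Analogue of ProcessExternalMarking(): marking one group can trigger
// another, so iterate until nothing new gets marked.
void ProcessExternal(std::vector<Group>* groups, std::vector<bool>* marked) {
  while (MarkGroups(groups, marked)) {
    // The real collector drains the marking deque here, which is what
    // propagates reachability between rounds.
  }
}
```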
| 1499 | 1947 |
| 1500 void MarkCompactCollector::MarkLiveObjects() { | 1948 void MarkCompactCollector::MarkLiveObjects() { |
| 1501 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); | 1949 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); |
| 1502 // The recursive GC marker detects when it is nearing stack overflow, | 1950 // The recursive GC marker detects when it is nearing stack overflow, |
| 1503 // and switches to a different marking system. JS interrupts interfere | 1951 // and switches to a different marking system. JS interrupts interfere |
| 1504 // with the C stack limit check. | 1952 // with the C stack limit check. |
| 1505 PostponeInterruptsScope postpone(heap()->isolate()); | 1953 PostponeInterruptsScope postpone(heap()->isolate()); |
| 1506 | 1954 |
| 1955 bool incremental_marking_overflowed = false; |
| 1956 IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
| 1957 if (incremental_marking->IsMarking()) { |
| 1958 // Finalize the incremental marking and check whether we had an overflow. |
| 1959 // Both markers use the grey color to mark overflowed objects, so the |
| 1960 // non-incremental marker can deal with them as if the overflow |
| 1961 // occurred during normal marking. |
| 1962 // But the incremental marker uses a separate marking deque, |
| 1963 // so we have to explicitly copy its overflow state. |
| 1964 incremental_marking->Finalize(); |
| 1965 incremental_marking_overflowed = |
| 1966 incremental_marking->marking_deque()->overflowed(); |
| 1967 incremental_marking->marking_deque()->ClearOverflowed(); |
| 1968 } else { |
| 1969 // Abort any pending incremental activities e.g. incremental sweeping. |
| 1970 incremental_marking->Abort(); |
| 1971 } |
| 1972 |
| 1507 #ifdef DEBUG | 1973 #ifdef DEBUG |
| 1508 ASSERT(state_ == PREPARE_GC); | 1974 ASSERT(state_ == PREPARE_GC); |
| 1509 state_ = MARK_LIVE_OBJECTS; | 1975 state_ = MARK_LIVE_OBJECTS; |
| 1510 #endif | 1976 #endif |
| 1511 // The to space contains live objects, the from space is used as a marking | 1977 // The to space contains live objects, a page in from space is used as a |
| 1512 // stack. | 1978 // marking stack. |
| 1513 marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(), | 1979 Address marking_deque_start = heap()->new_space()->FromSpacePageLow(); |
| 1514 heap()->new_space()->FromSpaceHigh()); | 1980 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh(); |
| 1981 if (FLAG_force_marking_deque_overflows) { |
| 1982 marking_deque_end = marking_deque_start + 64 * kPointerSize; |
| 1983 } |
| 1984 marking_deque_.Initialize(marking_deque_start, |
| 1985 marking_deque_end); |
| 1986 ASSERT(!marking_deque_.overflowed()); |
| 1515 | 1987 |
| 1516 ASSERT(!marking_stack_.overflowed()); | 1988 if (incremental_marking_overflowed) { |
| 1989 // There are overflowed objects left in the heap after incremental marking. |
| 1990 marking_deque_.SetOverflowed(); |
| 1991 } |
| 1517 | 1992 |
| 1518 PrepareForCodeFlushing(); | 1993 PrepareForCodeFlushing(); |
| 1519 | 1994 |
| 1520 RootMarkingVisitor root_visitor(heap()); | 1995 RootMarkingVisitor root_visitor(heap()); |
| 1521 MarkRoots(&root_visitor); | 1996 MarkRoots(&root_visitor); |
| 1522 | 1997 |
| 1523 // The objects reachable from the roots are marked, yet unreachable | 1998 // The objects reachable from the roots are marked, yet unreachable |
| 1524 // objects are unmarked. Mark objects reachable due to host | 1999 // objects are unmarked. Mark objects reachable due to host |
| 1525 // application specific logic. | 2000 // application specific logic. |
| 1526 ProcessExternalMarking(); | 2001 ProcessExternalMarking(); |
| 1527 | 2002 |
| 1528 // The objects reachable from the roots or object groups are marked, | 2003 // The objects reachable from the roots or object groups are marked, |
| 1529 // yet unreachable objects are unmarked. Mark objects reachable | 2004 // yet unreachable objects are unmarked. Mark objects reachable |
| 1530 // only from weak global handles. | 2005 // only from weak global handles. |
| 1531 // | 2006 // |
| 1532 // First we identify nonlive weak handles and mark them as pending | 2007 // First we identify nonlive weak handles and mark them as pending |
| 1533 // destruction. | 2008 // destruction. |
| 1534 heap()->isolate()->global_handles()->IdentifyWeakHandles( | 2009 heap()->isolate()->global_handles()->IdentifyWeakHandles( |
| 1535 &IsUnmarkedHeapObject); | 2010 &IsUnmarkedHeapObject); |
| 1536 // Then we mark the objects and process the transitive closure. | 2011 // Then we mark the objects and process the transitive closure. |
| 1537 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); | 2012 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); |
| 1538 while (marking_stack_.overflowed()) { | 2013 while (marking_deque_.overflowed()) { |
| 1539 RefillMarkingStack(); | 2014 RefillMarkingDeque(); |
| 1540 EmptyMarkingStack(); | 2015 EmptyMarkingDeque(); |
| 1541 } | 2016 } |
| 1542 | 2017 |
| 1543 // Repeat host application specific marking to mark unmarked objects | 2018 // Repeat host application specific marking to mark unmarked objects |
| 1544 // reachable from the weak roots. | 2019 // reachable from the weak roots. |
| 1545 ProcessExternalMarking(); | 2020 ProcessExternalMarking(); |
| 1546 | 2021 |
| 2022 AfterMarking(); |
| 2023 } |
| 2024 |
| 2025 |
| 2026 void MarkCompactCollector::AfterMarking() { |
| 1547 // Object literal map caches reference symbols (cache keys) and maps | 2027 // Object literal map caches reference symbols (cache keys) and maps |
| 1548 // (cache values). At this point still useful maps have already been | 2028 // (cache values). At this point still useful maps have already been |
| 1549 // marked. Mark the keys for the alive values before we process the | 2029 // marked. Mark the keys for the alive values before we process the |
| 1550 // symbol table. | 2030 // symbol table. |
| 1551 ProcessMapCaches(); | 2031 ProcessMapCaches(); |
| 1552 | 2032 |
| 1553 // Prune the symbol table removing all symbols only pointed to by the | 2033 // Prune the symbol table removing all symbols only pointed to by the |
| 1554 // symbol table. Cannot use symbol_table() here because the symbol | 2034 // symbol table. Cannot use symbol_table() here because the symbol |
| 1555 // table is marked. | 2035 // table is marked. |
| 1556 SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); | 2036 SymbolTable* symbol_table = heap()->symbol_table(); |
| 1557 SymbolTableCleaner v(heap()); | 2037 SymbolTableCleaner v(heap()); |
| 1558 symbol_table->IterateElements(&v); | 2038 symbol_table->IterateElements(&v); |
| 1559 symbol_table->ElementsRemoved(v.PointersRemoved()); | 2039 symbol_table->ElementsRemoved(v.PointersRemoved()); |
| 1560 heap()->external_string_table_.Iterate(&v); | 2040 heap()->external_string_table_.Iterate(&v); |
| 1561 heap()->external_string_table_.CleanUp(); | 2041 heap()->external_string_table_.CleanUp(); |
| 1562 | 2042 |
| 1563 // Process the weak references. | 2043 // Process the weak references. |
| 1564 MarkCompactWeakObjectRetainer mark_compact_object_retainer; | 2044 MarkCompactWeakObjectRetainer mark_compact_object_retainer; |
| 1565 heap()->ProcessWeakReferences(&mark_compact_object_retainer); | 2045 heap()->ProcessWeakReferences(&mark_compact_object_retainer); |
| 1566 | 2046 |
| 1567 // Remove object groups after marking phase. | 2047 // Remove object groups after marking phase. |
| 1568 heap()->isolate()->global_handles()->RemoveObjectGroups(); | 2048 heap()->isolate()->global_handles()->RemoveObjectGroups(); |
| 1569 heap()->isolate()->global_handles()->RemoveImplicitRefGroups(); | 2049 heap()->isolate()->global_handles()->RemoveImplicitRefGroups(); |
| 1570 | 2050 |
| 1571 // Flush code from collected candidates. | 2051 // Flush code from collected candidates. |
| 1572 if (is_code_flushing_enabled()) { | 2052 if (is_code_flushing_enabled()) { |
| 1573 code_flusher_->ProcessCandidates(); | 2053 code_flusher_->ProcessCandidates(); |
| 1574 } | 2054 } |
| 1575 | 2055 |
| 1576 // Clean up dead objects from the runtime profiler. | 2056 // Clean up dead objects from the runtime profiler. |
| 1577 heap()->isolate()->runtime_profiler()->RemoveDeadSamples(); | 2057 heap()->isolate()->runtime_profiler()->RemoveDeadSamples(); |
| 1578 } | 2058 } |
| 1579 | 2059 |
| 1580 | 2060 |
| 1581 void MarkCompactCollector::ProcessMapCaches() { | 2061 void MarkCompactCollector::ProcessMapCaches() { |
| 1582 Object* raw_context = heap()->global_contexts_list_; | 2062 Object* raw_context = heap()->global_contexts_list_; |
| 1583 while (raw_context != heap()->undefined_value()) { | 2063 while (raw_context != heap()->undefined_value()) { |
| 1584 Context* context = reinterpret_cast<Context*>(raw_context); | 2064 Context* context = reinterpret_cast<Context*>(raw_context); |
| 1585 if (context->IsMarked()) { | 2065 if (IsMarked(context)) { |
| 1586 HeapObject* raw_map_cache = | 2066 HeapObject* raw_map_cache = |
| 1587 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX)); | 2067 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX)); |
| 1588 // A map cache may be reachable from the stack. In this case | 2068 // A map cache may be reachable from the stack. In this case |
| 1589 // it's already transitively marked and it's too late to clean | 2069 // it's already transitively marked and it's too late to clean |
| 1590 // up its parts. | 2070 // up its parts. |
| 1591 if (!raw_map_cache->IsMarked() && | 2071 if (!IsMarked(raw_map_cache) && |
| 1592 raw_map_cache != heap()->undefined_value()) { | 2072 raw_map_cache != heap()->undefined_value()) { |
| 1593 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache); | 2073 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache); |
| 1594 int existing_elements = map_cache->NumberOfElements(); | 2074 int existing_elements = map_cache->NumberOfElements(); |
| 1595 int used_elements = 0; | 2075 int used_elements = 0; |
| 1596 for (int i = MapCache::kElementsStartIndex; | 2076 for (int i = MapCache::kElementsStartIndex; |
| 1597 i < map_cache->length(); | 2077 i < map_cache->length(); |
| 1598 i += MapCache::kEntrySize) { | 2078 i += MapCache::kEntrySize) { |
| 1599 Object* raw_key = map_cache->get(i); | 2079 Object* raw_key = map_cache->get(i); |
| 1600 if (raw_key == heap()->undefined_value() || | 2080 if (raw_key == heap()->undefined_value() || |
| 1601 raw_key == heap()->null_value()) continue; | 2081 raw_key == heap()->null_value()) continue; |
| 1602 STATIC_ASSERT(MapCache::kEntrySize == 2); | 2082 STATIC_ASSERT(MapCache::kEntrySize == 2); |
| 1603 Object* raw_map = map_cache->get(i + 1); | 2083 Object* raw_map = map_cache->get(i + 1); |
| 1604 if (raw_map->IsHeapObject() && | 2084 if (raw_map->IsHeapObject() && IsMarked(raw_map)) { |
| 1605 HeapObject::cast(raw_map)->IsMarked()) { | |
| 1606 ++used_elements; | 2085 ++used_elements; |
| 1607 } else { | 2086 } else { |
| 1608 // Delete useless entries with unmarked maps. | 2087 // Delete useless entries with unmarked maps. |
| 1609 ASSERT(raw_map->IsMap()); | 2088 ASSERT(raw_map->IsMap()); |
| 1610 map_cache->set_null_unchecked(heap(), i); | 2089 map_cache->set_null_unchecked(heap(), i); |
| 1611 map_cache->set_null_unchecked(heap(), i + 1); | 2090 map_cache->set_null_unchecked(heap(), i + 1); |
| 1612 } | 2091 } |
| 1613 } | 2092 } |
| 1614 if (used_elements == 0) { | 2093 if (used_elements == 0) { |
| 1615 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value()); | 2094 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value()); |
| 1616 } else { | 2095 } else { |
| 1617 // Note: we don't actually shrink the cache here to avoid | 2096 // Note: we don't actually shrink the cache here to avoid |
| 1618 // extra complexity during GC. We rely on subsequent cache | 2097 // extra complexity during GC. We rely on subsequent cache |
| 1619 // usages (EnsureCapacity) to do this. | 2098 // usages (EnsureCapacity) to do this. |
| 1620 map_cache->ElementsRemoved(existing_elements - used_elements); | 2099 map_cache->ElementsRemoved(existing_elements - used_elements); |
| 1621 MarkObject(map_cache); | 2100 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache); |
| 2101 MarkObject(map_cache, map_cache_markbit); |
| 1622 } | 2102 } |
| 1623 } | 2103 } |
| 1624 } | 2104 } |
| 1625 // Move to next element in the list. | 2105 // Move to next element in the list. |
| 1626 raw_context = context->get(Context::NEXT_CONTEXT_LINK); | 2106 raw_context = context->get(Context::NEXT_CONTEXT_LINK); |
| 1627 } | 2107 } |
| 1628 ProcessMarkingStack(); | 2108 ProcessMarkingDeque(); |
| 1629 } | 2109 } |
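
For readers skimming the loop above: a map cache is a flat array whose payload is (key, map) pairs, which is why the index advances by kEntrySize == 2 and the cached map sits at i + 1. A small, self-contained sketch of the same pruning pattern over a plain vector follows; kHeaderSize, the -1 sentinel, and the liveness predicate are hypothetical stand-ins, not the MapCache API.

#include <cstddef>
#include <vector>

// Flat cache layout mirroring the loop above: a fixed-size header followed by
// (key, value) pairs in consecutive slots. -1 plays the role of undefined.
const int kHeaderSize = 3;   // hypothetical, plays the role of kElementsStartIndex
const int kEntrySize = 2;

// Clear entries whose value is dead and report how many remain in use; a
// result of 0 corresponds to dropping the whole cache, as the diff does.
template <typename IsAlive>
int PruneFlatCache(std::vector<int>* table, IsAlive is_alive) {
  int used = 0;
  for (std::size_t i = kHeaderSize; i + 1 < table->size(); i += kEntrySize) {
    int key = (*table)[i];
    int value = (*table)[i + 1];
    if (key == -1) continue;          // empty slot, nothing cached here
    if (is_alive(value)) {
      used++;
    } else {
      (*table)[i] = -1;               // clear the key
      (*table)[i + 1] = -1;           // clear the cached value
    }
  }
  return used;
}
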
| 1630 | 2110 |
| 1631 | 2111 |
| 1632 #ifdef DEBUG | 2112 #ifdef DEBUG |
| 1633 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) { | 2113 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) { |
| 1634 live_bytes_ += obj->Size(); | 2114 live_bytes_ += obj->Size(); |
| 1635 if (heap()->new_space()->Contains(obj)) { | 2115 if (heap()->new_space()->Contains(obj)) { |
| 1636 live_young_objects_size_ += obj->Size(); | 2116 live_young_objects_size_ += obj->Size(); |
| 1637 } else if (heap()->map_space()->Contains(obj)) { | 2117 } else if (heap()->map_space()->Contains(obj)) { |
| 1638 ASSERT(obj->IsMap()); | 2118 ASSERT(obj->IsMap()); |
| 1639 live_map_objects_size_ += obj->Size(); | 2119 live_map_objects_size_ += obj->Size(); |
| 1640 } else if (heap()->cell_space()->Contains(obj)) { | 2120 } else if (heap()->cell_space()->Contains(obj)) { |
| 1641 ASSERT(obj->IsJSGlobalPropertyCell()); | 2121 ASSERT(obj->IsJSGlobalPropertyCell()); |
| 1642 live_cell_objects_size_ += obj->Size(); | 2122 live_cell_objects_size_ += obj->Size(); |
| 1643 } else if (heap()->old_pointer_space()->Contains(obj)) { | 2123 } else if (heap()->old_pointer_space()->Contains(obj)) { |
| 1644 live_old_pointer_objects_size_ += obj->Size(); | 2124 live_old_pointer_objects_size_ += obj->Size(); |
| 1645 } else if (heap()->old_data_space()->Contains(obj)) { | 2125 } else if (heap()->old_data_space()->Contains(obj)) { |
| 1646 live_old_data_objects_size_ += obj->Size(); | 2126 live_old_data_objects_size_ += obj->Size(); |
| 1647 } else if (heap()->code_space()->Contains(obj)) { | 2127 } else if (heap()->code_space()->Contains(obj)) { |
| 1648 live_code_objects_size_ += obj->Size(); | 2128 live_code_objects_size_ += obj->Size(); |
| 1649 } else if (heap()->lo_space()->Contains(obj)) { | 2129 } else if (heap()->lo_space()->Contains(obj)) { |
| 1650 live_lo_objects_size_ += obj->Size(); | 2130 live_lo_objects_size_ += obj->Size(); |
| 1651 } else { | 2131 } else { |
| 1652 UNREACHABLE(); | 2132 UNREACHABLE(); |
| 1653 } | 2133 } |
| 1654 } | 2134 } |
| 1655 #endif // DEBUG | 2135 #endif // DEBUG |
| 1656 | 2136 |
| 1657 | 2137 |
| 1658 void MarkCompactCollector::SweepLargeObjectSpace() { | 2138 void MarkCompactCollector::ReattachInitialMaps() { |
| 1659 #ifdef DEBUG | 2139 HeapObjectIterator map_iterator(heap()->map_space()); |
| 1660 ASSERT(state_ == MARK_LIVE_OBJECTS); | 2140 for (HeapObject* obj = map_iterator.Next(); |
| 1661 state_ = | 2141 obj != NULL; |
| 1662 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES; | 2142 obj = map_iterator.Next()) { |
| 1663 #endif | 2143 if (obj->IsFreeSpace()) continue; |
| 1664 // Deallocate unmarked objects and clear marked bits for marked objects. | 2144 Map* map = Map::cast(obj); |
| 1665 heap()->lo_space()->FreeUnmarkedObjects(); | |
| 1666 } | |
| 1667 | 2145 |
| 2146 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); |
| 2147 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; |
| 1668 | 2148 |
| 1669 // Safe to use during marking phase only. | 2149 if (map->attached_to_shared_function_info()) { |
| 1670 bool MarkCompactCollector::SafeIsMap(HeapObject* object) { | 2150 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map); |
| 1671 MapWord metamap = object->map_word(); | 2151 } |
| 1672 metamap.ClearMark(); | 2152 } |
| 1673 return metamap.ToMap()->instance_type() == MAP_TYPE; | |
| 1674 } | 2153 } |
| 1675 | 2154 |
| 1676 | 2155 |
| 1677 void MarkCompactCollector::ClearNonLiveTransitions() { | 2156 void MarkCompactCollector::ClearNonLiveTransitions() { |
| 1678 HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject); | 2157 HeapObjectIterator map_iterator(heap()->map_space()); |
| 1679 // Iterate over the map space, setting map transitions that go from | 2158 // Iterate over the map space, setting map transitions that go from |
| 1680 // a marked map to an unmarked map to null transitions. At the same time, | 2159 // a marked map to an unmarked map to null transitions. At the same time, |
| 1681 // set all the prototype fields of maps back to their original value, | 2160 // set all the prototype fields of maps back to their original value, |
| 1682 // dropping the back pointers temporarily stored in the prototype field. | 2161 // dropping the back pointers temporarily stored in the prototype field. |
| 1683 // Setting the prototype field requires following the linked list of | 2162 // Setting the prototype field requires following the linked list of |
| 1684 // back pointers, reversing them all at once. This allows us to find | 2163 // back pointers, reversing them all at once. This allows us to find |
| 1685 // those maps with map transitions that need to be nulled, and only | 2164 // those maps with map transitions that need to be nulled, and only |
| 1686 // scan the descriptor arrays of those maps, not all maps. | 2165 // scan the descriptor arrays of those maps, not all maps. |
| 1687 // All of these actions are carried out only on maps of JSObjects | 2166 // All of these actions are carried out only on maps of JSObjects |
| 1688 // and related subtypes. | 2167 // and related subtypes. |
| 1689 for (HeapObject* obj = map_iterator.next(); | 2168 for (HeapObject* obj = map_iterator.Next(); |
| 1690 obj != NULL; obj = map_iterator.next()) { | 2169 obj != NULL; obj = map_iterator.Next()) { |
| 1691 Map* map = reinterpret_cast<Map*>(obj); | 2170 Map* map = reinterpret_cast<Map*>(obj); |
| 1692 if (!map->IsMarked() && map->IsByteArray()) continue; | 2171 MarkBit map_mark = Marking::MarkBitFrom(map); |
| 2172 if (map->IsFreeSpace()) continue; |
| 1693 | 2173 |
| 1694 ASSERT(SafeIsMap(map)); | 2174 ASSERT(map->IsMap()); |
| 1695 // Only JSObject and subtypes have map transitions and back pointers. | 2175 // Only JSObject and subtypes have map transitions and back pointers. |
| 1696 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); | 2176 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); |
| 1697 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; | 2177 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; |
| 1698 | 2178 |
| 1699 if (map->IsMarked() && map->attached_to_shared_function_info()) { | 2179 if (map_mark.Get() && |
| 2180 map->attached_to_shared_function_info()) { |
| 1700 // This map is used for inobject slack tracking and has been detached | 2181 // This map is used for inobject slack tracking and has been detached |
| 1701 // from SharedFunctionInfo during the mark phase. | 2182 // from SharedFunctionInfo during the mark phase. |
| 1702 // Since it survived the GC, reattach it now. | 2183 // Since it survived the GC, reattach it now. |
| 1703 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map); | 2184 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map); |
| 1704 } | 2185 } |
| 1705 | 2186 |
| 1706 // Clear dead prototype transitions. | 2187 // Clear dead prototype transitions. |
| 1707 int number_of_transitions = map->NumberOfProtoTransitions(); | 2188 int number_of_transitions = map->NumberOfProtoTransitions(); |
| 1708 if (number_of_transitions > 0) { | 2189 FixedArray* prototype_transitions = map->prototype_transitions(); |
| 1709 FixedArray* prototype_transitions = | 2190 |
| 1710 map->unchecked_prototype_transitions(); | 2191 int new_number_of_transitions = 0; |
| 1711 int new_number_of_transitions = 0; | 2192 const int header = Map::kProtoTransitionHeaderSize; |
| 1712 const int header = Map::kProtoTransitionHeaderSize; | 2193 const int proto_offset = |
| 1713 const int proto_offset = | 2194 header + Map::kProtoTransitionPrototypeOffset; |
| 1714 header + Map::kProtoTransitionPrototypeOffset; | 2195 const int map_offset = header + Map::kProtoTransitionMapOffset; |
| 1715 const int map_offset = header + Map::kProtoTransitionMapOffset; | 2196 const int step = Map::kProtoTransitionElementsPerEntry; |
| 1716 const int step = Map::kProtoTransitionElementsPerEntry; | 2197 for (int i = 0; i < number_of_transitions; i++) { |
| 1717 for (int i = 0; i < number_of_transitions; i++) { | 2198 Object* prototype = prototype_transitions->get(proto_offset + i * step); |
| 1718 Object* prototype = prototype_transitions->get(proto_offset + i * step); | 2199 Object* cached_map = prototype_transitions->get(map_offset + i * step); |
| 1719 Object* cached_map = prototype_transitions->get(map_offset + i * step); | 2200 if (IsMarked(prototype) && IsMarked(cached_map)) { |
| 1720 if (HeapObject::cast(prototype)->IsMarked() && | 2201 if (new_number_of_transitions != i) { |
| 1721 HeapObject::cast(cached_map)->IsMarked()) { | 2202 prototype_transitions->set_unchecked( |
| 1722 if (new_number_of_transitions != i) { | 2203 heap_, |
| 1723 prototype_transitions->set_unchecked( | 2204 proto_offset + new_number_of_transitions * step, |
| 1724 heap_, | 2205 prototype, |
| 1725 proto_offset + new_number_of_transitions * step, | 2206 UPDATE_WRITE_BARRIER); |
| 1726 prototype, | 2207 prototype_transitions->set_unchecked( |
| 1727 UPDATE_WRITE_BARRIER); | 2208 heap_, |
| 1728 prototype_transitions->set_unchecked( | 2209 map_offset + new_number_of_transitions * step, |
| 1729 heap_, | 2210 cached_map, |
| 1730 map_offset + new_number_of_transitions * step, | 2211 SKIP_WRITE_BARRIER); |
| 1731 cached_map, | |
| 1732 SKIP_WRITE_BARRIER); | |
| 1733 } | |
| 1734 new_number_of_transitions++; | |
| 1735 } | 2212 } |
| 1736 } | 2213 } |
| 1737 | 2214 |
| 1738 // Fill slots that became free with undefined value. | 2215 // Fill slots that became free with undefined value. |
| 1739 Object* undefined = heap()->raw_unchecked_undefined_value(); | 2216 Object* undefined = heap()->undefined_value(); |
| 1740 for (int i = new_number_of_transitions * step; | 2217 for (int i = new_number_of_transitions * step; |
| 1741 i < number_of_transitions * step; | 2218 i < number_of_transitions * step; |
| 1742 i++) { | 2219 i++) { |
| 2220 // The undefined object is on a page that is never compacted and never |
| 2221 // in new space so it is OK to skip the write barrier. Also it's a |
| 2222 // root. |
| 1743 prototype_transitions->set_unchecked(heap_, | 2223 prototype_transitions->set_unchecked(heap_, |
| 1744 header + i, | 2224 header + i, |
| 1745 undefined, | 2225 undefined, |
| 1746 SKIP_WRITE_BARRIER); | 2226 SKIP_WRITE_BARRIER); |
| 2227 |
| 2228 Object** undefined_slot = |
| 2229 prototype_transitions->data_start() + i; |
| 2230 RecordSlot(undefined_slot, undefined_slot, undefined); |
| 1747 } | 2231 } |
| 1748 map->SetNumberOfProtoTransitions(new_number_of_transitions); | 2232 map->SetNumberOfProtoTransitions(new_number_of_transitions); |
| 1749 } | 2233 } |
| 1750 | 2234 |
| 1751 // Follow the chain of back pointers to find the prototype. | 2235 // Follow the chain of back pointers to find the prototype. |
| 1752 Map* current = map; | 2236 Map* current = map; |
| 1753 while (SafeIsMap(current)) { | 2237 while (current->IsMap()) { |
| 1754 current = reinterpret_cast<Map*>(current->prototype()); | 2238 current = reinterpret_cast<Map*>(current->prototype()); |
| 1755 ASSERT(current->IsHeapObject()); | 2239 ASSERT(current->IsHeapObject()); |
| 1756 } | 2240 } |
| 1757 Object* real_prototype = current; | 2241 Object* real_prototype = current; |
| 1758 | 2242 |
| 1759 // Follow back pointers, setting them to prototype, | 2243 // Follow back pointers, setting them to prototype, |
| 1760 // clearing map transitions when necessary. | 2244 // clearing map transitions when necessary. |
| 1761 current = map; | 2245 current = map; |
| 1762 bool on_dead_path = !current->IsMarked(); | 2246 bool on_dead_path = !map_mark.Get(); |
| 1763 Object* next; | 2247 Object* next; |
| 1764 while (SafeIsMap(current)) { | 2248 while (current->IsMap()) { |
| 1765 next = current->prototype(); | 2249 next = current->prototype(); |
| 1766 // There should never be a dead map above a live map. | 2250 // There should never be a dead map above a live map. |
| 1767 ASSERT(on_dead_path || current->IsMarked()); | 2251 MarkBit current_mark = Marking::MarkBitFrom(current); |
| 2252 bool is_alive = current_mark.Get(); |
| 2253 ASSERT(on_dead_path || is_alive); |
| 1768 | 2254 |
| 1769 // A live map above a dead map indicates a dead transition. | 2255 // A live map above a dead map indicates a dead transition. |
| 1770 // This test will always be false on the first iteration. | 2256 // This test will always be false on the first iteration. |
| 1771 if (on_dead_path && current->IsMarked()) { | 2257 if (on_dead_path && is_alive) { |
| 1772 on_dead_path = false; | 2258 on_dead_path = false; |
| 1773 current->ClearNonLiveTransitions(heap(), real_prototype); | 2259 current->ClearNonLiveTransitions(heap(), real_prototype); |
| 1774 } | 2260 } |
| 1775 *HeapObject::RawField(current, Map::kPrototypeOffset) = | 2261 *HeapObject::RawField(current, Map::kPrototypeOffset) = |
| 1776 real_prototype; | 2262 real_prototype; |
| 2263 |
| 2264 if (is_alive) { |
| 2265 Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset); |
| 2266 RecordSlot(slot, slot, real_prototype); |
| 2267 } |
| 1777 current = reinterpret_cast<Map*>(next); | 2268 current = reinterpret_cast<Map*>(next); |
| 1778 } | 2269 } |
| 1779 } | 2270 } |
| 1780 } | 2271 } |
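
The middle of the function above compacts the prototype-transition table in place: live (prototype, cached map) pairs are slid toward the front and the tail is overwritten with undefined. A small, self-contained sketch of just that compaction step, using plain pointers and a caller-supplied liveness predicate instead of the Map/FixedArray layout (purely illustrative):

#include <cstddef>
#include <vector>

struct ProtoTransition {
  const void* prototype;
  const void* cached_map;
};

// Slide live entries to the front, keeping their relative order, and clear the
// now-unused tail. Returns the new number of live transitions, which the
// caller would store back on the map (SetNumberOfProtoTransitions above).
template <typename IsLive>
size_t CompactTransitions(std::vector<ProtoTransition>* entries, IsLive is_live) {
  size_t live = 0;
  for (size_t i = 0; i < entries->size(); i++) {
    const ProtoTransition& e = (*entries)[i];
    if (is_live(e.prototype) && is_live(e.cached_map)) {
      if (live != i) (*entries)[live] = e;   // move the entry down, as the diff does
      live++;
    }
  }
  for (size_t i = live; i < entries->size(); i++) {
    (*entries)[i].prototype = NULL;          // stands in for undefined_value()
    (*entries)[i].cached_map = NULL;
  }
  return live;
}
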
| 1781 | 2272 |
| 1782 | 2273 |
| 1783 void MarkCompactCollector::ProcessWeakMaps() { | 2274 void MarkCompactCollector::ProcessWeakMaps() { |
| 1784 Object* weak_map_obj = encountered_weak_maps(); | 2275 Object* weak_map_obj = encountered_weak_maps(); |
| 1785 while (weak_map_obj != Smi::FromInt(0)) { | 2276 while (weak_map_obj != Smi::FromInt(0)) { |
| 1786 ASSERT(HeapObject::cast(weak_map_obj)->IsMarked()); | 2277 ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj))); |
| 1787 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); | 2278 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); |
| 1788 ObjectHashTable* table = weak_map->unchecked_table(); | 2279 ObjectHashTable* table = weak_map->unchecked_table(); |
| 1789 for (int i = 0; i < table->Capacity(); i++) { | 2280 for (int i = 0; i < table->Capacity(); i++) { |
| 1790 if (HeapObject::cast(table->KeyAt(i))->IsMarked()) { | 2281 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { |
| 1791 Object* value = table->get(table->EntryToValueIndex(i)); | 2282 Object* value = table->get(table->EntryToValueIndex(i)); |
| 1792 StaticMarkingVisitor::MarkObjectByPointer(heap(), &value); | 2283 StaticMarkingVisitor::VisitPointer(heap(), &value); |
| 1793 table->set_unchecked(heap(), | 2284 table->set_unchecked(heap(), |
| 1794 table->EntryToValueIndex(i), | 2285 table->EntryToValueIndex(i), |
| 1795 value, | 2286 value, |
| 1796 UPDATE_WRITE_BARRIER); | 2287 UPDATE_WRITE_BARRIER); |
| 1797 } | 2288 } |
| 1798 } | 2289 } |
| 1799 weak_map_obj = weak_map->next(); | 2290 weak_map_obj = weak_map->next(); |
| 1800 } | 2291 } |
| 1801 } | 2292 } |
| 1802 | 2293 |
| 1803 | 2294 |
| 1804 void MarkCompactCollector::ClearWeakMaps() { | 2295 void MarkCompactCollector::ClearWeakMaps() { |
| 1805 Object* weak_map_obj = encountered_weak_maps(); | 2296 Object* weak_map_obj = encountered_weak_maps(); |
| 1806 while (weak_map_obj != Smi::FromInt(0)) { | 2297 while (weak_map_obj != Smi::FromInt(0)) { |
| 1807 ASSERT(HeapObject::cast(weak_map_obj)->IsMarked()); | 2298 ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj))); |
| 1808 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); | 2299 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); |
| 1809 ObjectHashTable* table = weak_map->unchecked_table(); | 2300 ObjectHashTable* table = weak_map->unchecked_table(); |
| 1810 for (int i = 0; i < table->Capacity(); i++) { | 2301 for (int i = 0; i < table->Capacity(); i++) { |
| 1811 if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) { | 2302 if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { |
| 1812 table->RemoveEntry(i, heap()); | 2303 table->RemoveEntry(i, heap()); |
| 1813 } | 2304 } |
| 1814 } | 2305 } |
| 1815 weak_map_obj = weak_map->next(); | 2306 weak_map_obj = weak_map->next(); |
| 1816 weak_map->set_next(Smi::FromInt(0)); | 2307 weak_map->set_next(Smi::FromInt(0)); |
| 1817 } | 2308 } |
| 1818 set_encountered_weak_maps(Smi::FromInt(0)); | 2309 set_encountered_weak_maps(Smi::FromInt(0)); |
| 1819 } | 2310 } |
| 1820 | 2311 |
| 1821 // ------------------------------------------------------------------------- | |
| 1822 // Phase 2: Encode forwarding addresses. | |
| 1823 // When compacting, forwarding addresses for objects in old space and map | |
| 1824 // space are encoded in their map pointer word (along with an encoding of | |
| 1825 // their map pointers). | |
| 1826 // | |
| 1827 // The exact encoding is described in the comments for class MapWord in | |
| 1828 // objects.h. | |
| 1829 // | |
| 1830 // An address range [start, end) can have both live and non-live objects. | |
| 1831 // Maximal non-live regions are marked so they can be skipped on subsequent | |
| 1832 // sweeps of the heap. A distinguished map-pointer encoding is used to mark | |
| 1833 // free regions of one-word size (in which case the next word is the start | |
| 1834 // of a live object). A second distinguished map-pointer encoding is used | |
| 1835 // to mark free regions larger than one word, and the size of the free | |
| 1836 // region (including the first word) is written to the second word of the | |
| 1837 // region. | |
| 1838 // | |
| 1839 // Any valid map page offset must lie in the object area of the page, so map | |
| 1840 // page offsets less than Page::kObjectStartOffset are invalid. We use a | |
| 1841 // pair of distinguished invalid map encodings (for single word and multiple | |
| 1842 // words) to indicate free regions in the page found during computation of | |
| 1843 // forwarding addresses and skipped over in subsequent sweeps. | |
| 1844 | |
| 1845 | |
| 1846 // Encode a free region, defined by the given start address and size, in the | |
| 1847 // first word or two of the region. | |
| 1848 void EncodeFreeRegion(Address free_start, int free_size) { | |
| 1849 ASSERT(free_size >= kIntSize); | |
| 1850 if (free_size == kIntSize) { | |
| 1851 Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding; | |
| 1852 } else { | |
| 1853 ASSERT(free_size >= 2 * kIntSize); | |
| 1854 Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding; | |
| 1855 Memory::int_at(free_start + kIntSize) = free_size; | |
| 1856 } | |
| 1857 | |
| 1858 #ifdef DEBUG | |
| 1859 // Zap the body of the free region. | |
| 1860 if (FLAG_enable_slow_asserts) { | |
| 1861 for (int offset = 2 * kIntSize; | |
| 1862 offset < free_size; | |
| 1863 offset += kPointerSize) { | |
| 1864 Memory::Address_at(free_start + offset) = kZapValue; | |
| 1865 } | |
| 1866 } | |
| 1867 #endif | |
| 1868 } | |
| 1869 | |
| 1870 | |
| 1871 // Try to promote all objects in new space. Heap numbers and sequential | |
| 1872 // strings are promoted to the code space, large objects to large object space, | |
| 1873 // and all others to the old space. | |
| 1874 inline MaybeObject* MCAllocateFromNewSpace(Heap* heap, | |
| 1875 HeapObject* object, | |
| 1876 int object_size) { | |
| 1877 MaybeObject* forwarded; | |
| 1878 if (object_size > heap->MaxObjectSizeInPagedSpace()) { | |
| 1879 forwarded = Failure::Exception(); | |
| 1880 } else { | |
| 1881 OldSpace* target_space = heap->TargetSpace(object); | |
| 1882 ASSERT(target_space == heap->old_pointer_space() || | |
| 1883 target_space == heap->old_data_space()); | |
| 1884 forwarded = target_space->MCAllocateRaw(object_size); | |
| 1885 } | |
| 1886 Object* result; | |
| 1887 if (!forwarded->ToObject(&result)) { | |
| 1888 result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked(); | |
| 1889 } | |
| 1890 return result; | |
| 1891 } | |
| 1892 | |
| 1893 | |
| 1894 // Allocation functions for the paged spaces call the space's MCAllocateRaw. | |
| 1895 MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace( | |
| 1896 Heap *heap, | |
| 1897 HeapObject* ignore, | |
| 1898 int object_size) { | |
| 1899 return heap->old_pointer_space()->MCAllocateRaw(object_size); | |
| 1900 } | |
| 1901 | |
| 1902 | |
| 1903 MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace( | |
| 1904 Heap* heap, | |
| 1905 HeapObject* ignore, | |
| 1906 int object_size) { | |
| 1907 return heap->old_data_space()->MCAllocateRaw(object_size); | |
| 1908 } | |
| 1909 | |
| 1910 | |
| 1911 MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace( | |
| 1912 Heap* heap, | |
| 1913 HeapObject* ignore, | |
| 1914 int object_size) { | |
| 1915 return heap->code_space()->MCAllocateRaw(object_size); | |
| 1916 } | |
| 1917 | |
| 1918 | |
| 1919 MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace( | |
| 1920 Heap* heap, | |
| 1921 HeapObject* ignore, | |
| 1922 int object_size) { | |
| 1923 return heap->map_space()->MCAllocateRaw(object_size); | |
| 1924 } | |
| 1925 | |
| 1926 | |
| 1927 MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace( | |
| 1928 Heap* heap, HeapObject* ignore, int object_size) { | |
| 1929 return heap->cell_space()->MCAllocateRaw(object_size); | |
| 1930 } | |
| 1931 | |
| 1932 | |
| 1933 // The forwarding address is encoded at the same offset as the current | |
| 1934 // to-space object, but in from space. | |
| 1935 inline void EncodeForwardingAddressInNewSpace(Heap* heap, | |
| 1936 HeapObject* old_object, | |
| 1937 int object_size, | |
| 1938 Object* new_object, | |
| 1939 int* ignored) { | |
| 1940 int offset = | |
| 1941 heap->new_space()->ToSpaceOffsetForAddress(old_object->address()); | |
| 1942 Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) = | |
| 1943 HeapObject::cast(new_object)->address(); | |
| 1944 } | |
| 1945 | |
| 1946 | |
| 1947 // The forwarding address is encoded in the map pointer of the object as an | |
| 1948 // offset (in terms of live bytes) from the address of the first live object | |
| 1949 // in the page. | |
| 1950 inline void EncodeForwardingAddressInPagedSpace(Heap* heap, | |
| 1951 HeapObject* old_object, | |
| 1952 int object_size, | |
| 1953 Object* new_object, | |
| 1954 int* offset) { | |
| 1955 // Record the forwarding address of the first live object if necessary. | |
| 1956 if (*offset == 0) { | |
| 1957 Page::FromAddress(old_object->address())->mc_first_forwarded = | |
| 1958 HeapObject::cast(new_object)->address(); | |
| 1959 } | |
| 1960 | |
| 1961 MapWord encoding = | |
| 1962 MapWord::EncodeAddress(old_object->map()->address(), *offset); | |
| 1963 old_object->set_map_word(encoding); | |
| 1964 *offset += object_size; | |
| 1965 ASSERT(*offset <= Page::kObjectAreaSize); | |
| 1966 } | |
| 1967 | |
| 1968 | |
| 1969 // Most non-live objects are ignored. | |
| 1970 inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {} | |
| 1971 | |
| 1972 | |
| 1973 // Function template that, given a range of addresses (eg, a semispace or a | |
| 1974 // paged space page), iterates through the objects in the range to clear | |
| 1975 // mark bits and compute and encode forwarding addresses. As a side effect, | |
| 1976 // maximal free chunks are marked so that they can be skipped on subsequent | |
| 1977 // sweeps. | |
| 1978 // | |
| 1979 // The template parameters are an allocation function, a forwarding address | |
| 1980 // encoding function, and a function to process non-live objects. | |
| 1981 template<MarkCompactCollector::AllocationFunction Alloc, | |
| 1982 MarkCompactCollector::EncodingFunction Encode, | |
| 1983 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive> | |
| 1984 inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector, | |
| 1985 Address start, | |
| 1986 Address end, | |
| 1987 int* offset) { | |
| 1988 // The start address of the current free region while sweeping the space. | |
| 1989 // This address is set when a transition from live to non-live objects is | |
| 1990 // encountered. A value (an encoding of the 'next free region' pointer) | |
| 1991 // is written to memory at this address when a transition from non-live to | |
| 1992 // live objects is encountered. | |
| 1993 Address free_start = NULL; | |
| 1994 | |
| 1995 // A flag giving the state of the previously swept object. Initially true | |
| 1996 // to ensure that free_start is initialized to a proper address before | |
| 1997 // trying to write to it. | |
| 1998 bool is_prev_alive = true; | |
| 1999 | |
| 2000 int object_size; // Will be set on each iteration of the loop. | |
| 2001 for (Address current = start; current < end; current += object_size) { | |
| 2002 HeapObject* object = HeapObject::FromAddress(current); | |
| 2003 if (object->IsMarked()) { | |
| 2004 object->ClearMark(); | |
| 2005 collector->tracer()->decrement_marked_count(); | |
| 2006 object_size = object->Size(); | |
| 2007 | |
| 2008 Object* forwarded = | |
| 2009 Alloc(collector->heap(), object, object_size)->ToObjectUnchecked(); | |
| 2010 Encode(collector->heap(), object, object_size, forwarded, offset); | |
| 2011 | |
| 2012 #ifdef DEBUG | |
| 2013 if (FLAG_gc_verbose) { | |
| 2014 PrintF("forward %p -> %p.\n", object->address(), | |
| 2015 HeapObject::cast(forwarded)->address()); | |
| 2016 } | |
| 2017 #endif | |
| 2018 if (!is_prev_alive) { // Transition from non-live to live. | |
| 2019 EncodeFreeRegion(free_start, static_cast<int>(current - free_start)); | |
| 2020 is_prev_alive = true; | |
| 2021 } | |
| 2022 } else { // Non-live object. | |
| 2023 object_size = object->Size(); | |
| 2024 ProcessNonLive(object, collector->heap()->isolate()); | |
| 2025 if (is_prev_alive) { // Transition from live to non-live. | |
| 2026 free_start = current; | |
| 2027 is_prev_alive = false; | |
| 2028 } | |
| 2029 LiveObjectList::ProcessNonLive(object); | |
| 2030 } | |
| 2031 } | |
| 2032 | |
| 2033 // If we ended on a free region, mark it. | |
| 2034 if (!is_prev_alive) { | |
| 2035 EncodeFreeRegion(free_start, static_cast<int>(end - free_start)); | |
| 2036 } | |
| 2037 } | |
| 2038 | |
| 2039 | |
| 2040 // Functions to encode the forwarding pointers in each compactable space. | |
| 2041 void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() { | |
| 2042 int ignored; | |
| 2043 EncodeForwardingAddressesInRange<MCAllocateFromNewSpace, | |
| 2044 EncodeForwardingAddressInNewSpace, | |
| 2045 IgnoreNonLiveObject>( | |
| 2046 this, | |
| 2047 heap()->new_space()->bottom(), | |
| 2048 heap()->new_space()->top(), | |
| 2049 &ignored); | |
| 2050 } | |
| 2051 | |
| 2052 | |
| 2053 template<MarkCompactCollector::AllocationFunction Alloc, | |
| 2054 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive> | |
| 2055 void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace( | |
| 2056 PagedSpace* space) { | |
| 2057 PageIterator it(space, PageIterator::PAGES_IN_USE); | |
| 2058 while (it.has_next()) { | |
| 2059 Page* p = it.next(); | |
| 2060 | |
| 2061 // The offset of each live object in the page from the first live object | |
| 2062 // in the page. | |
| 2063 int offset = 0; | |
| 2064 EncodeForwardingAddressesInRange<Alloc, | |
| 2065 EncodeForwardingAddressInPagedSpace, | |
| 2066 ProcessNonLive>( | |
| 2067 this, | |
| 2068 p->ObjectAreaStart(), | |
| 2069 p->AllocationTop(), | |
| 2070 &offset); | |
| 2071 } | |
| 2072 } | |
| 2073 | |
| 2074 | 2312 |
| 2075 // We scavenge new space simultaneously with sweeping. This is done in two | 2313 // We scavenge new space simultaneously with sweeping. This is done in two |
| 2076 // passes. | 2314 // passes. |
| 2315 // |
| 2077 // The first pass migrates all alive objects from one semispace to another or | 2316 // The first pass migrates all alive objects from one semispace to another or |
| 2078 // promotes them to old space. Forwading address is written directly into | 2317 // promotes them to old space. Forwarding address is written directly into |
| 2079 // first word of object without any encoding. If object is dead we are writing | 2318 // first word of object without any encoding. If object is dead we write |
| 2080 // NULL as a forwarding address. | 2319 // NULL as a forwarding address. |
| 2081 // The second pass updates pointers to new space in all spaces. It is possible | 2320 // |
| 2082 // to encounter pointers to dead objects during traversal of dirty regions we | 2321 // The second pass updates pointers to new space in all spaces. It is possible |
| 2083 // should clear them to avoid encountering them during next dirty regions | 2322 // to encounter pointers to dead new space objects during traversal of pointers |
| 2084 // iteration. | 2323 // to new space. We should clear them to avoid encountering them during next |
| 2085 static void MigrateObject(Heap* heap, | 2324 // pointer iteration. This is an issue if the store buffer overflows and we |
| 2086 Address dst, | 2325 // have to scan the entire old space, including dead objects, looking for |
| 2087 Address src, | 2326 // pointers to new space. |
| 2088 int size, | 2327 void MarkCompactCollector::MigrateObject(Address dst, |
| 2089 bool to_old_space) { | 2328 Address src, |
| 2090 if (to_old_space) { | 2329 int size, |
| 2091 heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size); | 2330 AllocationSpace dest) { |
| 2331 if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) { |
| 2332 Address src_slot = src; |
| 2333 Address dst_slot = dst; |
| 2334 ASSERT(IsAligned(size, kPointerSize)); |
| 2335 |
| 2336 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { |
| 2337 Object* value = Memory::Object_at(src_slot); |
| 2338 |
| 2339 Memory::Object_at(dst_slot) = value; |
| 2340 |
| 2341 if (heap_->InNewSpace(value)) { |
| 2342 heap_->store_buffer()->Mark(dst_slot); |
| 2343 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { |
| 2344 SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 2345 &migration_slots_buffer_, |
| 2346 reinterpret_cast<Object**>(dst_slot), |
| 2347 SlotsBuffer::IGNORE_OVERFLOW); |
| 2348 } |
| 2349 |
| 2350 src_slot += kPointerSize; |
| 2351 dst_slot += kPointerSize; |
| 2352 } |
| 2353 |
| 2354 if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) { |
| 2355 Address code_entry_slot = dst + JSFunction::kCodeEntryOffset; |
| 2356 Address code_entry = Memory::Address_at(code_entry_slot); |
| 2357 |
| 2358 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { |
| 2359 SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 2360 &migration_slots_buffer_, |
| 2361 SlotsBuffer::CODE_ENTRY_SLOT, |
| 2362 code_entry_slot, |
| 2363 SlotsBuffer::IGNORE_OVERFLOW); |
| 2364 } |
| 2365 } |
| 2366 } else if (dest == CODE_SPACE) { |
| 2367 PROFILE(heap()->isolate(), CodeMoveEvent(src, dst)); |
| 2368 heap()->MoveBlock(dst, src, size); |
| 2369 SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 2370 &migration_slots_buffer_, |
| 2371 SlotsBuffer::RELOCATED_CODE_OBJECT, |
| 2372 dst, |
| 2373 SlotsBuffer::IGNORE_OVERFLOW); |
| 2374 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src); |
| 2092 } else { | 2375 } else { |
| 2093 heap->CopyBlock(dst, src, size); | 2376 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE); |
| 2377 heap()->MoveBlock(dst, src, size); |
| 2094 } | 2378 } |
| 2095 | |
| 2096 Memory::Address_at(src) = dst; | 2379 Memory::Address_at(src) = dst; |
| 2097 } | 2380 } |
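
The word-by-word copy above exists so that every copied slot can be re-examined: slots that now hold new-space pointers go to the store buffer, and slots pointing into evacuation candidates go into a slots buffer for later updating. A minimal, self-contained sketch of that per-slot decision follows; the predicates and RememberedSets container are simplifications, not V8's StoreBuffer/SlotsBuffer types.

#include <cstdint>
#include <vector>

typedef uintptr_t Slot;  // address of a copied pointer-sized field

struct RememberedSets {
  std::vector<Slot> store_buffer;   // slots holding new-space pointers
  std::vector<Slot> slots_buffer;   // slots into evacuation candidates
};

// Stand-ins for Heap::InNewSpace and IsOnEvacuationCandidate.
typedef bool (*Predicate)(uintptr_t value);

// Record one migrated slot, mirroring the body of the copy loop above.
void RecordMigratedSlot(Slot dst_slot, uintptr_t value,
                        Predicate in_new_space, Predicate on_candidate,
                        RememberedSets* sets) {
  if (in_new_space(value)) {
    sets->store_buffer.push_back(dst_slot);      // old-to-new pointer
  } else if (on_candidate(value)) {
    sets->slots_buffer.push_back(dst_slot);      // pointer that will move
  }
  // Pointers into ordinary, non-moving old space need no extra bookkeeping.
}
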
| 2098 | 2381 |
| 2099 | 2382 |
| 2100 class StaticPointersToNewGenUpdatingVisitor : public | |
| 2101 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> { | |
| 2102 public: | |
| 2103 static inline void VisitPointer(Heap* heap, Object** p) { | |
| 2104 if (!(*p)->IsHeapObject()) return; | |
| 2105 | |
| 2106 HeapObject* obj = HeapObject::cast(*p); | |
| 2107 Address old_addr = obj->address(); | |
| 2108 | |
| 2109 if (heap->new_space()->Contains(obj)) { | |
| 2110 ASSERT(heap->InFromSpace(*p)); | |
| 2111 *p = HeapObject::FromAddress(Memory::Address_at(old_addr)); | |
| 2112 } | |
| 2113 } | |
| 2114 }; | |
| 2115 | |
| 2116 | |
| 2117 // Visitor for updating pointers from live objects in old spaces to new space. | 2383 // Visitor for updating pointers from live objects in old spaces to new space. |
| 2118 // It does not expect to encounter pointers to dead objects. | 2384 // It does not expect to encounter pointers to dead objects. |
| 2119 class PointersToNewGenUpdatingVisitor: public ObjectVisitor { | 2385 class PointersUpdatingVisitor: public ObjectVisitor { |
| 2120 public: | 2386 public: |
| 2121 explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { } | 2387 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } |
| 2122 | 2388 |
| 2123 void VisitPointer(Object** p) { | 2389 void VisitPointer(Object** p) { |
| 2124 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p); | 2390 UpdatePointer(p); |
| 2125 } | 2391 } |
| 2126 | 2392 |
| 2127 void VisitPointers(Object** start, Object** end) { | 2393 void VisitPointers(Object** start, Object** end) { |
| 2128 for (Object** p = start; p < end; p++) { | 2394 for (Object** p = start; p < end; p++) UpdatePointer(p); |
| 2129 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p); | |
| 2130 } | |
| 2131 } | 2395 } |
| 2132 | 2396 |
| 2133 void VisitCodeTarget(RelocInfo* rinfo) { | 2397 void VisitCodeTarget(RelocInfo* rinfo) { |
| 2134 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); | 2398 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); |
| 2135 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); | 2399 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
| 2136 VisitPointer(&target); | 2400 VisitPointer(&target); |
| 2137 rinfo->set_target_address(Code::cast(target)->instruction_start()); | 2401 rinfo->set_target_address(Code::cast(target)->instruction_start()); |
| 2138 } | 2402 } |
| 2139 | 2403 |
| 2140 void VisitDebugTarget(RelocInfo* rinfo) { | 2404 void VisitDebugTarget(RelocInfo* rinfo) { |
| 2141 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && | 2405 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && |
| 2142 rinfo->IsPatchedReturnSequence()) || | 2406 rinfo->IsPatchedReturnSequence()) || |
| 2143 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && | 2407 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && |
| 2144 rinfo->IsPatchedDebugBreakSlotSequence())); | 2408 rinfo->IsPatchedDebugBreakSlotSequence())); |
| 2145 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); | 2409 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); |
| 2146 VisitPointer(&target); | 2410 VisitPointer(&target); |
| 2147 rinfo->set_call_address(Code::cast(target)->instruction_start()); | 2411 rinfo->set_call_address(Code::cast(target)->instruction_start()); |
| 2148 } | 2412 } |
| 2149 | 2413 |
| 2150 private: | 2414 private: |
| 2415 inline void UpdatePointer(Object** p) { |
| 2416 if (!(*p)->IsHeapObject()) return; |
| 2417 |
| 2418 HeapObject* obj = HeapObject::cast(*p); |
| 2419 |
| 2420 if (heap_->InNewSpace(obj) || |
| 2421 MarkCompactCollector::IsOnEvacuationCandidate(obj)) { |
| 2422 ASSERT(obj->map_word().IsForwardingAddress()); |
| 2423 *p = obj->map_word().ToForwardingAddress(); |
| 2424 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*p)); |
| 2425 } |
| 2426 } |
| 2427 |
| 2151 Heap* heap_; | 2428 Heap* heap_; |
| 2152 }; | 2429 }; |
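
The visitor's UpdatePointer relies on one convention: an object that has been moved keeps a forwarding address in its map word (map_word().IsForwardingAddress() above). Here is a tiny self-contained model of following such a forwarding word, using a hypothetical low-bit tag that is not V8's actual MapWord encoding:

#include <cstdint>

// Toy layout: the first word either describes the object or, once the object
// has been evacuated, holds the address of its new copy with the low bit set.
struct ToyObject {
  uintptr_t first_word;

  bool IsForwarded() const { return (first_word & 1) != 0; }
  ToyObject* ForwardingAddress() const {
    return reinterpret_cast<ToyObject*>(first_word & ~uintptr_t(1));
  }
  void SetForwardingAddress(ToyObject* target) {
    first_word = reinterpret_cast<uintptr_t>(target) | 1;
  }
};

// What UpdatePointer does, stripped of the new-space / evacuation-candidate
// filtering: follow the forwarding address if the target has moved.
void UpdateSlot(ToyObject** slot) {
  if (*slot != nullptr && (*slot)->IsForwarded()) {
    *slot = (*slot)->ForwardingAddress();
  }
}
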
| 2153 | 2430 |
| 2154 | 2431 |
| 2155 // Visitor for updating pointers from live objects in old spaces to new space. | 2432 static void UpdatePointer(HeapObject** p, HeapObject* object) { |
| 2156 // It can encounter pointers to dead objects in new space when traversing map | 2433 ASSERT(*p == object); |
| 2157 // space (see comment for MigrateObject). | |
| 2158 static void UpdatePointerToNewGen(HeapObject** p) { | |
| 2159 if (!(*p)->IsHeapObject()) return; | |
| 2160 | 2434 |
| 2161 Address old_addr = (*p)->address(); | 2435 Address old_addr = object->address(); |
| 2162 ASSERT(HEAP->InFromSpace(*p)); | |
| 2163 | 2436 |
| 2164 Address new_addr = Memory::Address_at(old_addr); | 2437 Address new_addr = Memory::Address_at(old_addr); |
| 2165 | 2438 |
| 2166 if (new_addr == NULL) { | 2439 // The new space sweep will overwrite the map word of dead objects |
| 2167 // We encountered pointer to a dead object. Clear it so we will | 2440 // with NULL. In this case we do not need to transfer this entry to |
| 2168 // not visit it again during next iteration of dirty regions. | 2441 // the store buffer which we are rebuilding. |
| 2169 *p = NULL; | 2442 if (new_addr != NULL) { |
| 2443 *p = HeapObject::FromAddress(new_addr); |
| 2170 } else { | 2444 } else { |
| 2171 *p = HeapObject::FromAddress(new_addr); | 2445 // We have to zap this pointer, because the store buffer may overflow later, |
| 2446 // and then we have to scan the entire heap and we don't want to find |
| 2447 // spurious new-space pointers in the old space. |
| 2448 *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0)); |
| 2172 } | 2449 } |
| 2173 } | 2450 } |
| 2174 | 2451 |
| 2175 | 2452 |
| 2176 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, | 2453 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap, |
| 2177 Object** p) { | 2454 Object** p) { |
| 2178 Address old_addr = HeapObject::cast(*p)->address(); | 2455 MapWord map_word = HeapObject::cast(*p)->map_word(); |
| 2179 Address new_addr = Memory::Address_at(old_addr); | 2456 |
| 2180 return String::cast(HeapObject::FromAddress(new_addr)); | 2457 if (map_word.IsForwardingAddress()) { |
| 2458 return String::cast(map_word.ToForwardingAddress()); |
| 2459 } |
| 2460 |
| 2461 return String::cast(*p); |
| 2181 } | 2462 } |
| 2182 | 2463 |
| 2183 | 2464 |
| 2184 static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) { | 2465 bool MarkCompactCollector::TryPromoteObject(HeapObject* object, |
| 2466 int object_size) { |
| 2185 Object* result; | 2467 Object* result; |
| 2186 | 2468 |
| 2187 if (object_size > heap->MaxObjectSizeInPagedSpace()) { | 2469 if (object_size > heap()->MaxObjectSizeInPagedSpace()) { |
| 2188 MaybeObject* maybe_result = | 2470 MaybeObject* maybe_result = |
| 2189 heap->lo_space()->AllocateRawFixedArray(object_size); | 2471 heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE); |
| 2190 if (maybe_result->ToObject(&result)) { | 2472 if (maybe_result->ToObject(&result)) { |
| 2191 HeapObject* target = HeapObject::cast(result); | 2473 HeapObject* target = HeapObject::cast(result); |
| 2192 MigrateObject(heap, target->address(), object->address(), object_size, | 2474 MigrateObject(target->address(), |
| 2193 true); | 2475 object->address(), |
| 2194 heap->mark_compact_collector()->tracer()-> | 2476 object_size, |
| 2477 LO_SPACE); |
| 2478 heap()->mark_compact_collector()->tracer()-> |
| 2195 increment_promoted_objects_size(object_size); | 2479 increment_promoted_objects_size(object_size); |
| 2196 return true; | 2480 return true; |
| 2197 } | 2481 } |
| 2198 } else { | 2482 } else { |
| 2199 OldSpace* target_space = heap->TargetSpace(object); | 2483 OldSpace* target_space = heap()->TargetSpace(object); |
| 2200 | 2484 |
| 2201 ASSERT(target_space == heap->old_pointer_space() || | 2485 ASSERT(target_space == heap()->old_pointer_space() || |
| 2202 target_space == heap->old_data_space()); | 2486 target_space == heap()->old_data_space()); |
| 2203 MaybeObject* maybe_result = target_space->AllocateRaw(object_size); | 2487 MaybeObject* maybe_result = target_space->AllocateRaw(object_size); |
| 2204 if (maybe_result->ToObject(&result)) { | 2488 if (maybe_result->ToObject(&result)) { |
| 2205 HeapObject* target = HeapObject::cast(result); | 2489 HeapObject* target = HeapObject::cast(result); |
| 2206 MigrateObject(heap, | 2490 MigrateObject(target->address(), |
| 2207 target->address(), | |
| 2208 object->address(), | 2491 object->address(), |
| 2209 object_size, | 2492 object_size, |
| 2210 target_space == heap->old_pointer_space()); | 2493 target_space->identity()); |
| 2211 heap->mark_compact_collector()->tracer()-> | 2494 heap()->mark_compact_collector()->tracer()-> |
| 2212 increment_promoted_objects_size(object_size); | 2495 increment_promoted_objects_size(object_size); |
| 2213 return true; | 2496 return true; |
| 2214 } | 2497 } |
| 2215 } | 2498 } |
| 2216 | 2499 |
| 2217 return false; | 2500 return false; |
| 2218 } | 2501 } |
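
The promotion policy above has two tiers: objects above the paged-space size limit go straight to the large-object space, and everything else goes to whichever old space heap()->TargetSpace picks. A compact sketch of just that decision, with a hypothetical enum and parameters rather than the V8 API:

enum PromotionTarget {
  LARGE_OBJECT_SPACE,
  OLD_POINTER_SPACE_TARGET,
  OLD_DATA_SPACE_TARGET
};

// Mirrors the branch structure of TryPromoteObject: size decides between the
// large-object space and a regular old space; whether the object may contain
// pointers decides which old space (what TargetSpace does in the diff).
PromotionTarget ChoosePromotionTarget(int object_size,
                                      int max_paged_object_size,
                                      bool may_contain_pointers) {
  if (object_size > max_paged_object_size) return LARGE_OBJECT_SPACE;
  return may_contain_pointers ? OLD_POINTER_SPACE_TARGET
                              : OLD_DATA_SPACE_TARGET;
}
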
| 2219 | 2502 |
| 2220 | 2503 |
| 2221 static void SweepNewSpace(Heap* heap, NewSpace* space) { | 2504 void MarkCompactCollector::EvacuateNewSpace() { |
| 2222 heap->CheckNewSpaceExpansionCriteria(); | 2505 heap()->CheckNewSpaceExpansionCriteria(); |
| 2223 | 2506 |
| 2224 Address from_bottom = space->bottom(); | 2507 NewSpace* new_space = heap()->new_space(); |
| 2225 Address from_top = space->top(); | 2508 |
| 2509 // Store allocation range before flipping semispaces. |
| 2510 Address from_bottom = new_space->bottom(); |
| 2511 Address from_top = new_space->top(); |
| 2226 | 2512 |
| 2227 // Flip the semispaces. After flipping, to space is empty, from space has | 2513 // Flip the semispaces. After flipping, to space is empty, from space has |
| 2228 // live objects. | 2514 // live objects. |
| 2229 space->Flip(); | 2515 new_space->Flip(); |
| 2230 space->ResetAllocationInfo(); | 2516 new_space->ResetAllocationInfo(); |
| 2231 | 2517 |
| 2232 int size = 0; | |
| 2233 int survivors_size = 0; | 2518 int survivors_size = 0; |
| 2234 | 2519 |
| 2235 // First pass: traverse all objects in inactive semispace, remove marks, | 2520 // First pass: traverse all objects in inactive semispace, remove marks, |
| 2236 // migrate live objects and write forwarding addresses. | 2521 // migrate live objects and write forwarding addresses. This stage puts |
| 2237 for (Address current = from_bottom; current < from_top; current += size) { | 2522 // new entries in the store buffer and may cause some pages to be marked |
| 2238 HeapObject* object = HeapObject::FromAddress(current); | 2523 // scan-on-scavenge. |
| 2239 | 2524 SemiSpaceIterator from_it(from_bottom, from_top); |
| 2240 if (object->IsMarked()) { | 2525 for (HeapObject* object = from_it.Next(); |
| 2241 object->ClearMark(); | 2526 object != NULL; |
| 2242 heap->mark_compact_collector()->tracer()->decrement_marked_count(); | 2527 object = from_it.Next()) { |
| 2243 | 2528 MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 2244 size = object->Size(); | 2529 if (mark_bit.Get()) { |
| 2530 mark_bit.Clear(); |
| 2531 int size = object->Size(); |
| 2245 survivors_size += size; | 2532 survivors_size += size; |
| 2533 MemoryChunk::IncrementLiveBytes(object->address(), -size); |
| 2246 | 2534 |
| 2247 // Aggressively promote young survivors to the old space. | 2535 // Aggressively promote young survivors to the old space. |
| 2248 if (TryPromoteObject(heap, object, size)) { | 2536 if (TryPromoteObject(object, size)) { |
| 2249 continue; | 2537 continue; |
| 2250 } | 2538 } |
| 2251 | 2539 |
| 2252 // Promotion failed. Just migrate object to another semispace. | 2540 // Promotion failed. Just migrate object to another semispace. |
| 2253 // Allocation cannot fail at this point: semispaces are of equal size. | 2541 MaybeObject* allocation = new_space->AllocateRaw(size); |
| 2254 Object* target = space->AllocateRaw(size)->ToObjectUnchecked(); | 2542 if (allocation->IsFailure()) { |
| 2255 | 2543 if (!new_space->AddFreshPage()) { |
| 2256 MigrateObject(heap, | 2544 // Shouldn't happen. We are sweeping linearly, and to-space |
| 2257 HeapObject::cast(target)->address(), | 2545 // has the same number of pages as from-space, so there is |
| 2258 current, | 2546 // always room. |
| 2547 UNREACHABLE(); |
| 2548 } |
| 2549 allocation = new_space->AllocateRaw(size); |
| 2550 ASSERT(!allocation->IsFailure()); |
| 2551 } |
| 2552 Object* target = allocation->ToObjectUnchecked(); |
| 2553 |
| 2554 MigrateObject(HeapObject::cast(target)->address(), |
| 2555 object->address(), |
| 2259 size, | 2556 size, |
| 2260 false); | 2557 NEW_SPACE); |
| 2261 } else { | 2558 } else { |
| 2262 // Process the dead object before we write a NULL into its header. | 2559 // Process the dead object before we write a NULL into its header. |
| 2263 LiveObjectList::ProcessNonLive(object); | 2560 LiveObjectList::ProcessNonLive(object); |
| 2264 | 2561 |
| 2265 size = object->Size(); | 2562 // Mark dead objects in the new space with null in their map field. |
| 2266 Memory::Address_at(current) = NULL; | 2563 Memory::Address_at(object->address()) = NULL; |
| 2267 } | 2564 } |
| 2268 } | 2565 } |
| 2566 |
| 2567 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
| 2568 new_space->set_age_mark(new_space->top()); |
| 2569 } |
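A minimal sketch of the allocate-or-add-a-fresh-page fallback used in the loop above, factored into a helper purely for readability (the helper itself is illustrative and not part of the sources; it only uses calls that already appear in the loop):

    static HeapObject* AllocateInToSpace(NewSpace* new_space, int size) {
      MaybeObject* allocation = new_space->AllocateRaw(size);
      if (allocation->IsFailure()) {
        // To-space has as many pages as from-space, so a fresh page must exist.
        if (!new_space->AddFreshPage()) UNREACHABLE();
        allocation = new_space->AllocateRaw(size);
        ASSERT(!allocation->IsFailure());
      }
      return HeapObject::cast(allocation->ToObjectUnchecked());
    }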
| 2570 |
| 2571 |
| 2572 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { |
| 2573 AlwaysAllocateScope always_allocate; |
| 2574 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 2575 ASSERT(p->IsEvacuationCandidate() && !p->WasSwept()); |
| 2576 MarkBit::CellType* cells = p->markbits()->cells(); |
| 2577 p->MarkSweptPrecisely(); |
| 2578 |
| 2579 int last_cell_index = |
| 2580 Bitmap::IndexToCell( |
| 2581 Bitmap::CellAlignIndex( |
| 2582 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
| 2583 |
| 2584 int cell_index = Page::kFirstUsedCell; |
| 2585 Address cell_base = p->ObjectAreaStart(); |
| 2586 int offsets[16]; |
| 2587 |
| 2588 for (cell_index = Page::kFirstUsedCell; |
| 2589 cell_index < last_cell_index; |
| 2590 cell_index++, cell_base += 32 * kPointerSize) { |
| 2591 ASSERT((unsigned)cell_index == |
| 2592 Bitmap::IndexToCell( |
| 2593 Bitmap::CellAlignIndex( |
| 2594 p->AddressToMarkbitIndex(cell_base)))); |
| 2595 if (cells[cell_index] == 0) continue; |
| 2596 |
| 2597 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); |
| 2598 for (int i = 0; i < live_objects; i++) { |
| 2599 Address object_addr = cell_base + offsets[i] * kPointerSize; |
| 2600 HeapObject* object = HeapObject::FromAddress(object_addr); |
| 2601 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 2602 |
| 2603 int size = object->Size(); |
| 2604 |
| 2605 MaybeObject* target = space->AllocateRaw(size); |
| 2606 if (target->IsFailure()) { |
| 2607 // OS refused to give us memory. |
| 2608 V8::FatalProcessOutOfMemory("Evacuation"); |
| 2609 return; |
| 2610 } |
| 2611 |
| 2612 Object* target_object = target->ToObjectUnchecked(); |
| 2613 |
| 2614 MigrateObject(HeapObject::cast(target_object)->address(), |
| 2615 object_addr, |
| 2616 size, |
| 2617 space->identity()); |
| 2618 ASSERT(object->map_word().IsForwardingAddress()); |
| 2619 } |
| 2620 |
| 2621 // Clear marking bits for current cell. |
| 2622 cells[cell_index] = 0; |
| 2623 } |
| 2624 } |
| 2625 |
| 2626 |
| 2627 void MarkCompactCollector::EvacuatePages() { |
| 2628 int npages = evacuation_candidates_.length(); |
| 2629 for (int i = 0; i < npages; i++) { |
| 2630 Page* p = evacuation_candidates_[i]; |
| 2631 ASSERT(p->IsEvacuationCandidate() || |
| 2632 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 2633 if (p->IsEvacuationCandidate()) { |
| 2634 // During compaction we might have to request a new page. |
| 2635 // Check that the space still has room for that. |
| 2636 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { |
| 2637 EvacuateLiveObjectsFromPage(p); |
| 2638 } else { |
| 2639 // Without room for expansion evacuation is not guaranteed to succeed. |
| 2640 // Pessimistically abandon unevacuated pages. |
| 2641 for (int j = i; j < npages; j++) { |
| 2642 evacuation_candidates_[j]->ClearEvacuationCandidate(); |
| 2643 evacuation_candidates_[j]->SetFlag(Page::RESCAN_ON_EVACUATION); |
| 2644 } |
| 2645 return; |
| 2646 } |
| 2647 } |
| 2648 } |
| 2649 } |
| 2650 |
| 2651 |
| 2652 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
| 2653 public: |
| 2654 virtual Object* RetainAs(Object* object) { |
| 2655 if (object->IsHeapObject()) { |
| 2656 HeapObject* heap_object = HeapObject::cast(object); |
| 2657 MapWord map_word = heap_object->map_word(); |
| 2658 if (map_word.IsForwardingAddress()) { |
| 2659 return map_word.ToForwardingAddress(); |
| 2660 } |
| 2661 } |
| 2662 return object; |
| 2663 } |
| 2664 }; |
| 2665 |
| 2666 |
| 2667 static inline void UpdateSlot(Object** slot) { |
| 2668 Object* obj = *slot; |
| 2669 if (!obj->IsHeapObject()) return; |
| 2670 |
| 2671 HeapObject* heap_obj = HeapObject::cast(obj); |
| 2672 |
| 2673 MapWord map_word = heap_obj->map_word(); |
| 2674 if (map_word.IsForwardingAddress()) { |
| 2675 ASSERT(MarkCompactCollector::IsOnEvacuationCandidate(*slot)); |
| 2676 *slot = map_word.ToForwardingAddress(); |
| 2677 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot)); |
| 2678 } |
| 2679 } |
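UpdateSlot above depends on the forwarding protocol used throughout evacuation: once an object has been migrated, the map word of its old copy holds the new address. A minimal sketch of that protocol, assuming only the MapWord calls already used above (the helper name is illustrative):

    static inline HeapObject* ForwardedCopy(HeapObject* old_copy) {
      MapWord map_word = old_copy->map_word();
      if (map_word.IsForwardingAddress()) return map_word.ToForwardingAddress();
      return old_copy;  // The object was not moved.
    }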
| 2680 |
| 2681 |
| 2682 static inline void UpdateSlot(ObjectVisitor* v, |
| 2683 SlotsBuffer::SlotType slot_type, |
| 2684 Address addr) { |
| 2685 switch (slot_type) { |
| 2686 case SlotsBuffer::CODE_TARGET_SLOT: { |
| 2687 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, NULL, NULL); |
| 2688 rinfo.Visit(v); |
| 2689 break; |
| 2690 } |
| 2691 case SlotsBuffer::CODE_ENTRY_SLOT: { |
| 2692 v->VisitCodeEntry(addr); |
| 2693 break; |
| 2694 } |
| 2695 case SlotsBuffer::RELOCATED_CODE_OBJECT: { |
| 2696 HeapObject* obj = HeapObject::FromAddress(addr); |
| 2697 Code::cast(obj)->CodeIterateBody(v); |
| 2698 break; |
| 2699 } |
| 2700 case SlotsBuffer::DEBUG_TARGET_SLOT: { |
| 2701 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, NULL, NULL); |
| 2702 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v); |
| 2703 break; |
| 2704 } |
| 2705 case SlotsBuffer::JS_RETURN_SLOT: { |
| 2706 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, NULL, NULL); |
| 2707 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v); |
| 2708 break; |
| 2709 } |
| 2710 default: |
| 2711 UNREACHABLE(); |
| 2712 break; |
| 2713 } |
| 2714 } |
| 2715 |
| 2716 |
| 2717 static inline void UpdateSlotsInRange(Object** start, Object** end) { |
| 2718 for (Object** slot = start; |
| 2719 slot < end; |
| 2720 slot++) { |
| 2721 Object* obj = *slot; |
| 2722 if (obj->IsHeapObject() && |
| 2723 MarkCompactCollector::IsOnEvacuationCandidate(obj)) { |
| 2724 MapWord map_word = HeapObject::cast(obj)->map_word(); |
| 2725 if (map_word.IsForwardingAddress()) { |
| 2726 *slot = map_word.ToForwardingAddress(); |
| 2727 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot)); |
| 2728 } |
| 2729 } |
| 2730 } |
| 2731 } |
| 2732 |
| 2733 |
| 2734 enum SweepingMode { |
| 2735 SWEEP_ONLY, |
| 2736 SWEEP_AND_VISIT_LIVE_OBJECTS |
| 2737 }; |
| 2738 |
| 2739 |
| 2740 enum SkipListRebuildingMode { |
| 2741 REBUILD_SKIP_LIST, |
| 2742 IGNORE_SKIP_LIST |
| 2743 }; |
| 2744 |
| 2745 |
| 2746 // Sweep a space precisely. After this has been done the space can |
| 2747 // be iterated precisely, hitting only the live objects. Code space |
| 2748 // is always swept precisely because we want to be able to iterate |
| 2749 // over it. Map space is swept precisely, because it is not compacted. |
| 2750 // Slots in live objects pointing into evacuation candidates are updated |
| 2751 // if requested. |
| 2752 template<SkipListRebuildingMode skip_list_mode> |
| 2753 static void SweepPrecisely(PagedSpace* space, |
| 2754 Page* p, |
| 2755 SweepingMode mode, |
| 2756 ObjectVisitor* v) { |
| 2757 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| 2758 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, |
| 2759 space->identity() == CODE_SPACE); |
| 2760 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
| 2761 |
| 2762 MarkBit::CellType* cells = p->markbits()->cells(); |
| 2763 p->MarkSweptPrecisely(); |
| 2764 |
| 2765 int last_cell_index = |
| 2766 Bitmap::IndexToCell( |
| 2767 Bitmap::CellAlignIndex( |
| 2768 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
| 2769 |
| 2770 int cell_index = Page::kFirstUsedCell; |
| 2771 Address free_start = p->ObjectAreaStart(); |
| 2772 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| 2773 Address object_address = p->ObjectAreaStart(); |
| 2774 int offsets[16]; |
| 2775 |
| 2776 SkipList* skip_list = p->skip_list(); |
| 2777 int curr_region = -1; |
| 2778 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { |
| 2779 skip_list->Clear(); |
| 2780 } |
| 2781 |
| 2782 for (cell_index = Page::kFirstUsedCell; |
| 2783 cell_index < last_cell_index; |
| 2784 cell_index++, object_address += 32 * kPointerSize) { |
| 2785 ASSERT((unsigned)cell_index == |
| 2786 Bitmap::IndexToCell( |
| 2787 Bitmap::CellAlignIndex( |
| 2788 p->AddressToMarkbitIndex(object_address)))); |
| 2789 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); |
| 2790 int live_index = 0; |
| 2791 for ( ; live_objects != 0; live_objects--) { |
| 2792 Address free_end = object_address + offsets[live_index++] * kPointerSize; |
| 2793 if (free_end != free_start) { |
| 2794 space->Free(free_start, static_cast<int>(free_end - free_start)); |
| 2795 } |
| 2796 HeapObject* live_object = HeapObject::FromAddress(free_end); |
| 2797 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); |
| 2798 Map* map = live_object->map(); |
| 2799 int size = live_object->SizeFromMap(map); |
| 2800 if (mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { |
| 2801 live_object->IterateBody(map->instance_type(), size, v); |
| 2802 } |
| 2803 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { |
| 2804 int new_region_start = |
| 2805 SkipList::RegionNumber(free_end); |
| 2806 int new_region_end = |
| 2807 SkipList::RegionNumber(free_end + size - kPointerSize); |
| 2808 if (new_region_start != curr_region || |
| 2809 new_region_end != curr_region) { |
| 2810 skip_list->AddObject(free_end, size); |
| 2811 curr_region = new_region_end; |
| 2812 } |
| 2813 } |
| 2814 free_start = free_end + size; |
| 2815 } |
| 2816 // Clear marking bits for current cell. |
| 2817 cells[cell_index] = 0; |
| 2818 } |
| 2819 if (free_start != p->ObjectAreaEnd()) { |
| 2820 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); |
| 2821 } |
| 2822 } |
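A stand-alone model of the gap accounting SweepPrecisely performs: every range between consecutive live objects, plus the tail after the last one, is handed to the free list. The structs and function below are illustrative only and use plain offsets rather than V8 types:

    #include <cstddef>
    #include <vector>

    struct LiveObjectSketch { size_t start; size_t size; };  // sorted by start
    struct FreeRangeSketch { size_t start; size_t size; };

    static std::vector<FreeRangeSketch> ComputeFreeRanges(
        size_t area_start,
        size_t area_end,
        const std::vector<LiveObjectSketch>& live) {
      std::vector<FreeRangeSketch> result;
      size_t free_start = area_start;
      for (size_t i = 0; i < live.size(); i++) {
        if (live[i].start != free_start) {
          // Gap between the previous object (or the area start) and this one.
          FreeRangeSketch gap = { free_start, live[i].start - free_start };
          result.push_back(gap);
        }
        free_start = live[i].start + live[i].size;
      }
      if (free_start != area_end) {
        FreeRangeSketch tail = { free_start, area_end - free_start };
        result.push_back(tail);
      }
      return result;
    }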
| 2823 |
| 2824 |
| 2825 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
| 2826 EvacuateNewSpace(); |
| 2827 EvacuatePages(); |
| 2269 | 2828 |
| 2270 // Second pass: find pointers to new space and update them. | 2829 // Second pass: find pointers to new space and update them. |
| 2271 PointersToNewGenUpdatingVisitor updating_visitor(heap); | 2830 PointersUpdatingVisitor updating_visitor(heap()); |
| 2272 | 2831 |
| 2273 // Update pointers in to space. | 2832 // Update pointers in to space. |
| 2274 Address current = space->bottom(); | 2833 SemiSpaceIterator to_it(heap()->new_space()->bottom(), |
| 2275 while (current < space->top()) { | 2834 heap()->new_space()->top()); |
| 2276 HeapObject* object = HeapObject::FromAddress(current); | 2835 for (HeapObject* object = to_it.Next(); |
| 2277 current += | 2836 object != NULL; |
| 2278 StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(), | 2837 object = to_it.Next()) { |
| 2279 object); | 2838 Map* map = object->map(); |
| 2839 object->IterateBody(map->instance_type(), |
| 2840 object->SizeFromMap(map), |
| 2841 &updating_visitor); |
| 2280 } | 2842 } |
| 2281 | 2843 |
| 2282 // Update roots. | 2844 // Update roots. |
| 2283 heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 2845 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
| 2284 LiveObjectList::IterateElements(&updating_visitor); | 2846 LiveObjectList::IterateElements(&updating_visitor); |
| 2285 | 2847 |
| 2286 // Update pointers in old spaces. | 2848 { |
| 2287 heap->IterateDirtyRegions(heap->old_pointer_space(), | 2849 StoreBufferRebuildScope scope(heap_, |
| 2288 &Heap::IteratePointersInDirtyRegion, | 2850 heap_->store_buffer(), |
| 2289 &UpdatePointerToNewGen, | 2851 &Heap::ScavengeStoreBufferCallback); |
| 2290 heap->WATERMARK_SHOULD_BE_VALID); | 2852 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); |
| 2291 | 2853 } |
| 2292 heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen); | 2854 |
| 2855 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_); |
| 2856 if (FLAG_trace_fragmentation) { |
| 2857 PrintF(" migration slots buffer: %d\n", |
| 2858 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); |
| 2859 } |
| 2860 |
| 2861 int npages = evacuation_candidates_.length(); |
| 2862 for (int i = 0; i < npages; i++) { |
| 2863 Page* p = evacuation_candidates_[i]; |
| 2864 ASSERT(p->IsEvacuationCandidate() || |
| 2865 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 2866 |
| 2867 if (p->IsEvacuationCandidate()) { |
| 2868 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer()); |
| 2869 if (FLAG_trace_fragmentation) { |
| 2870 PrintF(" page %p slots buffer: %d\n", |
| 2871 reinterpret_cast<void*>(p), |
| 2872 SlotsBuffer::SizeOfChain(p->slots_buffer())); |
| 2873 } |
| 2874 |
| 2875 // Important: the skip list should be cleared only after the roots have been |
| 2876 // updated, because root iteration traverses the stack and might have to find |
| 2877 // code objects via a non-updated pc pointing into an evacuation candidate. |
| 2878 SkipList* list = p->skip_list(); |
| 2879 if (list != NULL) list->Clear(); |
| 2880 } else { |
| 2881 if (FLAG_gc_verbose) { |
| 2882 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
| 2883 reinterpret_cast<intptr_t>(p)); |
| 2884 } |
| 2885 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 2886 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
| 2887 |
| 2888 switch (space->identity()) { |
| 2889 case OLD_DATA_SPACE: |
| 2890 SweepConservatively(space, p); |
| 2891 break; |
| 2892 case OLD_POINTER_SPACE: |
| 2893 SweepPrecisely<IGNORE_SKIP_LIST>(space, |
| 2894 p, |
| 2895 SWEEP_AND_VISIT_LIVE_OBJECTS, |
| 2896 &updating_visitor); |
| 2897 break; |
| 2898 case CODE_SPACE: |
| 2899 SweepPrecisely<REBUILD_SKIP_LIST>(space, |
| 2900 p, |
| 2901 SWEEP_AND_VISIT_LIVE_OBJECTS, |
| 2902 &updating_visitor); |
| 2903 break; |
| 2904 default: |
| 2905 UNREACHABLE(); |
| 2906 break; |
| 2907 } |
| 2908 } |
| 2909 } |
| 2293 | 2910 |
| 2294 // Update pointers from cells. | 2911 // Update pointers from cells. |
| 2295 HeapObjectIterator cell_iterator(heap->cell_space()); | 2912 HeapObjectIterator cell_iterator(heap_->cell_space()); |
| 2296 for (HeapObject* cell = cell_iterator.next(); | 2913 for (HeapObject* cell = cell_iterator.Next(); |
| 2297 cell != NULL; | 2914 cell != NULL; |
| 2298 cell = cell_iterator.next()) { | 2915 cell = cell_iterator.Next()) { |
| 2299 if (cell->IsJSGlobalPropertyCell()) { | 2916 if (cell->IsJSGlobalPropertyCell()) { |
| 2300 Address value_address = | 2917 Address value_address = |
| 2301 reinterpret_cast<Address>(cell) + | 2918 reinterpret_cast<Address>(cell) + |
| 2302 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); | 2919 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
| 2303 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); | 2920 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); |
| 2304 } | 2921 } |
| 2305 } | 2922 } |
| 2306 | 2923 |
| 2307 // Update pointer from the global contexts list. | 2924 // Update pointer from the global contexts list. |
| 2308 updating_visitor.VisitPointer(heap->global_contexts_list_address()); | 2925 updating_visitor.VisitPointer(heap_->global_contexts_list_address()); |
| 2926 |
| 2927 heap_->symbol_table()->Iterate(&updating_visitor); |
| 2309 | 2928 |
| 2310 // Update pointers from external string table. | 2929 // Update pointers from external string table. |
| 2311 heap->UpdateNewSpaceReferencesInExternalStringTable( | 2930 heap_->UpdateReferencesInExternalStringTable( |
| 2312 &UpdateNewSpaceReferenceInExternalStringTableEntry); | 2931 &UpdateReferenceInExternalStringTableEntry); |
| 2313 | |
| 2314 // All pointers were updated. Update auxiliary allocation info. | |
| 2315 heap->IncrementYoungSurvivorsCounter(survivors_size); | |
| 2316 space->set_age_mark(space->top()); | |
| 2317 | 2932 |
| 2318 // Update JSFunction pointers from the runtime profiler. | 2933 // Update JSFunction pointers from the runtime profiler. |
| 2319 heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge(); | 2934 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( |
| 2320 } | 2935 &updating_visitor); |
| 2321 | 2936 |
| 2322 | 2937 EvacuationWeakObjectRetainer evacuation_object_retainer; |
| 2323 static void SweepSpace(Heap* heap, PagedSpace* space) { | 2938 heap()->ProcessWeakReferences(&evacuation_object_retainer); |
| 2324 PageIterator it(space, PageIterator::PAGES_IN_USE); | 2939 |
| 2325 | 2940 #ifdef DEBUG |
| 2326 // During sweeping of paged space we are trying to find longest sequences | 2941 if (FLAG_verify_heap) { |
| 2327 // of pages without live objects and free them (instead of putting them on | 2942 VerifyEvacuation(heap_); |
| 2328 // the free list). | 2943 } |
| 2329 | 2944 #endif |
| 2330 // Page preceding current. | 2945 |
| 2331 Page* prev = Page::FromAddress(NULL); | 2946 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); |
| 2332 | 2947 ASSERT(migration_slots_buffer_ == NULL); |
| 2333 // First empty page in a sequence. | 2948 for (int i = 0; i < npages; i++) { |
| 2334 Page* first_empty_page = Page::FromAddress(NULL); | 2949 Page* p = evacuation_candidates_[i]; |
| 2335 | 2950 if (!p->IsEvacuationCandidate()) continue; |
| 2336 // Page preceding first empty page. | 2951 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 2337 Page* prec_first_empty_page = Page::FromAddress(NULL); | 2952 space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize); |
| 2338 | 2953 p->set_scan_on_scavenge(false); |
| 2339 // If last used page of space ends with a sequence of dead objects | 2954 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); |
| 2340 // we can adjust allocation top instead of puting this free area into | 2955 p->ClearEvacuationCandidate(); |
| 2341 // the free list. Thus during sweeping we keep track of such areas | 2956 } |
| 2342 // and defer their deallocation until the sweeping of the next page | 2957 evacuation_candidates_.Rewind(0); |
| 2343 // is done: if one of the next pages contains live objects we have | 2958 compacting_ = false; |
| 2344 // to put such area into the free list. | 2959 } |
| 2345 Address last_free_start = NULL; | 2960 |
| 2346 int last_free_size = 0; | 2961 |
| 2962 INLINE(static uint32_t SweepFree(PagedSpace* space, |
| 2963 Page* p, |
| 2964 uint32_t free_start, |
| 2965 uint32_t region_end, |
| 2966 uint32_t* cells)); |
| 2967 |
| 2968 |
| 2969 static uint32_t SweepFree(PagedSpace* space, |
| 2970 Page* p, |
| 2971 uint32_t free_start, |
| 2972 uint32_t region_end, |
| 2973 uint32_t* cells) { |
| 2974 uint32_t free_cell_index = Bitmap::IndexToCell(free_start); |
| 2975 ASSERT(cells[free_cell_index] == 0); |
| 2976 while (free_cell_index < region_end && cells[free_cell_index] == 0) { |
| 2977 free_cell_index++; |
| 2978 } |
| 2979 |
| 2980 if (free_cell_index >= region_end) { |
| 2981 return free_cell_index; |
| 2982 } |
| 2983 |
| 2984 uint32_t free_end = Bitmap::CellToIndex(free_cell_index); |
| 2985 space->FreeOrUnmapPage(p, |
| 2986 p->MarkbitIndexToAddress(free_start), |
| 2987 (free_end - free_start) << kPointerSizeLog2); |
| 2988 |
| 2989 return free_cell_index; |
| 2990 } |
| 2991 |
| 2992 |
| 2993 INLINE(static uint32_t NextCandidate(uint32_t cell_index, |
| 2994 uint32_t last_cell_index, |
| 2995 uint32_t* cells)); |
| 2996 |
| 2997 |
| 2998 static uint32_t NextCandidate(uint32_t cell_index, |
| 2999 uint32_t last_cell_index, |
| 3000 uint32_t* cells) { |
| 3001 do { |
| 3002 cell_index++; |
| 3003 } while (cell_index < last_cell_index && cells[cell_index] != 0); |
| 3004 return cell_index; |
| 3005 } |
| 3006 |
| 3007 |
| 3008 static const int kStartTableEntriesPerLine = 5; |
| 3009 static const int kStartTableLines = 171; |
| 3010 static const int kStartTableInvalidLine = 127; |
| 3011 static const int kStartTableUnusedEntry = 126; |
| 3012 |
| 3013 #define _ kStartTableUnusedEntry |
| 3014 #define X kStartTableInvalidLine |
| 3015 // Mark-bit to object start offset table. |
| 3016 // |
| 3017 // The line is indexed by the mark bits in a byte. The first number on |
| 3018 // the line describes the number of live object starts for the line and the |
| 3019 // other numbers on the line describe the offsets (in words) of the object |
| 3020 // starts. |
| 3021 // |
| 3022 // Since objects are at least two words long, there are no entries for two |
| 3023 // consecutive 1 bits. All byte values above 170 contain at least two consecutive 1 bits. |
| 3024 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = { |
| 3025 0, _, _, _, _, // 0 |
| 3026 1, 0, _, _, _, // 1 |
| 3027 1, 1, _, _, _, // 2 |
| 3028 X, _, _, _, _, // 3 |
| 3029 1, 2, _, _, _, // 4 |
| 3030 2, 0, 2, _, _, // 5 |
| 3031 X, _, _, _, _, // 6 |
| 3032 X, _, _, _, _, // 7 |
| 3033 1, 3, _, _, _, // 8 |
| 3034 2, 0, 3, _, _, // 9 |
| 3035 2, 1, 3, _, _, // 10 |
| 3036 X, _, _, _, _, // 11 |
| 3037 X, _, _, _, _, // 12 |
| 3038 X, _, _, _, _, // 13 |
| 3039 X, _, _, _, _, // 14 |
| 3040 X, _, _, _, _, // 15 |
| 3041 1, 4, _, _, _, // 16 |
| 3042 2, 0, 4, _, _, // 17 |
| 3043 2, 1, 4, _, _, // 18 |
| 3044 X, _, _, _, _, // 19 |
| 3045 2, 2, 4, _, _, // 20 |
| 3046 3, 0, 2, 4, _, // 21 |
| 3047 X, _, _, _, _, // 22 |
| 3048 X, _, _, _, _, // 23 |
| 3049 X, _, _, _, _, // 24 |
| 3050 X, _, _, _, _, // 25 |
| 3051 X, _, _, _, _, // 26 |
| 3052 X, _, _, _, _, // 27 |
| 3053 X, _, _, _, _, // 28 |
| 3054 X, _, _, _, _, // 29 |
| 3055 X, _, _, _, _, // 30 |
| 3056 X, _, _, _, _, // 31 |
| 3057 1, 5, _, _, _, // 32 |
| 3058 2, 0, 5, _, _, // 33 |
| 3059 2, 1, 5, _, _, // 34 |
| 3060 X, _, _, _, _, // 35 |
| 3061 2, 2, 5, _, _, // 36 |
| 3062 3, 0, 2, 5, _, // 37 |
| 3063 X, _, _, _, _, // 38 |
| 3064 X, _, _, _, _, // 39 |
| 3065 2, 3, 5, _, _, // 40 |
| 3066 3, 0, 3, 5, _, // 41 |
| 3067 3, 1, 3, 5, _, // 42 |
| 3068 X, _, _, _, _, // 43 |
| 3069 X, _, _, _, _, // 44 |
| 3070 X, _, _, _, _, // 45 |
| 3071 X, _, _, _, _, // 46 |
| 3072 X, _, _, _, _, // 47 |
| 3073 X, _, _, _, _, // 48 |
| 3074 X, _, _, _, _, // 49 |
| 3075 X, _, _, _, _, // 50 |
| 3076 X, _, _, _, _, // 51 |
| 3077 X, _, _, _, _, // 52 |
| 3078 X, _, _, _, _, // 53 |
| 3079 X, _, _, _, _, // 54 |
| 3080 X, _, _, _, _, // 55 |
| 3081 X, _, _, _, _, // 56 |
| 3082 X, _, _, _, _, // 57 |
| 3083 X, _, _, _, _, // 58 |
| 3084 X, _, _, _, _, // 59 |
| 3085 X, _, _, _, _, // 60 |
| 3086 X, _, _, _, _, // 61 |
| 3087 X, _, _, _, _, // 62 |
| 3088 X, _, _, _, _, // 63 |
| 3089 1, 6, _, _, _, // 64 |
| 3090 2, 0, 6, _, _, // 65 |
| 3091 2, 1, 6, _, _, // 66 |
| 3092 X, _, _, _, _, // 67 |
| 3093 2, 2, 6, _, _, // 68 |
| 3094 3, 0, 2, 6, _, // 69 |
| 3095 X, _, _, _, _, // 70 |
| 3096 X, _, _, _, _, // 71 |
| 3097 2, 3, 6, _, _, // 72 |
| 3098 3, 0, 3, 6, _, // 73 |
| 3099 3, 1, 3, 6, _, // 74 |
| 3100 X, _, _, _, _, // 75 |
| 3101 X, _, _, _, _, // 76 |
| 3102 X, _, _, _, _, // 77 |
| 3103 X, _, _, _, _, // 78 |
| 3104 X, _, _, _, _, // 79 |
| 3105 2, 4, 6, _, _, // 80 |
| 3106 3, 0, 4, 6, _, // 81 |
| 3107 3, 1, 4, 6, _, // 82 |
| 3108 X, _, _, _, _, // 83 |
| 3109 3, 2, 4, 6, _, // 84 |
| 3110 4, 0, 2, 4, 6, // 85 |
| 3111 X, _, _, _, _, // 86 |
| 3112 X, _, _, _, _, // 87 |
| 3113 X, _, _, _, _, // 88 |
| 3114 X, _, _, _, _, // 89 |
| 3115 X, _, _, _, _, // 90 |
| 3116 X, _, _, _, _, // 91 |
| 3117 X, _, _, _, _, // 92 |
| 3118 X, _, _, _, _, // 93 |
| 3119 X, _, _, _, _, // 94 |
| 3120 X, _, _, _, _, // 95 |
| 3121 X, _, _, _, _, // 96 |
| 3122 X, _, _, _, _, // 97 |
| 3123 X, _, _, _, _, // 98 |
| 3124 X, _, _, _, _, // 99 |
| 3125 X, _, _, _, _, // 100 |
| 3126 X, _, _, _, _, // 101 |
| 3127 X, _, _, _, _, // 102 |
| 3128 X, _, _, _, _, // 103 |
| 3129 X, _, _, _, _, // 104 |
| 3130 X, _, _, _, _, // 105 |
| 3131 X, _, _, _, _, // 106 |
| 3132 X, _, _, _, _, // 107 |
| 3133 X, _, _, _, _, // 108 |
| 3134 X, _, _, _, _, // 109 |
| 3135 X, _, _, _, _, // 110 |
| 3136 X, _, _, _, _, // 111 |
| 3137 X, _, _, _, _, // 112 |
| 3138 X, _, _, _, _, // 113 |
| 3139 X, _, _, _, _, // 114 |
| 3140 X, _, _, _, _, // 115 |
| 3141 X, _, _, _, _, // 116 |
| 3142 X, _, _, _, _, // 117 |
| 3143 X, _, _, _, _, // 118 |
| 3144 X, _, _, _, _, // 119 |
| 3145 X, _, _, _, _, // 120 |
| 3146 X, _, _, _, _, // 121 |
| 3147 X, _, _, _, _, // 122 |
| 3148 X, _, _, _, _, // 123 |
| 3149 X, _, _, _, _, // 124 |
| 3150 X, _, _, _, _, // 125 |
| 3151 X, _, _, _, _, // 126 |
| 3152 X, _, _, _, _, // 127 |
| 3153 1, 7, _, _, _, // 128 |
| 3154 2, 0, 7, _, _, // 129 |
| 3155 2, 1, 7, _, _, // 130 |
| 3156 X, _, _, _, _, // 131 |
| 3157 2, 2, 7, _, _, // 132 |
| 3158 3, 0, 2, 7, _, // 133 |
| 3159 X, _, _, _, _, // 134 |
| 3160 X, _, _, _, _, // 135 |
| 3161 2, 3, 7, _, _, // 136 |
| 3162 3, 0, 3, 7, _, // 137 |
| 3163 3, 1, 3, 7, _, // 138 |
| 3164 X, _, _, _, _, // 139 |
| 3165 X, _, _, _, _, // 140 |
| 3166 X, _, _, _, _, // 141 |
| 3167 X, _, _, _, _, // 142 |
| 3168 X, _, _, _, _, // 143 |
| 3169 2, 4, 7, _, _, // 144 |
| 3170 3, 0, 4, 7, _, // 145 |
| 3171 3, 1, 4, 7, _, // 146 |
| 3172 X, _, _, _, _, // 147 |
| 3173 3, 2, 4, 7, _, // 148 |
| 3174 4, 0, 2, 4, 7, // 149 |
| 3175 X, _, _, _, _, // 150 |
| 3176 X, _, _, _, _, // 151 |
| 3177 X, _, _, _, _, // 152 |
| 3178 X, _, _, _, _, // 153 |
| 3179 X, _, _, _, _, // 154 |
| 3180 X, _, _, _, _, // 155 |
| 3181 X, _, _, _, _, // 156 |
| 3182 X, _, _, _, _, // 157 |
| 3183 X, _, _, _, _, // 158 |
| 3184 X, _, _, _, _, // 159 |
| 3185 2, 5, 7, _, _, // 160 |
| 3186 3, 0, 5, 7, _, // 161 |
| 3187 3, 1, 5, 7, _, // 162 |
| 3188 X, _, _, _, _, // 163 |
| 3189 3, 2, 5, 7, _, // 164 |
| 3190 4, 0, 2, 5, 7, // 165 |
| 3191 X, _, _, _, _, // 166 |
| 3192 X, _, _, _, _, // 167 |
| 3193 3, 3, 5, 7, _, // 168 |
| 3194 4, 0, 3, 5, 7, // 169 |
| 3195 4, 1, 3, 5, 7 // 170 |
| 3196 }; |
| 3197 #undef _ |
| 3198 #undef X |
| 3199 |
| 3200 |
| 3201 // Takes a word of mark bits. Returns the number of objects that start in the |
| 3202 // range. Puts the word offsets of the object starts in the supplied array. |
| 3203 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { |
| 3204 int objects = 0; |
| 3205 int offset = 0; |
| 3206 |
| 3207 // No consecutive 1 bits. |
| 3208 ASSERT((mark_bits & 0x180) != 0x180); |
| 3209 ASSERT((mark_bits & 0x18000) != 0x18000); |
| 3210 ASSERT((mark_bits & 0x1800000) != 0x1800000); |
| 3211 |
| 3212 while (mark_bits != 0) { |
| 3213 int byte = (mark_bits & 0xff); |
| 3214 mark_bits >>= 8; |
| 3215 if (byte != 0) { |
| 3216 ASSERT(byte < kStartTableLines); // No consecutive 1 bits. |
| 3217 char* table = kStartTable + byte * kStartTableEntriesPerLine; |
| 3218 int objects_in_these_8_words = table[0]; |
| 3219 ASSERT(objects_in_these_8_words != kStartTableInvalidLine); |
| 3220 ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine); |
| 3221 for (int i = 0; i < objects_in_these_8_words; i++) { |
| 3222 starts[objects++] = offset + table[1 + i]; |
| 3223 } |
| 3224 } |
| 3225 offset += 8; |
| 3226 } |
| 3227 return objects; |
| 3228 } |
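A table-free sketch of what a single kStartTable row encodes: for one byte of mark bits, the positions of the set bits are the word offsets of the object starts within that byte's eight-word span. kStartTable is this loop precomputed for every legal byte value (byte values that would require adjacent 1 bits are marked invalid). The helper name is illustrative:

    static int ByteToObjectStartsSketch(unsigned char mark_byte, int* starts) {
      int count = 0;
      for (int bit = 0; bit < 8; bit++) {
        if (mark_byte & (1 << bit)) starts[count++] = bit;
      }
      return count;
    }
    // Example: mark_byte = 0x15 (binary 00010101) yields {0, 2, 4}, matching
    // the kStartTable line for 21: "3, 0, 2, 4".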
| 3229 |
| 3230 |
| 3231 static inline Address DigestFreeStart(Address approximate_free_start, |
| 3232 uint32_t free_start_cell) { |
| 3233 ASSERT(free_start_cell != 0); |
| 3234 |
| 3235 int offsets[16]; |
| 3236 uint32_t cell = free_start_cell; |
| 3237 int offset_of_last_live; |
| 3238 if ((cell & 0x80000000u) != 0) { |
| 3239 // This case would overflow below. |
| 3240 offset_of_last_live = 31; |
| 3241 } else { |
| 3242 // Remove all but one bit, the most significant. This is an optimization |
| 3243 // that may or may not be worthwhile. |
| 3244 cell |= cell >> 16; |
| 3245 cell |= cell >> 8; |
| 3246 cell |= cell >> 4; |
| 3247 cell |= cell >> 2; |
| 3248 cell |= cell >> 1; |
| 3249 cell = (cell + 1) >> 1; |
| 3250 int live_objects = MarkWordToObjectStarts(cell, offsets); |
| 3251 ASSERT(live_objects == 1); |
| 3252 offset_of_last_live = offsets[live_objects - 1]; |
| 3253 } |
| 3254 Address last_live_start = |
| 3255 approximate_free_start + offset_of_last_live * kPointerSize; |
| 3256 HeapObject* last_live = HeapObject::FromAddress(last_live_start); |
| 3257 Address free_start = last_live_start + last_live->Size(); |
| 3258 return free_start; |
| 3259 } |
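The shift cascade in DigestFreeStart keeps only the most significant set bit of the cell, which identifies the last live object covered by that cell. A stand-alone equivalent, shown purely for illustration, including the overflow that explains the 0x80000000u special case above:

    #include <stdint.h>

    static uint32_t HighestSetBitSketch(uint32_t cell) {
      cell |= cell >> 16;
      cell |= cell >> 8;
      cell |= cell >> 4;
      cell |= cell >> 2;
      cell |= cell >> 1;       // every bit below the MSB is now set as well
      return (cell + 1) >> 1;  // e.g. 0x00240000 -> 0x003FFFFF -> 0x00200000
    }
    // For cell = 0x80000000u the addition wraps around to 0, which is why
    // DigestFreeStart handles that value separately.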
| 3260 |
| 3261 |
| 3262 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { |
| 3263 ASSERT(cell != 0); |
| 3264 |
| 3265 int offsets[16]; |
| 3266 if (cell == 0x80000000u) { // Avoid overflow below. |
| 3267 return block_address + 31 * kPointerSize; |
| 3268 } |
| 3269 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; |
| 3270 ASSERT((first_set_bit & cell) == first_set_bit); |
| 3271 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); |
| 3272 ASSERT(live_objects == 1); |
| 3273 USE(live_objects); |
| 3274 return block_address + offsets[0] * kPointerSize; |
| 3275 } |
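The expression ((cell ^ (cell - 1)) + 1) >> 1 above isolates the lowest set bit of the cell, i.e. the first live object in the block. An equivalent two's-complement form, shown only for illustration (it also happens to tolerate 0x80000000u without a special case):

    #include <stdint.h>

    static uint32_t LowestSetBitSketch(uint32_t cell) {
      return cell & (0u - cell);  // e.g. cell = 0x68 (01101000) yields 0x08
    }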
| 3276 |
| 3277 |
| 3278 // Sweeps a space conservatively. After this has been done the larger free |
| 3279 // spaces have been put on the free list and the smaller ones have been |
| 3280 // ignored and left untouched. A free space is always either ignored or put |
| 3281 // on the free list, never split up into two parts. This is important |
| 3282 // because it means that any FreeSpace maps left actually describe a region of |
| 3283 // memory that can be ignored when scanning. Dead objects other than free |
| 3284 // spaces will not contain the free space map. |
| 3285 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { |
| 3286 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3287 MarkBit::CellType* cells = p->markbits()->cells(); |
| 3288 p->MarkSweptConservatively(); |
| 3289 |
| 3290 int last_cell_index = |
| 3291 Bitmap::IndexToCell( |
| 3292 Bitmap::CellAlignIndex( |
| 3293 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
| 3294 |
| 3295 int cell_index = Page::kFirstUsedCell; |
| 3296 intptr_t freed_bytes = 0; |
| 3297 |
| 3298 // This is the start of the 32 word block that we are currently looking at. |
| 3299 Address block_address = p->ObjectAreaStart(); |
| 3300 |
| 3301 // Skip over all the dead objects at the start of the page and mark them free. |
| 3302 for (cell_index = Page::kFirstUsedCell; |
| 3303 cell_index < last_cell_index; |
| 3304 cell_index++, block_address += 32 * kPointerSize) { |
| 3305 if (cells[cell_index] != 0) break; |
| 3306 } |
| 3307 size_t size = block_address - p->ObjectAreaStart(); |
| 3308 if (cell_index == last_cell_index) { |
| 3309 freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(), |
| 3310 static_cast<int>(size))); |
| 3311 return freed_bytes; |
| 3312 } |
| 3313 // Grow the size of the start-of-page free space a little to get up to the |
| 3314 // first live object. |
| 3315 Address free_end = StartOfLiveObject(block_address, cells[cell_index]); |
| 3316 // Free the first free space. |
| 3317 size = free_end - p->ObjectAreaStart(); |
| 3318 freed_bytes += space->Free(p->ObjectAreaStart(), |
| 3319 static_cast<int>(size)); |
| 3320 // The start of the current free area is represented in undigested form by |
| 3321 // the address of the last 32-word section that contained a live object and |
| 3322 // the marking bitmap for that cell, which describes where the live object |
| 3323 // started. Unless we find a large free space in the bitmap we will not |
| 3324 // digest this pair into a real address. We start the iteration here at the |
| 3325 // first word in the marking bit map that indicates a live object. |
| 3326 Address free_start = block_address; |
| 3327 uint32_t free_start_cell = cells[cell_index]; |
| 3328 |
| 3329 for ( ; |
| 3330 cell_index < last_cell_index; |
| 3331 cell_index++, block_address += 32 * kPointerSize) { |
| 3332 ASSERT((unsigned)cell_index == |
| 3333 Bitmap::IndexToCell( |
| 3334 Bitmap::CellAlignIndex( |
| 3335 p->AddressToMarkbitIndex(block_address)))); |
| 3336 uint32_t cell = cells[cell_index]; |
| 3337 if (cell != 0) { |
| 3338 // We have a live object. Check approximately whether it is more than 32 |
| 3339 // words since the last live object. |
| 3340 if (block_address - free_start > 32 * kPointerSize) { |
| 3341 free_start = DigestFreeStart(free_start, free_start_cell); |
| 3342 if (block_address - free_start > 32 * kPointerSize) { |
| 3343 // Now that we know the exact start of the free space it still looks |
| 3344 // like we have a large enough free space to be worth bothering with, |
| 3345 // so now we need to find the start of the first live object at the |
| 3346 // end of the free space. |
| 3347 free_end = StartOfLiveObject(block_address, cell); |
| 3348 freed_bytes += space->Free(free_start, |
| 3349 static_cast<int>(free_end - free_start)); |
| 3350 } |
| 3351 } |
| 3352 // Update our undigested record of where the current free area started. |
| 3353 free_start = block_address; |
| 3354 free_start_cell = cell; |
| 3355 // Clear marking bits for current cell. |
| 3356 cells[cell_index] = 0; |
| 3357 } |
| 3358 } |
| 3359 |
| 3360 // Handle the free space at the end of the page. |
| 3361 if (block_address - free_start > 32 * kPointerSize) { |
| 3362 free_start = DigestFreeStart(free_start, free_start_cell); |
| 3363 freed_bytes += space->Free(free_start, |
| 3364 static_cast<int>(block_address - free_start)); |
| 3365 } |
| 3366 |
| 3367 return freed_bytes; |
| 3368 } |
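The "32 words since the last live object" test above is the whole policy of the conservative sweeper: only free runs that may span more than one mark-bit cell are digested into an exact range and handed to the free list; shorter runs are ignored. A stand-alone statement of that approximate test (names and plain offsets are illustrative, not V8's):

    #include <cstddef>

    static bool ApproximatelyWorthFreeing(size_t free_run_start,
                                          size_t current_block_start,
                                          size_t word_size) {
      // Mirrors "block_address - free_start > 32 * kPointerSize" above; the
      // real code re-checks after digesting the exact start of the run.
      return current_block_start - free_run_start > 32 * word_size;
    }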
| 3369 |
| 3370 |
| 3371 void MarkCompactCollector::SweepSpace(PagedSpace* space, |
| 3372 SweeperType sweeper) { |
| 3373 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || |
| 3374 sweeper == LAZY_CONSERVATIVE); |
| 3375 |
| 3376 space->ClearStats(); |
| 3377 |
| 3378 PageIterator it(space); |
| 3379 |
| 3380 intptr_t freed_bytes = 0; |
| 3381 intptr_t newspace_size = space->heap()->new_space()->Size(); |
| 3382 bool lazy_sweeping_active = false; |
| 2347 | 3383 |
| 2348 while (it.has_next()) { | 3384 while (it.has_next()) { |
| 2349 Page* p = it.next(); | 3385 Page* p = it.next(); |
| 2350 | 3386 |
| 2351 bool is_previous_alive = true; | 3387 // Clear sweeping flags indicating that marking bits are still intact. |
| 2352 Address free_start = NULL; | 3388 p->ClearSweptPrecisely(); |
| 2353 HeapObject* object; | 3389 p->ClearSweptConservatively(); |
| 2354 | 3390 |
| 2355 for (Address current = p->ObjectAreaStart(); | 3391 if (p->IsEvacuationCandidate()) { |
| 2356 current < p->AllocationTop(); | 3392 ASSERT(evacuation_candidates_.length() > 0); |
| 2357 current += object->Size()) { | 3393 continue; |
| 2358 object = HeapObject::FromAddress(current); | 3394 } |
| 2359 if (object->IsMarked()) { | 3395 |
| 2360 object->ClearMark(); | 3396 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| 2361 heap->mark_compact_collector()->tracer()->decrement_marked_count(); | 3397 // Will be processed in EvacuateNewSpaceAndCandidates. |
| 2362 | 3398 continue; |
| 2363 if (!is_previous_alive) { // Transition from free to live. | 3399 } |
| 2364 space->DeallocateBlock(free_start, | 3400 |
| 2365 static_cast<int>(current - free_start), | 3401 if (lazy_sweeping_active) { |
| 2366 true); | 3402 if (FLAG_gc_verbose) { |
| 2367 is_previous_alive = true; | 3403 PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n", |
| 3404 reinterpret_cast<intptr_t>(p)); |
| 3405 } |
| 3406 continue; |
| 3407 } |
| 3408 |
| 3409 if (FLAG_gc_verbose) { |
| 3410 PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n", |
| 3411 reinterpret_cast<intptr_t>(p), |
| 3412 sweeper); |
| 3413 } |
| 3414 |
| 3415 switch (sweeper) { |
| 3416 case CONSERVATIVE: { |
| 3417 SweepConservatively(space, p); |
| 3418 break; |
| 3419 } |
| 3420 case LAZY_CONSERVATIVE: { |
| 3421 freed_bytes += SweepConservatively(space, p); |
| 3422 if (freed_bytes >= newspace_size && p != space->LastPage()) { |
| 3423 space->SetPagesToSweep(p->next_page(), space->LastPage()); |
| 3424 lazy_sweeping_active = true; |
| 2368 } | 3425 } |
| 2369 } else { | 3426 break; |
| 2370 heap->mark_compact_collector()->ReportDeleteIfNeeded( | 3427 } |
| 2371 object, heap->isolate()); | 3428 case PRECISE: { |
| 2372 if (is_previous_alive) { // Transition from live to free. | 3429 if (space->identity() == CODE_SPACE) { |
| 2373 free_start = current; | 3430 SweepPrecisely<REBUILD_SKIP_LIST>(space, p, SWEEP_ONLY, NULL); |
| 2374 is_previous_alive = false; | 3431 } else { |
| 3432 SweepPrecisely<IGNORE_SKIP_LIST>(space, p, SWEEP_ONLY, NULL); |
| 2375 } | 3433 } |
| 2376 LiveObjectList::ProcessNonLive(object); | 3434 break; |
| 2377 } | 3435 } |
| 2378 // The object is now unmarked for the call to Size() at the top of the | 3436 default: { |
| 2379 // loop. | 3437 UNREACHABLE(); |
| 2380 } | 3438 } |
| 2381 | 3439 } |
| 2382 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop()) | 3440 } |
| 2383 || (!is_previous_alive && free_start == p->ObjectAreaStart()); | 3441 } |
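The LAZY_CONSERVATIVE branch above sweeps pages eagerly only until the reclaimed bytes can absorb a full new-space evacuation, then records the remaining pages via SetPagesToSweep and leaves them for later. A minimal stand-alone model of that policy (plain numbers only, no V8 API):

    #include <cstddef>
    #include <vector>

    static size_t LazySweepSketch(const std::vector<size_t>& reclaimable_per_page,
                                  size_t new_space_size,
                                  size_t* pages_swept) {
      size_t freed = 0;
      *pages_swept = 0;
      for (size_t i = 0; i < reclaimable_per_page.size(); i++) {
        freed += reclaimable_per_page[i];    // conservative sweep of one page
        (*pages_swept)++;
        if (freed >= new_space_size) break;  // sweep the rest lazily, on demand
      }
      return freed;
    }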
| 2384 | |
| 2385 if (page_is_empty) { | |
| 2386 // This page is empty. Check whether we are in the middle of | |
| 2387 // sequence of empty pages and start one if not. | |
| 2388 if (!first_empty_page->is_valid()) { | |
| 2389 first_empty_page = p; | |
| 2390 prec_first_empty_page = prev; | |
| 2391 } | |
| 2392 | |
| 2393 if (!is_previous_alive) { | |
| 2394 // There are dead objects on this page. Update space accounting stats | |
| 2395 // without putting anything into free list. | |
| 2396 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start); | |
| 2397 if (size_in_bytes > 0) { | |
| 2398 space->DeallocateBlock(free_start, size_in_bytes, false); | |
| 2399 } | |
| 2400 } | |
| 2401 } else { | |
| 2402 // This page is not empty. Sequence of empty pages ended on the previous | |
| 2403 // one. | |
| 2404 if (first_empty_page->is_valid()) { | |
| 2405 space->FreePages(prec_first_empty_page, prev); | |
| 2406 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL); | |
| 2407 } | |
| 2408 | |
| 2409 // If there is a free ending area on one of the previous pages we have to | |
| 2410 // deallocate that area and put it on the free list. | |
| 2411 if (last_free_size > 0) { | |
| 2412 Page::FromAddress(last_free_start)-> | |
| 2413 SetAllocationWatermark(last_free_start); | |
| 2414 space->DeallocateBlock(last_free_start, last_free_size, true); | |
| 2415 last_free_start = NULL; | |
| 2416 last_free_size = 0; | |
| 2417 } | |
| 2418 | |
| 2419 // If the last region of this page was not live we remember it. | |
| 2420 if (!is_previous_alive) { | |
| 2421 ASSERT(last_free_size == 0); | |
| 2422 last_free_size = static_cast<int>(p->AllocationTop() - free_start); | |
| 2423 last_free_start = free_start; | |
| 2424 } | |
| 2425 } | |
| 2426 | |
| 2427 prev = p; | |
| 2428 } | |
| 2429 | |
| 2430 // We reached end of space. See if we need to adjust allocation top. | |
| 2431 Address new_allocation_top = NULL; | |
| 2432 | |
| 2433 if (first_empty_page->is_valid()) { | |
| 2434 // Last used pages in space are empty. We can move allocation top backwards | |
| 2435 // to the beginning of first empty page. | |
| 2436 ASSERT(prev == space->AllocationTopPage()); | |
| 2437 | |
| 2438 new_allocation_top = first_empty_page->ObjectAreaStart(); | |
| 2439 } | |
| 2440 | |
| 2441 if (last_free_size > 0) { | |
| 2442 // There was a free ending area on the previous page. | |
| 2443 // Deallocate it without putting it into freelist and move allocation | |
| 2444 // top to the beginning of this free area. | |
| 2445 space->DeallocateBlock(last_free_start, last_free_size, false); | |
| 2446 new_allocation_top = last_free_start; | |
| 2447 } | |
| 2448 | |
| 2449 if (new_allocation_top != NULL) { | |
| 2450 #ifdef DEBUG | |
| 2451 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top); | |
| 2452 if (!first_empty_page->is_valid()) { | |
| 2453 ASSERT(new_allocation_top_page == space->AllocationTopPage()); | |
| 2454 } else if (last_free_size > 0) { | |
| 2455 ASSERT(new_allocation_top_page == prec_first_empty_page); | |
| 2456 } else { | |
| 2457 ASSERT(new_allocation_top_page == first_empty_page); | |
| 2458 } | |
| 2459 #endif | |
| 2460 | |
| 2461 space->SetTop(new_allocation_top); | |
| 2462 } | |
| 2463 } | |
| 2464 | |
| 2465 | |
| 2466 void MarkCompactCollector::EncodeForwardingAddresses() { | |
| 2467 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); | |
| 2468 // Objects in the active semispace of the young generation may be | |
| 2469 // relocated to the inactive semispace (if not promoted). Set the | |
| 2470 // relocation info to the beginning of the inactive semispace. | |
| 2471 heap()->new_space()->MCResetRelocationInfo(); | |
| 2472 | |
| 2473 // Compute the forwarding pointers in each space. | |
| 2474 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace, | |
| 2475 ReportDeleteIfNeeded>( | |
| 2476 heap()->old_pointer_space()); | |
| 2477 | |
| 2478 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace, | |
| 2479 IgnoreNonLiveObject>( | |
| 2480 heap()->old_data_space()); | |
| 2481 | |
| 2482 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace, | |
| 2483 ReportDeleteIfNeeded>( | |
| 2484 heap()->code_space()); | |
| 2485 | |
| 2486 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace, | |
| 2487 IgnoreNonLiveObject>( | |
| 2488 heap()->cell_space()); | |
| 2489 | |
| 2490 | |
| 2491 // Compute new space next to last after the old and code spaces have been | |
| 2492 // compacted. Objects in new space can be promoted to old or code space. | |
| 2493 EncodeForwardingAddressesInNewSpace(); | |
| 2494 | |
| 2495 // Compute map space last because computing forwarding addresses | |
| 2496 // overwrites non-live objects. Objects in the other spaces rely on | |
| 2497 // non-live map pointers to get the sizes of non-live objects. | |
| 2498 EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace, | |
| 2499 IgnoreNonLiveObject>( | |
| 2500 heap()->map_space()); | |
| 2501 | |
| 2502 // Write relocation info to the top page, so we can use it later. This is | |
| 2503 // done after promoting objects from the new space so we get the correct | |
| 2504 // allocation top. | |
| 2505 heap()->old_pointer_space()->MCWriteRelocationInfoToPage(); | |
| 2506 heap()->old_data_space()->MCWriteRelocationInfoToPage(); | |
| 2507 heap()->code_space()->MCWriteRelocationInfoToPage(); | |
| 2508 heap()->map_space()->MCWriteRelocationInfoToPage(); | |
| 2509 heap()->cell_space()->MCWriteRelocationInfoToPage(); | |
| 2510 } | |
| 2511 | |
| 2512 | |
| 2513 class MapIterator : public HeapObjectIterator { | |
| 2514 public: | |
| 2515 explicit MapIterator(Heap* heap) | |
| 2516 : HeapObjectIterator(heap->map_space(), &SizeCallback) { } | |
| 2517 | |
| 2518 MapIterator(Heap* heap, Address start) | |
| 2519 : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { } | |
| 2520 | |
| 2521 private: | |
| 2522 static int SizeCallback(HeapObject* unused) { | |
| 2523 USE(unused); | |
| 2524 return Map::kSize; | |
| 2525 } | |
| 2526 }; | |
| 2527 | |
| 2528 | |
| 2529 class MapCompact { | |
| 2530 public: | |
| 2531 explicit MapCompact(Heap* heap, int live_maps) | |
| 2532 : heap_(heap), | |
| 2533 live_maps_(live_maps), | |
| 2534 to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)), | |
| 2535 vacant_map_it_(heap), | |
| 2536 map_to_evacuate_it_(heap, to_evacuate_start_), | |
| 2537 first_map_to_evacuate_( | |
| 2538 reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) { | |
| 2539 } | |
| 2540 | |
| 2541 void CompactMaps() { | |
| 2542 // As we know the number of maps to evacuate beforehand, | |
| 2543 // we stop when there are no more vacant maps. | |
| 2544 for (Map* next_vacant_map = NextVacantMap(); | |
| 2545 next_vacant_map; | |
| 2546 next_vacant_map = NextVacantMap()) { | |
| 2547 EvacuateMap(next_vacant_map, NextMapToEvacuate()); | |
| 2548 } | |
| 2549 | |
| 2550 #ifdef DEBUG | |
| 2551 CheckNoMapsToEvacuate(); | |
| 2552 #endif | |
| 2553 } | |
| 2554 | |
| 2555 void UpdateMapPointersInRoots() { | |
| 2556 MapUpdatingVisitor map_updating_visitor; | |
| 2557 heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG); | |
| 2558 heap()->isolate()->global_handles()->IterateWeakRoots( | |
| 2559 &map_updating_visitor); | |
| 2560 LiveObjectList::IterateElements(&map_updating_visitor); | |
| 2561 } | |
| 2562 | |
| 2563 void UpdateMapPointersInPagedSpace(PagedSpace* space) { | |
| 2564 ASSERT(space != heap()->map_space()); | |
| 2565 | |
| 2566 PageIterator it(space, PageIterator::PAGES_IN_USE); | |
| 2567 while (it.has_next()) { | |
| 2568 Page* p = it.next(); | |
| 2569 UpdateMapPointersInRange(heap(), | |
| 2570 p->ObjectAreaStart(), | |
| 2571 p->AllocationTop()); | |
| 2572 } | |
| 2573 } | |
| 2574 | |
| 2575 void UpdateMapPointersInNewSpace() { | |
| 2576 NewSpace* space = heap()->new_space(); | |
| 2577 UpdateMapPointersInRange(heap(), space->bottom(), space->top()); | |
| 2578 } | |
| 2579 | |
| 2580 void UpdateMapPointersInLargeObjectSpace() { | |
| 2581 LargeObjectIterator it(heap()->lo_space()); | |
| 2582 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) | |
| 2583 UpdateMapPointersInObject(heap(), obj); | |
| 2584 } | |
| 2585 | |
| 2586 void Finish() { | |
| 2587 heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_); | |
| 2588 } | |
| 2589 | |
| 2590 inline Heap* heap() const { return heap_; } | |
| 2591 | |
| 2592 private: | |
| 2593 Heap* heap_; | |
| 2594 int live_maps_; | |
| 2595 Address to_evacuate_start_; | |
| 2596 MapIterator vacant_map_it_; | |
| 2597 MapIterator map_to_evacuate_it_; | |
| 2598 Map* first_map_to_evacuate_; | |
| 2599 | |
| 2600 // Helper class for updating map pointers in HeapObjects. | |
| 2601 class MapUpdatingVisitor: public ObjectVisitor { | |
| 2602 public: | |
| 2603 MapUpdatingVisitor() {} | |
| 2604 | |
| 2605 void VisitPointer(Object** p) { | |
| 2606 UpdateMapPointer(p); | |
| 2607 } | |
| 2608 | |
| 2609 void VisitPointers(Object** start, Object** end) { | |
| 2610 for (Object** p = start; p < end; p++) UpdateMapPointer(p); | |
| 2611 } | |
| 2612 | |
| 2613 private: | |
| 2614 void UpdateMapPointer(Object** p) { | |
| 2615 if (!(*p)->IsHeapObject()) return; | |
| 2616 HeapObject* old_map = reinterpret_cast<HeapObject*>(*p); | |
| 2617 | |
| 2618 // Moved maps are tagged with an overflowed map word. They are the only | |
| 2619 // objects whose map word is overflowed, as marking is already complete. | |
| 2620 MapWord map_word = old_map->map_word(); | |
| 2621 if (!map_word.IsOverflowed()) return; | |
| 2622 | |
| 2623 *p = GetForwardedMap(map_word); | |
| 2624 } | |
| 2625 }; | |
| 2626 | |
| 2627 static Map* NextMap(MapIterator* it, HeapObject* last, bool live) { | |
| 2628 while (true) { | |
| 2629 HeapObject* next = it->next(); | |
| 2630 ASSERT(next != NULL); | |
| 2631 if (next == last) | |
| 2632 return NULL; | |
| 2633 ASSERT(!next->IsOverflowed()); | |
| 2634 ASSERT(!next->IsMarked()); | |
| 2635 ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next)); | |
| 2636 if (next->IsMap() == live) | |
| 2637 return reinterpret_cast<Map*>(next); | |
| 2638 } | |
| 2639 } | |
| 2640 | |
| 2641 Map* NextVacantMap() { | |
| 2642 Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false); | |
| 2643 ASSERT(map == NULL || FreeListNode::IsFreeListNode(map)); | |
| 2644 return map; | |
| 2645 } | |
| 2646 | |
| 2647 Map* NextMapToEvacuate() { | |
| 2648 Map* map = NextMap(&map_to_evacuate_it_, NULL, true); | |
| 2649 ASSERT(map != NULL); | |
| 2650 ASSERT(map->IsMap()); | |
| 2651 return map; | |
| 2652 } | |
| 2653 | |
| 2654 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) { | |
| 2655 ASSERT(FreeListNode::IsFreeListNode(vacant_map)); | |
| 2656 ASSERT(map_to_evacuate->IsMap()); | |
| 2657 | |
| 2658 ASSERT(Map::kSize % 4 == 0); | |
| 2659 | |
| 2660 map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks( | |
| 2661 vacant_map->address(), map_to_evacuate->address(), Map::kSize); | |
| 2662 | |
| 2663 ASSERT(vacant_map->IsMap()); // Due to memcpy above. | |
| 2664 | |
| 2665 MapWord forwarding_map_word = MapWord::FromMap(vacant_map); | |
| 2666 forwarding_map_word.SetOverflow(); | |
| 2667 map_to_evacuate->set_map_word(forwarding_map_word); | |
| 2668 | |
| 2669 ASSERT(map_to_evacuate->map_word().IsOverflowed()); | |
| 2670 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map); | |
| 2671 } | |
| 2672 | |
| 2673 static Map* GetForwardedMap(MapWord map_word) { | |
| 2674 ASSERT(map_word.IsOverflowed()); | |
| 2675 map_word.ClearOverflow(); | |
| 2676 Map* new_map = map_word.ToMap(); | |
| 2677 ASSERT_MAP_ALIGNED(new_map->address()); | |
| 2678 return new_map; | |
| 2679 } | |
| 2680 | |
| 2681 static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) { | |
| 2682 ASSERT(!obj->IsMarked()); | |
| 2683 Map* map = obj->map(); | |
| 2684 ASSERT(heap->map_space()->Contains(map)); | |
| 2685 MapWord map_word = map->map_word(); | |
| 2686 ASSERT(!map_word.IsMarked()); | |
| 2687 if (map_word.IsOverflowed()) { | |
| 2688 Map* new_map = GetForwardedMap(map_word); | |
| 2689 ASSERT(heap->map_space()->Contains(new_map)); | |
| 2690 obj->set_map(new_map); | |
| 2691 | |
| 2692 #ifdef DEBUG | |
| 2693 if (FLAG_gc_verbose) { | |
| 2694 PrintF("update %p : %p -> %p\n", | |
| 2695 obj->address(), | |
| 2696 reinterpret_cast<void*>(map), | |
| 2697 reinterpret_cast<void*>(new_map)); | |
| 2698 } | |
| 2699 #endif | |
| 2700 } | |
| 2701 | |
| 2702 int size = obj->SizeFromMap(map); | |
| 2703 MapUpdatingVisitor map_updating_visitor; | |
| 2704 obj->IterateBody(map->instance_type(), size, &map_updating_visitor); | |
| 2705 return size; | |
| 2706 } | |
| 2707 | |
| 2708 static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) { | |
| 2709 HeapObject* object; | |
| 2710 int size; | |
| 2711 for (Address current = start; current < end; current += size) { | |
| 2712 object = HeapObject::FromAddress(current); | |
| 2713 size = UpdateMapPointersInObject(heap, object); | |
| 2714 ASSERT(size > 0); | |
| 2715 } | |
| 2716 } | |
| 2717 | |
| 2718 #ifdef DEBUG | |
| 2719 void CheckNoMapsToEvacuate() { | |
| 2720 if (!FLAG_enable_slow_asserts) | |
| 2721 return; | |
| 2722 | |
| 2723 for (HeapObject* obj = map_to_evacuate_it_.next(); | |
| 2724 obj != NULL; obj = map_to_evacuate_it_.next()) | |
| 2725 ASSERT(FreeListNode::IsFreeListNode(obj)); | |
| 2726 } | |
| 2727 #endif | |
| 2728 }; | |
| 2729 | 3442 |
| 2730 | 3443 |
| 2731 void MarkCompactCollector::SweepSpaces() { | 3444 void MarkCompactCollector::SweepSpaces() { |
| 2732 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); | 3445 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); |
| 2733 | 3446 #ifdef DEBUG |
| 2734 ASSERT(state_ == SWEEP_SPACES); | 3447 state_ = SWEEP_SPACES; |
| 2735 ASSERT(!IsCompacting()); | 3448 #endif |
| 3449 SweeperType how_to_sweep = |
| 3450 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; |
| 3451 if (sweep_precisely_) how_to_sweep = PRECISE; |
| 2736 // Noncompacting collections simply sweep the spaces to clear the mark | 3452 // Noncompacting collections simply sweep the spaces to clear the mark |
| 2737 // bits and free the nonlive blocks (for old and map spaces). We sweep | 3453 // bits and free the nonlive blocks (for old and map spaces). We sweep |
| 2738 // the map space last because freeing non-live maps overwrites them and | 3454 // the map space last because freeing non-live maps overwrites them and |
| 2739 // the other spaces rely on possibly non-live maps to get the sizes for | 3455 // the other spaces rely on possibly non-live maps to get the sizes for |
| 2740 // non-live objects. | 3456 // non-live objects. |
| 2741 SweepSpace(heap(), heap()->old_pointer_space()); | 3457 SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
| 2742 SweepSpace(heap(), heap()->old_data_space()); | 3458 SweepSpace(heap()->old_data_space(), how_to_sweep); |
| 2743 SweepSpace(heap(), heap()->code_space()); | 3459 SweepSpace(heap()->code_space(), PRECISE); |
| 2744 SweepSpace(heap(), heap()->cell_space()); | 3460 SweepSpace(heap()->cell_space(), PRECISE); |
| 2745 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); | 3461 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); |
| 2746 SweepNewSpace(heap(), heap()->new_space()); | 3462 EvacuateNewSpaceAndCandidates(); |
| 2747 } | 3463 } |
| 2748 SweepSpace(heap(), heap()->map_space()); | 3464 // ClearNonLiveTransitions depends on precise sweeping of map space to |
| 2749 | 3465 // detect whether unmarked map became dead in this collection or in one |
| 2750 heap()->IterateDirtyRegions(heap()->map_space(), | 3466 // of the previous ones. |
| 2751 &heap()->IteratePointersInDirtyMapsRegion, | 3467 SweepSpace(heap()->map_space(), PRECISE); |
| 2752 &UpdatePointerToNewGen, | 3468 |
| 2753 heap()->WATERMARK_SHOULD_BE_VALID); | 3469 ASSERT(live_map_objects_size_ <= heap()->map_space()->Size()); |
| 2754 | 3470 |
| 2755 intptr_t live_maps_size = heap()->map_space()->Size(); | 3471 // Deallocate unmarked objects and clear marked bits for marked objects. |
| 2756 int live_maps = static_cast<int>(live_maps_size / Map::kSize); | 3472 heap_->lo_space()->FreeUnmarkedObjects(); |
| 2757 ASSERT(live_map_objects_size_ == live_maps_size); | 3473 } |
| 2758 | 3474 |
| 2759 if (heap()->map_space()->NeedsCompaction(live_maps)) { | 3475 |
| 2760 MapCompact map_compact(heap(), live_maps); | 3476 // TODO(1466) ReportDeleteIfNeeded is not called currently. |
| 2761 | 3477 // Our profiling tools do not expect intersections between |
| 2762 map_compact.CompactMaps(); | 3478 // code objects. We should either reenable it or change our tools. |
| 2763 map_compact.UpdateMapPointersInRoots(); | |
| 2764 | |
| 2765 PagedSpaces spaces; | |
| 2766 for (PagedSpace* space = spaces.next(); | |
| 2767 space != NULL; space = spaces.next()) { | |
| 2768 if (space == heap()->map_space()) continue; | |
| 2769 map_compact.UpdateMapPointersInPagedSpace(space); | |
| 2770 } | |
| 2771 map_compact.UpdateMapPointersInNewSpace(); | |
| 2772 map_compact.UpdateMapPointersInLargeObjectSpace(); | |
| 2773 | |
| 2774 map_compact.Finish(); | |
| 2775 } | |
| 2776 } | |
| 2777 | |
| 2778 | |
| 2779 // Iterate the live objects in a range of addresses (eg, a page or a | |
| 2780 // semispace). The live regions of the range have been linked into a list. | |
| 2781 // The first live region is [first_live_start, first_live_end), and the last | |
| 2782 // address in the range is top. The callback function is used to get the | |
| 2783 // size of each live object. | |
| 2784 int MarkCompactCollector::IterateLiveObjectsInRange( | |
| 2785 Address start, | |
| 2786 Address end, | |
| 2787 LiveObjectCallback size_func) { | |
| 2788 int live_objects_size = 0; | |
| 2789 Address current = start; | |
| 2790 while (current < end) { | |
| 2791 uint32_t encoded_map = Memory::uint32_at(current); | |
| 2792 if (encoded_map == kSingleFreeEncoding) { | |
| 2793 current += kPointerSize; | |
| 2794 } else if (encoded_map == kMultiFreeEncoding) { | |
| 2795 current += Memory::int_at(current + kIntSize); | |
| 2796 } else { | |
| 2797 int size = (this->*size_func)(HeapObject::FromAddress(current)); | |
| 2798 current += size; | |
| 2799 live_objects_size += size; | |
| 2800 } | |
| 2801 } | |
| 2802 return live_objects_size; | |
| 2803 } | |
| 2804 | |
| 2805 | |
| 2806 int MarkCompactCollector::IterateLiveObjects( | |
| 2807 NewSpace* space, LiveObjectCallback size_f) { | |
| 2808 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); | |
| 2809 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f); | |
| 2810 } | |
| 2811 | |
| 2812 | |
| 2813 int MarkCompactCollector::IterateLiveObjects( | |
| 2814 PagedSpace* space, LiveObjectCallback size_f) { | |
| 2815 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); | |
| 2816 int total = 0; | |
| 2817 PageIterator it(space, PageIterator::PAGES_IN_USE); | |
| 2818 while (it.has_next()) { | |
| 2819 Page* p = it.next(); | |
| 2820 total += IterateLiveObjectsInRange(p->ObjectAreaStart(), | |
| 2821 p->AllocationTop(), | |
| 2822 size_f); | |
| 2823 } | |
| 2824 return total; | |
| 2825 } | |
| 2826 | |
| 2827 | |
| 2828 // ------------------------------------------------------------------------- | |
| 2829 // Phase 3: Update pointers | |
| 2830 | |
| 2831 // Helper class for updating pointers in HeapObjects. | |
| 2832 class UpdatingVisitor: public ObjectVisitor { | |
| 2833 public: | |
| 2834 explicit UpdatingVisitor(Heap* heap) : heap_(heap) {} | |
| 2835 | |
| 2836 void VisitPointer(Object** p) { | |
| 2837 UpdatePointer(p); | |
| 2838 } | |
| 2839 | |
| 2840 void VisitPointers(Object** start, Object** end) { | |
| 2841 // Mark all HeapObject pointers in [start, end) | |
| 2842 for (Object** p = start; p < end; p++) UpdatePointer(p); | |
| 2843 } | |
| 2844 | |
| 2845 void VisitCodeTarget(RelocInfo* rinfo) { | |
| 2846 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); | |
| 2847 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); | |
| 2848 VisitPointer(&target); | |
| 2849 rinfo->set_target_address( | |
| 2850 reinterpret_cast<Code*>(target)->instruction_start()); | |
| 2851 } | |
| 2852 | |
| 2853 void VisitDebugTarget(RelocInfo* rinfo) { | |
| 2854 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && | |
| 2855 rinfo->IsPatchedReturnSequence()) || | |
| 2856 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && | |
| 2857 rinfo->IsPatchedDebugBreakSlotSequence())); | |
| 2858 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); | |
| 2859 VisitPointer(&target); | |
| 2860 rinfo->set_call_address( | |
| 2861 reinterpret_cast<Code*>(target)->instruction_start()); | |
| 2862 } | |
| 2863 | |
| 2864 inline Heap* heap() const { return heap_; } | |
| 2865 | |
| 2866 private: | |
| 2867 void UpdatePointer(Object** p) { | |
| 2868 if (!(*p)->IsHeapObject()) return; | |
| 2869 | |
| 2870 HeapObject* obj = HeapObject::cast(*p); | |
| 2871 Address old_addr = obj->address(); | |
| 2872 Address new_addr; | |
| 2873 ASSERT(!heap()->InFromSpace(obj)); | |
| 2874 | |
| 2875 if (heap()->new_space()->Contains(obj)) { | |
| 2876 Address forwarding_pointer_addr = | |
| 2877 heap()->new_space()->FromSpaceLow() + | |
| 2878 heap()->new_space()->ToSpaceOffsetForAddress(old_addr); | |
| 2879 new_addr = Memory::Address_at(forwarding_pointer_addr); | |
| 2880 | |
| 2881 #ifdef DEBUG | |
| 2882 ASSERT(heap()->old_pointer_space()->Contains(new_addr) || | |
| 2883 heap()->old_data_space()->Contains(new_addr) || | |
| 2884 heap()->new_space()->FromSpaceContains(new_addr) || | |
| 2885 heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr))); | |
| 2886 | |
| 2887 if (heap()->new_space()->FromSpaceContains(new_addr)) { | |
| 2888 ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <= | |
| 2889 heap()->new_space()->ToSpaceOffsetForAddress(old_addr)); | |
| 2890 } | |
| 2891 #endif | |
| 2892 | |
| 2893 } else if (heap()->lo_space()->Contains(obj)) { | |
| 2894 // Don't move objects in the large object space. | |
| 2895 return; | |
| 2896 | |
| 2897 } else { | |
| 2898 #ifdef DEBUG | |
| 2899 PagedSpaces spaces; | |
| 2900 PagedSpace* original_space = spaces.next(); | |
| 2901 while (original_space != NULL) { | |
| 2902 if (original_space->Contains(obj)) break; | |
| 2903 original_space = spaces.next(); | |
| 2904 } | |
| 2905 ASSERT(original_space != NULL); | |
| 2906 #endif | |
| 2907 new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj); | |
| 2908 ASSERT(original_space->Contains(new_addr)); | |
| 2909 ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <= | |
| 2910 original_space->MCSpaceOffsetForAddress(old_addr)); | |
| 2911 } | |
| 2912 | |
| 2913 *p = HeapObject::FromAddress(new_addr); | |
| 2914 | |
| 2915 #ifdef DEBUG | |
| 2916 if (FLAG_gc_verbose) { | |
| 2917 PrintF("update %p : %p -> %p\n", | |
| 2918 reinterpret_cast<Address>(p), old_addr, new_addr); | |
| 2919 } | |
| 2920 #endif | |
| 2921 } | |
| 2922 | |
| 2923 Heap* heap_; | |
| 2924 }; | |
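
Note: the visitor above rewrites every slot it is shown with the target's forwarding address. A minimal, self-contained sketch of the same pattern (all names are invented; a hash map stands in for the real forwarding encodings):

// Hedged sketch: an ObjectVisitor-style interface plus an updating visitor
// that rewrites pointers through a forwarding table.
#include <unordered_map>

namespace sketch {

struct Obj;  // opaque heap object for the purpose of the sketch

class Visitor {
 public:
  virtual ~Visitor() {}
  virtual void VisitPointer(Obj** p) = 0;
  // Visit every slot in [start, end).
  void VisitPointers(Obj** start, Obj** end) {
    for (Obj** p = start; p < end; ++p) VisitPointer(p);
  }
};

// Rewrites each slot to the forwarding address recorded for its target.
// Slots whose targets did not move are left untouched.
class ForwardingVisitor : public Visitor {
 public:
  explicit ForwardingVisitor(const std::unordered_map<Obj*, Obj*>* forwarding)
      : forwarding_(forwarding) {}

  virtual void VisitPointer(Obj** p) {
    std::unordered_map<Obj*, Obj*>::const_iterator it = forwarding_->find(*p);
    if (it != forwarding_->end()) *p = it->second;
  }

 private:
  const std::unordered_map<Obj*, Obj*>* forwarding_;
};

}  // namespace sketch

Roots, weak handles, and object bodies can all be walked with one such visitor, which is exactly how UpdatePointers() below uses the real class.
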
| 2925 | |
| 2926 | |
| 2927 void MarkCompactCollector::UpdatePointers() { | |
| 2928 #ifdef DEBUG | |
| 2929 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); | |
| 2930 state_ = UPDATE_POINTERS; | |
| 2931 #endif | |
| 2932 UpdatingVisitor updating_visitor(heap()); | |
| 2933 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( | |
| 2934 &updating_visitor); | |
| 2935 heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG); | |
| 2936 heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor); | |
| 2937 | |
| 2938 // Update the pointer to the head of the weak list of global contexts. | |
| 2939 updating_visitor.VisitPointer(&heap()->global_contexts_list_); | |
| 2940 | |
| 2941 LiveObjectList::IterateElements(&updating_visitor); | |
| 2942 | |
| 2943 int live_maps_size = IterateLiveObjects( | |
| 2944 heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject); | |
| 2945 int live_pointer_olds_size = IterateLiveObjects( | |
| 2946 heap()->old_pointer_space(), | |
| 2947 &MarkCompactCollector::UpdatePointersInOldObject); | |
| 2948 int live_data_olds_size = IterateLiveObjects( | |
| 2949 heap()->old_data_space(), | |
| 2950 &MarkCompactCollector::UpdatePointersInOldObject); | |
| 2951 int live_codes_size = IterateLiveObjects( | |
| 2952 heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject); | |
| 2953 int live_cells_size = IterateLiveObjects( | |
| 2954 heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject); | |
| 2955 int live_news_size = IterateLiveObjects( | |
| 2956 heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject); | |
| 2957 | |
| 2958 // Large objects do not move, so the map word can be updated directly. | |
| 2959 LargeObjectIterator it(heap()->lo_space()); | |
| 2960 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { | |
| 2961 UpdatePointersInNewObject(obj); | |
| 2962 } | |
| 2963 | |
| 2964 USE(live_maps_size); | |
| 2965 USE(live_pointer_olds_size); | |
| 2966 USE(live_data_olds_size); | |
| 2967 USE(live_codes_size); | |
| 2968 USE(live_cells_size); | |
| 2969 USE(live_news_size); | |
| 2970 ASSERT(live_maps_size == live_map_objects_size_); | |
| 2971 ASSERT(live_data_olds_size == live_old_data_objects_size_); | |
| 2972 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); | |
| 2973 ASSERT(live_codes_size == live_code_objects_size_); | |
| 2974 ASSERT(live_cells_size == live_cell_objects_size_); | |
| 2975 ASSERT(live_news_size == live_young_objects_size_); | |
| 2976 } | |
| 2977 | |
| 2978 | |
| 2979 int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) { | |
| 2980 // Keep the old map pointer. | |
| 2981 Map* old_map = obj->map(); | |
| 2982 ASSERT(old_map->IsHeapObject()); | |
| 2983 | |
| 2984 Address forwarded = GetForwardingAddressInOldSpace(old_map); | |
| 2985 | |
| 2986 ASSERT(heap()->map_space()->Contains(old_map)); | |
| 2987 ASSERT(heap()->map_space()->Contains(forwarded)); | |
| 2988 #ifdef DEBUG | |
| 2989 if (FLAG_gc_verbose) { | |
| 2990 PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(), | |
| 2991 forwarded); | |
| 2992 } | |
| 2993 #endif | |
| 2994 // Update the map pointer. | |
| 2995 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded))); | |
| 2996 | |
| 2997 // We have to compute the object size relying on the old map because | |
| 2998 // map objects are not relocated yet. | |
| 2999 int obj_size = obj->SizeFromMap(old_map); | |
| 3000 | |
| 3001 // Update pointers in the object body. | |
| 3002 UpdatingVisitor updating_visitor(heap()); | |
| 3003 obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor); | |
| 3004 return obj_size; | |
| 3005 } | |
| 3006 | |
| 3007 | |
| 3008 int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) { | |
| 3009 // Decode the map pointer. | |
| 3010 MapWord encoding = obj->map_word(); | |
| 3011 Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); | |
| 3012 ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr))); | |
| 3013 | |
| 3014 // At this point, the first word at map_addr is also encoded, so we | |
| 3015 // cannot cast it to Map* using Map::cast. | |
| 3016 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)); | |
| 3017 int obj_size = obj->SizeFromMap(map); | |
| 3018 InstanceType type = map->instance_type(); | |
| 3019 | |
| 3020 // Update map pointer. | |
| 3021 Address new_map_addr = GetForwardingAddressInOldSpace(map); | |
| 3022 int offset = encoding.DecodeOffset(); | |
| 3023 obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset)); | |
| 3024 | |
| 3025 #ifdef DEBUG | |
| 3026 if (FLAG_gc_verbose) { | |
| 3027 PrintF("update %p : %p -> %p\n", obj->address(), | |
| 3028 map_addr, new_map_addr); | |
| 3029 } | |
| 3030 #endif | |
| 3031 | |
| 3032 // Update pointers in the object body. | |
| 3033 UpdatingVisitor updating_visitor(heap()); | |
| 3034 obj->IterateBody(type, obj_size, &updating_visitor); | |
| 3035 return obj_size; | |
| 3036 } | |
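
Note: while forwarding information is squeezed into the map slot, the object's size has to be computed from the decoded map (SizeFromMap above) rather than from obj->map(). The sketch below illustrates the general idea of packing two small fields into one word; the real MapWord layout in V8 is different, and the widths and names here are assumptions only.

// Hedged sketch of a two-field "encoded map word": a small map index and a
// byte offset packed into one uintptr_t.
#include <cassert>
#include <cstdint>

namespace sketch {

const int kOffsetBits = 20;  // assumed width of the offset field
const uintptr_t kOffsetMask = (static_cast<uintptr_t>(1) << kOffsetBits) - 1;

// Pack (map_index, offset) into one word.
inline uintptr_t EncodeMapWord(uintptr_t map_index, uintptr_t offset) {
  assert(offset <= kOffsetMask);
  return (map_index << kOffsetBits) | offset;
}

inline uintptr_t DecodeMapIndex(uintptr_t word) { return word >> kOffsetBits; }
inline uintptr_t DecodeOffset(uintptr_t word) { return word & kOffsetMask; }

}  // namespace sketch
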
| 3037 | |
| 3038 | |
| 3039 Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) { | |
| 3040 // The object should be in either old space or map space. | |
| 3041 MapWord encoding = obj->map_word(); | |
| 3042 | |
| 3043 // Offset from the first live object's forwarding address. | |
| 3044 int offset = encoding.DecodeOffset(); | |
| 3045 Address obj_addr = obj->address(); | |
| 3046 | |
| 3047 // Find the first live object's forwarding address. | |
| 3048 Page* p = Page::FromAddress(obj_addr); | |
| 3049 Address first_forwarded = p->mc_first_forwarded; | |
| 3050 | |
| 3051 // Page containing the first forwarded address, and its offset within it. | |
| 3052 Page* forwarded_page = Page::FromAddress(first_forwarded); | |
| 3053 int forwarded_offset = forwarded_page->Offset(first_forwarded); | |
| 3054 | |
| 3055 // Find end of allocation in the page of first_forwarded. | |
| 3056 int mc_top_offset = forwarded_page->AllocationWatermarkOffset(); | |
| 3057 | |
| 3058 // Check if the current object's forwarding address is on the same page | |
| 3059 // as the first live object's forwarding address. | |
| 3060 if (forwarded_offset + offset < mc_top_offset) { | |
| 3061 // In the same page. | |
| 3062 return first_forwarded + offset; | |
| 3063 } | |
| 3064 | |
| 3065 // Must be on the next page. NOTE: this may cross chunks. | |
| 3066 Page* next_page = forwarded_page->next_page(); | |
| 3067 ASSERT(next_page->is_valid()); | |
| 3068 | |
| 3069 offset -= (mc_top_offset - forwarded_offset); | |
| 3070 offset += Page::kObjectStartOffset; | |
| 3071 | |
| 3072 ASSERT_PAGE_OFFSET(offset); | |
| 3073 ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop()); | |
| 3074 | |
| 3075 return next_page->OffsetToAddress(offset); | |
| 3076 } | |
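
Note: the function above is pure address arithmetic: add the recorded offset to the page's first forwarded address, and spill onto the next page once the destination page's allocation watermark is crossed. A hedged sketch of the same rule, with page geometry flattened to plain integers and every name invented for illustration:

// Hedged sketch of page-relative forwarding arithmetic with overflow to the
// next page.
#include <cstdint>

namespace sketch {

struct PageInfo {
  uintptr_t first_forwarded;         // forwarding address of the page's first live object
  uintptr_t forwarded_page_top;      // allocation watermark of the page containing it
  uintptr_t next_page_object_start;  // first usable address on the next page
};

// offset is the distance of this object's forwarding address from
// first_forwarded, as recorded in its encoded map word.
inline uintptr_t ForwardingAddress(const PageInfo& page, uintptr_t offset) {
  uintptr_t candidate = page.first_forwarded + offset;
  if (candidate < page.forwarded_page_top) {
    return candidate;  // still on the same destination page
  }
  // Spill into the next page: keep whatever fit below the watermark and
  // continue from the next page's object area start.
  uintptr_t spill = candidate - page.forwarded_page_top;
  return page.next_page_object_start + spill;
}

}  // namespace sketch
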
| 3077 | |
| 3078 | |
| 3079 // ------------------------------------------------------------------------- | |
| 3080 // Phase 4: Relocate objects | |
| 3081 | |
| 3082 void MarkCompactCollector::RelocateObjects() { | |
| 3083 #ifdef DEBUG | |
| 3084 ASSERT(state_ == UPDATE_POINTERS); | |
| 3085 state_ = RELOCATE_OBJECTS; | |
| 3086 #endif | |
| 3087 // Relocate objects; map objects are always relocated first, since | |
| 3088 // relocating objects in the other spaces relies on maps to compute object sizes. | |
| 3089 int live_maps_size = IterateLiveObjects( | |
| 3090 heap()->map_space(), &MarkCompactCollector::RelocateMapObject); | |
| 3091 int live_pointer_olds_size = IterateLiveObjects( | |
| 3092 heap()->old_pointer_space(), | |
| 3093 &MarkCompactCollector::RelocateOldPointerObject); | |
| 3094 int live_data_olds_size = IterateLiveObjects( | |
| 3095 heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject); | |
| 3096 int live_codes_size = IterateLiveObjects( | |
| 3097 heap()->code_space(), &MarkCompactCollector::RelocateCodeObject); | |
| 3098 int live_cells_size = IterateLiveObjects( | |
| 3099 heap()->cell_space(), &MarkCompactCollector::RelocateCellObject); | |
| 3100 int live_news_size = IterateLiveObjects( | |
| 3101 heap()->new_space(), &MarkCompactCollector::RelocateNewObject); | |
| 3102 | |
| 3103 USE(live_maps_size); | |
| 3104 USE(live_pointer_olds_size); | |
| 3105 USE(live_data_olds_size); | |
| 3106 USE(live_codes_size); | |
| 3107 USE(live_cells_size); | |
| 3108 USE(live_news_size); | |
| 3109 ASSERT(live_maps_size == live_map_objects_size_); | |
| 3110 ASSERT(live_data_olds_size == live_old_data_objects_size_); | |
| 3111 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); | |
| 3112 ASSERT(live_codes_size == live_code_objects_size_); | |
| 3113 ASSERT(live_cells_size == live_cell_objects_size_); | |
| 3114 ASSERT(live_news_size == live_young_objects_size_); | |
| 3115 | |
| 3116 // Flip the from and to spaces. | |
| 3117 heap()->new_space()->Flip(); | |
| 3118 | |
| 3119 heap()->new_space()->MCCommitRelocationInfo(); | |
| 3120 | |
| 3121 // Set age_mark to the bottom of the to space. | |
| 3122 Address mark = heap()->new_space()->bottom(); | |
| 3123 heap()->new_space()->set_age_mark(mark); | |
| 3124 | |
| 3125 PagedSpaces spaces; | |
| 3126 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) | |
| 3127 space->MCCommitRelocationInfo(); | |
| 3128 | |
| 3129 heap()->CheckNewSpaceExpansionCriteria(); | |
| 3130 heap()->IncrementYoungSurvivorsCounter(live_news_size); | |
| 3131 } | |
| 3132 | |
| 3133 | |
| 3134 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) { | |
| 3135 // Recover map pointer. | |
| 3136 MapWord encoding = obj->map_word(); | |
| 3137 Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); | |
| 3138 ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr))); | |
| 3139 | |
| 3140 // Get forwarding address before resetting map pointer | |
| 3141 Address new_addr = GetForwardingAddressInOldSpace(obj); | |
| 3142 | |
| 3143 // Reset map pointer. The meta map object may not be copied yet so | |
| 3144 // Map::cast does not yet work. | |
| 3145 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr))); | |
| 3146 | |
| 3147 Address old_addr = obj->address(); | |
| 3148 | |
| 3149 if (new_addr != old_addr) { | |
| 3150 // Move contents. | |
| 3151 heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr, | |
| 3152 old_addr, | |
| 3153 Map::kSize); | |
| 3154 } | |
| 3155 | |
| 3156 #ifdef DEBUG | |
| 3157 if (FLAG_gc_verbose) { | |
| 3158 PrintF("relocate %p -> %p\n", old_addr, new_addr); | |
| 3159 } | |
| 3160 #endif | |
| 3161 | |
| 3162 return Map::kSize; | |
| 3163 } | |
| 3164 | |
| 3165 | |
| 3166 static inline int RestoreMap(HeapObject* obj, | |
| 3167 PagedSpace* space, | |
| 3168 Address new_addr, | |
| 3169 Address map_addr) { | |
| 3170 // This must be a non-map object, and the function relies on the | |
| 3171 // assumption that the Map space is compacted before the other paged | |
| 3172 // spaces (see RelocateObjects). | |
| 3173 | |
| 3174 // Reset map pointer. | |
| 3175 obj->set_map(Map::cast(HeapObject::FromAddress(map_addr))); | |
| 3176 | |
| 3177 int obj_size = obj->Size(); | |
| 3178 ASSERT_OBJECT_SIZE(obj_size); | |
| 3179 | |
| 3180 ASSERT(space->MCSpaceOffsetForAddress(new_addr) <= | |
| 3181 space->MCSpaceOffsetForAddress(obj->address())); | |
| 3182 | |
| 3183 #ifdef DEBUG | |
| 3184 if (FLAG_gc_verbose) { | |
| 3185 PrintF("relocate %p -> %p\n", obj->address(), new_addr); | |
| 3186 } | |
| 3187 #endif | |
| 3188 | |
| 3189 return obj_size; | |
| 3190 } | |
| 3191 | |
| 3192 | |
| 3193 int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj, | |
| 3194 PagedSpace* space) { | |
| 3195 // Recover map pointer. | |
| 3196 MapWord encoding = obj->map_word(); | |
| 3197 Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); | |
| 3198 ASSERT(heap()->map_space()->Contains(map_addr)); | |
| 3199 | |
| 3200 // Get forwarding address before resetting map pointer. | |
| 3201 Address new_addr = GetForwardingAddressInOldSpace(obj); | |
| 3202 | |
| 3203 // Reset the map pointer. | |
| 3204 int obj_size = RestoreMap(obj, space, new_addr, map_addr); | |
| 3205 | |
| 3206 Address old_addr = obj->address(); | |
| 3207 | |
| 3208 if (new_addr != old_addr) { | |
| 3209 // Move contents. | |
| 3210 if (space == heap()->old_data_space()) { | |
| 3211 heap()->MoveBlock(new_addr, old_addr, obj_size); | |
| 3212 } else { | |
| 3213 heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr, | |
| 3214 old_addr, | |
| 3215 obj_size); | |
| 3216 } | |
| 3217 } | |
| 3218 | |
| 3219 ASSERT(!HeapObject::FromAddress(new_addr)->IsCode()); | |
| 3220 | |
| 3221 HeapObject* copied_to = HeapObject::FromAddress(new_addr); | |
| 3222 if (copied_to->IsSharedFunctionInfo()) { | |
| 3223 PROFILE(heap()->isolate(), | |
| 3224 SharedFunctionInfoMoveEvent(old_addr, new_addr)); | |
| 3225 } | |
| 3226 HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr)); | |
| 3227 | |
| 3228 return obj_size; | |
| 3229 } | |
| 3230 | |
| 3231 | |
| 3232 int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) { | |
| 3233 return RelocateOldNonCodeObject(obj, heap()->old_pointer_space()); | |
| 3234 } | |
| 3235 | |
| 3236 | |
| 3237 int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) { | |
| 3238 return RelocateOldNonCodeObject(obj, heap()->old_data_space()); | |
| 3239 } | |
| 3240 | |
| 3241 | |
| 3242 int MarkCompactCollector::RelocateCellObject(HeapObject* obj) { | |
| 3243 return RelocateOldNonCodeObject(obj, heap()->cell_space()); | |
| 3244 } | |
| 3245 | |
| 3246 | |
| 3247 int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) { | |
| 3248 // Recover map pointer. | |
| 3249 MapWord encoding = obj->map_word(); | |
| 3250 Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); | |
| 3251 ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr))); | |
| 3252 | |
| 3253 // Get forwarding address before resetting map pointer | |
| 3254 Address new_addr = GetForwardingAddressInOldSpace(obj); | |
| 3255 | |
| 3256 // Reset the map pointer. | |
| 3257 int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr); | |
| 3258 | |
| 3259 Address old_addr = obj->address(); | |
| 3260 | |
| 3261 if (new_addr != old_addr) { | |
| 3262 // Move contents. | |
| 3263 heap()->MoveBlock(new_addr, old_addr, obj_size); | |
| 3264 } | |
| 3265 | |
| 3266 HeapObject* copied_to = HeapObject::FromAddress(new_addr); | |
| 3267 if (copied_to->IsCode()) { | |
| 3268 // May also update inline cache target. | |
| 3269 Code::cast(copied_to)->Relocate(new_addr - old_addr); | |
| 3270 // Notify the logger that compiled code has moved. | |
| 3271 PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr)); | |
| 3272 } | |
| 3273 HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr)); | |
| 3274 | |
| 3275 return obj_size; | |
| 3276 } | |
| 3277 | |
| 3278 | |
| 3279 int MarkCompactCollector::RelocateNewObject(HeapObject* obj) { | |
| 3280 int obj_size = obj->Size(); | |
| 3281 | |
| 3282 // Get forwarding address | |
| 3283 Address old_addr = obj->address(); | |
| 3284 int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr); | |
| 3285 | |
| 3286 Address new_addr = | |
| 3287 Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset); | |
| 3288 | |
| 3289 #ifdef DEBUG | |
| 3290 if (heap()->new_space()->FromSpaceContains(new_addr)) { | |
| 3291 ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <= | |
| 3292 heap()->new_space()->ToSpaceOffsetForAddress(old_addr)); | |
| 3293 } else { | |
| 3294 ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() || | |
| 3295 heap()->TargetSpace(obj) == heap()->old_data_space()); | |
| 3296 } | |
| 3297 #endif | |
| 3298 | |
| 3299 // New and old addresses cannot overlap. | |
| 3300 if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) { | |
| 3301 heap()->CopyBlock(new_addr, old_addr, obj_size); | |
| 3302 } else { | |
| 3303 heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr, | |
| 3304 old_addr, | |
| 3305 obj_size); | |
| 3306 } | |
| 3307 | |
| 3308 #ifdef DEBUG | |
| 3309 if (FLAG_gc_verbose) { | |
| 3310 PrintF("relocate %p -> %p\n", old_addr, new_addr); | |
| 3311 } | |
| 3312 #endif | |
| 3313 | |
| 3314 HeapObject* copied_to = HeapObject::FromAddress(new_addr); | |
| 3315 if (copied_to->IsSharedFunctionInfo()) { | |
| 3316 PROFILE(heap()->isolate(), | |
| 3317 SharedFunctionInfoMoveEvent(old_addr, new_addr)); | |
| 3318 } | |
| 3319 HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr)); | |
| 3320 | |
| 3321 return obj_size; | |
| 3322 } | |
| 3323 | |
| 3324 | |
| 3325 void MarkCompactCollector::EnableCodeFlushing(bool enable) { | 3479 void MarkCompactCollector::EnableCodeFlushing(bool enable) { |
| 3326 if (enable) { | 3480 if (enable) { |
| 3327 if (code_flusher_ != NULL) return; | 3481 if (code_flusher_ != NULL) return; |
| 3328 code_flusher_ = new CodeFlusher(heap()->isolate()); | 3482 code_flusher_ = new CodeFlusher(heap()->isolate()); |
| 3329 } else { | 3483 } else { |
| 3330 if (code_flusher_ == NULL) return; | 3484 if (code_flusher_ == NULL) return; |
| 3331 delete code_flusher_; | 3485 delete code_flusher_; |
| 3332 code_flusher_ = NULL; | 3486 code_flusher_ = NULL; |
| 3333 } | 3487 } |
| 3334 } | 3488 } |
| 3335 | 3489 |
| 3336 | 3490 |
| 3337 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, | 3491 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, |
| 3338 Isolate* isolate) { | 3492 Isolate* isolate) { |
| 3339 #ifdef ENABLE_GDB_JIT_INTERFACE | 3493 #ifdef ENABLE_GDB_JIT_INTERFACE |
| 3340 if (obj->IsCode()) { | 3494 if (obj->IsCode()) { |
| 3341 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); | 3495 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); |
| 3342 } | 3496 } |
| 3343 #endif | 3497 #endif |
| 3344 if (obj->IsCode()) { | 3498 if (obj->IsCode()) { |
| 3345 PROFILE(isolate, CodeDeleteEvent(obj->address())); | 3499 PROFILE(isolate, CodeDeleteEvent(obj->address())); |
| 3346 } | 3500 } |
| 3347 } | 3501 } |
| 3348 | 3502 |
| 3349 | 3503 |
| 3350 int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) { | |
| 3351 MapWord map_word = obj->map_word(); | |
| 3352 map_word.ClearMark(); | |
| 3353 return obj->SizeFromMap(map_word.ToMap()); | |
| 3354 } | |
| 3355 | |
| 3356 | |
| 3357 void MarkCompactCollector::Initialize() { | 3504 void MarkCompactCollector::Initialize() { |
| 3358 StaticPointersToNewGenUpdatingVisitor::Initialize(); | |
| 3359 StaticMarkingVisitor::Initialize(); | 3505 StaticMarkingVisitor::Initialize(); |
| 3360 } | 3506 } |
| 3361 | 3507 |
| 3362 | 3508 |
| 3509 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) { |
| 3510 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES; |
| 3511 } |
| 3512 |
| 3513 |
| 3514 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator, |
| 3515 SlotsBuffer** buffer_address, |
| 3516 SlotType type, |
| 3517 Address addr, |
| 3518 AdditionMode mode) { |
| 3519 SlotsBuffer* buffer = *buffer_address; |
| 3520 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) { |
| 3521 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { |
| 3522 allocator->DeallocateChain(buffer_address); |
| 3523 return false; |
| 3524 } |
| 3525 buffer = allocator->AllocateBuffer(buffer); |
| 3526 *buffer_address = buffer; |
| 3527 } |
| 3528 ASSERT(buffer->HasSpaceForTypedSlot()); |
| 3529 buffer->Add(reinterpret_cast<ObjectSlot>(type)); |
| 3530 buffer->Add(reinterpret_cast<ObjectSlot>(addr)); |
| 3531 return true; |
| 3532 } |
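
Note: AddTo grows a chain of fixed-size buffers and records a typed slot as two adjacent cells, a small tag followed by an address; plain slots are full pointers and therefore never collide with a tag value. A compact sketch of that buffer shape, with sizes and names that are assumptions for illustration only:

// Hedged sketch of a chained slots buffer with typed (tag, address) pairs.
#include <cstddef>
#include <cstdint>

namespace sketch {

const int kNumberOfSlotTypes = 8;  // assumed
const int kSlotsPerBuffer = 1024;  // assumed

struct SlotsChunk {
  explicit SlotsChunk(SlotsChunk* next) : next_(next), count_(0) {}

  bool HasSpaceForTypedSlot() const { return count_ + 2 <= kSlotsPerBuffer; }

  void Add(uintptr_t value) { slots_[count_++] = value; }

  // A cell is a tag iff its numeric value is below the tag range.
  static bool IsTypedTag(uintptr_t value) {
    return value < static_cast<uintptr_t>(kNumberOfSlotTypes);
  }

  SlotsChunk* next_;
  int count_;
  uintptr_t slots_[kSlotsPerBuffer];
};

// Append a typed (tag, address) pair, growing the chain when needed.
inline void AddTypedSlot(SlotsChunk** head, int type, uintptr_t addr) {
  if (*head == NULL || !(*head)->HasSpaceForTypedSlot()) {
    *head = new SlotsChunk(*head);
  }
  (*head)->Add(static_cast<uintptr_t>(type));
  (*head)->Add(addr);
}

}  // namespace sketch

UpdateSlots() later in this file walks the same layout: a cell whose value is below NUMBER_OF_SLOT_TYPES is treated as a tag, and the following cell is consumed as the address.
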
| 3533 |
| 3534 |
| 3535 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { |
| 3536 if (RelocInfo::IsCodeTarget(rmode)) { |
| 3537 return SlotsBuffer::CODE_TARGET_SLOT; |
| 3538 } else if (RelocInfo::IsDebugBreakSlot(rmode)) { |
| 3539 return SlotsBuffer::DEBUG_TARGET_SLOT; |
| 3540 } else if (RelocInfo::IsJSReturn(rmode)) { |
| 3541 return SlotsBuffer::JS_RETURN_SLOT; |
| 3542 } |
| 3543 UNREACHABLE(); |
| 3544 return SlotsBuffer::NUMBER_OF_SLOT_TYPES; |
| 3545 } |
| 3546 |
| 3547 |
| 3548 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Code* target) { |
| 3549 Page* target_page = Page::FromAddress( |
| 3550 reinterpret_cast<Address>(target)); |
| 3551 if (target_page->IsEvacuationCandidate() && |
| 3552 (rinfo->host() == NULL || |
| 3553 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) { |
| 3554 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 3555 target_page->slots_buffer_address(), |
| 3556 SlotTypeForRMode(rinfo->rmode()), |
| 3557 rinfo->pc(), |
| 3558 SlotsBuffer::FAIL_ON_OVERFLOW)) { |
| 3559 EvictEvacuationCandidate(target_page); |
| 3560 } |
| 3561 } |
| 3562 } |
| 3563 |
| 3564 |
| 3565 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) { |
| 3566 Page* target_page = Page::FromAddress( |
| 3567 reinterpret_cast<Address>(target)); |
| 3568 if (target_page->IsEvacuationCandidate() && |
| 3569 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) { |
| 3570 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 3571 target_page->slots_buffer_address(), |
| 3572 SlotsBuffer::CODE_ENTRY_SLOT, |
| 3573 slot, |
| 3574 SlotsBuffer::FAIL_ON_OVERFLOW)) { |
| 3575 EvictEvacuationCandidate(target_page); |
| 3576 } |
| 3577 } |
| 3578 } |
| 3579 |
| 3580 |
| 3581 static inline SlotsBuffer::SlotType DecodeSlotType( |
| 3582 SlotsBuffer::ObjectSlot slot) { |
| 3583 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot)); |
| 3584 } |
| 3585 |
| 3586 |
| 3587 void SlotsBuffer::UpdateSlots(Heap* heap) { |
| 3588 PointersUpdatingVisitor v(heap); |
| 3589 |
| 3590 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { |
| 3591 ObjectSlot slot = slots_[slot_idx]; |
| 3592 if (!IsTypedSlot(slot)) { |
| 3593 UpdateSlot(slot); |
| 3594 } else { |
| 3595 ++slot_idx; |
| 3596 ASSERT(slot_idx < idx_); |
| 3597 UpdateSlot(&v, |
| 3598 DecodeSlotType(slot), |
| 3599 reinterpret_cast<Address>(slots_[slot_idx])); |
| 3600 } |
| 3601 } |
| 3602 } |
| 3603 |
| 3604 |
| 3605 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { |
| 3606 return new SlotsBuffer(next_buffer); |
| 3607 } |
| 3608 |
| 3609 |
| 3610 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { |
| 3611 delete buffer; |
| 3612 } |
| 3613 |
| 3614 |
| 3615 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { |
| 3616 SlotsBuffer* buffer = *buffer_address; |
| 3617 while (buffer != NULL) { |
| 3618 SlotsBuffer* next_buffer = buffer->next(); |
| 3619 DeallocateBuffer(buffer); |
| 3620 buffer = next_buffer; |
| 3621 } |
| 3622 *buffer_address = NULL; |
| 3623 } |
| 3624 |
| 3625 |
| 3363 } } // namespace v8::internal | 3626 } } // namespace v8::internal |